metadata (dict) | text (string, lengths 60–3.49M)
---|---
{
"source": "jondurrant/twinThing",
"score": 2
} |
#### File: twinThing/py/topic.py
```python
class Topic:
    # Handle a topic message.
    # @param topicName - string name of the topic
    # @param data      - string data
    # @param twin      - twin interface to use for any response or for getting to state
    #
    def handle(self, topicName: str, data: object, twin):
        return
```
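A minimal sketch of how a concrete handler might subclass `Topic` (illustrative only: the topic name, payload handling, and the `outputJson` call on the twin are assumptions, not code from the repo):

```python
# Hypothetical handler built on the Topic interface above.
class EchoTopic(Topic):
    def handle(self, topicName: str, data: object, twin):
        # Log the payload and echo it back through the twin; outputJson is
        # assumed to be available on the twin interface (TwinFake below defines one).
        print("Received %s on topic %s" % (data, topicName))
        twin.outputJson(str(data))
```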
#### File: twinThing/py/twinFake.py
```python
from twin import Twin
import json
from serial import Serial
import logging
#===============================================================================
# Fake Twin class for testing: logs output instead of talking over a serial port
#===============================================================================
class TwinFake(Twin):
#===========================================================================
# Constructor
#===========================================================================
def __init__(self):
super(TwinFake, self).__init__()
self.logging = logging.getLogger(__name__)
#===========================================================================
# Write string to comms channel and therefore twin
#===========================================================================
    def outputJson(self, s: str):
        self.logging.debug("outputJson(%s)" % s)
return
#===========================================================================
# read and process a line from the Twin
#===========================================================================
def readLine(self):
return
``` |
{
"source": "jondwaite/3tapp",
"score": 2
} |
#### File: app/status/statusapi.py
```python
import os
import flask
from flask_cors import CORS
import subprocess
import socket
import time
from ping3 import ping
from dotenv import load_dotenv
# pip3 install list:
# - Flask
# - Flask-Cors
# - ping3
# - python-dotenv
# Class to check the status of a systemd service unit:
class ServiceMonitor(object):
def __init__(self, service):
self.service = service
def is_active(self):
cmd = '/bin/systemctl status %s.service' % self.service
proc = subprocess.Popen(cmd, shell=True,stdout=subprocess.PIPE,encoding='utf8')
stdout_list = proc.communicate()[0].split('\n')
for line in stdout_list:
if 'Active:' in line:
if '(running)' in line:
return True
return False
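# Illustrative usage (hypothetical unit name), assuming systemd is present:
#   ServiceMonitor('nginx').is_active()  # True only if "Active: ... (running)" is reported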
# Returns True if the given host can be connected on the given port
def checktcpconnection(host, port):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(1)
    result = sock.connect_ex((host, port))
    sock.close()
    return result == 0
# Returns uptime of the system formatted nicely:
def uptime():
status,result = subprocess.getstatusoutput("uptime -p")
return result
# Ping test - NOTE: only waits for 100ms (1/10th second) for response - may not work in all networks
def pingcheck(host):
delay = ping(host, timeout=0.1, unit='ms')
if delay:
if (delay < 1):
return "<1 ms"
elif (delay < 1000):
return str(int(delay)) + ' ms'
else:
return str(int(delay/1000)) + ' s'
else:
return False
# Test if hostname resolves:
def hostname_resolves(hostname):
try:
socket.gethostbyname(hostname)
return True
except socket.error:
return False
def reset_dns_cache():
cmd = '/usr/bin/systemd-resolve --flush-caches'
proc = subprocess.Popen(cmd, shell=True,stdout=subprocess.PIPE,encoding='utf8')
port = int(os.environ['PORT'])
app = flask.Flask(__name__)
app.config["DEBUG"] = False
CORS(app)
# Use .env file to get hostnames for each component:
load_dotenv()
db_host = os.getenv('DB_HOSTNAME')
app_host = os.getenv('APP_HOSTNAME')
web_host = os.getenv('WEB_HOSTNAME')
# Make sure all our hosts are available in DNS before proceeding:
while not (hostname_resolves(db_host)):
time.sleep(5)
reset_dns_cache()
while not (hostname_resolves(app_host)):
time.sleep(5)
reset_dns_cache()
while not (hostname_resolves(web_host)):
time.sleep(5)
reset_dns_cache()
@app.route('/', methods=['GET'])
def home():
respvalues = {}
respvalues["role"] = 'app'
respvalues["uptime"] = uptime()
respvalues["3tapp_app_running"] = True if ServiceMonitor('3tapp-app').is_active() else False
respvalues["app_3002_open"] = checktcpconnection(app_host,3002)
respvalues["db_3306_open"] = checktcpconnection(db_host,3306)
respvalues["web_80_open"] = checktcpconnection(web_host,80)
respvalues["web_3000_open"] = checktcpconnection(web_host,3000)
db_ping = pingcheck(db_host)
respvalues["db_ping"] = db_ping if db_ping else False
app_ping = pingcheck(app_host)
respvalues["app_ping"] = app_ping if app_ping else False
web_ping = pingcheck(web_host)
respvalues["web_ping"] = web_ping if web_ping else False
response = flask.make_response(
flask.jsonify(
{"status": respvalues}
),200
)
response.headers["Content-Type"] = "application/json"
return response
app.run(host='0.0.0.0', port=port)
``` |
{
"source": "jondwoo/sermon-update",
"score": 3
} |
#### File: jondwoo/sermon-update/page_generator.py
```python
import os
import database
import config
import datetime
from pprint import pprint
pageTemplatePath = "/templates/page-template.html"
sermonTemplatePath = "/templates/sermon-template.html"
sermonPagePath = "/sermons.html"
def get_file(filename):
if os.path.isfile(filename):
with open(filename) as f:
return f.read()
def update_page(content):
filename = os.getcwd() + sermonPagePath
if os.path.isfile(filename):
with open(filename, "w") as writer:
for html in content:
writer.write(html)
print("done")
def is_generated(content):
if isinstance(content, str):
return True
else:
return False
def generate_page():
sermonBody = []
count = 0
pageTemplate = get_file(os.getcwd() + pageTemplatePath)
sermonTemplate = get_file(os.getcwd() + sermonTemplatePath)
# retrieve sermons from database
print(f"Retrieving last {config.limit_val} complete sermons...")
recent_sermons = database.get_sermon_list(config.limit_val)
sermonBody.append('<div class="row">')
# replace HTML {{FIELD}} for every sermon
for sermon in recent_sermons["data"]:
formatted_date = (
datetime.datetime.strptime(sermon["date"], "%Y-%m-%d")
.date()
.strftime("%B %d, %Y")
)
try:
# at every config.col value, make a new row
if count != config.col:
sermonBody.append(
sermonTemplate.replace("{{YOUTUBEID}}", sermon["youtube_id"])
.replace("{{DATE}}", formatted_date)
.replace("{{TITLE}}", sermon["sermon_title"])
.replace("{{SPEAKER}}", sermon["speaker"])
.replace("{{SCRIPTURE}}", sermon["scripture"])
)
count += 1 # keep track of each card
else:
sermonBody.append("</div>")
sermonBody.append('<div class="row">')
sermonBody.append(
sermonTemplate.replace("{{YOUTUBEID}}", sermon["youtube_id"])
.replace("{{DATE}}", formatted_date)
.replace("{{TITLE}}", sermon["sermon_title"])
.replace("{{SPEAKER}}", sermon["speaker"])
.replace("{{SCRIPTURE}}", sermon["scripture"])
)
count = 1 # reset row
except TypeError:
print(f"Sermon for {sermon['sermon_title']} has type NULL in a field")
# combine entire HTML element to one HTML element at index 0
mergedSermonBody = ["".join(sermonBody[0:])]
# generate and return the final page
return pageTemplate.replace("{{SERMONS}}", mergedSermonBody[0])
```
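As a side note, the `{{FIELD}}` substitution above is plain string replacement. A minimal sketch on a toy template (the real markup lives in `templates/sermon-template.html`, so this is illustrative only):

```python
# Toy stand-in for sermon-template.html to show the chained .replace() calls.
toy_template = '<div class="card">{{TITLE}} - {{SPEAKER}} ({{DATE}})</div>'
html = (
    toy_template.replace("{{TITLE}}", "Example Sermon")
    .replace("{{SPEAKER}}", "Jane Doe")
    .replace("{{DATE}}", "January 01, 2023")
)
print(html)  # <div class="card">Example Sermon - Jane Doe (January 01, 2023)</div>
```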
#### File: jondwoo/sermon-update/PCO.py
```python
import requests
import tokens
import json
def get_plan_list():
url = "https://api.planningcenteronline.com/services/v2/service_types/764160/plans?offset=105/"
return _call_pco(url)
def get_plan_details(id):
url = f"https://api.planningcenteronline.com/services/v2/service_types/764160/plans/{id}/"
return _call_pco(url)
def get_plan_items(id):
url = f"https://api.planningcenteronline.com/services/v2/service_types/764160/plans/{id}/items"
return _call_pco(url)
def get_plan_team_members(id):
url = f"https://api.planningcenteronline.com/services/v2/service_types/764160/plans/{id}/team_members"
return _call_pco(url)
def _call_pco(url):
r = requests.get(url, auth=(tokens.APP_ID, tokens.SECRET))
if r.status_code == 200:
return json.loads(r.text)
return None
``` |
{
"source": "jondye/blogger-email-sender",
"score": 3
} |
#### File: jondye/blogger-email-sender/gmail.py
```python
import base64
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class Gmail(object):
def __init__(self, gmail_service):
self.service = gmail_service
def send(self, mail_tos, subject, text, html):
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['To'] = ', '.join(mail_tos)
part1 = MIMEText(text, 'plain')
msg.attach(part1)
part2 = MIMEText(html, 'html', 'utf-8')
msg.attach(part2)
body = {'raw': base64.urlsafe_b64encode(bytes(msg)).decode()}
self.service.users().messages().send(userId='me', body=body).execute()
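# Illustrative usage, assuming an authorized Gmail API client built elsewhere,
# e.g. service = googleapiclient.discovery.build('gmail', 'v1', credentials=creds):
#   Gmail(service).send(["someone@example.com"], "Subject", "plain text", "<p>html</p>")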
``` |
{
"source": "jondye/buildstatus",
"score": 3
} |
#### File: buildstatus/buildstatus/buildstatus.py
```python
import logging
import os
import time
from urllib.parse import urlparse
from requests.exceptions import ConnectionError
from jenkins import Jenkins
from gpiozero import StatusBoard
def main():
debug = os.environ.get('DEBUG')
logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)
delay = int(os.environ.get('POLL_PERIOD', '60'))
server = os.environ['JENKINS_URI']
status = StatusBoard(pwm=True)
job_names = [os.environ.get('JENKINS_JOB_%d' % (i+1)) for i in range(5)]
job_color = [None for _ in job_names]
username = os.environ.get('JENKINS_USERNAME', None)
password = os.environ.get('JENKINS_PASSWORD', None)
while True:
try:
poll_server(
Jenkins(server, username=username, password=password),
status,
job_names,
job_color)
time.sleep(delay)
except ConnectionError:
logging.exception("Unable to connect")
display_warning(delay, status)
job_color = [None for _ in job_names]
def poll_server(j, status, job_names, job_color):
for i, name in enumerate(job_names):
lights = status[i].lights
if not name:
lights.off()
continue
color = get_job_color(j, name)
logging.info(
"Job %s was %s and is now %s",
name, job_color[i], color)
if color == job_color[i]:
continue
set_status(color, lights)
job_color[i] = color
def get_job_color(j, name):
try:
job = j.job(name)
color = job.info['color']
logging.debug("Job %s is %s", name, color)
return color
except ConnectionError:
logging.error("Can\'t retrieve info on job %s", name)
return None
def set_status(color, lights):
if not color:
lights.off()
elif color.startswith('blue'):
lights.red.off()
if color.endswith('_anime'):
lights.green.pulse()
else:
lights.green.on()
elif color.startswith('red'):
lights.green.off()
if color.endswith('_anime'):
lights.red.pulse()
else:
lights.red.on()
else:
lights.on()
def display_warning(delay, status):
start = time.time()
while time.time() - start <= delay:
for s in status:
status.off()
s.lights.red.on()
time.sleep(1)
if __name__ == '__main__':
main()
``` |
{
"source": "jondye/rfid-squeezebox-py",
"score": 3
} |
#### File: jondye/rfid-squeezebox-py/db.py
```python
import ujson
def load(key):
try:
with open("%s.json" % key, 'r') as f:
return ujson.load(f)
except OSError:
return None
def save(key, data):
with open("%s.json" % key, 'w') as f:
f.write(ujson.dumps(data))
``` |
{
"source": "jondy/mkepub",
"score": 3
} |
#### File: mkepub/readers/__init__.py
```python
import os
from importlib import import_module
COVER_SUFFIX = '-封面.jpg'
_readers = []
def find_reader(filename):
ext = os.path.splitext(filename)[1].lower()
for r in _readers:
if r.is_support(ext):
return r
def search_readers(path=None):
if path is None:
path = os.path.dirname(__file__)
for filename in os.listdir(path):
if filename.startswith('reader_'):
m = import_module('readers.%s' % os.path.splitext(filename)[0])
if hasattr(m, 'register_reader'):
_readers.append(m.register_reader())
search_readers()
```
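The registry above auto-discovers any sibling module named `reader_*.py` that exposes `register_reader()`. A minimal plugin might look like the following (illustrative only; `reader_dummy.py` and `DummyReader` are hypothetical names, and only `is_support()` is required by `find_reader()` above):

```python
# readers/reader_dummy.py -- hypothetical example plugin, not part of mkepub.
class DummyReader:
    def is_support(self, ext):
        # find_reader() calls this with the lower-cased file extension.
        return ext in ('.dummy',)

def register_reader():
    # search_readers() imports this module and registers the returned instance.
    return DummyReader()
```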
#### File: mkepub/readers/reader_text.py
```python
import chardet
import os
import re
from ebooklib import epub
from . import COVER_SUFFIX
PAT_TITLE = r'^(#+)\s*(.*)\s*$'
PAT_EMPTY = r'<p>\s*</p>'
PAT_COMMENT = '!#'
PAT_ALIGN_RIGHT = '#:'
PAT_INLINE_IMAGE = r'!\[(.+)\]\((.+)\)'
TEMPLATE_PARA = '<p>{0}</p>'
TEMPLATE_INLINE_IMAGE = '<div class="picture"><img src="../Images/{0}" alt="{1}"/><p>{1}</p></div>'
TEMPLATE_RIGHT_PARA = '<p class="text-right">{0}</p>'
MAX_META_ROW = 100
PAT_METADATAS = {
'title': ('书名:', '书名:'),
'author': ('作者:', '作者:'),
'publisher': ('出版者:', '出版者:'),
'date': ('出版时间:', '出版时间:'),
'ISBN': ('ISBN:', 'ISBN:'),
}
def build_toc(sections):
if not sections:
return None
def make_node(t):
return epub.Section(t) if isinstance(t, str) else t, []
level, page = sections[0]
node = make_node(page)
stack = [node]
toc = [node]
def reform_node():
if not stack[0][1]:
parent = stack[1][1] if len(stack) > 1 else toc
temp = parent.pop()
parent.append(temp[0])
ref = level
for level, page in sections[1:]:
node = make_node(page)
n = level - ref
if n >= len(stack):
if not isinstance(page, str):
for parent in stack:
if not isinstance(parent[0], epub.Section):
break
if parent[0].href == '':
parent[0].href = page.get_name()
stack[0][1].append(node)
stack.insert(0, node)
else:
reform_node()
if n == 0:
stack = [node]
toc.append(node)
else:
stack[:n] = []
stack[0][1].append(node)
reform_node()
return toc
class TextReader:
def __init__(self):
self._filename = None
self._toc = None
self._encoding = None
self._pat_titles = [re.compile(PAT_TITLE)]
self._pat_empty = re.compile(PAT_EMPTY)
self._pat_inline_image = re.compile(PAT_INLINE_IMAGE)
def is_support(self, ext):
return ext in ('.txt',)
def _iter_lines(self):
if self._filename is None:
return
with open(self._filename, encoding=self._encoding,
errors='ignore') as f:
yield from f
def get_cover(self):
cover = os.path.join(self._filename[:-4] + COVER_SUFFIX)
return cover if os.path.exists(cover) else None
def open(self, filename):
self._filename = filename
self._images = []
with open(self._filename, 'rb') as f:
buf = f.read()
charinfo = chardet.detect(buf)
self._encoding = charinfo['encoding']
def close(self):
self._filename = None
self._toc = None
def get_metadata(self):
row = 0
meta = {}
for line in self._iter_lines():
for k, pats in PAT_METADATAS.items():
if meta.get(k):
continue
for s in pats:
if line.startswith(s):
meta[k] = line[len(s):].strip()
break
row += 1
if row > MAX_META_ROW:
break
return meta
def get_toc(self):
return build_toc(self._toc)
def images(self):
yield from self._images
def stylesheets(self):
return []
def contents(self):
self._pindex = 0
self._row = 0
self._toc = []
paras = None
titles = []
def write_page():
self._pindex += 1
file_name = 'Text/chapter%02d.xhtml' % self._pindex
level, title = self._toc.pop()
page = epub.EpubHtml(title=title, file_name=file_name)
page.set_content('<div>%s</div>' % ''.join(paras))
self._toc.append((level, page))
return page
def not_empty(paras):
if paras:
for p in paras:
if not self._pat_empty.match(p):
return True
for line in self._iter_lines():
self._row += 1
if line.startswith(PAT_COMMENT):
continue
header = self._is_title(line)
if header:
if not_empty(paras):
yield write_page()
paras = None
self._toc.append(header)
elif self._toc:
if paras is None:
paras = []
titles = []
level, title = self._toc[-1]
for n, t in reversed(self._toc):
if n > level or (not isinstance(t, str)):
break
paras.insert(0, '<h{0}>{1}</h{0}>'.format(n, t))
titles.append(t)
level = n
if line.strip() and line.strip() not in titles:
if line.startswith(PAT_ALIGN_RIGHT):
n = len(PAT_ALIGN_RIGHT)
paras.append(TEMPLATE_RIGHT_PARA.format(line[n:]))
else:
m = self._pat_inline_image.match(line.strip())
if m:
title, url = m.group(1, 2)
fname = os.path.join(os.path.dirname(self._filename), url)
                        media_type = 'image/' + url.rsplit('.', 1)[-1]
with open(fname, 'rb') as f:
img = epub.EpubItem(
file_name='Images/' + url,
media_type=media_type,
content=f.read())
self._images.append(img)
paras.append(TEMPLATE_INLINE_IMAGE.format(url, title))
else:
paras.append(TEMPLATE_PARA.format(line))
if not_empty(paras):
yield write_page()
def _is_title(self, line):
if line.startswith(PAT_ALIGN_RIGHT):
return
for pat in self._pat_titles:
m = pat.match(line)
if m:
return len(m.group(1)), m.group(2)
def register_reader():
return TextReader()
if __name__ == '__main__':
r = TextReader()
print(r._is_title('##Title2\n'))
# filename = '__test__.txt'
# with open(filename, 'w') as f:
# f.write('##Title\nPara1\n###Title3\nPara2')
# r.open('__test__.txt')
# print(list(r.contents()))
# r.close()
# os.remove(filename)
r.open('test2.txt')
list(r._iter_lines())
``` |
{
"source": "Jone1/chessPyQt",
"score": 3
} |
#### File: chessPyQt/pieces/pawn.py
```python
from pieces.piece import AbstractPiece
__author__ = 'Jone'
class Pawn(AbstractPiece):
src_white = "D:/workspace/chessQt/chessQt/gfx/pw.png"
src_black = "D:/workspace/chessQt/chessQt/gfx/pb.png"
def __init__(self, x, y, color):
super(Pawn, self).__init__(x, y, color)
```
#### File: chessPyQt/pieces/queen.py
```python
from pieces.piece import AbstractPiece
__author__ = 'Jone'
class Queen(AbstractPiece):
src_white = "D:/workspace/chessQt/chessQt/gfx/qw.png"
src_black = "D:/workspace/chessQt/chessQt/gfx/qb.png"
def __init__(self, x, y, color):
super(Queen, self).__init__(x, y, color)
def moveValidator(self, x, y):
return self.isEmptyTo(x, y)
``` |
{
"source": "JoneBakkevig/jkbmm",
"score": 2
} |
#### File: JoneBakkevig/jkbmm/generated_test.py
```python
import h5py
import wtte.weibull as weibull
import wtte.wtte as wtte
from wtte.wtte import WeightWatcher
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
from scipy.stats import binom
from datetime import datetime, timedelta
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM,GRU
from keras.layers import Lambda
from keras.optimizers import RMSprop,adam
from keras.callbacks import History
from keras.callbacks import ModelCheckpoint
# partial parsing function for getting y values
def fix_end_seq(y):
n = 0
for x in range(len(y)):
if y[x][0] == 0:
n = x + 1
break
n = n or len(y) + 1
# a = zip(range(1, len(y[:n])), [0.] * n)
a = [[i, 0.] for i in range(1, n)]
b = y[(n-1):]
return list(a) + b
# parsing function for getting y values
def get_y(x, sample_len):
#assert len(x) % sample_len == 0
x_rev = list(x)[:]
x_rev.reverse()
y = []
event_seen = False
tte = 0
for i in x_rev:
if i == 1:
tte += 1
y.append([tte, event_seen * 1.])
tte = -1
event_seen = True
else:
tte += 1
y.append((tte, event_seen * 1.))
n = len(x) - sample_len
sample = y[-sample_len:][:]
    if sample[0][0] != 0:
sample = fix_end_seq(sample)
sample.reverse()
y_samples = [sample]
for i in range(1, n+1):
sample = y[-(i + sample_len):-i][:]
        if sample[0][0] != 0:
sample = fix_end_seq(sample)
sample.reverse()
y_samples.append(sample)
return y_samples
# modifying generated test set
df_gen = pd.read_csv('../../data/generated_data.csv',header=0,skiprows=1,engine='python')
df_gen.set_index('date', inplace=True)
df_gen.index = pd.DatetimeIndex(df_gen.index)
df_gen = df_gen.reindex(index = pd.date_range('07-01-2010','02-11-2105'), fill_value=0)
df_gen = df_gen.amount.apply(lambda x: 1. if x>0 else 0.)
# len 34559 80% train = 27647, 20% test = 6912
test_gen = df_gen[0:6912]
test_gen = test_gen.reset_index()
test_gen = [value[1] for value in test_gen.values]
train_gen = df_gen.values
x_gen = train_gen[:27647]
x_gen = list(x_gen)
sample_len = 42
n = len(x_gen) - sample_len
xg_train = []
for i in range(0, n + 1):
sample = x_gen[i:(i + sample_len)]
xg_train.append([[k] for k in sample])
xg_train = np.array(xg_train)
yg_train = np.array(get_y(x_gen, sample_len))
xg_t, yg_t = test_gen[:3456], test_gen[:3456]
xg_t = list(xg_t)
n = len(xg_t) - sample_len
xg_test = []
for i in range(0, n + 1):
sample = xg_t[i:(i + sample_len)]
xg_test.append([[k] for k in sample])
xg_test = np.array(xg_test)
yg_test = np.array(get_y(xg_t,sample_len))
print('x:',xg_train.shape, xg_test.shape)
print('y:',yg_train.shape, yg_test.shape)
tte_mean_train = np.nanmean(yg_train[:,:,0])
init_alpha = -1.0/np.log(1.0-1.0/(tte_mean_train+1.0) )
mean_u = np.nanmean(yg_train[:,:,1])
init_alpha = init_alpha/mean_u
print('init_alpha: ',init_alpha,'mean uncensored: ',mean_u)
history = History()
weightwatcher = WeightWatcher()
n_features = 1
# Start building the model
model = Sequential()
model.add(GRU(1,input_shape=(xg_train.shape[1:]),activation='tanh',return_sequences=True))
model.add(Dense(2))
model.add(Lambda(wtte.output_lambda,
arguments={"init_alpha":init_alpha,
"max_beta_value":4.0}))
loss = wtte.loss(kind='discrete').loss_function
#model.load_weights('load_weight.hdf5')
model.compile(loss=loss, optimizer=adam(lr=.01))
# checkpoints
filepath = 'gen_cp/{epoch:02d}-{val_loss:.2f}.hdf5'
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
callbacks_list = [checkpoint]
model.summary()
model.fit(xg_train,yg_train,
epochs=60,
batch_size=xg_train.shape[0]//10,
verbose=1,
validation_data=(xg_test, yg_test),
# sample_weight = sample_weights # If varying length
callbacks=[checkpoint, history, weightwatcher])
# predict
predicted = model.predict(xg_test, batch_size=xg_train.shape[0]//10, verbose = 1)
# granular view
a_predicted = predicted[-42:]
# plot alpha and beta values
plt.imshow(a_predicted[:,:,0],interpolation="none",cmap='binary',aspect='auto')
plt.title('alpha')
plt.colorbar(orientation="horizontal")
plt.show()
plt.imshow(a_predicted[:,:,1],interpolation="none",cmap='binary',aspect='auto')
plt.title('beta')
plt.colorbar(orientation="horizontal")
plt.show()
drawstyle = 'steps-post'
print('numpy array of predictions:',predicted)
n_timesteps = 42
alpha = predicted[:,:,0]
beta = predicted[:,:,1]
print('alpha:',alpha[0])
print('beta:',beta[0])
a = alpha[0,-1]
b = beta[0,-1]
prob_on = []
prob_within = [0]
timesteps = []
for i in range(n_timesteps):
timesteps.append(i)
prob_on.append(weibull.pmf(i,a,b))
prob_within.append(weibull.cmf(i,a,b))
print('List of probabilties of occurring on index+1 day:',prob_on)
print('List of probabilities of occurring within index+1 days:',prob_within)
# plotting probability of event occurring within 4 steps; for visuals
timesteps = timesteps[0:4]
prob_within = prob_within[0:4]
plt.plot(timesteps, prob_within, color='grey')
plt.xticks(timesteps)
plt.xlabel('timesteps')
plt.ylabel('probability on occurring by timestep')
plt.show()
```
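For clarity, a small illustrative run of the target-building helpers above (this snippet assumes `get_y` from the block above is in scope; the events and sample length are made up, and the values are printed rather than asserted):

```python
# Build (time-to-event, censoring-indicator) training targets for a short
# binary event sequence, using the get_y helper defined above.
toy_events = [0., 1., 0., 0., 1., 0.]   # 1. marks a day on which an event occurred
toy_targets = get_y(toy_events, 3)      # sliding windows of length 3
for sample in toy_targets:
    print(sample)                       # each sample is a list of [tte, uncensored-flag] pairs
```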
#### File: jkbmm/HTM/main.py
```python
import importlib
import sys
import csv
import datetime
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf.prediction_metrics_manager import MetricsManager
import nupic_output
DESCRIPTION = (
"Starts a NuPIC model from the model params returned by the swarm\n"
"and pushes each line of input from the gym into the model. Results\n"
"are written to an output file (default) or plotted dynamically if\n"
"the --plot option is specified.\n"
"NOTE: You must run ./swarm.py before this, because model parameters\n"
"are required to run NuPIC.\n"
)
FILE_NAME = "csv_file"
DATA_DIR = "data/"
MODEL_PARAMS_DIR = "./model_0"
DATE_FORMAT = "%Y-%m-%d"
_METRIC_SPECS = (
MetricSpec(field='tte', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'rmse', 'window': 1000, 'steps': 1}),
MetricSpec(field='tte', metric='trivial',
inferenceElement='prediction',
params={'errorMetric': 'rmse', 'window': 1000, 'steps': 1}),
MetricSpec(field='tte', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'rmse', 'window': 1000, 'steps': 1}),
MetricSpec(field='tte', metric='trivial',
inferenceElement='prediction',
params={'errorMetric': 'rmse', 'window': 1000, 'steps': 1}),
)
def create_model(model_params):
model = ModelFactory.create(model_params)
model.enableInference({"predictedField": "tte"})
return model
def get_model_params_from_name(file_name):
import_name = "model_0.model_params"
print "Importing model params from %s" % import_name
try:
imported_model_params = importlib.import_module(import_name).MODEL_PARAMS
except ImportError:
raise Exception("No model params exist for '%s'. Run swarm first!"
% file_name)
return imported_model_params
def run_io_through_nupic(input_data, model, file_name, plot, print_results):
input_file = open(input_data, "rb")
csv_reader = csv.reader(input_file)
# skip header rows
csv_reader.next()
csv_reader.next()
csv_reader.next()
shifter = InferenceShifter()
if plot:
output = nupic_output.NuPICPlotOutput([file_name])
else:
output = nupic_output.NuPICFileOutput([file_name])
metrics_manager = MetricsManager(_METRIC_SPECS, model.getFieldInfo(),
model.getInferenceType())
counter = 0
timestamp = None
consumption = None
result = None
for row in csv_reader:
counter += 1
timestamp = datetime.datetime.strptime(row[1], DATE_FORMAT)
consumption = int(row[2])
amount = float(row[0])
result = model.run({
"amount": amount,
"date": timestamp,
"tte": consumption
})
result.metrics = metrics_manager.update(result)
if counter % 100 == 0 or counter % 384 == 0:
print "Read %i lines..." % counter
print ("After %i records, rmse=%f" % (counter,
result.metrics["multiStepBestPredictions:multiStep:"
"errorMetric='rmse':steps=1:window=1000:"
"field=tte"]))
if plot:
result = shifter.shift(result)
prediction = result.inferences["multiStepBestPredictions"][1]
output.write([timestamp], [consumption], [prediction])
if print_results:
print("date:", timestamp.strftime("%y-%m-%d"), "actual:", consumption, "predicted:", prediction)
if plot and counter % 20 == 0:
output.refresh_gui()
#if plot and counter % 1000 == 0:
# break
input_file.close()
output.close()
def run_model(file_name, plot=False, print_results=False):
print "Creating model from %s..." % file_name
model = create_model(get_model_params_from_name(file_name))
input_data = "%s/%s.csv" % (DATA_DIR, file_name.replace(" ", "_"))
run_io_through_nupic(input_data, model, file_name, plot, print_results)
if __name__ == "__main__":
print DESCRIPTION
plot = False
print_results = False
args = sys.argv[1:]
if "--plot" in args:
plot = True
if "--print" in args:
print_results = True
run_model(FILE_NAME, plot=plot, print_results=print_results)
``` |
{
"source": "jonEbird/artofmemory",
"score": 3
} |
#### File: artofmemory/tests/test_artofmemory.py
```python
import unittest
from artofmemory.major import NaiveMajorSystem
class TestArtOfMemory(unittest.TestCase):
"""
Test all the things in artofmemory
"""
def test_ts(self):
ret = NaiveMajorSystem().word_to_major("test")
self.assertEqual(ret, "101")
def test_word_with_ph(self):
ret = NaiveMajorSystem().word_to_major("phone")
self.assertEqual(ret, "92")
def test_word_with_ch(self):
ret = NaiveMajorSystem().word_to_major("chad")
self.assertEqual(ret, "71")
def test_word_with_ck(self):
ret = NaiveMajorSystem().word_to_major("jack")
self.assertEqual(ret, "677")
def test_word_with_sh(self):
ret = NaiveMajorSystem().word_to_major("shut")
self.assertEqual(ret, "01")
def test_word_with_th(self):
ret = NaiveMajorSystem().word_to_major("the")
self.assertEqual(ret, "1")
def test_word_with_double_letter(self):
ret = NaiveMajorSystem().word_to_major("basketball")
self.assertEqual(ret, "9071955")
def test_regex_builder_empty(self):
ms = NaiveMajorSystem()
ms.MAPPING = {}
ret = ms._regex_from_letter_mapping()
self.assertEqual(ret, "()")
def test_regex_builder_full(self):
ret = NaiveMajorSystem()._regex_from_letter_mapping()
self.assertEqual(ret, "(s|z|t|d|n|m|r|l|j|g|c|k|q|v|f|p|b)")
``` |
{
"source": "Jon-Eckberg/ffopy",
"score": 3
} |
#### File: Jon-Eckberg/ffopy/mirror.py
```python
import numpy as np
def mirror(diameter,
           rmswfe,
           N=256,
           pindex=2.5,
           pcutoff=4.,
           psd=None,
           circular=False,
           period=None,      # was "period = period": undefined name, replaced with None
           aperture=None,    # was "aperture = aperture": undefined name, replaced with None
           hole=0.0,
           renormalize=False,
           oversize=1.0,
           keep_defocus=False):
#Default keyword values
width = diameter.min() * oversize
dx = np.divide(diameter,
N - 1)
#CCK 2015-Feb-20# dx keyword finally implemented!
k = np.arange(N) #wavenumber scale, arbitrary units
period = np.divide(2.0 * width,
k) #period in same units as diameter
#NOTE:
#In keeping with psd2wfe, an N-element PSD leads to an NxN WFE.
#The following table may help to clarify the meaning of the
#frequency elements:
#
# k interpretation
# ----------------------------------
# 0 DC.
# 1 1/2 period per map width
# 2 1 period per map width
# ...
# N-1 Nyquist frequency
#Implement power law PSD
psd = np.power(k, -pindex)
psd[0] = 0
#first k with nonzero power is 1 per diameter
#Implement cutoff frequency
    ss = np.where(period < pcutoff)
    if ss[0].size > 0:
        psd[ss] = 0
    #Calculate and return WFE. psd2wfe is assumed to come from elsewhere in ffopy;
    #the circular and keep_defocus keywords are forwarded from this function's arguments.
    wfe = psd2wfe(psd,
                  rms=rmswfe,
                  circular=circular,
                  oversize=False,
                  keep_defocus=keep_defocus)
#Drill hole if desired
if hole > 0.0:
radius = np.divide(N * hole, 2.0 * width).round() #calculate radius in pixels
axis = np.linspace(-N/2, N/2, N)
########## fix dist
radii = np.sqrt(axis**2 + axis[:, np.newaxis]**2)
ss = np.where(radii < radius) #find where the hole lives
wfe[ss] = 1e9j #drill the hole
    #Make elliptical aperture if desired (diameter given as [Dx, Dy])
    if np.size(diameter) == 2:
        xy = np.divide(width * (np.arange(N) - N//2), N)  #x or y linear coordinate
        x = xy[np.newaxis, :]   #x coordinate array (varies along columns)
        y = xy[:, np.newaxis]   #y coordinate array (varies along rows)
        ss = np.where(4.0 * (np.square(x / diameter[0]) + np.square(y / diameter[1])) > 1.0)
        wfe[ss] = 1e9j
aperture = np.where(wfe.imag < 1.0 )
if renormalize:
wfe[aperture] *= np.divide(rmswfe, wfe[aperture].std())
return wfe
```
#### File: Jon-Eckberg/ffopy/powerspec2d.py
```python
import numpy as np
def powerspec2d(data,
                Nfreq=16,
                retain_dc=False,
                dx=None,        # output-style keywords: undefined-name defaults replaced with None
                dy=None,
                kx=None,
                ky=None,
                big_kx=None,
                big_ky=None,
                radians=None):
    Nx, Ny = data.shape
    Nfreq = max(int(round(Nfreq)), 4)  #Don't allow non-integer or smaller than 4.
    NsegX = int(np.ceil(np.divide(2.0 * Nx, Nfreq)) - 1)
    NsegY = int(np.ceil(np.divide(2.0 * Ny, Nfreq)) - 1)
    #How many segments of length Nfreq fit in the interval, if we overlap them like
    #2 courses of bricks?
    if NsegX < 2 or NsegY < 2:
        raise ValueError('Data array too short to compute power spectrum using specified segment size.')
    xsep = np.divide(Nx - Nfreq, NsegX - 1) * (1 - 1e-6)
    ysep = np.divide(Ny - Nfreq, NsegY - 1) * (1 - 1e-6)
    #The last factor is to ensure we can fit Nseg segments within the data array.
    #periodogram_fft (assumed to be provided elsewhere in ffopy) is expected to
    #return an Nfreq x Nfreq power spectrum for each segment.
    spectra = np.empty((NsegX, NsegY, Nfreq, Nfreq))
    for i in range(NsegX):
        x0 = int(round(i * xsep))  #lower x index of the segment
        for j in range(NsegY):
            y0 = int(round(j * ysep))  #lower y index of the segment
            segment = data[x0:x0 + Nfreq, y0:y0 + Nfreq]
            spectra[i, j] = periodogram_fft(segment,
                                            retain_dc=retain_dc)
    power = np.nanmean(spectra, axis=(0, 1))
    #average the segment spectra over both segment dimensions
k_magnitude = k_arr2d(Nfreq,
Nfreq,
dx =dx,
dy =dy,
radians =radians,
kx =kx,
ky =ky,
big_kx =big_kx,
big_ky =big_ky)
return power #, k_magnitude
```
#### File: Jon-Eckberg/ffopy/psf2mtf.py
```python
from scipy.ndimage import rotate
import numpy as np
def psf2mtf(psf,
            theta=0,
            lsf=None,       # output-style keywords: undefined-name defaults replaced with None
            dx=None,
            karr=None):
    Nx = psf.shape[0]
    psf_rot = rotate(psf, theta, reshape=False)  #keep the rotated array the same size
    lsf = psf_rot.sum(axis=1)  #linespread function (collapse along one axis of the 2-D PSF)
    mtf = np.abs(np.fft.fft(lsf))[:Nx // 2]  #leave out the redundant negative frequencies
    #k_arr is assumed to be provided elsewhere in ffopy
    karr = (k_arr(Nx,
                  dx=dx))[:Nx // 2]
    return np.divide(mtf, mtf[0])  #properly normalized, unity DC response one hopes!
```
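A quick self-contained check of the same pipeline on a synthetic PSF (illustrative only, independent of the `k_arr` helper assumed above):

```python
import numpy as np

# Gaussian PSF -> linespread function -> MTF, mirroring the psf2mtf steps above.
N = 64
x = np.arange(N) - N // 2
g = np.exp(-0.5 * (x / 3.0) ** 2)
psf = g[:, None] * g[None, :]            # separable 2-D Gaussian PSF
lsf = psf.sum(axis=1)                    # collapse to a linespread function
mtf = np.abs(np.fft.fft(lsf))[:N // 2]   # drop the redundant negative frequencies
mtf /= mtf[0]                            # unity DC response
print(mtf[:5])
```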
#### File: Jon-Eckberg/ffopy/spiders.py
```python
import numpy as np
from scipy.ndimage import rotate
def spiders(N,
            widths,
            angles):
    Nspiders = widths.size
    if angles.size != Nspiders:
        raise ValueError('Number of widths =/= number of angles.')
    #Initialize the mask.
    mask = np.zeros([N, N], dtype=complex)
    #Create each spider and add it into the mask.
    for i in range(Nspiders):
        spider = np.zeros([N, N], dtype=complex)
        baseline = int(round(np.divide(N - widths[i] - 0.0, 2)))
        spider[N // 2:N, baseline:int(round(baseline + widths[i]))] = 1e6j
        #Rotate the (purely imaginary) spider into place. Rotating the imaginary
        #part keeps this working on scipy versions without complex support, and
        #reshape=False keeps the rotated array the same size as the mask.
        mask += 1j * rotate(spider.imag, -angles[i], reshape=False)
    return mask
```
#### File: Jon-Eckberg/ffopy/wfe2slope.py
```python
import numpy as np
from scipy.ndimage import uniform_filter, convolve1d

def wfe2slope(wfe,
              integration_length,
              filter_length,
              dx = 1.0,
              lam = 1.0,
              surf = False):   # was "surf=surf": undefined name, replaced with False
    Nx, Ny = wfe.shape
    #CREATE DIFFERENTIATION OPERATORS (CONVOLUTION KERNELS)
    Npix = int(1 + 2 * np.floor(0.5 * (1.0 + integration_length / dx)))
    #Size of the convolution kernel we'll use for slopes.
    #This is the nearest odd integer to 1+integration_length/dx.
    integration_length_used = (Npix - 1) * dx
    #The integration length we are really using by forming
    #the convolution kernel with Npix pixels.
    #Finite-difference kernels: -1 at one end, +1 at the other, scaled by lam over
    #the integration length (reconstructed from the original IDL intent).
    d_dx = np.zeros(Npix)  #kernel for differentiation with respect to x.
    d_dx[0] = -1.0
    d_dx[Npix - 1] = 1.0
    d_dx *= np.divide(lam, integration_length_used)
    d_dy = d_dx.copy()     #kernel for differentiation wrt y (applied along the other axis).
    #CREATE PROPERLY PADDED, MASKED AND FILTERED WFE ARRAY
    Npad = Npix // 2 + 1  #Leave room for the kernels to hang off the edge.
    wfe_big_real = np.full((Nx + Npad, Ny + Npad), np.nan)
    #Real part. Pad with NaN
    wfe_big_real[:Nx, :Ny] = wfe.real
    #Note that wfe_big has to be convolved with wrap-around edges to take
    #advantage of padding on left & lower edges by wrapping.
    wfe_big_imaginary = np.full((Nx + Npad, Ny + Npad), 1e10)
    #This puts the padding outside the aperture, E field --> 0.
    wfe_big_imaginary[:Nx, :Ny] = wfe.imag
    ss = np.where(wfe_big_imaginary > 1.0)
    #Out-of-aperture indices, a somewhat arbitrary definition.
    #Where the imaginary part equals 0, the intensity is E^2 = 1.
    #Where the imaginary part equals 1, the intensity is E^2 = 3.5e-6.
    wfe_big_real[ss] = np.nan  #Put NaN in out-of_aperture locations
    #FILTER THE REAL PART
    Nfilter = int(round(np.divide(filter_length, dx)))
    #Approximate IDL's smooth(/NaN, /edge_wrap) with a NaN-aware (normalized) mean filter.
    finite = np.isfinite(wfe_big_real)
    filled = np.where(finite, wfe_big_real, 0.0)
    smoothed = uniform_filter(filled, size=Nfilter, mode='wrap')
    weight = uniform_filter(finite.astype(float), size=Nfilter, mode='wrap')
    with np.errstate(invalid='ignore', divide='ignore'):
        wfe_big_real = np.where(weight > 0, smoothed / weight, np.nan)
    #This will result in some cross-talk between horizontal and vertical gradients
    #around the edge of a non-square mirror. However, this is the sort of filtering
    #that appears to be ISO standard.
    wfe_big_real[Nx:, :] = np.nan  #Restore the NaN padding that was smeared over by smoothing.
    wfe_big_real[:, Ny:] = np.nan  #Restore the NaN padding that was smeared over by smoothing.
    #COMPUTE GRADIENT (x is the first array dimension, matching the IDL original;
    #mode='wrap' reproduces convol(/edge_wrap))
    slope_x = convolve1d(wfe_big_real, d_dx, axis=0, mode='wrap')
    slope_y = convolve1d(wfe_big_real, d_dy, axis=1, mode='wrap')
    if surf:
        slope_x /= 2.0
        slope_y /= 2.0
    #COMPUTE RMS values
    #?? It would be even better to use imaginary part of wfe to weight the means ??
    slope_x_rms = np.sqrt(np.nanmean(np.square(slope_x)))
    slope_y_rms = np.sqrt(np.nanmean(np.square(slope_y)))
    slope_rms = np.sqrt(np.square([slope_x_rms, slope_y_rms]).sum())
    #I am assuming that there are similar numbers of
    #finite results in slope_x and slope_y, even though the
    #domains may not perfectly overlap!
    return slope_rms
``` |
{
"source": "jon-edward/py-autoclicker",
"score": 3
} |
#### File: jon-edward/py-autoclicker/clicker_util.py
```python
from pynput.keyboard import Key, KeyCode
from pynput.mouse import Button
from pynput.mouse import Controller as MouseController
from pynput.keyboard import Listener as KeyboardListener
from pynput.keyboard import Controller as KeyboardController
from pynput.mouse import Listener as MouseListener
from threading import Thread, Event
import time
import json
from typing import List
import numpy as np
import random
import os
def from_file(file_path: str):
if os.path.exists(file_path):
return Config(**json.load(open(file_path, 'r')))
else:
return Config()
class Config:
def __init__(self, **kwargs):
"""Defines default behavior for AutoClickerThread.
:keyword wait_time: Base time to wait between clicks in seconds. (float)
:keyword deviation_time: Amount of time to randomly deviate between clicks in seconds. (float)
:keyword distribution_type: Kind of distribution to use for randomized deviation time. `0` for uniform, `1` for normal. (int)
:keyword toggle: Toggle clicking based on input. (bool)
:keyword input_mode: Use keyboard or mouse for input. `0` for keyboard, `1` for mouse. (int)
:keyword alt_modifier: Use an alt modifier for keyboard input. (bool)
:keyword key_combination: Characters to use for keyboard hotkey. (List[char])
:keyword special_mouse_press: Use special mouse key for input. (int)
:keyword output_type: Specifies whether mouse or keyboard for output.
:keyword output_sequence: Specifies what sequence to repeat on keyboard for input.
:keyword mouse_output: Specifies which button to push on mouse for input.
:keyword hold_time: Defines hold time for keyboard keys.
"""
self.wait_time: float = kwargs.get("wait_time", 0.)
self.deviation_time: float = kwargs.get("deviation_time", 0.0)
self.distribution_type: int = kwargs.get("distribution_type", 0)
self.toggle: bool = kwargs.get("toggle", False)
self.input_mode: int = kwargs.get("input_mode", 0)
self.alt_modifier: bool = kwargs.get("alt_modifier", False)
self.key_combination: List[chr] = kwargs.get("key_combination", [])
self.special_mouse_press: int = kwargs.get("special_mouse_press", 0)
self.output_type: int = kwargs.get("output_type", 0)
self.output_sequence: List[chr] = kwargs.get("output_sequence", [])
self.mouse_output: int = kwargs.get("mouse_output", 0)
self.hold_time: float = kwargs.get("hold_time", 0.0)
def to_file(self, file_path):
json.dump(self.__dict__, open(file_path, 'w'), indent=1)
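# Illustrative usage (hypothetical file name): persist a Config and load it back.
#   cfg = Config(wait_time=0.05, toggle=True, key_combination=["x"])
#   cfg.to_file("autoclicker.json")
#   cfg = from_file("autoclicker.json")   # returns a default Config() if the file is missing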
class AutoClickerThread(Thread):
def __init__(self, parent_ui=None, config=Config()):
super(AutoClickerThread, self).__init__()
self.ui = parent_ui
self.config = config
self.thread = None
self._stop_event = Event()
self.mouse_button = None
self.current_keys = set()
self.accepted_keys = set()
self.reload_config()
self.activated = False
self.mouse_controller = MouseController()
self.keyboard_controller = KeyboardController()
self.last_state = False
self.sequence_index = 0
self.sequence_length = len(self.config.output_sequence)
def stop(self):
self._stop_event.set()
def stopped(self):
return self._stop_event.is_set()
def run(self):
self.thread.start()
while not self.stopped():
if self.activated:
if self.config.output_type == 0:
# Mouse output
to_press = Button.right if self.config.mouse_output == 1 else Button.left
self.mouse_controller.press(to_press)
self.mouse_controller.release(to_press)
elif self.config.output_type == 1 and self.config.output_sequence:
# Keyboard output
current_key = self.config.output_sequence[self.sequence_index]
to_press = KeyCode.from_char(current_key)
self.keyboard_controller.press(to_press)
time.sleep(self.config.hold_time)
self.keyboard_controller.release(to_press)
self.sequence_index = (self.sequence_index + 1) % self.sequence_length
if self.config.distribution_type == 0:
time.sleep(self.config.wait_time + random.uniform(0, self.config.deviation_time))
else:
time.sleep(abs(np.random.normal(loc=self.config.wait_time, scale=self.config.deviation_time)))
self.last_state = self.activated
self.thread.stop()
def set_activated(self, value):
if self.ui:
self.ui.update_clicking(value)
self.activated = value
def on_press(self, key):
if key in self.accepted_keys:
self.current_keys.add(key)
if self.accepted_keys.issubset(self.current_keys):
if self.config.toggle:
self.set_activated(not self.activated)
else:
self.set_activated(True)
def on_click(self, x, y, button, pressed):
if button == self.mouse_button:
if self.config.toggle and pressed:
self.set_activated(not self.activated)
elif not self.config.toggle and pressed:
self.set_activated(True)
elif not self.config.toggle and not pressed:
self.set_activated(False)
def on_release(self, key):
try:
if key in self.accepted_keys:
if not self.config.toggle:
self.set_activated(False)
self.current_keys.remove(key)
except KeyError:
pass
def set_config(self, config):
self.config = config
self.reload_config()
def reload_config(self):
self.accepted_keys = set()
if self.config.alt_modifier:
self.accepted_keys.add(Key.alt_l)
for key in self.config.key_combination:
self.accepted_keys.add(KeyCode.from_char(key))
self.thread = {0: KeyboardListener(on_press=self.on_press, on_release=self.on_release),
1: MouseListener(on_click=self.on_click)}.get(self.config.input_mode)
self.mouse_button = {0: Button.x1,
1: Button.x2}.get(self.config.special_mouse_press)
self.sequence_length = len(self.config.output_sequence)
``` |
{
"source": "jon-edward/py_switch",
"score": 3
} |
#### File: py_switch/switch/__init__.py
```python
from __future__ import annotations
from typing import Type, Any
class _InvalidCase:
"""Sentinel object for defining when a case is not met."""
_INVALID_CASE = _InvalidCase()
class _UndefinedEval:
"""Sentinel object for defining when a switch statement has not been evaluated yet."""
_UNDEFINED_EVAL = _UndefinedEval()
_CASE_FLAG_NAME = "_is_case_method"
# A case is 'default' when its predicate is always true.
# Declaring a variable named 'default' is simply to increase
# readability for the user.
default = True
def resolve(s: Type[switch]) -> Any:
"""Decorator for auto-resolving switch statement after its declaration."""
return s.eval()
class switch:
"""
Is a switch-case implementation.
Use by inheriting from this class, decorating case methods with
`case(predicate)`, and optionally decorating subclass with `resolve`
to evaluate the switch-case statement immediately after declaration.
"""
__slots__ = []
_cached_eval = _UNDEFINED_EVAL
@staticmethod
def case(predicate):
"""A decorator that defines default behavior for case function definitions in a class."""
is_correct_case = bool(predicate)
def decorator(function):
def wrapper(*args, **kwargs):
if not is_correct_case:
return _INVALID_CASE
result = function(*args, **kwargs)
return result
wrapper.__setattr__(_CASE_FLAG_NAME, True)
return wrapper
return decorator
@classmethod
def eval(cls):
"""Resolves the switch statement, and returns the accepted case's returning value."""
if cls._cached_eval is not _UNDEFINED_EVAL:
return cls._cached_eval
case_methods = [
x
for x in cls.__dict__.values()
if callable(x) and x.__dict__.get(_CASE_FLAG_NAME, False)
]
for func in case_methods:
result = func(cls)
if result is not _INVALID_CASE:
cls._cached_eval = result
return result
raise ValueError("There is no case with a True predicate.")
def __new__(cls, *args, **kwargs):
raise TypeError(f"{cls} cannot be instantiated.")
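# Illustrative usage of the switch/case/resolve API defined above:
#
#   value = 2
#
#   @resolve
#   class result(switch):
#       @switch.case(value == 1)
#       def one(self):
#           return "one"
#
#       @switch.case(default)
#       def fallback(self):
#           return "something else"
#
#   # result == "something else": eval() returned the first case whose
#   # predicate was True, and @resolve replaced the class with that value.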
``` |
{
"source": "jon-edward/wiki_dump",
"score": 3
} |
#### File: wiki_dump/wiki_data_dump/api_response.py
```python
from typing import Optional, Union, List
import copy
from dataclasses import dataclass
import re
@dataclass
class File:
"""
Holds file size, sha1 sum, and url for downloading.
Also contains md5, though this is unused.
"""
size: int
url: str
md5: str = None
sha1: str = None
@dataclass
class Job:
"""Holds job status, update time,
and file mapping from name to File."""
status: str
updated: str
files: Optional[dict] = None
def __post_init__(self):
if not self.files:
return
to_delete = set()
for name, file in self.files.items():
if file:
self.files[name] = File(**copy.deepcopy(file))
else:
to_delete.add(name)
for delete_member in to_delete:
del self.files[delete_member]
def get_file(self, key: Union[str, re.Pattern]) -> File:
"""Query file names by the first name that contains a match
for a regex Pattern or get the exact matching file name."""
if isinstance(key, str):
return self.files[key]
try:
name = next(_k for _k in self.files.keys() if key.search(_k))
return self.files[name]
except StopIteration:
# pylint: disable=W0707
raise KeyError(f"{key}")
# pylint: enable=W0707
def get_files(self, re_key: re.Pattern) -> List[File]:
"""Queries file names to find all files that contain a match for the supplied re_key."""
return [self.files[_k] for _k in self.files.keys() if re_key.search(_k)]
@dataclass
class Wiki:
"""Contains a mapping from job name to Job, and a version string."""
jobs: dict
version: str
def __post_init__(self):
for name, job in self.jobs.items():
if not isinstance(job, Job):
self.jobs[name] = Job(**copy.deepcopy(job))
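# Illustrative usage (made-up file listing, not a real dump):
#
#   job = Job(status="done", updated="2021-01-01 00:00:00",
#             files={"enwiki-latest-abstract.xml.gz": {"size": 1, "url": "/x"}})
#   job.get_file(re.compile(r"abstract"))   # -> File(size=1, url='/x', ...)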
``` |
{
"source": "jonejkim/httpfs",
"score": 2
} |
#### File: httpfs/client_typora/noref_remover.py
```python
import sys
from typing import *
from pathlib import PosixPath
class Bcolors:
HEADER = '\033[95m'
BLUE = '\033[94m'
CYAN = '\033[96m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# core_dir = PosixPath('.').parent.absolute() / 'core'
core_dir = PosixPath(__file__).parent.parent / 'core'
sys.path.append(str(core_dir))
from httpfs.common import FsConf, UPLOAD_SUBDIR_NAME, build_confs_from_json
FSCONFS = build_confs_from_json()
printlist = lambda lst: [print(entry) for entry in lst]
def printbig(string:str):
testNameLine = f'== {string} =='
divider = '='*len(testNameLine)
print('')
print(f'{divider}')
print(f'{testNameLine}')
print(f'{divider}')
print('')
if __name__ == '__main__':
for fsname, fsconf in FSCONFS.items():
printbig(f'fsroot name: {fsname} at {fsconf.fsroot}')
if fsconf.readonly:
print(f'(skipping {fsconf.fsname} at {fsconf.fsroot} as it is readonly (ie. not writable to {UPLOAD_SUBDIR_NAME} directory)')
continue
elif fsconf.fsname == 'default':
print(f'(skipping {fsconf.fsname} at {fsconf.fsroot} as it is reserved for default routing for image uploading for unspecified fsname.)')
continue
elif fsconf.fsname == 'tmp':
print(f'(skipping {fsconf.fsname} at {fsconf.fsroot} as it is reserved for Typora upload testing only.)')
continue
all_uploaded = fsconf.list_uploadDir_furls(recursive=False)
all_refereds = fsconf.list_md_refs()
print(f'##====[ listA: URLs in ${str(fsconf.uploadDir)}/* ]====##')
print(f'length: {len(all_uploaded)}', end='\n\n')
printlist(all_uploaded)
print('', end='\n\n')
print(f'##====[ listB: URL+URI referenced in {str(fsconf.fsroot)}/**/*.md ]====##')
print(f'length: {len(all_refereds)}', end='\n\n')
printlist(all_refereds)
print('', end='\n\n')
print('##====[ listD: set(listA) - set(listB) ]====##')
not_refereds = set(all_uploaded) - set(all_refereds)
print(f'length: {len(not_refereds)}', end='\n\n')
printlist(not_refereds)
print('', end='\n\n')
print('##====[ listP: listD mapped to equivalent posix paths ]====##')
posixpaths = [fsconf.url2path(noref) for noref in not_refereds]
print(f'length: {len(posixpaths)}', end='\n\n')
printlist(posixpaths)
print('', end='\n\n')
print('')
if len(posixpaths) == 0:
print('There are files with no reference to be relocated. Proceeding to next available httpfs.')
continue
def relocate(posixpaths:PosixPath, fsconf:FsConf, dryrun:bool):
renamedPath_pairs = []
for posixpath in posixpaths:
# secure new unique filename for conflicting ones instead of overwrite
desired_fname = posixpath.name
unique_fname:str = fsconf.secure_unique_fname(desired_fname , fsconf.norefDir)
renamedPath = fsconf.norefDir / unique_fname
if not dryrun:
renamedPath = posixpath.rename(renamedPath)
renamedPath_pairs.append([posixpath, renamedPath])
return renamedPath_pairs
print('[INPUT REQUIRED]')
print(f'For fs of fsname : \"{fsconf.fsname}\", located at \"{str(fsconf.fsroot)}\",')
print(f'files in {str(fsconf.uploadDir.name)}/* with no markdown files referencing (shown in listP above) will be relocated to {str(fsconf.norefDir.name)}/*')
userInp_dryrun = input(f'Dry run first? [y/n]: ')
print('')
if userInp_dryrun.lower() == 'y':
renamedPath_pairs = relocate(posixpaths, fsconf, dryrun=True)
print(f'##====[ (DRY RUN) listR: listP relocated to {fsconf.norefDir}/* ]====##')
print(f'length: {len(renamedPath_pairs)}', end='\n\n')
for originalPath, renamedPath in renamedPath_pairs:
print(f'{originalPath.parent.parent}/{Bcolors.BOLD}{Bcolors.YELLOW}{originalPath.parent.name}{Bcolors.ENDC}/{originalPath.name}')
print(f'-> {renamedPath.parent.parent}/{Bcolors.BOLD}{Bcolors.RED}{renamedPath.parent.name}{Bcolors.ENDC}/{renamedPath.name}')
print('', end='\n\n')
elif userInp_dryrun.lower() == 'n':
pass
else:
raise Exception('Bad input: must be one of \'y\' or \'n\'')
userInp_proceed = input(f'Proceed relocating? [y/n]: ')
if userInp_proceed.lower() == 'y':
renamedPath_pairs = relocate(posixpaths, fsconf, dryrun=False)
print(f'##====[ listR: listP relocated to {fsconf.norefDir}/* ]====##')
print(f'length: {len(renamedPath_pairs)}', end='\n\n')
for originalPath, renamedPath in renamedPath_pairs:
print(f'{originalPath.parent.parent}/{Bcolors.BOLD}{originalPath.parent.name}{Bcolors.ENDC}/{originalPath.name}')
print(f'-> {renamedPath.parent.parent}/{Bcolors.BOLD}{renamedPath.parent.name}{Bcolors.ENDC}/{renamedPath.name}')
print('', end='\n\n')
elif userInp_proceed.lower() == 'n':
pass
else:
raise Exception('Bad input: must be one of \'y\' or \'n\'')
print('\nEnd of the script.', end='\n\n')
```
#### File: httpfs/httpfs/common.py
```python
from typing import *
import json
from pathlib import PosixPath
from urllib.parse import urlparse
from html.parser import HTMLParser
import requests
import markdown
from .constants import *
# Central class for accessing httpfs configurations
# - each FsConf contains all properties and methods needed for each fs
class FsConf(object):
def __init__(self, fsconf:dict):
self.fsname = fsconf['fsname']
self.readonly = fsconf['readonly']
self.fsroot:PosixPath = fsconf['fsroot']
self.uploadDir:PosixPath = fsconf['uploadDir']
self.norefDir:PosixPath = fsconf['norefDir']
self.fs_url = SERVER_URL + '/' + self.fsname
def __list_xDir_fpaths(self, xDir:PosixPath, fname_only=False, asPathObj=False, recursive:bool=True) -> Union[List[str], List[PosixPath]]:
if xDir != self.uploadDir and xDir != self.norefDir:
raise Exception(f'bad argument xDir: {xDir} is not a writable subdirectory managed by this object\'s fsroot')
if recursive:
existing_posixpaths:List[PosixPath] = list(xDir.glob('**/*')) # '**/*' allows search into subdirectories manually made under '.imgs', not just tree depth 1
else:
existing_posixpaths:List[PosixPath] = list(filter(lambda item: not item.is_dir(), xDir.glob('*')))
# output format as function of arguments
if fname_only and asPathObj:
raise Exception('argument input: ( not(fullpath) && asPathObj ) is not supported.')
elif fname_only and not asPathObj:
return [posixpath.name for posixpath in existing_posixpaths]
elif not fname_only and asPathObj:
return existing_posixpaths
elif not fname_only and not asPathObj:
return [str(posixpath) for posixpath in existing_posixpaths]
def list_uploadDir_fpaths(self, fname_only=False, asPathObj=False, recursive:bool=True) -> Union[List[str], List[PosixPath]]:
return self.__list_xDir_fpaths(self.uploadDir, fname_only, asPathObj, recursive)
def list_norefDir_fpaths(self, fname_only=False, asPathObj=False, recursive:bool=True) -> Union[List[str], List[PosixPath]]:
return self.__list_xDir_fpaths(self.norefDir, fname_only, asPathObj, recursive)
def list_uploadDir_furls(self, recursive:bool=True) -> List[str]:
return [self.path2url(path) for path in self.list_uploadDir_fpaths(recursive=recursive)]
def list_norefDir_furls(self, recursive:bool=True) -> List[str]:
return [self.path2url(path) for path in self.list_norefDir_fpaths(recursive=recursive)]
def secure_unique_fname(self, desired_fname:str, xDir:PosixPath) -> str:
# compares proposed filename with existing,
# and ensures returning a non-duplicate filename
# (by adding suffixes if there's existing duplicate)
if xDir == self.uploadDir:
existing_fnames:str = self.list_uploadDir_fpaths(fname_only=True, asPathObj=False)
elif xDir == self.norefDir:
existing_fnames:str = self.list_norefDir_fpaths(fname_only=True, asPathObj=False)
else:
raise Exception(f'bad argument xDir: {xDir} is not a writable subdirectory managed by this object\'s fsroot')
initial_desired_fname = PosixPath(desired_fname)
idx = 0
while(True):
if desired_fname in existing_fnames:
desired_fname = initial_desired_fname.stem + f'_{idx}' + initial_desired_fname.suffix
idx += 1
else:
break
return desired_fname
def list_md_refs(self, with_mdpaths=False) -> Union[List[str], List[Tuple[PosixPath,str]]]:
# lists all references in md file except of the external web urls
mds = self.fsroot.glob('**/*.md')
mdpaths = []
refs = []
for md in mds:
with open(md, 'r') as f:
string = f.read()
html = markdown.markdown(string)
htmlparser = HTMLRefTagParser()
try:
htmlparser.feed(html)
except Exception as e:
print(f'Error: {e}, {md}')
refs.extend(htmlparser.parsedrefs)
mdpaths.extend([md]*len(htmlparser.parsedrefs))
if with_mdpaths:
return mdpaths, refs
else:
return refs
def path2url(self, path:Union[PosixPath, str]) -> str:
assert(self.path_valid(path)), f'{path} is not a valid path for {self.fsroot}'
assert(self.path_exists(path)), f'{path} is not a member of {self.fsroot}'
path = str(path).replace(str(self.fsroot), '')
url = self.fs_url + str(path)
assert(self.url_exists(url)), f'url {url} is not member of {self.fs_url}'
return url
def url2path(self, url:str) -> PosixPath:
assert(self.url_valid(url)), f'{url} is not a valid url for {self.fs_url}'
assert(self.url_exists(url)), f'{url} is not a member of {self.fs_url}'
subpath = url.replace(self.fs_url+'/', '')
posixpath = PosixPath(self.fsroot / subpath)
assert(self.path_exists(posixpath)), f'posixpath {posixpath} is not member of {self.fsroot}'
return posixpath
def path_valid(self, path:Union[PosixPath, str]) -> bool:
return (str(self.fsroot)+'/') in str(path)
def url_valid(self, url:str) -> bool:
return (self.fs_url+'/') in url
def path_exists(self, path:Union[PosixPath, str]) -> bool:
return self.path_valid(path) and PosixPath(path).exists()
def url_exists(self, url:str) -> bool:
# inquire directly to server
return self.url_valid(url) and requests.get(url).ok
# Markdown Image References Parsing (using html compiled from markdown)
class HTMLRefTagParser(HTMLParser):
def __init__(self):
super().__init__()
self.parsedrefs = []
def handle_starttag(self, tag, attrs):
if tag == "img":
img_uri = dict(attrs)["src"]
self.parsedrefs.append(img_uri)
elif tag == 'a':
href_uri = dict(attrs)["href"]
try:
# if fails attempt to convert to uri (ie. file://), it means it is likely of http:// or https:// format
# if attempt successful, append whether it is possible to check if the file path actually exists or not.
# (e.g. Network Attached Storage, etc)
assert(PosixPath(href_uri).as_uri())
self.parsedrefs.append(href_uri)
except Exception as e:
# if it is a httpfs server url, then append
if SERVER_URL in href_uri:
self.parsedrefs.append(href_uri)
def build_confs_from_json(fsconf_fpath:PosixPath=FSCONF_FPATH):
confs = read_conf_json(fsconf_fpath)
# convert raw string values into appropriate python datatype
for conf in confs:
# process readonly value to boolean
if conf['readonly'] != 'True' and conf['readonly'] != 'False':
raise ValueError('\'fsname \' {fsname} : property \'readonly\' must be \"True\" or \"False\", actual value: \"{readonly}\"'.format(fsname=conf['fsname'], readonly=conf['readonly']))
conf['readonly'] = True if conf['readonly'] == 'True' else False
# process fsroot value to PosixPath
fsroot = PosixPath(conf['fsroot'])
if not fsroot.exists():
print('directory \'fsname \' {fsname} : fsroot path does not exist, therefore will be created.'.format(fsname=conf['fsname']))
fsroot.mkdir(parents=True, exist_ok=True)
conf['fsroot'] = fsroot
# create upload/noref directories needed if managed fsroot will be writable by server
for conf in confs:
if conf['readonly']:
conf['uploadDir'] = None
conf['norefDir'] = None
continue
uploadDir = PosixPath(conf['fsroot']) / UPLOAD_SUBDIR_NAME
uploadDir.mkdir(exist_ok=True)
conf['uploadDir'] = uploadDir
norefDir = PosixPath(conf['fsroot']) / NOREF_SUBDIR_NAME
norefDir.mkdir(exist_ok=True)
conf['norefDir'] = norefDir
# construct dictionary of {key : value} => {fsname : fsconf object}
fsconfs = dict()
for conf in confs:
fsconf = FsConf(conf)
fsconfs[fsconf.fsname] = fsconf
return fsconfs
def read_conf_json(conf_path:PosixPath) -> dict:
## call server configuration json file into dict
with open(conf_path.as_posix(), 'r') as f:
string = f.read()
confs = json.loads(string)
return confs
``` |
{
"source": "jonejone/tourney",
"score": 2
} |
#### File: management/commands/waitinglist.py
```python
from django.core.management.base import BaseCommand, CommandError
from tourney.tournament.models import Tournament, TournamentPlayer
from optparse import make_option
class Command(BaseCommand):
args = '<tournament_slug> --sync'
help = 'Manage waiting list'
option_list = BaseCommand.option_list + (
make_option('--accept-player',
action='store',
dest='acceptplayer',
default=False,
help='Accept player into tournament and remove from waiting list'),
make_option('--accept',
action='store_true',
dest='accept',
default=False,
help='Accept new players on waiting list based on available spots'),
)
def handle(self, *args, **options):
try:
t_slug = args[0]
tournament = Tournament.objects.get(slug=t_slug)
self.tournament = tournament
except IndexError:
raise CommandError('Please enter a tournament slug')
except Tournament.DoesNotExist:
raise CommandError('Tournament slug not found')
if options['acceptplayer']:
self.accept(options['acceptplayer'])
elif options['accept']:
self.accept()
else:
self.print_waiting_list()
def accept(self, player_id=None):
if player_id:
try:
tp = self.tournament.tournamentplayer_set.get(
id=player_id)
except TournamentPlayer.DoesNotExist:
raise CommandError('Not a valid player ' +
'in this tournament')
players = [tp, ]
else:
players = self.tournament.tournamentplayer_set.filter(
is_waiting_list=True).order_by('-registered')
spots = self.tournament.get_available_spots()
if spots == 0:
raise CommandError('This tournament does not have'
+ ' any free spots for taking.')
if len(players) > spots:
players = players[:spots]
for player in players:
player.accept_player()
self.stdout.write('Accepted %i players' % len(players))
def print_waiting_list(self):
players = self.tournament.tournamentplayer_set.filter(
is_waiting_list=True)
print 'Waiting list for %s, %i players, %i open spots' % (
self.tournament.name,
players.count(),
self.tournament.get_available_spots())
for player in players:
print '- %s [%i]' % (player.player.name, player.id)
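# Usage sketch (not part of the original command; the slug and player id below
# are placeholder values):
#   python manage.py waitinglist some-tournament                   # print the waiting list
#   python manage.py waitinglist some-tournament --accept          # fill all open spots
#   python manage.py waitinglist some-tournament --accept-player 42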
```
#### File: tourney/tournament/middleware.py
```python
from django.conf import settings
from django.contrib.sites.models import Site
from .models import Tournament, TournamentSite, TournamentAdmin
from localeurl import utils
class TournamentMiddleware:
def process_request(self, request):
# See if we can find a tournament
# attached to our current site
current = Site.objects.get_current()
try:
ts = TournamentSite.objects.get(
site=current)
except TournamentSite.DoesNotExist:
pass
else:
# Tournament was found, lets add it to our
# request object as well as change to
# tournament-specific urlconf.
request.tournament = ts.tournament
request.urlconf = 'tourney.tournament.urls'
class TournamentAdminMiddleware:
def process_request(self, request):
# Only applicable if we are in a
# tournament context
if hasattr(request, 'tournament'):
# See if current user is admin
request.is_tournament_admin = False
if request.user.is_authenticated():
if request.user.is_staff:
request.is_tournament_admin = True
try:
admin = request.user.tournamentadmin_set.get(
tournament=request.tournament)
except TournamentAdmin.DoesNotExist:
pass
else:
request.is_tournament_admin = True
class TournamentLanguageMiddleware:
def process_request(self, request):
# Only applicable if we are in a
# tournament context
if hasattr(request, 'tournament'):
# By setting request.LANGUAGE_CODE, the localeurl
# lib we are using will pick it up as the currently
# active language.
request.LANGUAGE_CODE = request.tournament.language_code
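# Usage sketch (not part of the original module): these classes are intended to
# be listed in the project's MIDDLEWARE_CLASSES setting (old-style Django
# middleware, hence the process_request methods), e.g.
#   'tourney.tournament.middleware.TournamentMiddleware',
#   'tourney.tournament.middleware.TournamentAdminMiddleware',
#   'tourney.tournament.middleware.TournamentLanguageMiddleware',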
``` |
{
"source": "jonekoo/CoarseMC",
"score": 3
} |
#### File: CoarseMC/src/update_json.py
```python
import sys
import json
def main():
# Load "master" json used for updating
fn_master = sys.argv[1]
f = open(fn_master, 'r')
d_master = json.load(f)
f.close()
# Load "slave" json to be updated
fn_slaves = sys.argv[2:]
for fn_slave in fn_slaves:
f = open(fn_slave, 'r')
d_slave = json.load(f)
f.close()
d_slave.update(d_master)
# Overwrite "slave" file.
f = open(fn_slave, 'w')
json.dump(d_slave, f)
f.close()
if __name__=='__main__':
main()
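# Usage sketch (not part of the original script; file names are placeholders):
#   python update_json.py master.json run-0.json run-1.json
# Every key present in master.json overwrites the corresponding key in each of
# the other files, which are rewritten in place.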
```
#### File: CoarseMC/tests/test_zerosweeps.py
```python
import subprocess
import json
import os.path
import sys
import numpy
def main():
program = os.path.realpath("coarsemc")
#inputdir = os.path.join("tests",
# "zero_sweeps")
inputdir = ""
ntasks = 1
energies = collect_energies(ntasks, inputdir)
compare_energies(ntasks, inputdir, energies, tolerance = 1e-8)
def cleanup(ntasks, inputdir):
for i in range(ntasks):
subprocess.call(["rm", "-rf", str(i)], cwd=inputdir)
def initialize(ntasks, inputdir):
# Create temporary directories for the input files.
for i in range(ntasks):
subprocess.call(["mkdir", "-p", str(i)], cwd=inputdir)
# Copy input files to the directories.
for i in range(ntasks):
subprocess.call(["cp", os.path.join(inputdir,
"inputconfiguration-inputparameters-" + str(i) + \
".json"), os.path.join(str(i),
"input-0.json")], cwd=inputdir)
def run_program(ntasks, program, inputdir):
# Run program for each input files
for i in range(ntasks):
subprocess.call(["mpirun", "-np", "1", program],
cwd=os.path.join(inputdir, str(i)))
def collect_energies(ntasks, inputdir):
energies = []
for i in range(ntasks):
f = open("zerosweepsrestart-" + str(i) + ".json")
d = json.load(f)
energies.append(d["total_energy"])
f.close()
return energies
def compare_energies(ntasks, inputdir, energies, tolerance):
reference = numpy.loadtxt(os.path.join(os.path.dirname(__file__),
"total_energies-reference.txt"))
# accept if differences are below tolerance
accept = True
for i, t in enumerate(zip(energies, reference[:ntasks])):
e, r = t
relative_error = abs((e - r) / r)
if relative_error > tolerance:
accept = False
break
else:
print "Relative error = ", relative_error
if accept:
print "All energies are OK!"
else:
print "FAILED: Relative error in total_energy for task " + str(i) + \
" = " + str(relative_error) + " > " + str(tolerance)
sys.exit(1)
if __name__ == '__main__':
main()
``` |
{
"source": "Jone-Mark/TOK_IOT",
"score": 3
} |
#### File: TOK_IOT/TOK_IOT Service/api.py
```python
from tornado import websocket, web, ioloop #import the Tornado libraries
import json,os #import JSON and OS
import MySQLdb #import the MySQL database driver
import datetime #import the datetime library
cl = []
MYSQLHOST = "127.0.0.1" #mySQL IP
MYSQLUSER = "root" #username
MYSQLPW = "<PASSWORD>" #the password you set yourself
DBNAME = "HTTP" #database name
VALUE_TABLE = 'VALUE' #table name
MAP_TABLE = 'MAP' #table name
db = MySQLdb.connect(MYSQLHOST, MYSQLUSER, MYSQLPW, DBNAME, charset='utf8' ) #establish the database connection
cursor = db.cursor() #get a cursor
cursor.execute('DROP TABLE IF EXISTS '+VALUE_TABLE) # if the table already exists, drop it with execute().
cursor.execute('DROP TABLE IF EXISTS '+MAP_TABLE) # if the table already exists, drop it with execute().
print ('Drop TABLE '+MAP_TABLE+' AND '+VALUE_TABLE+' OK ') #report that the tables were dropped
sql_creat = 'CREATE TABLE IF NOT EXISTS '+ VALUE_TABLE + '(ID TINYTEXT NOT NULL,VALUE TINYTEXT NOT NULL,TIME TINYTEXT)'# create the value table
cursor.execute(sql_creat) # execute the CREATE TABLE statement
sql_creat = 'CREATE TABLE IF NOT EXISTS '+ MAP_TABLE + '(ID TINYTEXT NOT NULL,VALUE TINYTEXT NOT NULL,TIME TINYTEXT)'# create the map table
cursor.execute(sql_creat) # execute the CREATE TABLE statement
print ('CREATE TABLE '+MAP_TABLE+' AND '+VALUE_TABLE+' OK ') #report that the tables were created
######################################################################################SOCKET module
class SocketHandler(websocket.WebSocketHandler): #socket
def check_origin(self, origin):
return True
def open(self):
if self not in cl:
cl.append(self)
print (cl)
def on_close(self):
if self in cl:
cl.remove(self)
print (cl)
#########################################################################################
#########################################################################################Data API
class ApiHandler(web.RequestHandler):
@web.asynchronous
def post(self, *args):
self.write("post success")
self.finish()
id = self.get_argument("id")
value = self.get_argument("value")
        ##########################################################MySQL INSERT statement
        nowtime = datetime.datetime.now().strftime('%Y%m%d%H%M%S') #get the current time
sql_insert = 'INSERT INTO '+VALUE_TABLE+'(ID,VALUE,TIME) VALUES ('+str(id)+','+str(value)+','+nowtime+')'
print (sql_insert)
try:
            cursor.execute(sql_insert)# execute the SQL statement
            db.commit() # commit to the database
print("OK")
except:
db.rollback()# Rollback in case there is any error
print("ERR")
############################################################
data = {"id": id, "value" : value}#写入JSON让前台获取
data = json.dumps(data)
for c in cl:
c.write_message(data)
pass
@web.asynchronous
def get(self, *args):
start = self.get_argument("start")
end = self.get_argument("end")
################################################################
sql_insert = 'SELECT * FROM '+VALUE_TABLE+' WHERE TIME >= '+str(start)+ ' AND TIME <= ' +str(end)
print (sql_insert)
try:
            cursor.execute(sql_insert) # execute the SQL statement
            results = cursor.fetchall() # rows returned by the database
print("OK")
except:
db.rollback()
print("ERR")
###############################################################
print (results)
self.write(str(results))
self.finish()
########################################################################################
########################################################################################MAP API
class MapHandler(web.RequestHandler):#
@web.asynchronous
def post(self, *args):
        lat = self.get_argument("lat") #parse the uploaded data and extract lat and long
long = self.get_argument("long")
        ##########################################################MySQL INSERT statement
        nowtime = datetime.datetime.now().strftime('%Y%m%d%H%M%S') #get the current time
sql_insert = 'INSERT INTO '+MAP_TABLE+'(ID,VALUE,TIME) VALUES ('+str(lat)+','+str(long)+','+nowtime+')'
print (sql_insert)
try:
            cursor.execute(sql_insert)# execute the SQL statement
            db.commit() # commit to the database
print("OK")
except:
# Rollback in case there is any error
db.rollback()
print("ERR")
###########################################################################
data = {"lat": lat, "long" : long}#写入JSON让前台获取
data = json.dumps(data)
print (data)
for c in cl:
c.write_message(data)
pass
self.write("success")
self.finish()
@web.asynchronous
def get(self, *args):
start = self.get_argument("start")
end = self.get_argument("end")
        ################################################################read from the database
sql_insert = 'SELECT * FROM '+ MAP_TABLE +' WHERE TIME >= '+str(start)+ ' AND TIME <= ' +str(end)
print (sql_insert)
try:
            cursor.execute(sql_insert) # execute the SQL statement
            results = cursor.fetchall() # rows returned by the database
print("OK")
except:
db.rollback()
print("ERR")
###############################################################
print (results)
self.write(str(results))
self.finish()
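# Client usage sketch (illustrative, not part of the original service; host,
# port and sample values are placeholders -- the port itself is set in app.py):
#   curl -X POST http://<host>:8888/api -d "id=1&value=23"
#   curl "http://<host>:8888/api?start=20200101000000&end=20301231235959"
#   curl -X POST http://<host>:8888/map -d "lat=39.90&long=116.40"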
```
#### File: TOK_IOT/TOK_IOT Service/app.py
```python
from tornado import websocket, web, ioloop
import json,os #import JSON and OS
import MySQLdb #import the MySQL database driver
import datetime #import the datetime library
from tq import WeatherHandler #import the weather handler
from btn import JsonHandler #import the button handler
from img import ImgHandler #import the image handler
from api import ApiHandler,SocketHandler,MapHandler #import the value, map and SOCKET handlers
class IndexHandler(web.RequestHandler): #renders the main page
def get(self):
self.render("index.html")
app = web.Application([
(r'/', IndexHandler),
(r'/tq', WeatherHandler),
(r'/btn', JsonHandler),
(r'/img', ImgHandler),
(r'/ws', SocketHandler),
(r'/api', ApiHandler),
(r'/map', MapHandler),
])
if __name__ == '__main__':
    app.listen(8888)#listen on port 8888
ioloop.IOLoop.instance().start()
``` |
{
"source": "jonemo/microscan-driver",
"score": 3
} |
#### File: src/microscan/config.py
```python
from enum import Enum
import logging
import re
logger = logging.getLogger(__name__)
"""
Since the ASCII characters below 32 have special meaning, they need to be
escaped when transmitted as part of a setting. The following string can be used
in regex patterns in place of `.` to match any ASCII character as well as any
escaped ASCII character. The full list of ASCII characters and how they are
escaped is on page A-11 of the MS3 user manual.
"""
ASCII_CHAR = b'.|\^[A-Z\[\\\]\^_]'
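# Illustrative sketch (not part of the original driver): ASCII_CHAR accepts
# either a plain character or an escaped control character such as b'^M'
# (carriage return), so it can stand in for `.` inside the K_PATTERN regexes
# below. The helper is defined for demonstration only and never called.
def _demo_ascii_char_pattern():
    assert re.match(b'^(%s)$' % ASCII_CHAR, b'A')
    assert re.match(b'^(%s)$' % ASCII_CHAR, b'^M')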
class MicroscanConfigException(Exception):
"""Parent class for all configuration related exceptions
"""
class InvalidConfigString(MicroscanConfigException):
"""Raised when decoding or encoding a character string with invalid format
For example, the following strings would raise this exception when passed
to HostPortConnection.from_config_string():
- '<K100,4,0>' because K100 has four parameters, not two
- '<K100,4,9,1,0>' because the second parameter of K100 is one of [0, 1, 2]
"""
class UnknownConfigString(MicroscanConfigException):
"""Raised when a config string is received for which no serializer is known
For example, the following strings would raise this exception when passed
to MicroscanConfiguration.from_config_strings():
- '<K92,1>' because '92' is not a known configuration ID
- '<K
"""
class KSetting:
"""Base class for all configuration settings"""
def to_config_string(self, values):
# class must have non-empty K_CODE attribute
assert hasattr(self, 'K_CODE')
assert isinstance(self.K_CODE, bytes)
assert len(self.K_CODE) > 0
# object must be valid
# TODO: write validators for all settings
# assert self.is_valid()
# values must be list
assert isinstance(values, list)
def normalize_value(val):
if isinstance(val, bytes):
return val
elif val is None:
return b''
else:
return str(val).encode('ascii')
# normalize values to bytes, None gets cast to empty string
values = [normalize_value(val) for val in values]
str_ = b'<%s,%s>' % (self.K_CODE, b','.join(values))
# test the generated K-string against the pattern used for decoding
if not re.match(self.K_PATTERN, str_):
raise InvalidConfigString(
'Encoding the %s object resulted in an invalid K-string: "%s"'
% (self.__class__.__name__, str_.decode('ascii'))
)
return str_
# === Host Port Connection setting and corresponding enums ===
class Parity(Enum):
"""Used in HostPortConnection setting"""
NONE = b'0'
EVEN = b'1'
ODD = b'2'
class StopBits(Enum):
ONE = b'0'
TWO = b'1'
class DataBits(Enum):
SEVEN = b'0'
EIGHT = b'1'
BAUD_RATES = {
b'0': 600,
b'1': 1200,
b'2': 2400,
b'3': 4800,
b'4': 9600,
b'5': 19200,
b'6': 38400,
b'7': 57600,
b'8': 115200,
}
def _serialize_baud_rate(baud_rate):
baud_rate_to_config_val = dict(
zip(BAUD_RATES.values(), BAUD_RATES.keys()))
try:
return baud_rate_to_config_val[baud_rate]
    except KeyError:
raise ValueError(
'%s is not a valid baud rate, must be one of %s' %
(baud_rate, ', '.join(BAUD_RATES.values())))
def _deserialize_baud_rate(configval):
try:
return BAUD_RATES[configval]
    except KeyError:
raise ValueError(
'%s is not a valid config value for baud rate, must be one of '
'%s' % (configval, ', '.join(BAUD_RATES.keys())))
class HostPortConnection(KSetting):
"""See page 3-4 of Microscan MS3 manual for reference
Note that this section is referred to with the plural "Host Port
Connections" in the manual, but this library uses the singular, for
consistency with other settings names.
"""
K_CODE = b'K100'
K_PATTERN = b'^<%s,([0-8]),([0-2]),([0-1]),([0-1])>$' % K_CODE
def __init__(
self, baud_rate=9600, parity=Parity.NONE, stop_bits=StopBits.ONE,
data_bits=DataBits.SEVEN):
self.baud_rate = baud_rate
self.parity = parity
self.stop_bits = stop_bits
self.data_bits = data_bits
def to_config_string(self):
return super().to_config_string([
_serialize_baud_rate(self.baud_rate),
self.parity.value,
self.stop_bits.value,
self.data_bits.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create HostPortConnection object from string returned by the device
The str_ argument should be the device response to the <K100?>
command, for example '<K100,4,0,0,0>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
baud_rate, parity, stop_bits, data_bits = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
baud_rate=_deserialize_baud_rate(baud_rate),
parity=Parity(parity),
stop_bits=StopBits(stop_bits),
data_bits=DataBits(data_bits),
)
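# Round-trip sketch (illustrative, not part of the original driver): parse a
# sample K100 response and serialize it back. The helper is defined for
# demonstration only and never called.
def _demo_host_port_roundtrip():
    setting = HostPortConnection.from_config_string(b'<K100,4,0,0,0>')
    assert setting.baud_rate == 9600
    assert setting.parity is Parity.NONE
    assert setting.to_config_string() == b'<K100,4,0,0,0>'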
# === Host Protocol setting and corresponding enums ===
class Protocol(Enum):
PointToPoint = b'0'
PointToPointWithRTSCTS = b'1'
PointToPointWithXONXOFF = b'2'
PointToPointWithRTSCTSAndXONXOFF = b'3'
PollingModeD = b'4'
Multidrop = b'5'
UserDefined = b'6'
UserDefinedMultidrop = b'7'
class HostProtocol(KSetting):
"""See page 3-5 of Microscan MS3 manual for reference
The protocol options `Multidrop`, `UserDefined`, and `UserDefinedMultidrop`
require additional settings besides the protocol parameter, which are
    currently not supported by this library. Refer to pages 3-7 to 3-9 in the
MS3 user manual for detailed explanations of these Host Protocol settings.
"""
K_CODE = b'K140'
K_PATTERN = b'^<%s,([0-7])(,.*)?>$' % K_CODE
def __init__(self, protocol=Protocol.PointToPoint):
self.protocol = protocol
# TODO: protocol 5-7 require additional parameters
def to_config_string(self):
return super().to_config_string([
self.protocol.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create HostProtocol object from string returned by the device
The str_ argument should be the device response to the <K140?>
command, for example '<K140,0>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
protocol, _ = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(protocol=Protocol(protocol))
# === Host RS-232/422 Status setting and corresponding enums ===
class RS422Status(Enum):
Disabled = b'0'
Enabled = b'1'
class HostRS422Status(KSetting):
"""See page 3-10 of Microscan MS3 manual for reference
This setting contains a single binary flag for switching between RS-232
and RS-422 mode.
    When using a computer with a serial port or USB-to-serial adapter, ensure that
your hardware supports RS-422 communication before switching the barcode
reader into RS-422 mode.
When the Host Protocol is set to `Multidrop` or `UserDefinedMultidrop`
(both not currently supported by this library), RS-485 is implied and this
setting is ignored.
Note that this section is referred to as "Host RS-232/422 Status" in the
manual.
"""
K_CODE = b'K102'
K_PATTERN = b'^<%s,([0-1])?>$' % K_CODE
def __init__(self, status=RS422Status.Disabled):
self.status = status
def to_config_string(self):
return super().to_config_string([
self.status.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create HostRS422Status object from string returned by the device
The str_ argument should be the device response to the <K102?>
command, for example '<K102,1>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
status, = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(status=RS422Status(status))
# === Host RS-232 Auxiliary Port setting and corresponding enums ===
class AuxiliaryPortMode(Enum):
Disabled = b'0'
Transparent = b'1'
HalfDuplex = b'2'
FullDuplex = b'3'
DaisyChain = b'4'
CommandProcessing = b'5'
class DaisyChainIdStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class RS232AuxiliaryPort(KSetting):
"""See page 3-11 of Microscan MS3 manual for reference
"""
K_CODE = b'K101'
K_PATTERN = (
b'<%s,([0-5]),([0-8]),([0-2]),([0-1]),([0-1]),([0-1]),(.{1,2})?>'
% K_CODE
)
def __init__(
self, aux_port_mode=AuxiliaryPortMode.Disabled, baud_rate=9600,
parity=Parity.NONE, stop_bits=StopBits.ONE,
data_bits=DataBits.SEVEN,
daisy_chain_id_status=DaisyChainIdStatus.Disabled,
daisy_chain_id='1/'
):
self.aux_port_mode = aux_port_mode
self.baud_rate = baud_rate
self.parity = parity
self.stop_bits = stop_bits
self.data_bits = data_bits
self.daisy_chain_id_status = daisy_chain_id_status
self.daisy_chain_id = daisy_chain_id
def to_config_string(self):
return super().to_config_string([
self.aux_port_mode.value,
_serialize_baud_rate(self.baud_rate),
self.parity.value,
self.stop_bits.value,
self.data_bits.value,
self.daisy_chain_id_status.value,
self.daisy_chain_id,
])
@classmethod
def from_config_string(cls, str_):
"""Create RS232AuxiliaryPort object from string returned by the device
The str_ argument should be the device response to the <K101?>
command, for example '<K101,2,3,1,1,0,1,AB>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
(
aux_port_mode, baud_rate, parity, stop_bits, data_bits,
daisy_chain_id_status, daisy_chain_id
) = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
aux_port_mode=AuxiliaryPortMode(aux_port_mode),
baud_rate=_deserialize_baud_rate(baud_rate),
parity=Parity(parity),
stop_bits=StopBits(stop_bits),
data_bits=DataBits(data_bits),
daisy_chain_id_status=DaisyChainIdStatus(daisy_chain_id_status),
daisy_chain_id=daisy_chain_id,
)
# === Preamble setting and corresponding enums ===
class PreambleStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class Preamble(KSetting):
"""See page 3-20 of Microscan MS3 manual for reference
"""
K_CODE = b'K141'
K_PATTERN = b'^<%s,([0-1])?,(.{1,4})?>$' % K_CODE
def __init__(self, status=PreambleStatus.Disabled, characters=None):
self.status = status
self.characters = characters
def to_config_string(self):
return super().to_config_string([
self.status.value,
self.characters,
])
@classmethod
def from_config_string(cls, str_):
"""Create Preamble object from string returned by the device
The str_ argument should be the device response to the <K141?>
command, for example '<K141,1,ABCD>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
status, characters = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
status=PreambleStatus(status),
characters=characters,
)
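# Usage sketch (illustrative, not part of the original driver): enable a two
# character preamble; b'AB' is an arbitrary example value. Defined for
# demonstration only and never called.
def _demo_preamble():
    setting = Preamble(status=PreambleStatus.Enabled, characters=b'AB')
    assert setting.to_config_string() == b'<K141,1,AB>'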
# === Postamble setting and corresponding enums ===
class PostambleStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class Postamble(KSetting):
"""See page 3-20 of Microscan MS3 manual for reference
"""
K_CODE = b'K142'
K_PATTERN = b'^<%s,([0-1])?,(.{1,4})?>$' % K_CODE
def __init__(self, status=PostambleStatus.Disabled, characters=None):
self.status = status
self.characters = characters
def to_config_string(self):
return super().to_config_string([
self.status.value,
self.characters,
])
@classmethod
def from_config_string(cls, str_):
"""Create Postamble object from string returned by the device
The str_ argument should be the device response to the <K142?>
command, for example '<K142,1,A16z>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
status, characters = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
status=PostambleStatus(status),
characters=characters,
)
# === LRC Status setting and corresponding enums ===
class LRCStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class LRC(KSetting):
"""See page 3-22 of Microscan MS3 manual for reference
LRC stands for "Longitudinal Redundancy Check".
Note that this setting is referred to as "LRC Status" in the user manual
    but called `LRC` here to avoid the name collision with the `LRCStatus`
enum.
"""
K_CODE = b'K145'
K_PATTERN = b'^<%s,([0-1])?>$' % K_CODE
def __init__(self, status=LRCStatus.Disabled):
self.status = status
def is_valid(self):
return all([
isinstance(self.status, LRCStatus),
])
def to_config_string(self):
return super().to_config_string([
self.status.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create LRC object from string returned by the device
The str_ argument should be the device response to the <K145?>
command, for example '<K145,1>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
status, = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
status=LRCStatus(status),
)
# === Inter Character Delay setting and corresponding enums ===
class InterCharacterDelay(KSetting):
"""See page 3-22 of Microscan MS3 manual for reference
"""
K_CODE = b'K144'
K_PATTERN = b'^<%s,([\d]{1,3})?>$' % K_CODE
def __init__(self, delay=0):
self.delay = delay
def is_valid(self):
return all([
isinstance(self.delay, int),
self.delay >= 0,
self.delay <= 255,
])
def to_config_string(self):
return super().to_config_string([
self.delay,
])
@classmethod
def from_config_string(cls, str_):
"""Create InterCharacterDelay object from string returned by the device
The str_ argument should be the device response to the <K144?>
command, for example '<K144,123>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
delay, = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
delay=int(delay),
)
# === Multisymbol setting and corresponding enums ===
class Multisymbol(KSetting):
"""See page 4-3 of Microscan MS3 manual for reference
"""
K_CODE = b'K222'
K_PATTERN = b'^<%s,([1-5])?,(.)?>$' % K_CODE
def __init__(self, number_of_symbols=1, multisymbol_separator=','):
self.number_of_symbols = number_of_symbols
self.multisymbol_separator = multisymbol_separator
def is_valid(self):
return all([
isinstance(self.number_of_symbols, int),
self.number_of_symbols >= 0,
self.number_of_symbols <= 5,
isinstance(self.multisymbol_separator, str),
len(self.multisymbol_separator) <= 1,
])
def to_config_string(self):
return super().to_config_string([
self.number_of_symbols,
self.multisymbol_separator,
])
@classmethod
def from_config_string(cls, str_):
"""Create Multisymbol object from string returned by the device
The str_ argument should be the device response to the <K144?>
command, for example '<K222,2,|>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
number_of_symbols, multisymbol_separator = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
number_of_symbols=int(number_of_symbols),
multisymbol_separator=multisymbol_separator,
)
# === Trigger setting and corresponding enums ===
class TriggerMode(Enum):
ContinuousRead = b'0'
ContinuousReadOneOutput = b'1'
ExternalLevel = b'2'
ExternalEdge = b'3'
SerialData = b'4'
SerialDataAndExternalEdge = b'5'
class Trigger(KSetting):
"""See page 4-6 of Microscan MS3 manual for reference
"""
K_CODE = b'K200'
K_PATTERN = b'^<%s,([0-5])?,([\d]*)?>$' % K_CODE
def __init__(
self, trigger_mode=TriggerMode.ContinuousRead,
trigger_filter_duration=244):
self.trigger_mode = trigger_mode
self.trigger_filter_duration = trigger_filter_duration
def is_valid(self):
return all([
isinstance(self.trigger_mode, TriggerMode),
isinstance(self.trigger_filter_duration, int)
])
def to_config_string(self):
return super().to_config_string([
self.trigger_mode.value,
self.trigger_filter_duration,
])
@classmethod
def from_config_string(cls, str_):
"""Create Trigger object from string returned by the device
The str_ argument should be the device response to the <K200?>
command, for example '<K200,1,244>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
trigger_mode, trigger_filter_duration = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
trigger_mode=TriggerMode(trigger_mode),
trigger_filter_duration=int(trigger_filter_duration),
)
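# Usage sketch (illustrative, not part of the original driver): switch the
# reader to external-edge triggering while keeping the default debounce filter.
# Defined for demonstration only and never called.
def _demo_trigger():
    setting = Trigger(trigger_mode=TriggerMode.ExternalEdge)
    assert setting.to_config_string() == b'<K200,3,244>'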
# === External Trigger State setting and corresponding enums ===
class ExternalTriggerState(Enum):
Negative = b'0'
Positive = b'1'
class ExternalTrigger(KSetting):
"""See page 4-11 of Microscan MS3 manual for reference
Note that this setting is referred to as "External Trigger Status" in the
    user manual but called `ExternalTrigger` here to avoid the name collision
with the `ExternalTriggerState` enum.
"""
K_CODE = b'K202'
K_PATTERN = b'^<%s,([0-1])?>$' % K_CODE
def __init__(self, external_trigger_state=ExternalTriggerState.Positive):
self.external_trigger_state = external_trigger_state
def is_valid(self):
return all([
isinstance(self.external_trigger_state, ExternalTriggerState),
])
def to_config_string(self):
return super().to_config_string([
self.external_trigger_state.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create ExternalTriggerState object from str returned by the device
The str_ argument should be the device response to the <K202?>
command, for example '<K202,1>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
external_trigger_state, = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
external_trigger_state=ExternalTriggerState(
external_trigger_state),
)
# === Serial Trigger setting and corresponding enums ===
class SerialTrigger(KSetting):
"""See page 4-12 of Microscan MS3 manual for reference
"""
K_CODE = b'K201'
K_PATTERN = b'^<%s,(.|\^\])?>$' % K_CODE
def __init__(self, serial_trigger_character='^'):
self.serial_trigger_character = serial_trigger_character
def is_valid(self):
return all([
isinstance(self.serial_trigger_character, bytes),
len(self.serial_trigger_character) == 1,
])
def to_config_string(self):
return super().to_config_string([
self.serial_trigger_character,
])
@classmethod
def from_config_string(cls, str_):
"""Create SerialTrigger object from string returned by the device
The str_ argument should be the device response to the <K201?>
command, for example '<K201,^>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
serial_trigger_character, = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
serial_trigger_character=serial_trigger_character,
)
# === Non-delimited Start and Stop Characters setting ===
class StartTriggerCharacter(KSetting):
"""See page 4-13 of Microscan MS3 manual for reference
The user manual groups the `StartTriggerSetting` and `StopTriggerSetting`
under a single heading "Non-delimited Start and Stop Characters". This
library treats them as separate settings because they have distinct
K-codes, K229 and K230.
Note that the character is encoded as two hex digits and not as the actual
character, as for example SerialTrigger (K201).
"""
K_CODE = b'K229'
K_PATTERN = b'^<%s,([0-9a-fA-F]{2})?>$' % K_CODE
def __init__(self, start_trigger_character=None):
self.start_trigger_character = start_trigger_character
def is_valid(self):
return all([
isinstance(self.start_trigger_character, str),
len(self.start_trigger_character) == 1,
])
def to_config_string(self):
return super().to_config_string([
self.start_trigger_character,
])
@classmethod
def from_config_string(cls, str_):
"""Create StartTriggerCharacter object from str returned by the device
The str_ argument should be the device response to the <K229?>
command, for example '<K229,>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
start_trigger_character, = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
start_trigger_character=start_trigger_character,
)
class StopTriggerCharacter(KSetting):
"""See page 4-13 of Microscan MS3 manual for reference
The user manual groups the `StartTriggerSetting` and `StopTriggerSetting`
under a single heading "Non-delimited Start and Stop Characters". This
library treats them as separate settings because they have distinct
K-codes, K229 and K230.
Note that the character is encoded as two hex digits and not as the actual
character, as for example SerialTrigger (K201).
"""
K_CODE = b'K230'
K_PATTERN = b'^<%s,([0-9a-fA-F]{2})?>$' % K_CODE
def __init__(self, stop_trigger_character=None):
self.stop_trigger_character = stop_trigger_character
def is_valid(self):
return all([
isinstance(self.stop_trigger_character, str),
len(self.stop_trigger_character) == 1,
])
def to_config_string(self):
return super().to_config_string([
self.stop_trigger_character,
])
@classmethod
def from_config_string(cls, str_):
"""Create StopTriggerCharacter object from str returned by the device
The str_ argument should be the device response to the <K230?>
command, for example '<K230,>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
stop_trigger_character, = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
stop_trigger_character=stop_trigger_character,
)
# === End Read Cycle setting and corresponding enums ===
class EndReadCycleMode(Enum):
Timeout = b'0'
NewTrigger = b'1'
TimeoutAndNewTrigger = b'2'
class EndReadCycle(KSetting):
"""See page 4-14 of Microscan MS3 manual for reference
ready_cycle_timeout is measured in tens of milliseconds, e.g. 100 = 1sec
"""
K_CODE = b'K220'
K_PATTERN = b'^<%s,([0-2])?,([\d]*)?>$' % K_CODE
def __init__(
self, end_read_cycle_mode=EndReadCycleMode.Timeout,
read_cycle_timeout=100):
self.end_read_cycle_mode = end_read_cycle_mode
self.ready_cycle_timeout = read_cycle_timeout
def is_valid(self):
return all([
isinstance(self.end_read_cycle_mode, EndReadCycleMode),
isinstance(self.ready_cycle_timeout, int),
self.ready_cycle_timeout >= 0,
self.ready_cycle_timeout <= 65535,
])
def to_config_string(self):
return super().to_config_string([
self.end_read_cycle_mode.value,
self.ready_cycle_timeout,
])
@classmethod
def from_config_string(cls, str_):
"""Create EndReadCycle object from str returned by the device
The str_ argument should be the device response to the <K220?>
command, for example '<K220,1,100>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
end_read_cycle_mode, read_cycle_timeout = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
end_read_cycle_mode=EndReadCycleMode(end_read_cycle_mode),
read_cycle_timeout=int(read_cycle_timeout)
)
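# Usage sketch (illustrative, not part of the original driver): end the read
# cycle after 2 seconds (the timeout is expressed in tens of milliseconds).
# Defined for demonstration only and never called.
def _demo_end_read_cycle():
    setting = EndReadCycle(
        end_read_cycle_mode=EndReadCycleMode.Timeout, read_cycle_timeout=200)
    assert setting.to_config_string() == b'<K220,0,200>'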
# === Decodes Before Output setting and corresponding enums ===
class DecodesBeforeOutputMode(Enum):
NonConsecutive = b'0'
Consecutive = b'1'
class DecodesBeforeOutput(KSetting):
"""See page 4-16 of Microscan MS3 manual for reference
"""
K_CODE = b'K221'
K_PATTERN = b'^<%s,([\d]{1,3})?,([0-1])?>$' % K_CODE
def __init__(
self, number_before_output=1,
decodes_before_output_mode=DecodesBeforeOutputMode.NonConsecutive):
self.number_before_output = number_before_output
self.decodes_before_output_mode = decodes_before_output_mode
def is_valid(self):
return all([
isinstance(self.number_before_output, int),
self.number_before_output >= 1,
self.number_before_output <= 255,
isinstance(
self.decodes_before_output_mode, DecodesBeforeOutputMode),
])
def to_config_string(self):
return super().to_config_string([
self.number_before_output,
self.decodes_before_output_mode.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create DecodesBeforeOutput object from string returned by the device
The str_ argument should be the device response to the <K221?>
command, for example '<K221,10,1>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
number_before_output, decodes_before_output_mode = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
number_before_output=int(number_before_output),
decodes_before_output_mode=DecodesBeforeOutputMode(
decodes_before_output_mode)
)
# === Scan Speed setting and corresponding enums ===
class ScanSpeed(KSetting):
"""See page 4-17 of Microscan MS3 manual for reference
Note that the user manual groups the "Scan Speed" setting under the
"Scanner Setup" heading. This library treats it as separate setting because
it is stored with a distinct K-code `K500` while all other Scanner Setup
settings are stored with a K-code of `K504`.
"""
K_CODE = b'K500'
K_PATTERN = b'^<%s,([\d]{2,3})?>$' % K_CODE
def __init__(self, scan_speed=350):
self.scan_speed = scan_speed
def is_valid(self):
return all([
isinstance(self.scan_speed, int),
self.scan_speed >= 30,
self.scan_speed <= 100,
])
def to_config_string(self):
return super().to_config_string([
self.scan_speed,
])
@classmethod
def from_config_string(cls, str_):
"""Create ScanSpeed object from str returned by the device
The str_ argument should be the device response to the <K500?>
command, for example '<K500,350>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
scan_speed, = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
scan_speed=int(scan_speed),
)
# === Scanner Setup setting and corresponding enums ===
class AGCSamplingMode(Enum):
Disabled = b'0'
LeadingEdge = b'1'
Continuous = b'2'
class ScannerSetup(KSetting):
"""See page 4-17 of Microscan MS3 manual for reference
Note that the user manual groups the "Scan Speed" setting under the
"Scanner Setup" heading. This library treats it as separate setting because
it is stored with a distinct K-code `K500` while all other Scanner Setup
settings are stored with a K-code of `K504`.
"""
K_CODE = b'K504'
K_PATTERN = (
b'^<%s,([\d]{2,3})?,([0-2])?,([\d]{2,3})?,([\d]{2,3})?>$' % K_CODE)
def __init__(
self, gain_level=350,
agc_sampling_mode=AGCSamplingMode.Continuous, agc_min=70,
agc_max=245):
self.gain_level = gain_level
self.agc_sampling_mode = agc_sampling_mode
self.agc_min = agc_min
self.agc_max = agc_max
def is_valid(self):
return all([
isinstance(self.gain_level, int),
            self.gain_level >= 40,
            self.gain_level <= 255,
isinstance(self.agc_sampling_mode, AGCSamplingMode),
isinstance(self.agc_min, int),
self.agc_min >= 40,
self.agc_min <= 250,
isinstance(self.agc_max, int),
self.agc_max >= 60,
self.agc_max <= 255,
])
def to_config_string(self):
return super().to_config_string([
self.gain_level,
self.agc_sampling_mode.value,
self.agc_min,
self.agc_max,
])
@classmethod
def from_config_string(cls, str_):
"""Create ScannerSetup object from str returned by the device
The str_ argument should be the device response to the <K504?>
command, for example '<K504,50,2,60,230>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
gain_level, agc_samling_mode, agc_min, agc_max = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
gain_level=int(gain_level),
agc_sampling_mode=AGCSamplingMode(agc_samling_mode),
agc_min=int(agc_min),
agc_max=int(agc_max),
)
# === Symbol Detect Status setting and corresponding enums ===
class SymbolDetectStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class SymbolDetect(KSetting):
"""See page 4-19 of Microscan MS3 manual for reference
Note that the user manual groups the "Symbol Detect Status" setting under
the "Scanner Setup" heading. This library treats it as separate setting
because it is stored with a distinct K-code `K505` while all other Scanner
Setup settings are stored with a K-code of `K504`.
"""
K_CODE = b'K505'
K_PATTERN = b'^<%s,([0-1])?,([\d]{1,3})?>$' % K_CODE
def __init__(
self, status=SymbolDetectStatus.Disabled, transition_counter=14):
self.status = status
self.transition_counter = transition_counter
def is_valid(self):
return all([
isinstance(self.status, SymbolDetectStatus),
isinstance(self.transition_counter, int),
self.transition_counter >= 0,
self.transition_counter <= 255,
])
def to_config_string(self):
return super().to_config_string([
self.status.value,
self.transition_counter,
])
@classmethod
def from_config_string(cls, str_):
"""Create SymbolDetect object from string returned by the device
The str_ argument should be the device response to the <K505?>
command, for example '<K505,1>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
status, transition_counter = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
status=SymbolDetectStatus(status),
transition_counter=int(transition_counter)
)
# === Inter Character Delay setting and corresponding enums ===
class MaximumElement(KSetting):
"""See page 4-20 of Microscan MS3 manual for reference
"""
K_CODE = b'K502'
K_PATTERN = b'^<%s,([\d]{1,5})?>$' % K_CODE
def __init__(self, maximum_element=0):
self.maximum_element = maximum_element
def is_valid(self):
return all([
isinstance(self.maximum_element, int),
self.maximum_element >= 0,
self.maximum_element <= 65535,
])
def to_config_string(self):
return super().to_config_string([
self.maximum_element,
])
@classmethod
def from_config_string(cls, str_):
"""Create MaximumElement object from string returned by the device
The str_ argument should be the device response to the <K502?>
command, for example '<K502,123>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
maximum_element, = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
maximum_element=int(maximum_element),
)
# === Scan Width Enhance setting and corresponding enums ===
class ScanWidthEnhanceStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class ScanWidthEnhance(KSetting):
"""See page 4-20 of Microscan MS3 manual for reference
    Note that the user manual groups the "Scan Width Enhance" setting under
    the "Scanner Setup" heading. This library treats it as a separate setting
because it is stored with a distinct K-code `K511` while all other Scanner
Setup settings are stored with a K-code of `K504`.
"""
K_CODE = b'K511'
K_PATTERN = b'^<%s,([0-1])?>$' % K_CODE
def __init__(
self, status=ScanWidthEnhanceStatus.Disabled):
self.status = status
def is_valid(self):
return all([
            isinstance(self.status, ScanWidthEnhanceStatus),
])
def to_config_string(self):
return super().to_config_string([
self.status.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create ScanWidthEnhance object from string returned by the device
The str_ argument should be the device response to the <K511?>
command, for example '<K511,1>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
status, = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
status=ScanWidthEnhanceStatus(status),
)
# === Laser Setup setting and corresponding enums ===
class LaserOnOffStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class LaserFramingStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class LaserPower(Enum):
Low = b'0'
Medium = b'1'
High = b'2'
class LaserSetup(KSetting):
"""See page 4-20 of Microscan MS3 manual for reference
Note that the "Laser Power" subsetting of the Laser Setup is mentioned
twice in the MS3 user manual, once under "Laser Setup" and once under
"Scanner Setup".
"""
K_CODE = b'K700'
K_PATTERN = (
b'^<%s,([0-1])?,([0-1])?,([\d]{2})?,([\d]{2})?,([0-2])?>$' % K_CODE)
def __init__(
self, laser_on_off_status=LaserOnOffStatus.Enabled,
laser_framing_status=LaserFramingStatus.Enabled,
laser_on_position=10,
laser_off_position=95,
laser_power=LaserPower.High):
self.laser_on_off_status = laser_on_off_status
self.laser_framing_status = laser_framing_status
self.laser_on_position = laser_on_position
self.laser_off_position = laser_off_position
self.laser_power = laser_power
def is_valid(self):
return all([
isinstance(self.laser_on_off_status, LaserOnOffStatus),
isinstance(self.laser_framing_status, LaserFramingStatus),
isinstance(self.laser_on_position, int),
self.laser_on_position >= 10,
self.laser_on_position <= 80,
isinstance(self.laser_off_position, int),
self.laser_off_position >= 20,
self.laser_off_position <= 95,
isinstance(self.laser_power, LaserPower)
])
def to_config_string(self):
return super().to_config_string([
self.laser_on_off_status.value,
self.laser_framing_status.value,
self.laser_on_position,
self.laser_off_position,
self.laser_power.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create LaserSetup object from string returned by the device
The str_ argument should be the device response to the <K700?>
command, for example '<K700,1,1,10,95,1>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
(
on_off_status, framing_status, on_position, off_position, power
) = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
laser_on_off_status=LaserOnOffStatus(on_off_status),
laser_framing_status=LaserFramingStatus(framing_status),
laser_on_position=int(on_position),
laser_off_position=int(off_position),
laser_power=LaserPower(power)
)
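# Usage sketch (illustrative, not part of the original driver): narrow the
# laser framing window and drop to medium power; the position values are
# arbitrary examples within the documented 10-95 range. Defined for
# demonstration only and never called.
def _demo_laser_setup():
    setting = LaserSetup(
        laser_on_position=20, laser_off_position=80,
        laser_power=LaserPower.Medium)
    assert setting.to_config_string() == b'<K700,1,1,20,80,1>'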
# === Code 39 setting and corresponding enums ===
class Code39Status(Enum):
Disabled = b'0'
Enabled = b'1'
class CheckDigitStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class CheckDigitOutputStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class LargeInterCharacterStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class FixedSymbolLengthStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class FullASCIISetStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class Code39(KSetting):
"""See page 5-3 of Microscan MS3 manual for reference
"""
K_CODE = b'K470'
K_PATTERN = (
b'^<%s,([0-1])?,([0-1])?,([0-1])?,([0-1])?,([0-1])?,([\d]{1,2})?,'
b'([0-1])?>$' % K_CODE)
def __init__(
self,
status=Code39Status.Enabled,
check_digit_status=CheckDigitStatus.Disabled,
check_digit_output=CheckDigitOutputStatus.Disabled,
large_intercharacter_gap=LargeInterCharacterStatus.Disabled,
fixed_symbol_length=FixedSymbolLengthStatus.Disabled,
symbol_length=10,
full_ascii_set=FullASCIISetStatus.Disabled):
self.status = status
self.check_digit_status = check_digit_status
self.check_digit_output = check_digit_output
self.large_intercharacter_gap = large_intercharacter_gap
self.fixed_symbol_length = fixed_symbol_length
self.symbol_length = symbol_length
self.full_ascii_set = full_ascii_set
def is_valid(self):
return all([
isinstance(self.status, Code39Status),
isinstance(self.check_digit_status, CheckDigitStatus),
isinstance(self.check_digit_output, CheckDigitOutputStatus),
isinstance(
self.large_intercharacter_gap, LargeInterCharacterStatus),
isinstance(self.fixed_symbol_length, FixedSymbolLengthStatus),
isinstance(self.symbol_length, int),
self.symbol_length >= 1,
self.symbol_length <= 64,
isinstance(self.full_ascii_set, FullASCIISetStatus),
])
def to_config_string(self):
return super().to_config_string([
self.status.value,
self.check_digit_status.value,
self.check_digit_output.value,
self.large_intercharacter_gap.value,
self.fixed_symbol_length.value,
self.symbol_length,
self.full_ascii_set.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create Code39 object from string returned by the device
        The str_ argument should be the device response to the <K470?>
        command, for example '<K470,1,0,0,1,1,32,0>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
(
status, check_digit_status, check_digit_output,
large_intercharacter_gap, fixed_symbol_length, symbol_length,
full_ascii_set
) = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
status=Code39Status(status),
check_digit_status=CheckDigitStatus(check_digit_status),
check_digit_output=CheckDigitOutputStatus(check_digit_output),
large_intercharacter_gap=LargeInterCharacterStatus(
large_intercharacter_gap),
fixed_symbol_length=FixedSymbolLengthStatus(fixed_symbol_length),
symbol_length=int(symbol_length),
full_ascii_set=FullASCIISetStatus(full_ascii_set),
)
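# Usage sketch (illustrative, not part of the original driver): restrict Code 39
# decoding to symbols of exactly 12 characters. Defined for demonstration only
# and never called.
def _demo_code39():
    setting = Code39(
        fixed_symbol_length=FixedSymbolLengthStatus.Enabled, symbol_length=12)
    assert setting.to_config_string() == b'<K470,1,0,0,0,1,12,0>'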
# === Code 128 setting and corresponding enums ===
class Code128Status(Enum):
"""Enables/disables the Code 128 symbologies
See page 5-6 of the Microscan MS3 manual for reference
"""
Disabled = b'0'
Enabled = b'1'
class EAN128Status(Enum):
"""Enables/disables/requires the EAN-128 subset of the Code 128 symbology
    EAN-128 is commonly used in shipping applications, defining a wide variety
of application specific extensions while using a subset of the possible
symbols of the Code 128 symbology.
See page 5-7 of the Microscan MS3 manual for reference
"""
Disabled = b'0'
Enabled = b'1'
Required = b'2'
class Code128OutputFormat(Enum):
"""When EAN-128 is enabled, this setting controls the format of the output
This setting only takes effect when EAN128Status is set to Enabled or
Required.
When this setting is set to ApplicationRecord, the following settings may
be used for further configuration of the output format:
- ApplicationRecordSeparatorStatus
- ApplicationRecordSeparatorCharacter
- ApplicationRecordBrackets
- ApplicationRecordPadding
See page 5-7 of the Microscan MS3 manual for reference
"""
Standard = b'0'
ApplicationRecord = b'1'
class ApplicationRecordSeparatorStatus(Enum):
"""Used in conjunction with the Code128OutputFormat setting
See page 5-8 of the Microscan MS3 manual for reference
"""
Disabled = b'0'
Enabled = b'1'
class ApplicationRecordBrackets(Enum):
"""Used in conjunction with the Code128OutputFormat setting
See page 5-8 of the Microscan MS3 manual for reference
"""
Disabled = b'0'
Enabled = b'1'
class ApplicationRecordPadding(Enum):
"""Used in conjunction with the Code128OutputFormat setting
See page 5-8 of the Microscan MS3 manual for reference
"""
Disabled = b'0'
Enabled = b'1'
class Code128(KSetting):
"""See page 5-6 of Microscan MS3 manual for reference
Code128 is a family of high density symbologies that can encode
all ASCII characters. The three variants (Code 128-A to C) differ
in the table of characters, trading off character set with
density. 128-B allows for all 127 ASCII characters while, while
128-C is numeric only but encodes two digits in the same space as
128-B needs for one character.
Wikipedia: https://en.wikipedia.org/wiki/Code_128
Properties available in this configuration setting:
- status (enable/disable Code 128)
- fixed_symbol_length_status
- symbol_length
- ean128_status
- output_format
- application_record_separator_status
- application_record_separator_character
- application_record_brackets
- application_record_padding
"""
K_CODE = b'K474'
K_PATTERN = (
b'^<%s,([0-1])?,([0-1])?,([\d]{1,2})?,([0-2])?,([0-1])?,([0-1])?,'
b'(%s)?,([0-1])?,([0-1])?>$' % (K_CODE, ASCII_CHAR))
def __init__(
self,
status=Code128Status.Disabled,
fixed_symbol_length_status=FixedSymbolLengthStatus.Disabled,
symbol_length=10,
ean128_status=EAN128Status.Disabled,
output_format=Code128OutputFormat.Standard,
application_record_separator_status=(
ApplicationRecordSeparatorStatus.Disabled),
application_record_separator_character=b',',
application_record_brackets=ApplicationRecordBrackets.Disabled,
application_record_padding=ApplicationRecordPadding.Disabled):
self.status = status
self.fixed_symbol_length_status = fixed_symbol_length_status
self.symbol_length = symbol_length
self.ean128_status = ean128_status
self.output_format = output_format
self.application_record_separator_status = (
application_record_separator_status)
self.application_record_separator_character = (
application_record_separator_character)
self.application_record_brackets = application_record_brackets
self.application_record_padding = application_record_padding
def is_valid(self):
return all([
isinstance(self.status, Code128Status),
isinstance(
self.fixed_symbol_length_status, FixedSymbolLengthStatus),
isinstance(self.symbol_length, int),
self.symbol_length >= 1,
self.symbol_length <= 64,
isinstance(self.ean128_status, EAN128Status),
isinstance(self.output_format, Code128OutputFormat),
isinstance(
self.application_record_brackets, ApplicationRecordBrackets),
isinstance(
self.application_record_padding, ApplicationRecordPadding),
])
def to_config_string(self):
return super().to_config_string([
self.status.value,
self.fixed_symbol_length_status.value,
self.symbol_length,
self.ean128_status.value,
self.output_format.value,
self.application_record_separator_status.value,
self.application_record_separator_character,
self.application_record_brackets.value,
self.application_record_padding.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create Code128 object from string returned by the device
        The str_ argument should be the device response to the <K474?>
command, for example '<K474,1,0,10,1,0,0,,,0,0>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
(
status,
fixed_symbol_length_status,
symbol_length,
ean128_status,
output_format,
application_record_separator_status,
application_record_separator_character,
application_record_brackets,
application_record_padding
) = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
status=Code128Status(status),
fixed_symbol_length_status=FixedSymbolLengthStatus(
fixed_symbol_length_status),
symbol_length=int(symbol_length),
ean128_status=EAN128Status(ean128_status),
output_format=Code128OutputFormat(output_format),
application_record_separator_status=(
ApplicationRecordSeparatorStatus(
application_record_separator_status)),
application_record_separator_character=(
application_record_separator_character),
application_record_brackets=ApplicationRecordBrackets(
application_record_brackets),
application_record_padding=ApplicationRecordPadding(
application_record_padding)
)
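# Usage sketch (illustrative, not part of the original driver): enable Code 128
# with EAN-128 application-record output separated by '|'. Defined for
# demonstration only and never called.
def _demo_code128():
    setting = Code128(
        status=Code128Status.Enabled,
        ean128_status=EAN128Status.Enabled,
        output_format=Code128OutputFormat.ApplicationRecord,
        application_record_separator_status=(
            ApplicationRecordSeparatorStatus.Enabled),
        application_record_separator_character=b'|')
    assert setting.to_config_string() == b'<K474,1,0,10,1,1,1,|,0,0>'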
# === Interleaved 2 of 5 setting and corresponding enums ===
class Interleaved2Of5Status(Enum):
Disabled = b'0'
Enabled = b'1'
class Interleaved2Of5(KSetting):
"""See page 5-10 of Microscan MS3 manual for reference
"""
K_CODE = b'K472'
# TODO
# === Codabar setting and corresponding enums ===
class CodabarStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class Codabar(KSetting):
"""See page 5-13 of Microscan MS3 manual for reference
"""
K_CODE = b'K471'
# TODO
# === EAN/UPC setting and corresponding enums ===
class UPCStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class EANStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class SupplementalsStatus(Enum):
Disabled = b'0'
Enabled = b'1'
Required = b'2'
class SeparatorStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class UPC_EoutputAsUPC_A(Enum):
Disabled = b'0'
Enabled = b'1'
class UPC_EAN(KSetting):
"""See page 5-16 of Microscan MS3 manual for reference
"""
K_CODE = b'K473'
# K-codes for this setting can be tricky to read because the last five
# characters before the closing ">" are likely to be commas:
# - the second to last sub-setting is unused, i.e. empty
# - the last and third to last sub-settings default to ","
K_PATTERN = (
b'^<%s,([0-1])?,([0-1])?,([0-2])?,([0-1])?,(.)?,,([0-1])?,([0-1])?>$'
% K_CODE)
def __init__(
self,
upc_status=UPCStatus.Disabled,
ean_status=EANStatus.Disabled,
supplementals_status=SupplementalsStatus.Disabled,
separator_status=SeparatorStatus.Disabled,
separator_character=',',
upc_e_output_to_upc_a=UPC_EoutputAsUPC_A.Disabled, # docs wrong
undocumented_field=0):
self.upc_status = upc_status
self.ean_status = ean_status
self.supplementals_status = supplementals_status
self.separator_status = separator_status
self.separator_character = separator_character
self.upc_e_output_to_upc_a = upc_e_output_to_upc_a
self.undocumented_field = undocumented_field
def is_valid(self):
return all([
isinstance(self.upc_status, UPCStatus),
isinstance(self.ean_status, EANStatus),
isinstance(self.supplementals_status, SupplementalsStatus),
isinstance(self.separator_status, SeparatorStatus),
isinstance(self.separator_character, str),
isinstance(self.upc_e_output_to_upc_a, UPC_EoutputAsUPC_A),
])
def to_config_string(self):
return super().to_config_string([
self.upc_status.value,
self.ean_status.value,
self.supplementals_status.value,
self.separator_status.value,
self.separator_character,
            None, # accommodates the "unused" sub-setting
self.upc_e_output_to_upc_a.value,
self.undocumented_field,
])
@classmethod
def from_config_string(cls, str_):
"""Create UPC_EAN object from string returned by the device
The str_ argument should be the device response to the <K473?>
command, for example '<K473,1,0,0,0,,,,>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
(
upc_status, ean_status, supplementals_status, separator_status,
separator_character, upc_e_output_to_upc_a, undocumented_field
) = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
upc_status=UPCStatus(upc_status),
ean_status=EANStatus(ean_status),
supplementals_status=SupplementalsStatus(supplementals_status),
separator_status=SeparatorStatus(separator_status),
separator_character=separator_character,
upc_e_output_to_upc_a=UPC_EoutputAsUPC_A(upc_e_output_to_upc_a),
undocumented_field=int(undocumented_field),
)
# === Code 93 setting and corresponding enums ===
class Code93Status(Enum):
Disabled = b'0'
Enabled = b'1'
class Code93(KSetting):
"""See page 5-19 of Microscan MS3 manual for reference
"""
K_CODE = b'K475'
K_PATTERN = (
b'^<%s,([0-1])?,([0-1])?,([\d]{1,2})?>$' % K_CODE)
def __init__(
self,
status=Code93Status.Disabled,
fixed_symbol_length_status=FixedSymbolLengthStatus.Disabled,
fixed_symbol_length=10,):
self.status = status
self.fixed_symbol_length_status = fixed_symbol_length_status
self.fixed_symbol_length = fixed_symbol_length
def is_valid(self):
return all([
isinstance(self.status, Code93Status),
isinstance(
self.fixed_symbol_length_status, FixedSymbolLengthStatus),
isinstance(self.fixed_symbol_length, int),
self.fixed_symbol_length >= 1,
self.fixed_symbol_length <= 64,
])
def to_config_string(self):
return super().to_config_string([
self.status.value,
self.fixed_symbol_length_status.value,
self.fixed_symbol_length,
])
@classmethod
def from_config_string(cls, str_):
"""Create Code93 object from string returned by the device
The str_ argument should be the device response to the <K475?>
command, for example '<K475,1,0,10>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
status, fsl_status, fsl = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
status=Code93Status(status),
fixed_symbol_length_status=FixedSymbolLengthStatus(fsl_status),
fixed_symbol_length=int(fsl),
)
# === Pharmacode setting and corresponding enums ===
class PharmacodeStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class Pharmacode(KSetting):
"""See page 5-19 of Microscan MS3 manual for reference
"""
K_CODE = b'K475'
# TODO
# === Narrow Margins and Symbology ID setting and corresponding enums ===
class NarrowMarginsStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class SymbologyIDStatus(Enum):
Disabled = b'0'
Enabled = b'1'
class NarrowMarginsAndSymbologyID(KSetting):
"""See page 5-22 of Microscan MS3 manual for reference
"""
K_CODE = b'K450'
K_PATTERN = b'^<%s,([0-1])?,([0-1])?>$' % K_CODE
def __init__(
self,
narrow_margins_status=NarrowMarginsStatus.Disabled,
symbology_id_status=SymbologyIDStatus.Disabled):
self.narrow_margins_status = narrow_margins_status
self.symbology_id_status = symbology_id_status
def is_valid(self):
return all([
isinstance(self.narrow_margins_status, NarrowMarginsStatus),
isinstance(self.symbology_id_status, SymbologyIDStatus)
])
def to_config_string(self):
return super().to_config_string([
self.narrow_margins_status.value,
self.symbology_id_status.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create NarrowMargins object from string returned by the device
The str_ argument should be the device response to the <K450?>
command, for example '<K450,1,0>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
narrow_margins_status, symbology_id_status = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
narrow_margins_status=NarrowMarginsStatus(narrow_margins_status),
symbology_id_status=SymbologyIDStatus(symbology_id_status)
)
# === Background Color setting and corresponding enums ===
class Color(Enum):
White = b'0'
Black = b'1'
class BackgroundColor(KSetting):
"""See page 5-24 of Microscan MS3 manual for reference
"""
K_CODE = b'K451'
K_PATTERN = b'^<%s,([0-1])?>$' % K_CODE
def __init__(self, color=Color.White):
self.color = color
def is_valid(self):
return all([
isinstance(self.color, Color),
])
def to_config_string(self):
return super().to_config_string([
self.color.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create BackgroundColor object from string returned by the device
The str_ argument should be the device response to the <K451?>
command, for example '<K451,1>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
color, = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
color=Color(color),
)
# === Symbol Ratio Mode setting and corresponding enums ===
class SymbolRatio(Enum):
Tight = b'0'
Standard = b'1'
Aggressive = b'2'
class SymbolRatioMode(KSetting):
"""See page 5-25 of Microscan MS3 manual for reference
"""
K_CODE = b'K452'
K_PATTERN = b'^<%s,([0-2])?,([0-2])?,([0-2])?,([0-2])?>$' % K_CODE
def __init__(
self,
code39=SymbolRatio.Standard,
codabar=SymbolRatio.Standard,
interleaved_2_of_5=SymbolRatio.Standard,
code93=SymbolRatio.Standard):
self.code39 = code39
self.codabar = codabar
self.interleaved_2_of_5 = interleaved_2_of_5
self.code93 = code93
def is_valid(self):
return all([
isinstance(self.code39, SymbolRatio),
isinstance(self.codabar, SymbolRatio),
isinstance(self.interleaved_2_of_5, SymbolRatio),
isinstance(self.code93, SymbolRatio),
])
def to_config_string(self):
return super().to_config_string([
self.code39.value,
self.codabar.value,
self.interleaved_2_of_5.value,
self.code93.value,
])
@classmethod
def from_config_string(cls, str_):
"""Create SymbolRatioMode object from string returned by the device
The str_ argument should be the device response to the <K452?>
command, for example '<K452,1,1,1,2>'
"""
match = re.match(cls.K_PATTERN, str_)
try:
code39, codabar, il2of5, code93 = match.groups()
except (ValueError, AttributeError):
raise InvalidConfigString(
'Cannot decode config string %s for K-code %s' %
(str_, cls.K_CODE))
return cls(
code39=SymbolRatio(code39),
codabar=SymbolRatio(codabar),
interleaved_2_of_5=SymbolRatio(il2of5),
code93=SymbolRatio(code93),
)
"""A mapping of K-code to property name and serializer class
For example, maps the K-code 'K100' to the HostPortConnection class which can
be used to serialize or deserialize the device's host port serial protocol
settings (baud rate, parity, ...)
"""
REGISTRY = {cls.K_CODE: cls for cls in [
HostPortConnection,
HostProtocol,
HostRS422Status,
RS232AuxiliaryPort,
Preamble,
Postamble,
LRC,
InterCharacterDelay,
Multisymbol,
Trigger,
ExternalTrigger,
SerialTrigger,
StartTriggerCharacter,
StopTriggerCharacter,
EndReadCycle,
DecodesBeforeOutput,
ScanSpeed,
ScannerSetup,
SymbolDetect,
MaximumElement,
ScanWidthEnhance,
LaserSetup,
Code39,
Code128,
# Interleaved2Of5,
# Codabar,
UPC_EAN,
Code93,
# Pharmacode,
NarrowMarginsAndSymbologyID,
BackgroundColor,
SymbolRatioMode,
]}
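# Illustrative sketch (not part of the original module): REGISTRY lets you look up
# the serializer class for a K-code and decode a raw device response, e.g.
#   serializer = REGISTRY[b'K450']                        # -> NarrowMarginsAndSymbologyID
#   setting = serializer.from_config_string(b'<K450,1,0>')
# The byte string above is only an example value, not recorded device output.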
class MicroscanConfiguration:
"""Container for configuration settings for a barcode reader device
Calling the constructor will initialize a configuration object with all
available configuration settings, each set to the default value as
specified by the device documentation.
Use MicroscanConfiguration.from_config_strings() to create a configuration
object from data recorded from a device in response to the `<K?>` command
(or any other source of configuration data in string format).
"""
    _K_CODE_PATTERN = re.compile(rb'<(K\d+)(.*)>')
def __init__(self):
self.load_defaults()
def load_defaults(self):
"""Loads documented default settings into the configuration object
If called after otherwise setting configuration settings, these will be
overwritten with defaults by this method.
"""
for k_code, serializer in REGISTRY.items():
prop_name = self._clsname_to_propname(serializer.__name__)
setattr(self, prop_name, serializer())
@classmethod
def from_config_strings(cls, list_of_strings, defaults=False):
"""Create configuration object from a list of configuration strings
Expects a list of byte strings, each representing a configuration
setting as <K...> string.
Set the `defaults` argument `True` to additionally load default
settings for all available settings. This is useful, when your list of
configuration strings does not cover the full list of available
settings.
"""
# initialize instance with defaults
instance = cls()
if defaults:
instance.load_defaults()
for line in list_of_strings:
match = cls._K_CODE_PATTERN.match(line)
try:
k_code = match.group(1)
except (IndexError, AttributeError):
# line did not start with K-code
continue
try:
serializer = REGISTRY[k_code]
prop_name = instance._clsname_to_propname(serializer.__name__)
setattr(
instance, prop_name, serializer.from_config_string(line))
except KeyError:
logger.info(
                    'Cannot find serializer class for K-code %s' % k_code)
return instance
def _clsname_to_propname(self, clsname):
"""camelCase-to-under_score string conversion
Used to convert serializer class names to property names when
dynamically setting or getting configuration properties.
"""
return re.sub(r'([a-z])([A-Z])', r'\1_\2', clsname).lower()
def to_config_string(self, separator=b''):
"""Serialized the object into a single string for sending to device
Use the `separator` argument to specify any bytes that should appear
between individual configuration settings. To output one setting per
line, for example, specify `separator=b'\\n'`
"""
props = [
getattr(self, self._clsname_to_propname(prop.__name__), None)
for prop in REGISTRY.values()
]
return separator.join([
prop.to_config_string() for prop in props if prop])
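# Minimal usage sketch (added for illustration; the config strings below are made-up examples):
#   raw = [b'<K450,1,0>', b'<K451,1>']                    # e.g. recorded from a <K?> dump
#   cfg = MicroscanConfiguration.from_config_strings(raw, defaults=True)
#   cfg.background_color.color                            # -> Color.Black
#   cfg.to_config_string(separator=b'\n')                 # serialize everything back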
``` |
{
"source": "JoneNaZi/Interfaceplatform",
"score": 2
} |
#### File: Interfaceplatform/app/__init__.py
```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from flask_login import LoginManager
from config import lod
from apscheduler.schedulers.background import BackgroundScheduler, BlockingScheduler
from config import jobstores, executors
from flask_admin import Admin
from flask_moment import Moment
from flask_restplus import Api, reqparse
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
pagination_arguments = reqparse.RequestParser()
pagination_arguments.add_argument('page', type=int, required=False, default=1, help='Page number')
api = Api(version='1.0', title='系统api',
description='系统对外api', doc='/api', license_url="/api")
app = Flask(__name__)
conf = lod()
api.init_app(app)
loginManager = LoginManager(app)
app.config.from_object(conf)
bootstrap = Bootstrap(app)
loginManager.session_protection = "strong"
loginManager.login_view = 'home.login'
loginManager.login_message = u'FXTest测试平台必须登录,请登录您的FXTest平台账号!'
db = SQLAlchemy(app)
moment = Moment(app)
admin = Admin(app, name=u'FXTest系统管理后台')
from app import views, models, url, apiadmin
def listerner(event):
    if event.exception:
        print('Scheduled job raised an exception!')
    else:
        print('Scheduled job ran normally...')
sched = BackgroundScheduler(jobstores=jobstores, executors=executors)
sched.add_listener(listerner, EVENT_JOB_ERROR | EVENT_JOB_EXECUTED)
try:
sched.start()
except Exception as e:
print(e)
```
#### File: app/mock/views.py
```python
from flask import Blueprint
from flask import redirect, request, \
render_template, url_for, flash
from app.models import *
from flask.views import MethodView
from flask_login import current_user, login_required
from app import loginManager
from common.mockservermeth import get_token_data
from error_message import MessageEnum
from common.systemlog import logger
@loginManager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
mock = Blueprint('mock', __name__)
class EditMockServerView(MethodView):  # edit an existing mock service
@login_required
def get(self, id):
mock = Mockserver.query.filter_by(id=id, status=False).first()
if not mock:
flash(MessageEnum.use_select_edit.value[1])
return redirect(url_for('home.mockserver'))
return render_template('edit/editmock.html', mock=mock)
def post(self, id):
mock = Mockserver.query.filter_by(id=id, status=False).first()
if not mock:
flash(MessageEnum.mock_check_again.value[1])
return redirect(url_for('home.mockserver'))
name = request.form['name']
desc = request.form['desc']
path = request.form['path']
methods = request.form['meth']
types = request.form['type']
headers = request.form['headers']
parm = request.form['parm']
back = request.form['back']
is_check = request.form['checkout']
is_headers = request.form['checkouheaders']
run_is = request.form['kaiqi']
if is_check == u'是':
is_check = True
else:
is_check = False
if is_headers == u'是':
is_headers = True
else:
is_headers = False
if run_is == u'是':
is_start = True
else:
is_start = False
mock.make_uers = current_user.id
mock.path = path
mock.methods = methods
mock.headers = headers
mock.description = desc
mock.fanhui = back
mock.name = name
mock.params = parm
mock.rebacktype = types
mock.status = is_start
mock.ischeck = is_check
mock.is_headers = is_headers
mock.update_time = datetime.datetime.now()
try:
db.session.commit()
flash(MessageEnum.successs.value[1])
return redirect(url_for('home.mockserver'))
except Exception as e:
logger.exception(e)
db.session.rollback()
flash(MessageEnum.mock_edit_fail.value[1])
return render_template('edit/editmock.html', mock=mock)
class MakeMockserverView(MethodView):  # serve a mock endpoint
    def get(self, path):  # GET handler
        data = get_token_data(path)
        return data
    def post(self, path):  # POST handler
        data = get_token_data(path)
        return data
    def put(self, path):  # PUT handler
        data = get_token_data(path)
        return data
    def delete(self, path):  # DELETE handler
        data = get_token_data(path)
        return data
class StartMockView(MethodView):  # enable a mock service
@login_required
def get(self, id):
next = request.headers.get('Referer')
start = Mockserver.query.filter_by(id=id, status=False).first()
if start:
start.status = True
try:
db.session.commit()
flash(MessageEnum.mock_start_success.value[1])
return redirect(next or url_for('home.mockserver'))
except Exception as e:
logger.exception(e)
flash(MessageEnum.mock_server_start_fail.value[1])
return redirect(next or url_for('home.mockserver'))
flash(MessageEnum.mock_start_error.value[1])
        return redirect(next or url_for('home.mockserver'))
class CloseMockView(MethodView):  # disable a mock service
@login_required
def get(self, id):
next = request.headers.get('Referer')
start = Mockserver.query.filter_by(id=id).first()
if start:
start.status = False
try:
db.session.commit()
flash(MessageEnum.mock_close_success.value[1])
return redirect(next or url_for('home.mockserver'))
except Exception as e:
logger.exception(e)
flash(MessageEnum.mock_server_close_fail.value[1])
return redirect(next or url_for('home.mockserver'))
flash(MessageEnum.mock_stop_fail.value[1])
        return redirect(next or url_for('home.mockserver'))
```
#### File: app/users/views.py
```python
from flask import Blueprint
user = Blueprint('user', __name__)
from flask import redirect, request, \
session, url_for, flash
from app.models import *
from flask.views import View, MethodView
from common.decorators import chckuserpermisson
from flask_login import login_required
from config import OneAdminCount
from error_message import MessageEnum
from flask_mail import Message, Mail
from common.jsontools import reponse
from common.systemlog import logger
class SetAdminView(View):  # grant project-admin rights
methods = ['GET', "POST"]
@login_required
def dispatch_request(self):
if chckuserpermisson() is False:
return reponse(
code= MessageEnum.permiss_is_ness.value[0],message= MessageEnum.permiss_is_ness.value[1], data='')
projec = request.get_json()
try:
username = projec['username']
por = projec['url']
if por == '':
return reponse(
code= MessageEnum.select_project_not.value[0], message= MessageEnum.select_project_not.value[1],
data= '')
pan_user = User.query.filter_by(username=username).first()
if not pan_user:
return reponse(code= MessageEnum.login_user_not_exict_message.value[0],
message= MessageEnum.login_user_not_exict_message.value[1], data= '')
if pan_user.is_sper is True:
return reponse(code= MessageEnum.super_admin_not_set_project.value[0],
message= MessageEnum.super_admin_not_set_project.value[1], data= '')
pand_por = Project.query.filter_by(project_name=por).first()
if not pand_por:
return reponse(code= MessageEnum.set_project_bot_exict.value[0],
message= MessageEnum.set_project_bot_exict.value[1], data= '')
pro_per = Quanxian.query.filter_by(project=pand_por.id).all()
oneadmin = []
for i in pro_per:
if i.rose == 2:
oneadmin.append(i.user.all())
if [pan_user] in oneadmin:
return reponse(code= MessageEnum.set_is_admin.value[0], message= MessageEnum.set_is_admin.value[1])
if (len(oneadmin)) > OneAdminCount:
return reponse(
code= MessageEnum.project_admin_many.value[0], message= MessageEnum.project_admin_many.value[1])
for roses in pan_user.quanxians:
if roses.project == pand_por.id:
roses.rose = 2
try:
db.session.commit()
return reponse(code= MessageEnum.successs.value[0], message= MessageEnum.successs.value[1])
except Exception as e:
logger.exception(e)
db.session.rollback()
return reponse(
code= MessageEnum.set_fail.value[0], message= MessageEnum.set_fail.value[1], data= '')
except Exception as e:
logger.exception(e)
return reponse(code= MessageEnum.set_project_admin_exception.value[0],
message= MessageEnum.set_project_admin_exception.value[1] + '原因是:%s' % e, data= '')
class CancelAdminView(View):  # revoke project-admin rights
methods = ['GET', "POST"]
@login_required
def dispatch_request(self, id):
if chckuserpermisson() is False:
flash(MessageEnum.permiss_is_ness.value[1])
return redirect(request.headers.get('Referer'))
new_ad = User.query.filter_by(id=id, status=False).first()
if not new_ad:
flash(MessageEnum.login_user_not_exict_message.value[1])
return redirect(url_for('home.adminuser'))
        user = User.query.filter_by(username=session.get('username')).first()
        if new_ad == user:
flash(MessageEnum.admin_cannot_use.value[1])
return redirect(url_for('home.adminuser'))
return redirect(url_for('home.adminuser'))
class FreezeUserView(View):  # freeze a user account
methods = ['GET', "POST"]
@login_required
def dispatch_request(self, id):
if chckuserpermisson() is False:
flash(MessageEnum.permiss_is_ness.value[1])
return redirect(request.headers.get('Referer'))
user = User.query.filter_by(username=session.get('username')).first()
if user.is_sper != 1:
flash(MessageEnum.permiss_is_ness.value[1])
return redirect(request.headers.get('Referer'))
new_ad = User.query.filter_by(id=id).first()
if new_ad.status is True:
flash(MessageEnum.free_is_again.value[1])
return redirect(url_for('home.adminuser'))
if new_ad == user:
flash(MessageEnum.ower_cannot_free_me.value[1])
return redirect(url_for('home.adminuser'))
new_ad.status = True
try:
db.session.commit()
flash(MessageEnum.free_is_success.value[1])
return redirect(url_for('home.adminuser'))
except Exception as e:
logger.exception(e)
db.session.rollback()
flash(MessageEnum.free_user_error.value[1])
return redirect(url_for('home.adminuser'))
class UnFreezeUserView(View):  # unfreeze a user account
methods = ['GET', "POST"]
@login_required
def dispatch_request(self, id):
if chckuserpermisson() == False:
flash(MessageEnum.permiss_is_ness.value[1])
return redirect(request.headers.get('Referer'))
user = User.query.filter_by(username=session.get('username')).first()
new_ad = User.query.filter_by(id=id).first()
if new_ad.status is False:
flash(MessageEnum.user_is_not_free.value[1])
return redirect(url_for('home.adminuser'))
if new_ad != user:
new_ad.status = False
try:
db.session.commit()
flash(MessageEnum.user_is_un_free.value[1])
return redirect(url_for('home.adminuser'))
except Exception as e:
print(e)
db.session.rollback()
flash(MessageEnum.user_is_unfree_success.value[1])
return redirect(url_for('home.adminuser'))
flash(MessageEnum.ower_not_free_me.value[1])
return redirect(url_for('home.adminuser'))
class ActivationUserview(View):
methods = ['GET', "POST"]
@login_required
def dispatch_request(self):
if chckuserpermisson() is False:
return reponse(
code= MessageEnum.permiss_is_ness.value[0], message= MessageEnum.permiss_is_ness.value[1], data= '')
userjobnum = request.get_json()
try:
id = int(userjobnum['id'])
job_num = int(userjobnum['jobnum'])
except Exception as e:
logger.exception(e)
return reponse(code= MessageEnum.activ_is_int.value[0], message= MessageEnum.activ_is_int.value[1])
user = User.query.filter_by(id=id, status=False).first()
if not user:
return reponse(code= MessageEnum.login_user_not_exict_message.value[0],
message= MessageEnum.login_user_not_exict_message.value[1])
try:
user_job = User.query.filter_by(jobnum=job_num).first()
if user_job:
return reponse(
code= MessageEnum.activi_user_jobnum.value[0], message= MessageEnum.activi_user_jobnum.value[1])
except Exception as e:
logger.exception(e)
pass
if (user.jobnum is None or user.jobnum == "None"):
user.jobnum = job_num
db.session.add(user)
db.session.commit()
return reponse(code= MessageEnum.successs.value[0], message= MessageEnum.successs.value[1], data= '')
return reponse(code= MessageEnum.activi_user_jobnum_is.value[0],
message= MessageEnum.activi_user_jobnum_is.value[1])
class ResetPasswordView(View):  # reset a user's password
methods = ['GET', "POST"]
@login_required
def dispatch_request(self, id):
if chckuserpermisson() is False:
flash(MessageEnum.permiss_is_ness.value[1])
return redirect(request.headers.get('Referer'))
user = User.query.filter_by(username=session.get('username')).first()
new_ad = User.query.filter_by(id=id).first()
if new_ad != user:
if user.is_sper == 1:
new_ad.set_password('<PASSWORD>')
try:
db.session.commit()
msg = Message(u"密码修改通知", sender=user.email, recipients=user.email)
msg.body = u"密码修改成功, 你的用户名:%s,你的密码是:%s" % (user.username, "<PASSWORD>")
msg.html = '<a href="http://127.0.0.1:5000/login">去登录</a>'
mail = Mail()
mail.send(msg)
flash(MessageEnum.reset_success_message.value[1])
return redirect(url_for('home.adminuser'))
except Exception as e:
logger.exception(e)
db.session.rollback()
flash(MessageEnum.user_reset_error.value[1])
return redirect(url_for('home.adminuser'))
flash(MessageEnum.user_reset_isnot_amin.value[1])
return redirect(url_for('home.adminuser'))
flash(MessageEnum.user_reset_owner.value[1])
return redirect(url_for('home.adminuser'))
class ChangePassword(MethodView):
@login_required
def post(self):
password = request.data.decode('utf-8')
user = User.query.filter_by(username=session.get('username')).first()
user.set_password(password)
try:
db.session.commit()
return reponse(code= MessageEnum.change_password_success.value[0],
data= MessageEnum.change_password_success.value[1])
except Exception as e:
logger.exception(e)
db.session.rollback()
return reponse(code= MessageEnum.change_password_error.value[0],
data= MessageEnum.change_password_error.value[1])
```
#### File: Interfaceplatform/common/jsontools.py
```python
from flask import jsonify
from typing import Union
def reponse(*, code=1, data: Union[list, dict, str] = None, message="message"):
return jsonify({
'code': code,
'message': message,
'data': data,
}
)
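# Typical call from a view function (illustrative only):
#   return reponse(code=0, message='ok', data={'id': 1})
# jsonify() requires an active Flask application/request context.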
```
#### File: Interfaceplatform/common/merge.py
```python
def hebingDict(dict_list: list):  # merge a list of dict-literal strings into a single dict
dictMerged = {}
for item in dict_list:
try:
dictMerged.update(eval(item))
except Exception as e:
print(e)
return dictMerged
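# Illustrative example (not in the original source):
#   hebingDict(["{'a': 1}", "{'b': 2}"])  -> {'a': 1, 'b': 2}
# Items that fail eval() are skipped, with the exception printed.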
```
#### File: Interfaceplatform/common/mockservermeth.py
```python
from flask import request, abort, jsonify, make_response
from app.models import *
from common.packagedict import comp_dict, dict_par
import json
from error_message import MessageEnum
from common.jsontools import reponse as jsonreponse
from common.systemlog import logger
def get_token_data(path):
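    # Flow summary (comments added for clarity; not in the original source):
    # 1. look up an enabled Mockserver record for this path (404 if none),
    # 2. reject requests whose HTTP method does not match the record,
    # 3. a 'token' header equal to 'Fetext_token_system' short-circuits to a
    #    parameter check and a JSON response,
    # 4. otherwise optionally validate headers (is_headers) and params (ischeck)
    #    before returning the stored body as JSON or XML.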
huoqupath = Mockserver.query.filter_by(path=path, status=True).first()
if not huoqupath:
abort(404)
heders = request.headers
method = request.method
if method.lower() != huoqupath.methods:
return jsonreponse(code=MessageEnum.request_method.value[0],
message=MessageEnum.request_method.value[1])
try:
token = heders['token']
if token == 'Fetext_token_system':
paerm = request.values.to_dict()
if dict_par(paerm, huoqupath.params) == True:
if huoqupath.rebacktype == 'json':
try:
json_fan = json.dumps(huoqupath.fanhui)
return jsonreponse(code=MessageEnum.successs.value[0],
message=MessageEnum.successs.value[1],
data=json_fan)
except Exception as e:
logger.exception(e)
return jsonreponse(code=MessageEnum.resquest_return_not_json.value[0],
message=MessageEnum.resquest_return_not_json.value[1])
else:
return jsonreponse(code=MessageEnum.request_method_not_supprot.value[0],
message=MessageEnum.request_method_not_supprot.value[1])
else:
return jsonreponse(code=MessageEnum.method_parame_not_right.value[0],
message=MessageEnum.method_parame_not_right.value[1])
except Exception as e:
logger.exception(e)
if huoqupath.is_headers == True:
if comp_dict(heders, huoqupath.headers) == True:
if huoqupath.ischeck == True:
paerm = request.values.to_dict()
if dict_par(paerm, huoqupath.params) == True:
if huoqupath.rebacktype == 'json':
try:
json_return = json.dumps(huoqupath.fanhui)
return jsonreponse(code=MessageEnum.successs.value[0],
message=MessageEnum.successs.value[1],
data=json_return)
except Exception as e:
logger.exception(e)
return jsonreponse(code=MessageEnum.resquest_return_not_json.value[0],
message=MessageEnum.resquest_return_not_json.value[1])
elif huoqupath.rebacktype == 'xml':
response = make_response(huoqupath.fanhui)
response.content_type = 'application/xml'
return response
else:
return jsonreponse(code=MessageEnum.request_method_not_supprot.value[0],
message=MessageEnum.request_method_not_supprot.value[1])
else:
return jsonreponse(code=MessageEnum.method_parame_not_right.value[0],
message=MessageEnum.method_parame_not_right.value[1])
else:
if huoqupath.rebacktype == 'json':
try:
json_return = json.dumps(huoqupath.fanhui)
return jsonreponse(code=MessageEnum.successs.value[0],
message=MessageEnum.successs.value[1],
data=json_return)
except Exception as e:
logger.exception(e)
return jsonreponse(code=MessageEnum.resquest_return_not_json.value[0],
message=MessageEnum.resquest_return_not_json.value[1])
elif huoqupath.rebacktype == 'xml':
response = make_response(huoqupath.fanhui)
response.content_type = 'application/xml'
return response
return jsonreponse(code=MessageEnum.request_method_not_supprot.value[0],
message=MessageEnum.request_method_not_supprot.value[1])
return jsonreponse(code=MessageEnum.request_scre.value[0],
message=MessageEnum.request_scre.value[1])
else:
if huoqupath.ischeck == True:
paerm = request.values.to_dict()
if dict_par(paerm, huoqupath.params) == True:
if huoqupath.rebacktype == 'json':
try:
json_return = json.dumps(huoqupath.fanhui)
return jsonreponse(code=MessageEnum.successs.value[0],
message=MessageEnum.successs.value[1],
data=json_return)
except Exception as e:
logger.exception(e)
return jsonreponse(code=MessageEnum.resquest_return_not_json.value[0],
message=MessageEnum.resquest_return_not_json.value[1])
elif huoqupath.rebacktype == 'xml':
response = make_response(huoqupath.fanhui)
response.content_type = 'application/xml'
return response
else:
return jsonreponse(code=MessageEnum.request_method_not_supprot.value[0],
message=MessageEnum.request_method_not_supprot.value[1])
return jsonreponse(code=MessageEnum.method_parame_not_right.value[0],
message=MessageEnum.method_parame_not_right.value[1])
if huoqupath.rebacktype == 'json':
try:
json_return = json.dumps(huoqupath.fanhui)
return jsonreponse(code=MessageEnum.successs.value[0],
message=MessageEnum.successs.value[1],
data=json_return)
except Exception as e:
logger.exception(e)
return jsonreponse(code=MessageEnum.resquest_return_not_json.value[0],
message=MessageEnum.resquest_return_not_json.value[1])
elif huoqupath.rebacktype == 'xml':
response = make_response(huoqupath.fanhui)
response.content_type = 'application/xml'
return response
else:
return jsonreponse(code=MessageEnum.request_method_not_supprot.value[0],
message=MessageEnum.request_method_not_supprot.value[1])
```
#### File: Interfaceplatform/common/opearexcel.py
```python
import xlwt
from xlwt import *
def yangshi1():
style = XFStyle()
fnt = Font()
fnt.name = u'微软雅黑'
fnt.bold = True
style.font = fnt
alignment = xlwt.Alignment()
alignment.horz = xlwt.Alignment.HORZ_CENTER
alignment.vert = xlwt.Alignment.VERT_CENTER
    style.alignment = alignment  # center the text for this style
    style.font.height = 350  # set the font size
return style
def yangshi2():
style1 = XFStyle()
alignment = xlwt.Alignment()
alignment.horz = xlwt.Alignment.HORZ_CENTER
alignment.vert = xlwt.Alignment.VERT_CENTER
    style1.alignment = alignment  # center the text for this style
    style1.font.height = 330  # set the font size
return style1
def create_interface(filename: str, interfacelist: list):
try:
file = Workbook(filename)
table = file.add_sheet('接口', cell_overwrite_ok=True)
for i in range(1, 9):
table.col(i).width = 300 * 25
style = yangshi1()
table.write(0, 0, '编号', style=style)
table.write(0, 1, '项目名字', style=style)
table.write(0, 2, '模块名字', style=style)
table.write(0, 3, '接口名字', style=style)
table.write(0, 4, '接口url', style=style)
table.write(0, 5, '接口协议', style=style)
table.write(0, 6, '请求头', style=style)
table.write(0, 7, '请求方式', style=style)
# table.write(0, 8, '请求示例', style=style)
# table.write(0, 9, '请求返回示例', style=style)
table.write(0, 8, '添加人', style=style)
stylen = yangshi2()
for i in range(len(interfacelist)):
table.write(i + 1, 0, str(interfacelist[i].id), style=stylen)
table.write(i + 1, 1, str(interfacelist[i].projects), style=stylen)
table.write(i + 1, 2, str(interfacelist[i].models), style=stylen)
table.write(i + 1, 3, interfacelist[i].Interface_name, style=stylen)
table.write(i + 1, 4, interfacelist[i].Interface_url, style=stylen)
table.write(i + 1, 5, interfacelist[i].interfacetype, style=stylen)
table.write(i + 1, 6, interfacelist[i].Interface_headers, style=stylen)
table.write(i + 1, 7, interfacelist[i].Interface_meth, style=stylen)
# table.write(i + 1, 8, interfacelist[i].Interface_par, style=stylen)
# table.write(i + 1, 9, interfacelist[i].Interface_back, style=stylen)
table.write(i + 1, 8, str(interfacelist[i].users), style=stylen)
i += 1
file.save(filename)
return {'code': 0, 'message': filename}
except Exception as e:
return {'code': 1, 'error': e}
def create_interface_case(filename: str, caselist: list):
try:
file = Workbook(filename)
table = file.add_sheet('接口测试用例', cell_overwrite_ok=True)
style = yangshi1()
for i in range(1, 17):
table.col(i).width = 300 * 30
table.write(0, 0, '编号', style=style)
table.write(0, 1, '项目名字', style=style)
table.write(0, 2, '模块名字', style=style)
table.write(0, 3, '接口名字', style=style)
table.write(0, 4, '接口url', style=style)
table.write(0, 5, '接口协议', style=style)
table.write(0, 6, '请求头', style=style)
table.write(0, 7, '请求方式', style=style)
table.write(0, 8, '参数', style=style)
table.write(0, 9, '断言', style=style)
table.write(0, 10, '是否保存测试结果', style=style)
table.write(0, 11, '是否依赖', style=style)
table.write(0, 12, '依赖接口', style=style)
table.write(0, 13, '依赖接口的参数', style=style)
table.write(0, 14, '是否查询数据库', style=style)
table.write(0, 15, '数据库查询语句', style=style)
table.write(0, 16, '数据库比较字段', style=style)
table.write(0, 17, '添加人', style=style)
stylen = yangshi2()
for i in range(len(caselist)):
if caselist[i].pid is None:
shifao = '否'
else:
shifao = '是'
if caselist[i].pid is None:
shi_pid = ''
else:
shi_pid = caselist[i].pid
table.write(i + 1, 0, caselist[i].id, style=stylen)
table.write(i + 1, 1, str(caselist[i].projects), style=stylen)
table.write(i + 1, 2, str(caselist[i].models), style=stylen)
table.write(i + 1, 3, caselist[i].Interface_name, style=stylen)
table.write(i + 1, 4, caselist[i].Interface_url, style=stylen)
table.write(i + 1, 5, caselist[i].interface_type, style=stylen)
table.write(i + 1, 6, caselist[i].Interface_headers, style=stylen)
table.write(i + 1, 7, caselist[i].Interface_meth, style=stylen)
table.write(i + 1, 8, caselist[i].Interface_pase, style=stylen)
table.write(i + 1, 9, caselist[i].Interface_assert, style=stylen)
table.write(i + 1, 10, caselist[i].saveresult, style=stylen)
table.write(i + 1, 11, shifao, style=stylen)
table.write(i + 1, 12, shi_pid, style=stylen)
table.write(i + 1, 13, caselist[i].getattr_p, style=stylen)
table.write(i + 1, 14, caselist[i].is_database, style=stylen)
table.write(i + 1, 15, caselist[i].chaxunshujuku, style=stylen)
table.write(i + 1, 16, caselist[i].databaseziduan, style=stylen)
table.write(i + 1, 17, str(caselist[i].users), style=stylen)
i += 1
file.save(filename)
return {'code': 0, 'message': filename}
except Exception as e:
return {'code': 1, 'error': e}
```
#### File: Interfaceplatform/common/packagedict.py
```python
def comp_dict(dict1, dict2):
try:
for k, v in dict1.items():
for k2, v2 in dict2.items():
if k == k2 and v == v2:
return True
else:
return False
except:
return False
'''
Assertion helper: the expected-value string is split on '&' into key=value pairs
and each value is compared (as a string) against the response JSON.
'''
def assert_in(asserqiwang, fanhuijson):
if len(asserqiwang.split('=')) > 1:
try:
data = asserqiwang.split('&')
result = dict([(item.split('=')) for item in data])
value1 = ([(str(fanhuijson[key])) for key in result.keys()])
value2 = ([(str(value)) for value in result.values()])
if value1 == value2:
return 'pass'
else:
return 'fail'
except Exception as e:
return '异常!原因:%s' % e
else:
return '预期不存在'
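# Example (added for illustration):
#   assert_in('code=0&message=ok', {'code': 0, 'message': 'ok'})  -> 'pass'
#   assert_in('code=1', {'code': 0})                              -> 'fail'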
def dict_par(doct1, dict2):
h = []
l = []
for k, v in doct1.items():
h.append(k)
for key, value in dict2.items():
l.append(key)
if h == l:
return True
else:
return False
``` |
{
"source": "jonepatr/affordance-master-thesis",
"score": 3
} |
#### File: affordance-master-thesis/affordance/arduino.py
```python
import threading
import serial
import time
import distutils.util
import math
from numpy import interp
import statistics
import config
class Arduino(threading.Thread):
"""docstring for Arduino"""
daemon = True
previously_sent = None
actioations_per_second = 15
time_between_ems = 30
def __init__(self):
super(Arduino, self).__init__()
self.channels = {
'ems1': {
'min_max': [20, 100],
'type': 'digipot',
'prefix': 1000,
'last_value': 0,
'ems_on_off': False,
'name': 'A1',
'color': 'green',
'serial_open': 'a',
'serial_close': 'b'
},
'ems2': {
'min_max': [20, 100],
'type': 'digipot',
'prefix': 2000,
'last_value': 0,
'ems_on_off': False,
'name': 'B1',
'color': 'red',
'serial_open': 'f',
'serial_close': 'g'
},
'ems3': {
'min_max': [20, 100],
'type': 'digipot',
'prefix': 3000,
'last_value': 0,
'ems_on_off': False,
'name': 'A2',
'color': 'blue',
'serial_open': 'c',
'serial_close': 'e'
},
'ems4': {
'min_max': [20, 100],
'type': 'digipot',
'prefix': 4000,
'last_value': 0,
'ems_on_off': False,
'name': 'B2',
'color': 'blue',
'serial_open': 'h',
'serial_close': 'i'
}
}
# 'ems3': {
# 'min_max': [20, 100],
# 'type': 'digipot',
# 'prefix': 3000,
# 'last_value': 0,
# 'ems_on_off': False,
# 'name': 'EMS3',
# 'color': 'violet',
# 'serial_open': 'b',
# 'serial_close': 'n'
# },
#'ems3': {'min_max': [20, 100], 'type': 'digipot', 'prefix': 3000, 'last_value': 0, 'ems_on_off': False, 'name': 'EMS3', 'color': 'orange'}
#'relay1': {'type': 'relay', 'state': False, 'serial_open': 'o', 'serial_close': 'c'}
self.subscribers = []
self.stop = True
self.last_sent_ems = 0
self.list_with_ems_strength = {}
self.stop_gesture = False
self.study_no_ems = False
self.arduino_value_callback = None
try:
self.ser = serial.Serial(port=config.EMS_SERIAL, baudrate=19200, timeout=0.05, writeTimeout=0)
self.no_serial = False
except:
self.no_serial = True
try:
self.ser_capacitive = serial.Serial(port=config.CAPACITIVE_SERIAL, baudrate=19200, timeout=0, writeTimeout=0)
self.no_serial_cap = False
except:
self.no_serial_cap = True
print("failed getting cap arduino...")
def stop_all(self):
self.send_value("s")
def open_all_channels(self):
for channel in self.channels.keys():
self.change_relay_state(channel, True)
def close_all_channels(self):
for channel in self.channels.keys():
self.change_relay_state(channel, False)
def perform_gesture(self, gesture, duration, ignore_channels=False):
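        # Added explanatory comments (not in the original source):
        # `gesture` maps a channel name to a list of 0-100 intensity samples.
        # Each list is downsampled to roughly `actioations_per_second` values over
        # `duration` ms, the relays for the involved channels are opened, the
        # samples are streamed via send_ems_strength(), and finally everything is
        # zeroed and the relays closed (unless ignore_channels is set).
        # Note: both an interp()-mapped value and the raw value are appended per
        # sample; the raw value is the one that ends up being sent.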
#self.stop_gesture = False
sampled_gestures = []
for ges, val in gesture.items():
new_value = val[::int(math.ceil(len(val)/self.actioations_per_second/(duration/1000)))]
sampled_gestures.append([new_value, ges])
samples = dict()
channels = {}
for index, sampled_gesture in enumerate(sampled_gestures):
for idx, cord in enumerate(sampled_gesture[0]):
if not idx in samples:
samples[idx] = []
channels[sampled_gesture[1]] = True
samples[idx].append([int(interp(cord, [0, 100], self.channels[sampled_gesture[1]]['min_max'])), sampled_gesture[1]])
samples[idx].append([int(cord), sampled_gesture[1]])
for channel in channels:
self.change_relay_state(channel, True)
for index, val in samples.items():
final_list = {}
for thing in val:
final_list[thing[1]] = thing[0]
if not self.stop_gesture:
self.send_ems_strength(final_list)
time.sleep(1/self.actioations_per_second)
else:
break
if not ignore_channels:
stop_ems = {}
for channel in self.channels.keys():
stop_ems[channel] = 0
self.send_ems_strength(stop_ems, True)
for channel in channels:
self.change_relay_state(channel, False)
self.stop_all()
def change_relay_state(self, channel, state):
if state:
self.send_value(self.channels[channel]['serial_open'])
else:
self.send_value(self.channels[channel]['serial_close'])
self.channels[channel]['state'] = state
def calibration(self, message):
if message[1] == "reset":
self.send_value("r")
if message[1] == "ems_min_max":
if message[2] in self.channels:
self.channels[message[2]]['min_max'] = [int(message[3]), int(message[4])]
if message[1] == "ems_on_off":
self.change_relay_state(message[2], distutils.util.strtobool(message[3]))
if message[1] == "ems_value":
if message[3] and message[3].isdigit() and int(message[3]) >= 0 and int(message[3]) <= 100:
self.send_ems_strength({message[2]: int(message[3])})
else:
raise ValueError
if message[1] == "relay":
self.change_relay_state(message[2], distutils.util.strtobool(message[3]))
def send_ems_strength(self, values, force=False):
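        # Added explanatory comments (not in the original source):
        # Values are clamped to each channel's calibrated min/max, averaged over
        # calls arriving within `time_between_ems` ms, inverted (the wire value is
        # 100 - strength) and offset by the channel prefix, then framed as e.g.
        # "$1080%$2100%" for the serial link.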
final_list = []
too_short = False
if time.time() - self.last_sent_ems < self.time_between_ems/1000 and force is not True:
too_short = True
for channel, val in sorted(values.items()):
if channel in self.channels:
new_val = int(val)
if new_val < self.channels[channel]['min_max'][0] and new_val < self.channels[channel]['min_max'][1]:
new_val = self.channels[channel]['min_max'][0]
if new_val > self.channels[channel]['min_max'][1] and new_val > self.channels[channel]['min_max'][0]:
new_val = self.channels[channel]['min_max'][1]
if not channel in self.list_with_ems_strength:
self.list_with_ems_strength[channel] = []
self.list_with_ems_strength[channel].append(int(new_val))
if not too_short:
final_list.append(str(self.channels[channel]['prefix'] + round(100 - statistics.mean(self.list_with_ems_strength[channel]))))
#final_list.append(str((self.channels[channel]['prefix']) + int(interp(val, [0,100], self.channels[channel]['min_max'][::-1]))))
else:
raise IndexError
if not too_short:
#print(final_list)
self.send_value("$" + "%$".join(final_list) + "%")
self.list_with_ems_strength = {}
self.last_sent_ems = time.time()
def send_value(self, value):
if value != self.previously_sent and not self.no_serial and not self.study_no_ems:
self.ser.write(bytes(value, "UTF-8"))
self.previously_sent = value
print(value)
def subscribe(self, callback):
self.subscribers.append(callback)
def run(self):
"""docstring for run"""
while True:
if not self.no_serial:
#print(self.ser.readline(1))
data = self.ser.readline(1024)
if data:
if self.arduino_value_callback != None:
self.arduino_value_callback(data.decode("utf-8").replace('\n', '').replace('\r', ''))
if not self.no_serial_cap:
data = self.ser_capacitive.readline(1)
if data and data != bytes("\n", "utf-8") and data != bytes("\r", "utf-8") and not self.stop:
for subscriber in self.subscribers:
subscriber(data.decode("utf-8").replace('\n', '').replace('\r', ''))
time.sleep(0.01)
```
#### File: affordance-master-thesis/tests/test_arduino.py
```python
import sys
import unittest
from unittest.mock import patch, call, MagicMock
from lib.arduino import Arduino
from nose_focus import focus
class test_arduino(unittest.TestCase):
"""docstring for test_arduino"""
def test_init(self):
def serial_return():
raise SerialException
with patch.multiple('lib.arduino', serial=serial_return):
ard = Arduino()
self.assertEqual(ard.no_serial, True)
with patch.multiple('lib.arduino', serial=MagicMock()):
ard = Arduino()
self.assertEqual(ard.no_serial, False)
@patch.multiple('lib.arduino.Arduino', change_relay_state=MagicMock())
def test_open_all_channels(self):
"""docstring for test_open_all_channels"""
ard = Arduino()
ard.channels = {
'ems1': {'min_max': [0, 80], 'type': 'digipot', 'prefix': 1000, 'last_value': 0, 'ems_on_off': False},
'ems2': {'min_max': [0, 80], 'type': 'digipot', 'prefix': 2000, 'last_value': 0, 'ems_on_off': False}
}
ard.open_all_channels()
calls = [call("ems1", True), call("ems2", True)]
ard.change_relay_state.assert_has_calls(calls, True)
@patch.multiple('lib.arduino.Arduino', send_value=MagicMock())
def test_stop_all(self):
ard = Arduino()
ard.stop_all()
ard.send_value.assert_called_with("s")
@patch.multiple('lib.arduino.Arduino', send_value=MagicMock())
def test_change_relay_state(self):
ard = Arduino()
ard.channels = {
'relay1': {'type': 'relay', 'state': False, 'serial_open': 'o', 'serial_close': 'c'},
'relay2': {'type': 'relay', 'state': False, 'serial_open': 'k', 'serial_close': 'l'}
}
ard.change_relay_state("relay1", True)
self.assertEqual(ard.channels['relay1']['state'], True)
ard.send_value.assert_called_with("o")
ard.change_relay_state("relay2", True)
self.assertEqual(ard.channels['relay2']['state'], True)
ard.send_value.assert_called_with("k")
ard.change_relay_state("relay1", False)
self.assertEqual(ard.channels['relay1']['state'], False)
ard.send_value.assert_called_with("c")
ard.change_relay_state("relay2", 0)
self.assertEqual(ard.channels['relay2']['state'], False)
ard.send_value.assert_called_with("l")
@patch.multiple('lib.arduino.Arduino', send_value=MagicMock(), send_ems_strength=MagicMock(), change_relay_state=MagicMock())
def test_calibration(self):
"""docstring for test_calibration"""
ard = Arduino()
ard.channels = {
'ems1': {'min_max': [0, 80], 'type': 'digipot', 'prefix': 1000, 'last_value': 0, 'ems_on_off': False},
'ems2': {'min_max': [0, 80], 'type': 'digipot', 'prefix': 2000, 'last_value': 0, 'ems_on_off': False}
}
def test_calibration_reset():
message = ["calibrate", "reset"]
ard.calibration(message)
ard.send_value.assert_called_with("r")
def test_calibration_set_min_max():
self.assertEqual(ard.channels['ems1']['min_max'],[0,80])
self.assertEqual(ard.channels['ems2']['min_max'],[0,80])
message = ["calibrate", "ems_min_max", "ems1", "10", "80"]
ard.calibration(message)
self.assertEqual(ard.channels['ems1']['min_max'],[10,80])
self.assertEqual(ard.channels['ems2']['min_max'],[0,80])
message = ["calibrate", "ems_min_max", "ems2", "6", "19"]
ard.calibration(message)
self.assertEqual(ard.channels['ems1']['min_max'],[10,80])
self.assertEqual(ard.channels['ems2']['min_max'],[6,19])
with self.assertRaises(ValueError):
message = ["calibrate", "ems_min_max", "ems2", "a", "19"]
ard.calibration(message)
with self.assertRaises(IndexError):
message = ["calibrate", "ems_min_max"]
ard.calibration(message)
def test_calibration_ems_on_off():
message = ["calibrate", "ems_on_off", "ems1", "true"]
ard.calibration(message)
ard.change_relay_state.assert_called_with("ems1", True)
message = ["calibrate", "ems_on_off", "ems2", "true"]
ard.calibration(message)
ard.change_relay_state.assert_called_with("ems2", True)
message = ["calibrate", "ems_on_off", "ems1", "false"]
ard.calibration(message)
ard.change_relay_state.assert_called_with("ems1", False)
message = ["calibrate", "ems_on_off", "ems1", "0"]
ard.calibration(message)
ard.change_relay_state.assert_called_with("ems1", False)
with self.assertRaises(ValueError):
message = ["calibrate", "ems_on_off", "ems1", "a"]
ard.calibration(message)
with self.assertRaises(IndexError):
message = ["calibrate", "ems_on_off"]
ard.calibration(message)
def test_calibration_ems_value():
message = ["calibrate", "ems_value", "ems1", "25"]
ard.calibration(message)
ard.send_ems_strength.assert_called_with({"ems1": 25})
message = ["calibrate", "ems_value", "ems2", "25"]
ard.calibration(message)
ard.send_ems_strength.assert_called_with({"ems2": 25})
with self.assertRaises(ValueError):
message = ["calibrate", "ems_value", "ems1", "a"]
ard.calibration(message)
with self.assertRaises(ValueError):
message = ["calibrate", "ems_value", "ems1", "49.1"]
ard.calibration(message)
with self.assertRaises(ValueError):
message = ["calibrate", "ems_value", "ems1", "-1"]
ard.calibration(message)
with self.assertRaises(IndexError):
message = ["calibrate", "ems_value", "ems1"]
ard.calibration(message)
def test_calibration_relay():
message = ["calibrate", "relay", "ems1", "true"]
ard.calibration(message)
ard.change_relay_state.assert_called_with("ems1", True)
message = ["calibrate", "relay", "ems2", "true"]
ard.calibration(message)
ard.change_relay_state.assert_called_with("ems2", True)
message = ["calibrate", "relay", "ems1", "false"]
ard.calibration(message)
ard.change_relay_state.assert_called_with("ems1", False)
with self.assertRaises(ValueError):
message = ["calibrate", "relay", "ems1", "a"]
ard.calibration(message)
with self.assertRaises(IndexError):
message = ["calibrate", "relay", "ems1"]
ard.calibration(message)
test_calibration_reset()
test_calibration_set_min_max()
test_calibration_ems_on_off()
test_calibration_ems_value()
test_calibration_relay()
def test_send_value(self):
ar = Arduino()
ar.ser = MagicMock()
ar.no_serial = True
ar.previously_sent = None
ar.send_value("1")
self.assertFalse(ar.ser.write.called)
ar.no_serial = False
ar.previously_sent = None
ar.send_value("1")
ar.ser.write.assert_called_with(bytes("1", "UTF-8"))
self.assertEqual(ar.previously_sent, "1")
ar.ser.write.reset_mock()
ar.previously_sent = "1"
ar.send_value("1")
self.assertFalse(ar.ser.write.called)
def test_subscribe(self):
ar = Arduino()
ar.subscribers = []
cb = MagicMock()
ar.subscribe(cb)
self.assertEqual(ar.subscribers[0], cb)
def test_run(self):
ar = Arduino()
ar.stop = False
ar.no_serial = False
cb = MagicMock()
ar.ser = MagicMock()
def fake_readline(val):
return b"l"
ar.ser.readline = fake_readline
ar.subscribers = [cb]
ar.start()
ar.stop = True
ar.join()
cb.assert_called_with("l")
@patch.multiple('lib.arduino.Arduino', send_value=MagicMock())
def test_send_ems_strength(self):
"""docstring for test_send_percentage"""
ard = Arduino()
ard.channels = {
'ems1': {'min_max': [0, 80], 'type': 'digipot', 'prefix': 1000, 'last_value': 0, 'ems_on_off': False},
'ems2': {'min_max': [0, 80], 'type': 'digipot', 'prefix': 2000, 'last_value': 0, 'ems_on_off': False}
}
ard.last_sent_ems = 0
ard.send_ems_strength({"ems1": 0, "ems2": 0})
ard.send_value.assert_called_with("$1100%$2100%")
ard.last_sent_ems = 0
ard.send_ems_strength({"ems1": 100, "ems2": 100})
ard.send_value.assert_called_with("$1020%$2020%")
ard.last_sent_ems = 0
ard.send_ems_strength({"ems1": 100})
ard.send_value.assert_called_with("$1020%")
ard.last_sent_ems = 0
ard.send_ems_strength({"ems1": 100, "ems2": 0})
ard.send_value.assert_called_with("$1020%$2100%")
ard.last_sent_ems = 0
ard.channels['ems1']['min_max'] = [12,100]
ard.send_ems_strength({"ems1": 0, "ems2": 0})
ard.send_value.assert_called_with("$1088%$2100%")
ard.last_sent_ems = 0
ard.channels['ems1']['min_max'] = [20,67]
ard.send_ems_strength({"ems1": 0, "ems2": 0})
ard.send_value.assert_called_with("$1080%$2100%")
ard.last_sent_ems = 0
ard.channels['ems1']['min_max'] = [20,67]
ard.send_ems_strength({"ems1": 100, "ems2": 0})
ard.send_value.assert_called_with("$1033%$2100%")
ard.last_sent_ems = 0
ard.channels['ems1']['min_max'] = [20,80]
ard.send_ems_strength({"ems1": 25, "ems2": 0})
ard.send_value.assert_called_with("$1075%$2100%")
ard.last_sent_ems = 0
ard.send_ems_strength({"ems1": 25.3, "ems2": 0})
ard.send_value.assert_called_with("$1075%$2100%")
ard.last_sent_ems = 0
ard.send_ems_strength({"ems1": -1, "ems2": 0})
ard.send_value.assert_called_with("$1080%$2100%")
ard.last_sent_ems = 0
ard.send_ems_strength({"ems1": 101, "ems2": 0})
ard.send_value.assert_called_with("$1020%$2100%")
ard.last_sent_ems = 0
with self.assertRaises(ValueError):
ard.send_ems_strength({"ems1": "wrong", "ems2": 0})
ard.last_sent_ems = 0
with self.assertRaises(ValueError):
ard.send_ems_strength({"ems1": 0, "ems2": "wrong"})
ard.last_sent_ems = 0
with self.assertRaises(IndexError):
ard.send_ems_strength({"ThisWillNeverBeAnamefoooorANems": 100})
``` |
{
"source": "jonepatr/lets_face_it",
"score": 2
} |
#### File: code/data_segments/create_seqs.py
```python
from misc.generate_test_sequences import generate_videos
from misc.find_test_segments import BLOCK_LIST, get_mimicry_normal, get_mimicry_random_alignment, get_non_mimicry_normal, get_non_mimicry_random_alignment
from misc.shared import BASE_DIR
import torch
import shutil
OUTPUT_DIR = BASE_DIR / "outputs/super_new"
from collections import defaultdict
a = defaultdict(list)
def find_overlapping_sequences():
    for file_ in (OUTPUT_DIR / "mimicry_gt").glob("*.mp4"):  # base_dir was undefined; OUTPUT_DIR assumed here
*_, session, start, stop = file_.stem.split("_")
frames = set(range(int(start), int(stop)))
if any([x[0] & frames for x in a[session]]):
print(file_.stem, start, stop)
for x in a[session]:
print("->", x[1], min(x[0]), max(x[0]))
print("----------")
a[session].append((frames, file_.stem))
def cache_sequence(name, func, args=[]):
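    # Added comment: compute-once cache; the result of func(*args) is stored next
    # to `name` as a .pt file via torch.save and reloaded on later calls.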
json_file = name.with_suffix(suffix=".pt")
if not json_file.exists():
sequences = func(*args)
torch.save(sequences, json_file)
else:
sequences = torch.load(json_file)
return sequences
def create_mimicry_random_alignment(name):
output_name = OUTPUT_DIR / f"{name}_random_align"
sequences = cache_sequence(
output_name, get_mimicry_random_alignment, (OUTPUT_DIR / f"{name}_gt",),
)
generate_videos(sequences, output_name)
def create_mimicry_gt(name):
output_name = OUTPUT_DIR / f"{name}_gt"
sequences = cache_sequence(output_name, get_mimicry_normal)
total_seq = []
try:
shutil.rmtree(output_name)
except FileNotFoundError:
pass
output_name.mkdir(exist_ok=True)
orig = OUTPUT_DIR / f"{name}_gt_original"
for seq in sequences:
seq = list(seq)
if seq[0] not in BLOCK_LIST:
*_, start, stop = seq[0].split(".")[0].split("_")
print(seq[0], int(stop) - int(start))
total_seq.append(seq)
orig_file = (orig / seq[0]).with_suffix(".mp4")
if orig_file.exists():
shutil.copy(
orig_file, (output_name / seq[0]).with_suffix(".mp4"),
)
(output_name / "meta").mkdir(exist_ok=True)
shutil.copy(
(orig_file.parent / "meta" / orig_file.stem).with_suffix(".txt"),
(output_name / "meta" / orig_file.stem).with_suffix(".txt"),
)
generate_videos(total_seq, output_name)
def create_non_mimicry_random_alignment(name):
output_name = OUTPUT_DIR / f"{name}_no_mimicry_rand_align"
sequences = cache_sequence(output_name, get_non_mimicry_random_alignment, (OUTPUT_DIR / f"{name}_random_align",))
generate_videos(sequences, output_name)
def create_non_mimicry(name):
output_name = OUTPUT_DIR / f"{name}_no_mimicry_gt"
sequences = cache_sequence(output_name, get_non_mimicry_normal, (OUTPUT_DIR / f"{name}_no_mimicry_rand_align",))
generate_videos(sequences, output_name)
if __name__ == "__main__":
name = "mimicry"
# create_mimicry_gt(name)
# create_mimicry_random_alignment(name)
create_non_mimicry_random_alignment(name)
create_non_mimicry(name)
```
#### File: code/examples/visualize_example.py
```python
import io
import numpy as np
import json
import requests
import h5py
seq_len = 100
file_name = "output_video.mp4"
data_file = "flame_params.hdf5"
def byteify(x):
memfile = io.BytesIO()
np.save(memfile, x)
memfile.seek(0)
return memfile.read().decode("latin-1")
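# byteify() serializes an array with np.save and decodes the bytes as latin-1 so the
# payload survives JSON transport; the receiving end presumably reverses this with
# np.load on a BytesIO of the latin-1-encoded string (comment added for clarity).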
def get_face(x, seq_len):
return {
"expression": byteify(x["tf_exp"][:seq_len]),
"pose": byteify(x["tf_pose"][:seq_len]),
"shape": byteify(x["tf_shape"][:seq_len]),
"rotation": byteify(x["tf_rot"][:seq_len]),
}
with h5py.File(data_file, "r") as f:
p1 = f["sessions/1/participants/P1"]
p2 = f["sessions/1/participants/P2"]
serialized = json.dumps(
{
"seqs": [get_face(p1, seq_len), get_face(p2, seq_len)],
"file_name": file_name,
"fps": 25,
}
)
try:
resp = requests.post("http://localhost:8000/render", data=serialized, timeout=600)
resp.raise_for_status()
print(resp.json())
except requests.exceptions.HTTPError:
print("render request: failed on the server..")
except requests.exceptions.Timeout:
print("render request: timed out")
except requests.exceptions.ConnectionError:
print("render request: connection error")
```
#### File: code/feature_extraction/ringnet.py
```python
import contextlib
import os
import shutil
import tempfile
from collections import defaultdict, namedtuple
from pathlib import Path
import chumpy as ch
import h5py
import numpy as np
import skimage.io as io
from misc.shared import BASE_DIR, CONFIG, DATASET_DIR
from misc.utils import get_gender
from tqdm import tqdm
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from psbody.mesh import Mesh
from RingNet.run_RingNet import RingNet_inference
from RingNet.smpl_webuser.serialization import load_model
from RingNet.smpl_webuser.verts import verts_decorated
from RingNet.util import image as img_util
Config = namedtuple(
"Config",
[
"load_path",
"batch_size",
"img_size",
"data_format",
"pose_params",
"shape_params",
"expression_params",
],
)
def preprocess_image(img_path, img_size):
"""
Taken (and modified) from the RingNet repo
"""
img = io.imread(img_path)
if np.max(img.shape[:2]) != img_size:
scale = float(img_size) / np.max(img.shape[:2])
else:
scale = 1.0 # scaling_factor
center = np.round(np.array(img.shape[:2]) / 2).astype(int)
# image center in (x,y)
center = center[::-1]
crop, proc_param = img_util.scale_and_crop(img, scale, center, img_size)
# Normalize image to [-1, 1]
crop = 2 * ((crop / 255.0) - 0.5)
return crop, proc_param
def make_predicted_mesh_neutral(params, flame_model_path):
"""
Taken (and modified) from the RingNet repo
"""
pose = np.zeros(15)
expression = np.zeros(100)
shape = np.hstack((params["shape"], np.zeros(300 - params["shape"].shape[0])))
flame_genral_model = load_model(flame_model_path)
generated_neutral_mesh = verts_decorated(
ch.array([0.0, 0.0, 0.0]),
ch.array(pose),
ch.array(flame_genral_model.r),
flame_genral_model.J_regressor,
ch.array(flame_genral_model.weights),
flame_genral_model.kintree_table,
flame_genral_model.bs_style,
flame_genral_model.f,
bs_type=flame_genral_model.bs_type,
posedirs=ch.array(flame_genral_model.posedirs),
betas=ch.array(
np.hstack((shape, expression))
), # betas=ch.array(np.concatenate((theta[0,75:85], np.zeros(390)))), #
shapedirs=ch.array(flame_genral_model.shapedirs),
want_Jtr=True,
)
return Mesh(v=generated_neutral_mesh.r, f=generated_neutral_mesh.f)
def extract_ringnet(fps):
sess = tf.compat.v1.Session()
config = Config(
load_path=str(BASE_DIR / CONFIG["ringnet"]["model"]),
batch_size=1,
img_size=224,
data_format="NHWC",
pose_params=6,
shape_params=100,
expression_params=50,
)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
model = RingNet_inference(config, sess,)
img_dirs = list(DATASET_DIR.glob(f"*/*/imgs_{fps}fps"))
for img_dir in tqdm(img_dirs, desc="Extracting ringnet", leave=True):
ringnet_file = img_dir.parent / f"ringnet_{fps}fps.h5"
if ringnet_file.exists():
continue
data = defaultdict(list)
for img in tqdm(list(sorted(img_dir.glob("*.jpg"))), leave=False):
input_img, proc_param = preprocess_image(img, config.img_size)
vertices, flame_parameters = model.predict(
np.expand_dims(input_img, axis=0), get_parameters=True
)
data["proc_params/scale"].append(proc_param["scale"])
data["proc_params/start_pt"].append(proc_param["start_pt"])
data["proc_params/end_pt"].append(proc_param["end_pt"])
data["proc_params/img_size"].append(proc_param["img_size"])
data["vertices"].append(vertices[0])
data["flame_params/cam"].append(flame_parameters[0][:3])
data["flame_params/pose"].append(
flame_parameters[0][3 : 3 + config.pose_params]
)
data["flame_params/shape"].append(
flame_parameters[0][
3
+ config.pose_params : 3
+ config.pose_params
+ config.shape_params
]
)
data["flame_params/expression"].append(
flame_parameters[0][3 + config.pose_params + config.shape_params :]
)
with tempfile.TemporaryDirectory() as tmpd:
tmpf = Path(tmpd) / ringnet_file.name
with h5py.File(tmpf, "w") as f:
for key, value in data.items():
arrays = np.stack(value)
if (arrays == arrays[0]).all():
arrays = arrays[0]
f.create_dataset(key, data=arrays)
shutil.move(tmpf, ringnet_file)
sess.close()
def extract_neutral_mesh(fps):
files = list(DATASET_DIR.glob(f"*/*/ringnet_{fps}fps.h5"))
for file in tqdm(files, desc="Extracting neutral mesh"):
neutral_mesh_file = file.parent / "neutral_mesh.ply"
if neutral_mesh_file.exists():
continue
gender = get_gender(file.parent.parent.name, file.parent.name)
with h5py.File(file, "r") as f:
avg_shape = f["flame_params/shape"][()].mean(axis=0)
flame_model_path = BASE_DIR / CONFIG["flame"][f"model_path_{gender}"]
neutral_mesh = make_predicted_mesh_neutral(
{"shape": avg_shape}, flame_model_path
)
neutral_mesh.write_ply(str(neutral_mesh_file))
```
#### File: glow_pytorch/glow/lets_face_it_glow.py
```python
import random
from glow_pytorch.glow.utils import (
derange_batch,
get_mismatched_modalities,
get_scheduler,
test_params,
)
import numpy as np
import optuna
import torch
from glow_pytorch.glow import get_longest_history
from glow_pytorch.glow.models import SeqGlow
from pytorch_lightning import LightningModule
from torch.optim import SGD, Adam, RMSprop
class LetsFaceItGlow(LightningModule):
def __init__(self, hparams, dataset_root=None, test=None):
super().__init__()
test_params(hparams)
if dataset_root is not None:
hparams.dataset_root = dataset_root
if test is not None:
hparams.Test = test
self.hparams = hparams
self.register_buffer("last_missmatched_nll", torch.tensor(np.Inf))
self.seq_glow = SeqGlow(self.hparams)
if self.hparams.Train["use_negative_nll_loss"]:
modalities, nll_name = get_mismatched_modalities(self.hparams)
self.missmatched_modalities = modalities
self.missmatched_nll_name = nll_name
def training_step(self, batch, batch_idx):
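        # Added comment: with ~10% probability (and only while the last mismatched
        # NLL is still positive), a deranged batch with mismatched modalities is
        # fed through the model and its loss is scaled by -0.1, nudging the model
        # to assign low likelihood to misaligned conditioning.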
if (
self.hparams.Train["use_negative_nll_loss"]
and self.last_missmatched_nll > 0
and random.random() < 0.1
and self.missmatched_modalities
):
deranged_batch = derange_batch(batch, self.missmatched_modalities)
_, loss, _ = self.seq_glow(deranged_batch)
self.log("Loss/missmatched_nll", -loss)
self.last_missmatched_nll = -loss
loss *= -0.1
else:
_, loss, _ = self.seq_glow(batch)
self.log("train_loss", loss)
return loss
def validation_step(self, batch, batch_idx):
_, loss, _ = self.seq_glow(batch)
self.log("val_loss", loss, prog_bar=True)
return loss
def configure_optimizers(self):
optimizer_name = self.hparams.Optim["name"]
# Define optimizer
optimizers = {"adam": Adam, "sgd": SGD, "rmsprop": RMSprop}
optimizer = optimizers[optimizer_name](
self.parameters(),
lr=self.hparams.lr,
**self.hparams.Optim["args"][optimizer_name],
)
return [optimizer], get_scheduler(self.hparams.Optim["Schedule"], optimizer)
# learning rate warm-up
# def optimizer_step(
# self,
# current_epoch,
# batch_nb,
# optimizer,
# optimizer_idx,
# second_order_closure=None,
# on_tpu=False,
# using_native_amp=False,
# using_lbfgs=False,
# ):
# lr = self.hparams.lr
# # warm up lr
# warm_up = self.hparams.Optim["Schedule"]["warm_up"]
# if self.trainer.global_step < warm_up:
# lr_scale = min(1.0, float(self.trainer.global_step + 1) / warm_up)
# lr *= lr_scale
# for pg in optimizer.param_groups:
# pg["lr"] = lr
# for pg in optimizer.param_groups:
# self.log("learning_rate", pg["lr"])
# # update params
# optimizer.step()
# optimizer.zero_grad()
def test_step(self, batch, batch_idx):
_, loss, losses = self(batch, test=True)
output = {"test_loss": loss, "test_losses": losses}
seq_len = self.hparams.Test["seq_len"]
cond_data = {
"p1_face": torch.zeros_like(
batch["p1_face"][:, : get_longest_history(self.hparams.Conditioning)]
),
"p2_face": batch.get("p2_face"),
"p1_speech": batch.get("p1_speech"),
"p2_speech": batch.get("p2_speech"),
}
predicted_seq = self.inference(seq_len, data=cond_data)
output["predicted_prop_seq"] = predicted_seq.cpu().detach()
gt_seq = batch["p1_face"][:, -predicted_seq.shape[1] :]
output["gt_seq"] = gt_seq.cpu().detach()
for modality in ["p2_face", "p2_speech", "p1_speech"]:
if self.hparams.Conditioning[modality]["history"] > 0:
                deranged_batch = derange_batch(batch, [modality])
_, missaligned_nll, misaligned_losses = self(deranged_batch, test=True)
output[f"nll_mismatched_{modality}"] = missaligned_nll.cpu().detach()
output[f"losses_mismatched_{modality}"] = misaligned_losses
cond_data = {
"p1_face": torch.zeros_like(
deranged_batch["p1_face"][
:, : get_longest_history(self.hparams.Conditioning)
]
),
"p2_face": deranged_batch.get("p2_face"),
"p1_speech": deranged_batch.get("p1_speech"),
"p2_speech": deranged_batch.get("p2_speech"),
}
predicted_seq = self.inference(seq_len, data=cond_data)
output[
f"predicted_mismatch_{modality}_seq"
] = predicted_seq.cpu().detach()
return output
def test_epoch_end(self, outputs):
return {"results": outputs}
```
#### File: glow_pytorch/glow/modules.py
```python
import numpy as np
import scipy.linalg
import torch
import torch.nn as nn
import torch.nn.functional as F
from glow_pytorch.glow import thops
class ActNorm2d(nn.Module):
"""
Activation Normalization
Initialize the bias and scale with a given minibatch,
    so that the output per-channel has zero mean and unit variance for that minibatch.
After initialization, `bias` and `logs` will be trained as parameters.
"""
def __init__(self, num_features, scale=1.0):
super().__init__()
# register mean and scale
size = [1, num_features]
self.register_parameter("bias", nn.Parameter(torch.zeros(*size)))
self.register_parameter("logs", nn.Parameter(torch.zeros(*size)))
self.num_features = num_features
self.scale = float(scale)
self.inited = False
def _check_input_dim(self, input):
return NotImplemented
def initialize_parameters(self, input):
if not self.training:
return
assert input.device == self.bias.device
with torch.no_grad():
bias = thops.mean(input.clone(), dim=0, keepdim=True) * -1.0
vars = thops.mean((input.clone() + bias) ** 2, dim=0, keepdim=True)
logs = torch.log(self.scale / (torch.sqrt(vars) + 1e-6))
self.bias.data.copy_(bias.data)
self.logs.data.copy_(logs.data)
self.inited = True
def _center(self, input, reverse=False):
if not reverse:
return input + self.bias
else:
return input - self.bias
def _scale(self, input, logdet=None, reverse=False):
logs = self.logs
if not reverse:
input = input * torch.exp(logs)
else:
input = input * torch.exp(-logs)
if logdet is not None:
"""
logs is log_std of `mean of channels`
so we need to multiply on the channel length
"""
dlogdet = thops.sum(logs) * input.size(1)
if reverse:
dlogdet *= -1
logdet = logdet + dlogdet
return input, logdet
def forward(self, input, logdet=None, reverse=False):
if not self.inited:
self.initialize_parameters(input)
# no need to permute dims as old version
if not reverse:
# center and scale
input = self._center(input, reverse)
input, logdet = self._scale(input, logdet, reverse)
else:
# scale and center
input, logdet = self._scale(input, logdet, reverse)
input = self._center(input, reverse)
return input, logdet
class LinearZeros(nn.Linear):
def __init__(self, in_channels, out_channels, logscale_factor=3):
super().__init__(in_channels, out_channels)
self.logscale_factor = logscale_factor
# set logs parameter
self.register_parameter("logs", nn.Parameter(torch.zeros(out_channels)))
# init
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input):
output = super().forward(input)
return output * torch.exp(self.logs * self.logscale_factor)
class Permute2d(nn.Module):
def __init__(self, num_channels, shuffle):
super().__init__()
self.num_channels = num_channels
        self.indices = np.arange(self.num_channels - 1, -1, -1).astype(np.long)
self.indices_inverse = np.zeros((self.num_channels), dtype=np.long)
for i in range(self.num_channels):
self.indices_inverse[self.indices[i]] = i
if shuffle:
self.reset_indices()
def reset_indices(self):
np.random.shuffle(self.indices)
for i in range(self.num_channels):
self.indices_inverse[self.indices[i]] = i
def forward(self, input, reverse=False):
assert len(input.size()) == 4
if not reverse:
return input[:, self.indices]
else:
return input[:, self.indices_inverse]
class InvertibleConv1x1(nn.Module):
def __init__(self, num_channels, LU_decomposed=False):
super().__init__()
w_shape = [num_channels, num_channels]
w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype(np.float32)
if not LU_decomposed:
# Sample a random orthogonal matrix:
self.register_parameter("weight", nn.Parameter(torch.Tensor(w_init)))
else:
np_p, np_l, np_u = scipy.linalg.lu(w_init)
np_s = np.diag(np_u)
np_sign_s = np.sign(np_s)
np_log_s = np.log(np.abs(np_s))
np_u = np.triu(np_u, k=1)
l_mask = np.tril(np.ones(w_shape, dtype=np.float32), -1)
eye = np.eye(*w_shape, dtype=np.float32)
self.register_buffer("p", torch.Tensor(np_p.astype(np.float32)))
self.register_buffer("sign_s", torch.Tensor(np_sign_s.astype(np.float32)))
self.l = nn.Parameter(torch.Tensor(np_l.astype(np.float32)))
self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(np.float32)))
self.u = nn.Parameter(torch.Tensor(np_u.astype(np.float32)))
self.l_mask = torch.Tensor(l_mask)
self.eye = torch.Tensor(eye)
self.w_shape = w_shape
self.LU = LU_decomposed
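        # With LU_decomposed=True, get_weight() below rebuilds the weight as
        #   W = P @ L @ (U + diag(sign_s * exp(log_s)))
        # so log|det W| is just sum(log_s), avoiding an explicit slogdet each step.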
def get_weight(self, input, reverse):
w_shape = self.w_shape
if not self.LU:
dlogdet = torch.slogdet(self.weight)[1] * input.size(1)
if not reverse:
weight = self.weight.view(w_shape[0], w_shape[1])
else:
weight = (
torch.inverse(self.weight.double())
.float()
.view(w_shape[0], w_shape[1])
)
return weight, dlogdet
else:
self.p = self.p.to(input.device)
self.sign_s = self.sign_s.to(input.device)
self.l_mask = self.l_mask.to(input.device)
self.eye = self.eye.to(input.device)
l = self.l * self.l_mask + self.eye
u = self.u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(
self.sign_s * torch.exp(self.log_s)
)
dlogdet = thops.sum(self.log_s) * input.size(1)
if not reverse:
w = torch.matmul(self.p, torch.matmul(l, u))
else:
l = torch.inverse(l.double()).float()
u = torch.inverse(u.double()).float()
w = torch.matmul(u, torch.matmul(l, self.p.inverse()))
return w.view(w_shape[0], w_shape[1]), dlogdet
def forward(self, input, logdet=None, reverse=False):
"""
log-det = log|abs(|W|)| * pixels
"""
weight, dlogdet = self.get_weight(input, reverse)
if not reverse:
z = torch.matmul(input, weight)
if logdet is not None:
logdet = logdet + dlogdet
return z, logdet
else:
z = torch.matmul(input, weight)
if logdet is not None:
logdet = logdet - dlogdet
return z, logdet
class GaussianDiag:
Log2PI = float(np.log(2 * np.pi))
@staticmethod
def likelihood_simplified(x):
"""
lnL = -1/2 * { ln|Var| + ((X - Mu)^T)(Var^-1)(X - Mu) + kln(2*PI) }
k = 1 (Independent)
Var = logs ** 2
"""
return -0.5 * ((x ** 2) + GaussianDiag.Log2PI)
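        # This is likelihood() below specialised to a standard normal (mean = 0,
        # logs = 0): log N(x; 0, 1) = -0.5 * (x**2 + ln(2*pi)) per dimension.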
@staticmethod
def logp_simplified(x):
likelihood = GaussianDiag.likelihood_simplified(x)
return torch.sum(likelihood, dim=1)
@staticmethod
def likelihood(mean, logs, x):
"""
lnL = -1/2 * { ln|Var| + ((X - Mu)^T)(Var^-1)(X - Mu) + kln(2*PI) }
k = 1 (Independent)
Var = logs ** 2
"""
return -0.5 * (
logs * 2.0 + ((x - mean) ** 2) / torch.exp(logs * 2.0) + GaussianDiag.Log2PI
)
@staticmethod
def logp(mean, logs, x):
likelihood = GaussianDiag.likelihood(mean, logs, x)
return thops.sum(likelihood, dim=[1])
@staticmethod
def sample(output_shape, eps_std=1):
return torch.normal(
mean=torch.zeros_like(output_shape),
std=torch.ones_like(output_shape) * eps_std,
)
```
#### File: code/glow_pytorch/hparams_tuning.py
```python
import json
import multiprocessing
import os
import shutil
import socket
from argparse import ArgumentParser, Namespace
from pprint import pprint
from glow_pytorch.glow.utils import calc_jerk, get_longest_history
import numpy as np
import optuna
import pytorch_lightning as pl
import torch
import yaml
from jsmin import jsmin
from optuna.integration import PyTorchLightningPruningCallback
from pytorch_lightning import Trainer, seed_everything
from glow_pytorch.glow.lets_face_it_glow import LetsFaceItGlow
from glow_pytorch.hparam_tuning_configs import hparam_configs
from misc.shared import CONFIG, DATA_DIR, RANDOM_SEED
from misc.utils import get_training_name
seed_everything(RANDOM_SEED)
class FailedTrial(Exception):
pass
class MyEarlyStopping(PyTorchLightningPruningCallback):
def __init__(self, trial, monitor="val_loss", patience=2):
super().__init__(trial, monitor=monitor)
self.best_loss = torch.tensor(np.Inf)
self.wait = 0
self.patience = patience
self.jerk_generated_means = []
def on_train_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
super().on_validation_batch_end(
trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
)
if pl_module.global_step > 20 and outputs > 0:
message = f"Trial was pruned since loss > 0"
raise optuna.exceptions.TrialPruned(message)
def on_validation_epoch_start(self, trainer, pl_module):
self.jerk_generated_means = []
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
super().on_validation_batch_end(
trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
)
seq_len = pl_module.hparams.Validation["seq_len"]
new_batch = {x: y.type_as(outputs) for x, y in batch.items()}
cond_data = {
"p1_face": new_batch["p1_face"][
:, : get_longest_history(pl_module.hparams.Conditioning)
],
"p2_face": new_batch.get("p2_face"),
"p1_speech": new_batch.get("p1_speech"),
"p2_speech": new_batch.get("p2_speech"),
}
predicted_seq = pl_module.seq_glow.inference(seq_len, data=cond_data)
self.jerk_generated_means.append(calc_jerk(predicted_seq))
if pl_module.global_step > 20 and outputs > 0:
message = f"Trial was pruned since loss > 0"
raise optuna.exceptions.TrialPruned(message)
def on_validation_epoch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
super().on_validation_epoch_end(
trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
)
jerk_generated_mean = torch.stack(self.jerk_generated_means).mean()
val_loss = torch.stack(outputs).mean()
if jerk_generated_mean > 10 and pl_module.global_step > 20:
message = f"Trial was pruned since jerk > 5"
raise optuna.exceptions.TrialPruned(message)
if val_loss is not None and val_loss > 0:
message = f"Trial was pruned because val loss was too high {val_loss}."
raise optuna.exceptions.TrialPruned(message)
if val_loss < self.best_loss:
self.best_loss = val_loss
self.wait = 0
else:
self.wait += 1
if self.wait >= self.patience:
return True
parser = ArgumentParser()
parser.add_argument("hparams_file")
parser.add_argument("-n", type=int)
parser = Trainer.add_argparse_args(parser)
default_params = parser.parse_args()
parser2 = ArgumentParser()
parser2.add_argument("hparams_file")
parser2.add_argument("-n", type=int)
override_params, unknown = parser2.parse_known_args()
conf_name = (
os.path.basename(override_params.hparams_file)
.replace(".yaml", "")
.replace(".json", "")
)
def prepare_hparams(trial):
if override_params.hparams_file.endswith(".json"):
hparams_json = json.loads(jsmin(open(override_params.hparams_file).read()))
elif override_params.hparams_file.endswith(".yaml"):
hparams_json = yaml.load(open(override_params.hparams_file))
hparams_json["dataset_root"] = str(DATA_DIR)
params = vars(default_params)
params.update(hparams_json)
params.update(vars(override_params))
hparams = Namespace(**params)
return hparam_configs[conf_name].hparam_options(hparams, trial)
def run(hparams, return_dict, trial, batch_size, current_date):
log_path = os.path.join("logs", conf_name, f"{current_date}")
if os.path.exists(log_path):
shutil.rmtree(log_path)
hparams.batch_size = batch_size
trainer_params = vars(hparams).copy()
trainer_params["checkpoint_callback"] = pl.callbacks.ModelCheckpoint(
save_top_k=3, monitor="val_loss", mode="min"
)
if CONFIG["comet"]["api_key"]:
from pytorch_lightning.loggers import CometLogger
trainer_params["logger"] = CometLogger(
api_key=CONFIG["comet"]["api_key"],
project_name=CONFIG["comet"]["project_name"],
experiment_name=conf_name, # + current_date
)
trainer_params["early_stop_callback"] = MyEarlyStopping(trial, monitor="val_loss")
trainer = Trainer(**trainer_params)
model = LetsFaceItGlow(hparams)
try:
trainer.fit(model)
except RuntimeError as e:
if str(e).startswith("CUDA out of memory"):
return_dict["OOM"] = True
else:
return_dict["error"] = e
except (optuna.exceptions.TrialPruned, Exception) as e:
return_dict["error"] = e
for key, item in trainer.callback_metrics.items():
return_dict[key] = float(item)
def objective(trial):
current_date = get_training_name()
manager = multiprocessing.Manager()
hparams = prepare_hparams(trial)
batch_size = hparams.batch_size
trial.set_user_attr("version", current_date)
trial.set_user_attr("host", socket.gethostname())
trial.set_user_attr("GPU", os.environ.get("CUDA_VISIBLE_DEVICES"))
pprint(vars(hparams))
while batch_size > 0:
print(f"trying with batch_size {batch_size}")
return_dict = manager.dict()
p = multiprocessing.Process(
target=run,
args=(hparams, return_dict, trial, batch_size, current_date),
)
p.start()
p.join()
print(return_dict)
if return_dict.get("OOM"):
new_batch_size = batch_size // 2
if new_batch_size < 2:
raise FailedTrial("batch size smaller than 2!")
else:
batch_size = new_batch_size
elif return_dict.get("error"):
raise return_dict.get("error")
else:
break
trial.set_user_attr("batch_size", batch_size)
for metric, val in return_dict.items():
if metric != "val_loss":
trial.set_user_attr(metric, float(val))
return float(return_dict["val_loss"])
if __name__ == "__main__":
conf_vars = {}
if CONFIG["optuna"]["rdbs_storage"]:
conf_vars["storage"] = optuna.storages.RDBStorage(
url=CONFIG["optuna"]["rdbs_storage"],
)
study = optuna.create_study(
**conf_vars,
study_name=conf_name,
direction="minimize",
pruner=optuna.pruners.NopPruner(),
load_if_exists=True,
)
study.optimize(objective, n_trials=override_params.n, catch=(FailedTrial,))
print("Number of finished trials: {}".format(len(study.trials)))
print("Best trial:")
trial = study.best_trial
print(" Value: {}".format(trial.value))
print(" Params: ")
for key, value in trial.params.items():
print(" {}: {}".format(key, value))
```
#### File: code/glow_pytorch/test_modules.py
```python
import torch
import numpy as np
from glow import modules, models
def test_actnorm():
print("[Test]: actnorm")
actnorm = modules.ActNorm2d(54)
x = torch.Tensor(np.random.rand(2, 54))
actnorm.initialize_parameters(x)
y, det = actnorm(x, 0)
x_, _ = actnorm(y, None, True)
print("actnorm (forward,reverse) delta", float(torch.max(torch.abs(x_ - x))))
print(" det", float(det))
def test_conv1x1():
print("[Test]: invconv1x1")
conv = modules.InvertibleConv1x1(96)
x = torch.Tensor(np.random.rand(2, 96))
y, det = conv(x, 0)
x_, _ = conv(y, None, True)
print("conv1x1 (forward,reverse) delta", float(torch.max(torch.abs(x_ - x))))
print(" det", float(det))
def test_flow_step():
print("[Test]: flow step")
step = models.FlowStep(
54,
256,
flow_permutation="invconv",
flow_coupling="affine",
cond_dim=32,
feature_encoder_dim=64,
glow_rnn_type="gru",
)
x = torch.Tensor(np.random.rand(2, 54))
cond = torch.Tensor(np.random.rand(2, 64))
y, det = step(x, cond, 0, False)
x_, det0 = step(y, cond, det, True)
print("flowstep (forward,reverse)delta", float(torch.max(torch.abs(x_ - x))))
print(" det", det, det0)
def test_flow_net():
print("[Test]: flow net")
net = models.FlowNet(
C=54,
hidden_channels=256,
cond_dim=64,
K=3,
L=1,
feature_encoder_dim=32,
glow_rnn_type="gru",
)
x = torch.Tensor(np.random.rand(4, 54))
cond = torch.Tensor(np.random.rand(4, 32))
y, det = net(x, cond)
x_, det0 = net(y, cond, reverse=True)
print("z", y.size())
print("x_", x_.size())
print(det, det0)
if __name__ == "__main__":
test_actnorm()
test_conv1x1()
test_flow_step()
test_flow_net()
```
#### File: code/rendering/render_seq.py
```python
from glow_pytorch.generate_motion_from_model import generate_motion
from misc.shared import BASE_DIR
from misc.generate_test_sequences import generate_videos, get_vad_weights
from misc.find_test_segments import LengthFail, get_frames
import random
import torch
PADDING = 24 * 2
def render(name, output_folder, session, start, stop, info):
    stop = stop + stop % 2  # make the end frame index even
frames = get_frames(session, start, stop)
p1_vad = get_vad_weights("P1", session, start, stop).sum()
p2_vad = get_vad_weights("P2", session, start, stop).sum()
info = info.copy()
if p1_vad > p2_vad:
info["left_start"] = 0
info["right_start"] = 136
else:
info["left_start"] = 136
info["right_start"] = 0
frame_nbs = [str(x).zfill(5) for x in sorted(frames[:, -1].int().tolist())]
left_video = {
"expression": frames[:, info["left_start"] : info["left_start"] + 50],
"jaw": frames[:, info["left_start"] + 100 : info["left_start"] + 103],
"neck": frames[:, info["left_start"] + 103 : info["left_start"] + 106],
}
# the agent on the right side is the agent that is p1, i.e. "self"
p1_indicies = list(range(info["right_start"], info["right_start"] + 136))
    # the agent on the left side of the video is p2, i.e. the interlocutor
p2_indicies = list(range(info["left_start"], info["left_start"] + 136))
try:
# 24 is the number of frames needed for initialization of the model, and * 2 because we only use every second frame
special_frames = get_frames(session, start - PADDING, stop)
except (LengthFail, IndexError) as e:
print(f"failed fetching frames.. {e}")
return
p1_model_frames = special_frames[:, p1_indicies]
p2_model_frames = special_frames[:, p2_indicies]
predicted_sequence = generate_motion(torch.cat([p1_model_frames, p2_model_frames], dim=1), model_path=BASE_DIR / "models/final_model.ckpt")
right_video = {
"expression": predicted_sequence[0, :, :50],
"jaw": predicted_sequence[0, :, 100:103],
"neck": predicted_sequence[0, :, 103:106],
}
generate_videos(
[(name, session, left_video, right_video, info, frame_nbs)], output_folder, 2, overwrite=True
)
if __name__ == '__main__':
info = {
"left_gender": random.choice(["male", "female"]),
"right_gender": random.choice(["male", "female"]),
"left_shape": torch.randn(300).tolist(),
"right_shape": torch.randn(300).tolist(),
"left_skin_color": random.choice(["white", "black"]),
"right_skin_color": random.choice(["white", "black"]),
}
start_frame = 16867+2000
length = 600
for i in range(4):
render(f"3_new_hello_{i}.mp4", BASE_DIR / "outputs/fresh_baked", "25", start_frame, start_frame+length, info)
```
#### File: code/visualize/render_server.py
```python
import gc
import io
import os
import subprocess
import tempfile
from pathlib import Path
from uuid import uuid4
import ffmpeg
import numpy as np
import torch
from fastapi import Body, FastAPI, Request
from fastapi.responses import StreamingResponse
try:
subprocess.check_call(
["python3", Path(__file__).parent / "test_egl.py"], stderr=subprocess.DEVNULL
)
os.environ["PYOPENGL_PLATFORM"] = "egl"
except subprocess.CalledProcessError:
print("Could not use EGL (GPU support), falling back on using CPU")
os.environ["PYOPENGL_PLATFORM"] = "osmesa"
from visualize.render_tools import get_vertices, render_double_face_video
app = FastAPI()
VIDEO_DIR = "videos"
def debyteify(x, key):
seqs = io.BytesIO()
seqs.write(x[key].encode("latin-1"))
seqs.seek(0)
return torch.from_numpy(np.load(seqs)).float()
def get_vert(seq):
return get_vertices(
debyteify(seq, "expression"),
debyteify(seq, "pose"),
debyteify(seq, "rotation"),
shape=debyteify(seq, "shape"),
)
@app.post("/render")
def read_root(request: Request, data=Body(...)):
file_name = VIDEO_DIR / Path(data.get("file_name", str(uuid4())))
fps = data["fps"]
left_vert = get_vert(data["seqs"][0])
right_vert = get_vert(data["seqs"][1])
with tempfile.NamedTemporaryFile(suffix=".mp4") as tmpf:
render_double_face_video(tmpf.name, left_vert, right_vert, fps=fps)
file_name.parent.mkdir(parents=True, exist_ok=True)
ffmpeg.input(tmpf.name).output(str(file_name), vcodec="h264").run(
overwrite_output=True
)
gc.collect()
url = f"http://{request.url.netloc}/video/{file_name}"
return {"url": url}
@app.get("/video/{path:path}")
def read_item(request: Request, path: str):
if not path.startswith(VIDEO_DIR):
path = VIDEO_DIR / Path(path)
return StreamingResponse(open(path, mode="rb"), media_type="video/mp4")
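# Example client call (illustrative only; the payload keys mirror debyteify()/get_vert()
# above, while the array shapes and port are assumptions, not taken from the project):
#
#   import io, numpy as np, requests
#
#   def byteify(arr):
#       buf = io.BytesIO()
#       np.save(buf, arr)
#       return buf.getvalue().decode("latin-1")
#
#   seq = {"expression": byteify(np.zeros((100, 50))),
#          "pose": byteify(np.zeros((100, 3))),
#          "rotation": byteify(np.zeros((100, 3))),
#          "shape": byteify(np.zeros(300))}
#   r = requests.post("http://localhost:8000/render",
#                     json={"fps": 25, "seqs": [seq, seq], "file_name": "demo.mp4"})
#   print(r.json()["url"])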
``` |
{
"source": "jonerra/my_website",
"score": 2
} |
#### File: my_website/core/models.py
```python
from django.db import models
# Create your models here.
class Message(models.Model):
subject = models.CharField(max_length=300)
name = models.CharField(max_length=300)
email = models.EmailField(max_length=300, blank=False, unique=False)
message = models.TextField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.subject
``` |
{
"source": "jones3kd/Anki-Korean-Lookup-Addon",
"score": 3
} |
#### File: jones3kd/Anki-Korean-Lookup-Addon/look_up.py
```python
import sys
import sqlite3
from online_dics import DaumDict, MyMem
class LookUp:
"""
LookUp Class.
This will read from a text file with words separated by newline characters.
ex text file:
ㅇㅓ
ㅇㅗ
ㅇㅓ
"""
def __init__(self, filename='file.txt'):
"""
        Set up the output file and the ordered list of lookup methods, then run the lookup.
"""
self.filename = filename
self.db_name = 'dictionary.db'
output_file_name = 'definitions.txt'
self.define_methods = [self._lookup_dic, self._lookup_daum,
self._lookup_mymem]
#open output file
self.out_file = open(output_file_name, 'w')
#delete contents if already exists
self.out_file.truncate()
#urls to visit
self.urls = {}
self.urls['daum'] = 'http://dic.daum.net/search.do?q=[word]&dic=eng'
self.daum_dict = DaumDict()
self.mymem = MyMem()
self._start_lookup()
def _lookup_kengdic(self, word):
"""
Uses the postgres kengdic to look up the korean word
returns false if word not found
"""
pass
def _lookup_mymem(self, word):
"""
        Look up the word using the MyMemory API
        returns True or False depending on whether an English definition was added to the output file
"""
results = self.mymem.get_def(word)
if results is not None:
self._add_def_to_file(results, word)
return True
else:
return False
def _lookup_daum(self, word):
"""
        Look up the word using the Daum dictionary class
        returns True or False depending on whether an English definition was added to the output file
"""
results = self.daum_dict.get_def(word)
if results is not None:
self._add_def_to_file(results, word)
return True
else:
return False
def _lookup_dic(self, word):
"""
        Use the SQLite db called dictionary.db to look up the Korean word
        returns True on success or False if no results could be found in
        the dictionary
"""
#connect to db
try:
self.db = sqlite3.connect(self.db_name)
cursor = self.db.cursor()
cursor.execute('SELECT * FROM Terms WHERE Expression = ?',(word,))
results = cursor.fetchall()
except Exception as e:
print("Not found in sqlite dictionary..")
return False
#add results to output_file
if len(results) > 0:
self._add_def_to_file(results, word)
return True
else:
return False
def _add_def_to_file(self, def_list, org_word):
"""
        Adds the original word, all definitions, dictionary forms, and hanja
        to the output file.
        output file format: original_word;dictionary_form;eng_def;hanja
def_list - is a list of tuples of definitions
ex. [('말', 'man', 'NULL'),
('말', '4.765 US gallons', 'NULL'),
('말', 'Horse', 'NULL'), ('말', 'End', 'NULL'),
('말', 'words,speaking', 'NULL')]
"""
        # write to file
        # original word; dict form; eng definition; hanja; \n
        # combine eng_definitions and hanja if there is more than one tuple
        if(len(def_list) < 1):  # if empty list don't add
return 0
eng_def = ''
hanja = ''
dict_form = def_list[0][0]
for tup in def_list:
if(len(eng_def) > 1):
eng_def += ', '
eng_def += ('%s'%tup[1])
temp_hanja = tup[2]
if temp_hanja != 'NULL' and temp_hanja is not None:
if(len(hanja) > 1):
hanja += ', '
hanja += ('%s'%temp_hanja)
results = [org_word,dict_form,eng_def,hanja]
for value in results:
self.out_file.write('%s;'%value)
self.out_file.write('\n')
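        # Example output line (illustrative, using the def_list from the docstring
        # above with no hanja): 말;말;man, 4.765 US gallons, Horse, End, words,speaking;;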
def _start_lookup(self):
"""
Start looking up the files in the various korean resources
"""
#open file and start reading line by line/word by word
try:
file = open(self.filename, 'r')
except IOError:
print("Sorry. file was not found and could not open. Please "
"Try again with a valid filename")
return -1
except Exception as e:
print("Sorry something went wrong: %s"%e)
return -1
for line in file:
word = line[:-1]#cut off newline or empty character at end of word
if(len(word) > 0):
#try to look up words
for define_method in self.define_methods:
if define_method(word):
break
#close output_file
self.out_file.close()
if len(sys.argv) > 1:
file = sys.argv[1] #get filename
look_up = LookUp(file)
else:
look_up = LookUp()
``` |
{
"source": "jonesbra/ntc-pymap",
"score": 3
} |
#### File: ntc-pymap/pymap/tcp_scanner.py
```python
class TcpScanner(object):
def __init__(self, hosts=list()):
self.hosts = hosts
def scan_host(self, host):
host.ports = list()
def scan(self):
for host in self.hosts:
self.scan_host(host)
def TcpScannerHandler(hosts):
return TcpScanner(hosts=hosts)
``` |
{
"source": "jonescarissa/csc221",
"score": 4
} |
#### File: ch10/gaps/gaps.py
```python
import argparse
import os, re, shutil
filenameRegex = re.compile(r'''
^(.*?)
(\d{3})
(.*?)$
''', re.VERBOSE)
def fill_in_gaps(folder_path):
    '''Given a path to a folder, find all files with a given prefix, such
    as spam001.txt, spam002.txt, and so on, in a single folder (folder_path)
    and locate any gaps in the numbering (such as if there is a
    spam001.txt and spam003.txt but no spam002.txt). Rename all the later
    files to close this gap.
    Args:
        folder_path (str): Path to a folder in the file system
    Returns:
        None '''
    newDigitPart = 1
    for files in sorted(os.listdir(folder_path)):
        mo = filenameRegex.search(files)
        if mo is None:
            continue  # skip files that do not match the prefix###suffix pattern
        beforePart = mo.group(1)
        digitPart = mo.group(2)
        afterPart = mo.group(3)
        print(f'File found in {folder_path} is {files}')
        print('Need to rename: ' + digitPart + '\n')
        newFilename = beforePart + str(newDigitPart).zfill(3) + afterPart
        newDigitPart += 1
        source = os.path.join(folder_path, files)
        destination = os.path.join(folder_path, newFilename)
        if newFilename == files:
            print(f'{newFilename} is already numbered correctly')
        elif newFilename in os.listdir(folder_path):
            print(f'{newFilename} already exists!')
        else:
            print(f'Renaming {files} to {newFilename}')
            shutil.move(source, destination)
    print('Done')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('folder', help='Folder to search for .txt files')
args = parser.parse_args()
fill_in_gaps(args.folder)
if __name__ == '__main__':
main()
``` |
{
"source": "JonesChi/python-social-auth",
"score": 2
} |
#### File: examples/flask_example/manage.py
```python
import sys
from flask.ext.script import Server, Manager, Shell
sys.path.append('..')
from flask_example import app, db_session, engine
manager = Manager(app)
manager.add_command('runserver', Server())
manager.add_command('shell', Shell(make_context=lambda: {
'app': app,
'db_session': db_session
}))
@manager.command
def syncdb():
from flask_example.models import user
from social.apps.flask_app.default import models
user.Base.metadata.create_all(engine)
models.PSABase.metadata.create_all(engine)
if __name__ == '__main__':
manager.run()
```
#### File: social/backends/professionali.py
```python
from time import time
from social.utils import parse_qs
from social.backends.oauth import BaseOAuth2
class ProfessionaliOAuth2(BaseOAuth2):
name = 'professionali'
ID_KEY = 'user_id'
AUTHORIZATION_URL = 'https://api.professionali.ru/oauth/authorize.html'
ACCESS_TOKEN_URL = 'https://api.professionali.ru/oauth/getToken.json'
ACCESS_TOKEN_METHOD = 'POST'
EXTRA_DATA = [
('avatar_big', 'avatar_big'),
('link', 'link')
]
def get_user_details(self, response):
first_name, last_name = map(response.get, ('firstname', 'lastname'))
email = ''
if self.setting('FAKE_EMAIL'):
            email = '{0}@<EMAIL>'.format(time())
return {
'username': '{0}_{1}'.format(last_name, first_name),
'first_name': first_name,
'last_name': last_name,
'email': email
}
def user_data(self, access_token, response, *args, **kwargs):
url = 'https://api.professionali.ru/v6/users/get.json'
fields = list(set(['firstname', 'lastname', 'avatar_big', 'link'] +
self.setting('EXTRA_DATA', [])))
params = {
'fields': ','.join(fields),
'access_token': access_token,
'ids[]': response['user_id']
}
try:
return self.get_json(url, params)[0]
except (TypeError, KeyError, IOError, ValueError, IndexError):
return None
def get_json(self, url, *args, **kwargs):
return self.request(url, verify=False, *args, **kwargs).json()
def get_querystring(self, url, *args, **kwargs):
return parse_qs(self.request(url, verify=False, *args, **kwargs).text)
``` |
{
"source": "jonesclarence37/pomace",
"score": 2
} |
#### File: pomace/pomace/browser.py
```python
import sys
import log
from fake_useragent import UserAgent
from splinter import Browser
from splinter.exceptions import DriverNotFoundError
from webdriver_manager import chrome, firefox
from .config import settings
NAMES = ["Firefox", "Chrome"]
WEBDRIVER_MANAGERS = {
"chromedriver": chrome.ChromeDriverManager,
"geckodriver": firefox.GeckoDriverManager,
}
USER_AGENT = "Mozilla/5.0 Gecko/20100101 Firefox/53.0"
def launch() -> Browser:
if not settings.browser.name:
sys.exit("No browser specified")
if settings.browser.name == "open":
settings.browser.name = NAMES[0]
settings.browser.name = settings.browser.name.lower()
log.info(f"Launching browser: {settings.browser.name}")
options = {
"headless": settings.browser.headless,
"user_agent": UserAgent(fallback=USER_AGENT)[settings.browser.name],
"wait_time": 1.0,
}
log.debug(f"Options: {options}")
try:
return Browser(settings.browser.name, **options)
except DriverNotFoundError:
sys.exit(f"Unsupported browser: {settings.browser.name}")
except Exception as e: # pylint: disable=broad-except
log.debug(str(e))
if "exited process" in str(e):
sys.exit("Browser update prevented launch. Please try again.")
for driver, manager in WEBDRIVER_MANAGERS.items():
if driver in str(e).lower():
options["executable_path"] = manager().install()
return Browser(settings.browser.name, **options)
raise e from None
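# Typical use (illustrative): configure the browser through pomace settings first, e.g.
#   settings.browser.name = "firefox"
#   settings.browser.headless = True
#   browser = launch()
#   resize(browser)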
def resize(browser: Browser):
browser.driver.set_window_size(settings.browser.width, settings.browser.height)
browser.driver.set_window_position(0, 0)
size = browser.driver.get_window_size()
log.debug(f"Resized browser: {size}")
def save_url(browser: Browser):
    if settings.url != browser.url:
log.debug(f"Saving last browser URL: {browser.url}")
settings.url = browser.url
def save_size(browser: Browser):
size = browser.driver.get_window_size()
if size != (settings.browser.width, settings.browser.height):
log.debug(f"Saving last browser size: {size}")
settings.browser.width = size["width"]
settings.browser.height = size["height"]
```
#### File: pomace/tests/conftest.py
```python
import datafiles
import log
import pytest
from pomace import shared
class MockElement(str):
@property
def outer_html(self):
return f"<mockhtml>{self}</>"
@property
def visible(self):
return True
class MockLinks:
def find_by_partial_text(self, value):
return [MockElement(f"mockelement:links.partial_text={value}")]
class MockBrowser:
url = "http://example.com"
html = "Hello, world!"
def find_by_name(self, value):
return [MockElement(f"mockelement:name={value}")]
def find_by_css(self, value):
return [MockElement(f"mockelement:css={value}")]
links = MockLinks()
@pytest.fixture
def mockbrowser(monkeypatch):
browser = MockBrowser()
monkeypatch.setattr(shared, "browser", browser)
return browser
def pytest_configure(config):
log.init(debug=True)
log.silence("faker", "vcr")
terminal = config.pluginmanager.getplugin("terminal")
terminal.TerminalReporter.showfspath = False
def pytest_runtest_setup(item):
datafiles.settings.HOOKS_ENABLED = False
```
#### File: pomace/tests/test_cli.py
```python
from pathlib import Path
import pytest
from cleo import ApplicationTester
from pomace.cli import application
@pytest.fixture
def cli():
return ApplicationTester(application).execute
def describe_clone():
def with_url(cli):
cli("clone https://github.com/jacebrowning/pomace-twitter.com")
assert Path("sites", "twitter.com").is_dir()
def with_url_and_domain(cli):
cli(
"clone https://github.com/jacebrowning/pomace-twitter.com"
" twitter.fake --force"
)
assert Path("sites", "twitter.fake").is_dir()
def describe_clean():
def with_domain(cli):
cli("clean twitter.fake")
``` |
{
"source": "Jonescy/NewsCrawler",
"score": 3
} |
#### File: NewsCrawler/spiders/eastmoney.py
```python
from json import loads
from random import random
from re import match
from time import time
import scrapy
from requests import get
from NewsCrawler.items import NewsItem
from NewsCrawler.utils.call_nav_map import nav_map
from NewsCrawler.utils.validate_published import validate_replace
class EastmoneySpider(scrapy.Spider):
"""东方财富网7X24小时快讯"""
name = 'eastmoney'
allowed_domains = ['eastmoney.com']
base_url = 'https://newsapi.eastmoney.com/kuaixun/v1/getlist_102_ajaxResult_50_%(page)s_.html?r=%(ran_num)s&_=%(time_stamp)s'
time_stamp = str(time()).replace('.', '')[:-4]
ran_num = random()
start_urls = [base_url % {'page': 1, 'ran_num': ran_num, 'time_stamp': time_stamp}]
def parse(self, response):
"""解析出详情页的url,并实现翻页"""
item = NewsItem()
ajax_data = response.text.replace('var ajaxResult=', '')
data_list = loads(ajax_data)['LivesList']
for data in data_list:
item['news_id'] = data['newsid']
item['title'] = data['title']
item['link'] = data['url_unique']
item['nav_name'] = [nav_map[i] for i in data['column'].split(',') if i in nav_map.keys()]
item['published'] = validate_replace(data['showtime'])
yield scrapy.Request(item['link'], callback=self.parse_detail, meta={'item': item})
for page in range(2, 21):
            next_url = self.base_url % {'page': page, 'ran_num': self.ran_num, 'time_stamp': self.time_stamp}
yield scrapy.Request(next_url)
def parse_detail(self, response):
item = response.meta['item']
item['source'] = response.xpath('//div[@class="source data-source"]/@data-source').extract_first()
item['desc'] = response.xpath('//div[@class="b-review"]/text()').extract_first().strip()
item['content'] = []
item['images'] = []
p_list = response.xpath('//div[@id="ContentBody"]/p[not(@class)] | //div[@id="ContentBody"]/center')
for p in p_list:
if p.xpath('.//img'):
img_link = p.xpath('.//img/@src').extract_first()
# https://dfscdn.dfcfw.com/download/D25618177642896768707.jpg
if match(r"https://dfscdn\.dfcfw\.com/download/.*", img_link):
item['content'].append(img_link)
img_content = get(img_link).content
item['images'].append(img_content)
else:
text = ''.join(p.xpath('.//text()').extract()).strip()
if text:
item['content'].append(text)
yield item
```
#### File: NewsCrawler/spiders/hexun.py
```python
from copy import deepcopy
from time import strftime
import scrapy
from demjson import decode
from requests import get
from NewsCrawler.items import NewsItem
from NewsCrawler.utils.hexun_temp_time import temp_time
from NewsCrawler.utils.validate_published import validate_replace
class HexunSpider(scrapy.Spider):
"""定时刷新"""
name = 'hexun'
allowed_domains = ['hexun.com']
start_url = 'http://roll.hexun.com/roolNews_listRool.action?type=all&ids=100,101,103,125,105,124,162,194,108,122,121,119,107,116,114,115,182,120,169,170,177,180,118,190,200,155,130,117,153,106&date=%(date)s&page=%(page)s&tempTime=%(temp_time)s'
current_date = strftime("%Y-%m-%d")
start_urls = [start_url % {'date': current_date, 'page': 1, 'temp_time': int(temp_time)}]
def parse(self, response):
"""
        Parse the Hexun real-time news list API
:param response:
:return:
"""
        # Instantiate the item object
item = NewsItem()
data = decode(response.text)
news_list = data['list']
item['content'] = []
item['images'] = []
        # Request the detail pages
for news in news_list:
item['news_id'] = news['id']
item['nav_name'] = news['columnName']
item['title'] = news['title']
item['desc'] = news['desc']
item['link'] = news['titleLink']
            # Detail-page filtering
yield scrapy.Request(item['link'], callback=self.parse_detail, meta={'item': deepcopy(item)})
page_count = (int(data['sum']) // 30) + 1
        # Pagination requests
for page in range(2, page_count + 1):
next_url = self.start_url % {'date': self.current_date, 'page': page, 'temp_time': int(temp_time)}
yield scrapy.Request(next_url, callback=self.parse)
def parse_detail(self, response):
"""
解析详情页数据
:param response:
:return:
"""
item = response.meta['item']
published = response.xpath('//span[@class="pr20"]/text()').extract_first()
item['published'] = validate_replace(published)
source = response.xpath('//div[@class="tip fl"]/a/text()').extract_first()
if source:
item['source'] = source
item['source_link'] = response.xpath('//div[@class="tip fl"]/a/@href').extract_first()
else:
source, editor = ''.join(response.xpath('//div[@class="tip fl"]/text()').extract()).split()
item['source'] = source
item['editor'] = editor
context_box = response.xpath('//div[@class="art_context"]/div[@class="art_contextBox"]/p')
for content in context_box:
if not content.xpath('./img'):
text = content.xpath('.//text()').extract()
if text:
item['content'].append(''.join(text))
else:
img_url = content.xpath('./img/@src').extract_first()
item['content'].append(img_url)
b_data = get(img_url).content
item['images'].append(b_data)
yield item
```
#### File: NewsCrawler/scripts/de-duplication.py
```python
import pymongo
from NewsCrawler.settings import MONGO_URL
client = pymongo.MongoClient(MONGO_URL, maxPoolSize=1024)
def find_duplicate(collection):
collection.aggregate([
{'$group': {
            '_id': {'title': "$title", 'published': "$published", "link": "$link"}, # fields used for deduplication
            'uniqueIds': {'$addToSet': "$_id"}, # ids of the duplicate documents
            'count': {'$sum': 1} # number of duplicates
        }},
        {'$match': {
            'count': {'$gt': 1} # match groups that occur more than once
        }},
        {'$out': tmp_colName} # output collection name
], allowDiskUse=True)
def del_dup(tmp_collection, source_collection):
    # keep one copy of each duplicate group
for a in tmp_collection.find():
for b in a['uniqueIds'][1:]:
source_collection.delete_one({"_id": b})
    tmp_col.drop() # drop the temporary collection
if __name__ == '__main__':
tmp_colName = "tmp_news" # 中间表名
col_list = ['caijing', 'ce', 'eastmoney', 'hexun', 'news', 'newsqq', 'sina', 'wangyi']
for i in col_list:
col = client['news'][i]
tmp_col = client['news'][tmp_colName]
find_duplicate(col)
del_dup(tmp_col, col)
``` |
{
"source": "jonesdt/power-up",
"score": 2
} |
#### File: scripts/python/software_hosts.py
```python
from __future__ import nested_scopes, generators, division, absolute_import, \
with_statement, print_function, unicode_literals
import click
import os.path
from os import listdir, getlogin, getuid
import filecmp
import json
import pwd
import grp
from shutil import copyfile
from pathlib import Path
import re
import netaddr
import socket
from subprocess import CalledProcessError
import sys
from getpass import getpass
from socket import gethostname, getfqdn
from inventory import generate_dynamic_inventory
from lib.exception import UserException
import lib.logger as logger
from lib.genesis import get_python_path, CFG_FILE, \
get_dynamic_inventory_path, get_playbooks_path, get_ansible_path
from lib.utilities import bash_cmd, sub_proc_exec, heading1, get_selection, \
bold, get_yesno, remove_line, append_line, rlinput
def _get_dynamic_inventory():
log = logger.getlogger()
dynamic_inventory = None
config_pointer_file = get_python_path() + '/config_pointer_file'
if os.path.isfile(config_pointer_file):
with open(config_pointer_file) as f:
config_path = f.read()
else:
config_path = CFG_FILE
if os.path.isfile(config_path):
try:
dynamic_inventory = generate_dynamic_inventory()
except UserException as exc:
log.debug("UserException raised when attempting to generate "
"dynamic inventory: {}".format(exc))
if dynamic_inventory is None:
print("Dynamic inventory not found")
return dynamic_inventory
def _expand_children(dynamic_inventory, children_list):
"""Replace each children item with expanded dictionary
Args:
dynamic_inventory (dict): Dynamic inventory dictionary
children_list (list): List of children
Returns:
dict: Children dictionaries from dynamic inventory
"""
children_dict = {}
for child in children_list:
children_dict[child] = dynamic_inventory[child]
if 'children' in children_dict[child]:
children_dict[child]['children'] = _expand_children(
dynamic_inventory,
children_dict[child]['children'])
return children_dict
def _get_inventory_summary(dynamic_inventory, top_level_group='all'):
"""Get the Ansible inventory structured as a nested dictionary
with a single top level group (default 'all').
Args:
dynamic_inventory (dict): Dynamic inventory dictionary
top_level_group (str): Name of top level group
Returns:
dict: Inventory dictionary, including groups, 'hosts',
'children', and 'vars'.
"""
inventory_summary = {top_level_group: dynamic_inventory[top_level_group]}
if 'children' in inventory_summary[top_level_group]:
inventory_summary[top_level_group]['children'] = _expand_children(
dynamic_inventory,
inventory_summary[top_level_group]['children'])
return inventory_summary
def _get_hosts_list(dynamic_inventory, top_level_group='all'):
"""Get a list of hosts.
Args:
dynamic_inventory (dict): Dynamic inventory dictionary
top_level_group (str): Name of top level group
Returns:
list: List containing all inventory hosts
"""
hosts_list = []
if 'hosts' in dynamic_inventory[top_level_group]:
hosts_list += dynamic_inventory[top_level_group]['hosts']
if 'children' in dynamic_inventory[top_level_group]:
for child in dynamic_inventory[top_level_group]['children']:
hosts_list += _get_hosts_list(dynamic_inventory, child)
return hosts_list
def _get_groups_hosts_dict(dynamic_inventory, top_level_group='all'):
"""Get a dictionary of groups and hosts. Hosts will be listed under
their lowest level group membership only.
Args:
dynamic_inventory (dict): Dynamic inventory dictionary
top_level_group (str): Name of top level group
Returns:
dict: Dictionary containing groups with lists of hosts
"""
groups_hosts_dict = {}
if 'hosts' in dynamic_inventory[top_level_group]:
if top_level_group not in groups_hosts_dict:
groups_hosts_dict[top_level_group] = []
groups_hosts_dict[top_level_group] += (
dynamic_inventory[top_level_group]['hosts'])
if 'children' in dynamic_inventory[top_level_group]:
for child in dynamic_inventory[top_level_group]['children']:
groups_hosts_dict.update(_get_groups_hosts_dict(dynamic_inventory,
child))
return groups_hosts_dict
def _get_groups_hosts_string(dynamic_inventory):
"""Get a string containing groups and hosts formatted in the
Ansible inventory 'ini' style. Hosts will be listed under their
lowest level group membership only.
Args:
dynamic_inventory (dict): Dynamic inventory dictionary
Returns:
str: String containing groups with lists of hosts
"""
output_string = ""
groups_hosts_dict = _get_groups_hosts_dict(dynamic_inventory)
for host in groups_hosts_dict['all']:
output_string += host + "\n"
output_string += "\n"
for group, hosts in groups_hosts_dict.items():
if group != 'all':
output_string += "[" + group + "]\n"
for host in hosts:
output_string += host + "\n"
output_string += "\n"
return output_string.rstrip()
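# Example return value of _get_groups_hosts_string() (illustrative hostnames):
#   host1.domain.com
#   host2.domain.com
#
#   [master]
#   host1.domain.com
#
#   [compute]
#   host2.domain.com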
def _create_new_software_inventory(software_hosts_file_path):
hosts_template = ("""\
# Ansible Inventory File
#
# For detailed information visit:
# http://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
#
# Only host definitions are required. SSH keys can be automatically
# configured by pup or manually defined in this file.
#
# POWER-Up uses ssh keys to access the client nodes. If there is an
# existing ssh key pair available, you may enter it under the [all:vars]
# section (eg; ansible_ssh_private_key_file=/root/.ssh/your-private-key).
# If one is not available, POWER-Up will generate one for you. POWER-Up
# also needs an active user id for the client nodes. The POWER-Up
# software will prompt for the user id, or you may enter it under the
# [all:vars] section below (eg; ansible_ssh_user=egoadmin).
#
# Global variables can be defined via the [all:vars] group
# e.g.:
# [all:vars]
# ansible_ssh_user=egoadmin
#
# Group names are defined within brackets.
# A valid configuration must have one master node and may have one
# or more compute nodes.
#
# Hosts must be defined with a Fully Qualified Domain Name (FQDN)
# e.g.:
# [master]
# host1.domain.com # master host
#
# [compute]
# host3.domain.com # compute host 1
# host4.domain.com # compute host 2
[master]
# define master host on this line before the "#"
[compute]
# define first compute host on this line before the "#"
""")
hosts = None
while hosts is None:
hosts = click.edit(hosts_template)
if hosts is not None:
with open(software_hosts_file_path, "w") as new_hosts_file:
new_hosts_file.write(hosts)
elif not click.confirm('File not written! Try again?'):
return False
_set_software_hosts_owner_mode(software_hosts_file_path)
return True
def _set_software_hosts_owner_mode(software_hosts_file_path):
"""Set software_hosts file owner to "login" user
Args:
software_hosts_file_path (str): Path to software inventory file
"""
user_name = getlogin()
if getuid() == 0 and user_name != 'root':
user_uid = pwd.getpwnam(user_name).pw_uid
user_gid = grp.getgrnam(user_name).gr_gid
os.chown(software_hosts_file_path, user_uid, user_gid)
os.chmod(software_hosts_file_path, 0o644)
def _validate_inventory_count(software_hosts_file_path, min_hosts,
group='all'):
"""Validate minimum number of hosts are defined in inventory
Calls Ansible to process inventory which validates file syntax.
Args:
software_hosts_file_path (str): Path to software inventory file
min_hosts (int): Minimum number of hosts required to pass
group (str, optional): Ansible group name (defaults to 'all')
Returns:
list: List of hosts defined in software inventory file
Raises:
UserException: Ansible reports host count of less than min_hosts
"""
log = logger.getlogger()
host_count = None
host_list = []
raw_host_list = bash_cmd(f'ansible {group} -i {software_hosts_file_path} '
'--list-hosts')
# Iterate over ansible '--list-hosts' output
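    # which looks roughly like (illustrative):
    #   hosts (2):
    #     host1.domain.com
    #     host2.domain.com
    # so the "(N):" header gives the count and the indented lines give the hosts.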
count_verified = False
host_count_pattern = re.compile(r'.*\((\d+)\)\:$')
for host in raw_host_list.splitlines():
if not count_verified:
# Verify host count is > 0
match = host_count_pattern.match(host)
if match:
host_count = int(match.group(1))
log.debug("Ansible host count: {}".format(host_count))
if host_count < min_hosts:
raise UserException("Ansible reporting host count of less "
"than one ({})!".format(host_count))
count_verified = True
else:
host_list.append(host.strip())
log.debug("Software inventory host count validation passed")
log.debug("Ansible host list: {}".format(host_list))
return host_list
def _validate_host_list_network(host_list):
"""Validate all hosts in list are pingable
Args:
host_list (list): List of hostnames or IP addresses
Returns:
bool: True if all hosts are pingable
Raises:
UserException: If list item will not resolve or ping
"""
log = logger.getlogger()
for host in host_list:
# Check if host is given as IP address
if not netaddr.valid_ipv4(host, flags=0):
try:
socket.gethostbyname(host)
except socket.gaierror as exc:
log.debug("Unable to resolve host to IP: '{}' exception: '{}'"
.format(host, exc))
raise UserException("Unable to resolve hostname '{}'!"
.format(host))
else:
raise UserException('Client nodes must be defined using hostnames '
f'(IP address found: {host})!')
# Ping IP
try:
bash_cmd('fping -u {}'.format(' '.join(host_list)))
except CalledProcessError as exc:
msg = "Ping failed on hosts:\n{}".format(exc.output)
log.debug(msg)
raise UserException(msg)
log.debug("Software inventory host fping validation passed")
return True
def _check_known_hosts(host_list):
"""Ensure all hosts have entries in 'known_hosts' to avoid
Ansible's clunky yes/no prompting to accept keys (all prompts are
printed at once).
If any hosts are missing the user will be prompted to add it.
Args:
host_list (list): List of hostnames or IP addresses
"""
known_hosts_files = [os.path.join(Path.home(), ".ssh", "known_hosts")]
user_name, user_home_dir = get_user_and_home()
if os.environ['USER'] == 'root' and user_name != 'root':
known_hosts_files.append('/root/.ssh/known_hosts')
if not os.path.isdir('/root/.ssh'):
os.mkdir('/root/.ssh')
os.chmod('/root/.ssh', 0o700)
for host in host_list:
for known_hosts in known_hosts_files:
cmd = (f'ssh-keygen -F {host} -f {known_hosts}')
resp, err, rc = sub_proc_exec(cmd)
if rc != 0:
cmd = (f'ssh-keyscan -H {host}')
resp, err, rc = sub_proc_exec(cmd)
print(f'Adding \'{host}\' host keys to \'{known_hosts}\'')
append_line(known_hosts, resp, check_exists=False)
def _validate_ansible_ping(software_hosts_file_path, hosts_list):
"""Validate Ansible connectivity and functionality on all hosts
Args:
software_hosts_file_path (str): Path to software inventory file
host_list (list): List of hostnames or IP addresses
Returns:
bool: True if Ansible can connect to all hosts
Raises:
UserException: If any host fails
"""
log = logger.getlogger()
cmd = ('{} -i {} -m ping all'.format(get_ansible_path(),
software_hosts_file_path))
resp, err, rc = sub_proc_exec(cmd)
if str(rc) != "0":
msg = f'Ansible ping validation failed:\n{resp}'
log.debug(msg)
if 'WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!' in msg:
print(
'@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n'
'@ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED @\n'
'@ ON ONE OR MORE CLIENT NODES! @\n'
'@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n'
'IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!\n'
'Someone could be eavesdropping on you right now '
'(man-in-the-middle attack)!\n'
'It is also possible that a host key has just been changed.\n')
if get_yesno('Remove the existing known host keys? '):
known_hosts_files = (
[os.path.join(Path.home(), ".ssh", "known_hosts")])
user_name, user_home_dir = get_user_and_home()
if user_home_dir != str(Path.home()):
known_hosts_files.append(os.path.join(user_home_dir,
".ssh", "known_hosts"))
for host in hosts_list:
print(f'Collecting new host key(s) for {host}')
cmd = (f'ssh-keyscan -H {host}')
new_host_key, err, rc = sub_proc_exec(cmd)
for known_hosts in known_hosts_files:
print(f'Removing host keys for {host} '
f'from {known_hosts}')
cmd = (f'ssh-keygen -R {host} -f {known_hosts}')
resp, err, rc = sub_proc_exec(cmd)
print(f'Appending new host key for {host} to '
f'{known_hosts}')
append_line(known_hosts, new_host_key,
check_exists=False)
if user_home_dir != str(Path.home()):
user_known_hosts = os.path.join(user_home_dir, ".ssh",
"known_hosts")
user_uid = pwd.getpwnam(user_name).pw_uid
user_gid = grp.getgrnam(user_name).gr_gid
os.chown(user_known_hosts, user_uid, user_gid)
os.chmod(user_known_hosts, 0o600)
os.chown(user_known_hosts + '.old', user_uid, user_gid)
os.chmod(user_known_hosts + '.old', 0o600)
return _validate_ansible_ping(software_hosts_file_path,
hosts_list)
elif 'Permission denied' in msg:
msg = ('The PowerUp software installer attempted to log into the '
'the client node(s) but was unsuccessful. SSH key access may '
'need to be configured.\n')
print(msg)
if get_yesno('OK to configure Client Nodes for SSH Key Access? '):
configure_ssh_keys(software_hosts_file_path)
return _validate_ansible_ping(software_hosts_file_path,
hosts_list)
raise UserException(msg)
log.debug("Software inventory Ansible ping validation passed")
return True
def _validate_master_node_count(software_hosts_file_path, min_count,
max_count=0):
"""Validate number of nodes are defined in inventory's 'master'
group. Either an exact or minimum count can be validated.
Args:
software_hosts_file_path (str): Path to software inventory file
min_count (int): Minimum number of master nodes
max_count (int, optional): Maximum number of master nodes. If
set to 0 no maximum value is checked.
Returns:
bool: True validation passes
Raises:
UserException: Minimum or exact count is not present
"""
host_count = len(_validate_inventory_count(software_hosts_file_path, 0,
group='master'))
if host_count < min_count:
raise UserException(f'Inventory requires at least {min_count} master '
f'node(s) ({host_count} found)!')
elif max_count != 0 and host_count > max_count:
raise UserException(f'Inventory requires at most {max_count} master '
f'node(s) ({host_count} found)!')
else:
return True
def _validate_installer_is_not_client(host_list):
"""Validate the installer node is not listed as a client
Args:
host_list (list): List of hostnames
Returns:
bool: True validation passes
Raises:
UserException: If installer is listed as client
"""
hostname = gethostname()
fqdn = getfqdn()
if hostname in host_list or fqdn in host_list:
raise UserException('Installer can not be a target for install')
else:
return True
def _validate_client_hostnames(software_hosts_file_path, hosts_list):
"""Validate hostnames listed in inventory match client hostnames
Args:
software_hosts_file_path (str): Path to software inventory file
host_list (list): List of hostnames or IP addresses
Returns:
bool: True if all client hostnames match
Raises:
UserException: If any hostname does not match
"""
base_cmd = (f'{get_ansible_path()} -i {software_hosts_file_path} ')
msg = ""
for host in hosts_list:
cmd = base_cmd + f'{host} -a "hostname --fqdn"'
resp, err, rc = sub_proc_exec(cmd, shell=True)
hostname = resp.splitlines()[-1]
if hostname != host:
msg += (f"Inventory hostname mis-match: '{host}' is reporting "
f"an FQDN of '{hostname}'\n")
if msg != "":
raise UserException(msg)
else:
return True
def configure_ssh_keys(software_hosts_file_path):
"""Configure SSH keys for Ansible software hosts
Scan for SSH key pairs in home directory, and if called using
'sudo' also in "login" user's home directory. Allow user to create
a new SSH key pair if 'default_ssh_key_name' doesn't already exist.
If multiple choices are available user will be prompted to choose.
Selected key pair is copied into "login" user's home '.ssh'
directory if necessary. Selected key pair is then copied to all
hosts listed in 'software_hosts' file via 'ssh-copy-id', and
finally assigned to the 'ansible_ssh_private_key_file' var in
the 'software_hosts' '[all:vars]' section.
Args:
software_hosts_file_path (str): Path to software inventory file
"""
log = logger.getlogger()
default_ssh_key_name = "powerup"
ssh_key_options = get_existing_ssh_key_pairs(no_root_keys=True)
user_name, user_home_dir = get_user_and_home()
if os.path.join(user_home_dir, ".ssh",
default_ssh_key_name) not in ssh_key_options:
ssh_key_options.insert(0, 'Create New "powerup" Key Pair')
if len(ssh_key_options) == 1:
item = ssh_key_options[0]
elif len(ssh_key_options) > 1:
print(bold("\nSelect an SSH key to use:"))
choice, item = get_selection(ssh_key_options)
if item == 'Create New "powerup" Key Pair':
ssh_key = create_ssh_key_pair(default_ssh_key_name)
else:
ssh_key = item
ssh_key = copy_ssh_key_pair_to_user_dir(ssh_key)
add_software_hosts_global_var(
software_hosts_file_path,
"ansible_ssh_common_args='-o StrictHostKeyChecking=no'")
hostvars = get_ansible_hostvars(software_hosts_file_path)
run = True
while run:
global_user = None
global_pass = None
header_printed = False
header_msg = bold('\nGlobal client SSH login credentials required')
for host in _validate_inventory_count(software_hosts_file_path, 0):
if global_user is None and 'ansible_user' not in hostvars[host]:
print(header_msg)
header_printed = True
global_user = rlinput('username: ')
add_software_hosts_global_var(software_hosts_file_path,
f'ansible_user={global_user}')
if (global_pass is None and
'ansible_ssh_pass' not in hostvars[host]):
if not header_printed:
print(header_msg)
global_pass = getpass('password: ')
if global_user is not None and global_pass is not None:
break
heading1("Copying SSH Public Keys to Hosts\n")
rc = copy_ssh_key_pair_to_hosts(ssh_key, software_hosts_file_path,
global_pass)
if not rc:
log.warning("One or more SSH key copy failed!")
choice, item = get_selection(['Retry', 'Continue', 'Exit'])
if choice == "1":
pass
elif choice == "2":
run = False
elif choice == "3":
log.debug('User chooses to exit.')
sys.exit('Exiting')
else:
print()
log.info("SSH key successfully copied to all hosts\n")
run = False
add_software_hosts_global_var(software_hosts_file_path,
f'ansible_ssh_private_key_file={ssh_key}')
def add_software_hosts_global_var(software_hosts_file_path, entry):
"""Copy an SSH public key into software hosts authorized_keys files
Add entry to software_hosts '[all:vars]' section. Any existing
entries with the same key name (string before '=') will be
overwritten.
Args:
software_hosts_file_path (str): Path to software inventory file
entry (str) : Entry to write in software_hosts '[all:vars]'
"""
remove_line(software_hosts_file_path, '^ansible_ssh_private_key_file=.*')
append_line(software_hosts_file_path, '[all:vars]')
with open(software_hosts_file_path, 'r') as software_hosts_read:
software_hosts = software_hosts_read.readlines()
in_all_vars = False
prev_line = "BOF"
with open(software_hosts_file_path, 'w') as software_hosts_write:
for line in software_hosts:
if line.startswith("[all:vars]"):
if prev_line != "\n":
line = "\n" + line
line = line + f'{entry}\n'
in_all_vars = True
elif in_all_vars and line.startswith('['):
in_all_vars = False
elif in_all_vars and line.startswith(entry.split('=')[0]):
continue
software_hosts_write.write(line)
prev_line = line
_set_software_hosts_owner_mode(software_hosts_file_path)
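# A minimal sketch of how this helper is used elsewhere in this module (the
# inventory path and value below are placeholders, not from the source):
#   add_software_hosts_global_var('software_hosts', 'ansible_user=deployer')
# Re-running with a different value simply overwrites the existing
# 'ansible_user=' line in '[all:vars]'.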
def get_existing_ssh_key_pairs(no_root_keys=False):
"""Get a list of existing SSH private/public key paths from
'~/.ssh/'. If called with 'sudo' and 'no_root_keys=False', then get
list from both '/root/.ssh/' and '~/.ssh'. If 'no_root_keys=True'
then any private keys located in '/root/.ssh' will be omitted.
Args:
no_root_keys (bool): Do not return any keys from '/root/.ssh'
Returns:
list of str: List of private ssh key paths
"""
ssh_key_pairs = []
ssh_dir = os.path.join(Path.home(), ".ssh")
if (not ('/root' == str(Path.home()) and no_root_keys) and
os.path.isdir(ssh_dir)):
for item in listdir(ssh_dir):
item = os.path.join(ssh_dir, item)
if os.path.isfile(item + '.pub'):
ssh_key_pairs.append(item)
user_name, user_home_dir = get_user_and_home()
if user_home_dir != str(Path.home()):
user_ssh_dir = os.path.join(user_home_dir, ".ssh")
if os.path.isdir(user_ssh_dir):
for item in listdir(user_ssh_dir):
item = os.path.join(user_ssh_dir, item)
if os.path.isfile(item + '.pub'):
ssh_key_pairs.append(item)
return ssh_key_pairs
def create_ssh_key_pair(name):
"""Create an SSH private/public key pair in ~/.ssh/
If an SSH key pair exists with "name" then the private key path is
returned *without* creating anything new.
Args:
name (str): Filename of private key file
Returns:
str: Private ssh key path
Raises:
UserException: If ssh-keygen command fails
"""
log = logger.getlogger()
ssh_dir = os.path.join(Path.home(), ".ssh")
private_key_path = os.path.join(ssh_dir, name)
if not os.path.isdir(ssh_dir):
os.mkdir(ssh_dir, mode=0o700)
if os.path.isfile(private_key_path):
log.info(f'SSH key \'{private_key_path}\' already exists, continuing')
else:
print(bold(f'Creating SSH key \'{private_key_path}\''))
cmd = ('ssh-keygen -t rsa -b 4096 '
'-C "Generated by Power-Up Software Installer" '
f'-f {private_key_path} -N ""')
resp, err, rc = sub_proc_exec(cmd, shell=True)
if str(rc) != "0":
msg = 'ssh-keygen failed:\n{}'.format(resp)
log.debug(msg)
raise UserException(msg)
return private_key_path
def copy_ssh_key_pair_to_user_dir(private_key_path):
"""Copy an SSH private/public key pair into the user's ~/.ssh dir
This function is useful when a key pair is created as root user
(e.g. using 'sudo') but should also be available to the user for
direct 'ssh' calls.
If the private key is already in the user's ~/.ssh directory
nothing is done.
Args:
private_key_path (str) : Filename of private key file
Returns:
str: Path to user copy of private key
"""
public_key_path = private_key_path + '.pub'
user_name, user_home_dir = get_user_and_home()
user_ssh_dir = os.path.join(user_home_dir, ".ssh")
if user_ssh_dir not in private_key_path:
user_private_key_path = os.path.join(
user_ssh_dir, os.path.basename(private_key_path))
user_public_key_path = user_private_key_path + '.pub'
user_uid = pwd.getpwnam(user_name).pw_uid
user_gid = grp.getgrnam(user_name).gr_gid
if not os.path.isdir(user_ssh_dir):
os.mkdir(user_ssh_dir, mode=0o700)
os.chown(user_ssh_dir, user_uid, user_gid)
# Never overwrite an existing private key file!
already_copied = False
while os.path.isfile(user_private_key_path):
# If key pair already exists no need to do anything
if (filecmp.cmp(private_key_path, user_private_key_path) and
filecmp.cmp(public_key_path, user_public_key_path)):
already_copied = True
break
else:
user_private_key_path += "_powerup"
user_public_key_path = user_private_key_path + '.pub'
if already_copied:
print(f'\'{private_key_path}\' already copied to '
f'\'{user_private_key_path}\'')
else:
print(bold(f'Copying \'{private_key_path}\' to '
f'\'{user_private_key_path}\' for unprivileged use'))
copyfile(private_key_path, user_private_key_path)
copyfile(public_key_path, user_public_key_path)
os.chown(user_private_key_path, user_uid, user_gid)
os.chmod(user_private_key_path, 0o600)
os.chown(user_public_key_path, user_uid, user_gid)
os.chmod(user_public_key_path, 0o644)
else:
user_private_key_path = private_key_path
return user_private_key_path
def copy_ssh_key_pair_to_hosts(private_key_path, software_hosts_file_path,
global_pass=None):
"""Copy an SSH public key into software hosts authorized_keys files
TODO: detailed description
Args:
private_key_path (str) : Filename of private key file
software_hosts_file_path (str): Path to software inventory file
global_pass (str, optional): Global client default SSH password
Returns:
        bool: True only if every ssh-copy-id command returns rc 0
"""
hosts_list = _validate_inventory_count(software_hosts_file_path, 0)
all_zero_returns = True
hostvars = get_ansible_hostvars(software_hosts_file_path)
for host in hosts_list:
print(bold(f'Copy SSH Public Key to {host}'))
cmd = f'ssh-copy-id -i {private_key_path} '
if "ansible_port" in hostvars[host]:
cmd += f'-p {hostvars[host]["ansible_port"]} '
if "ansible_ssh_common_args" in hostvars[host]:
cmd += f'{hostvars[host]["ansible_ssh_common_args"]} '
cmd += f'{hostvars[host]["ansible_user"]}@{host}'
if 'ansible_ssh_pass' not in hostvars[host]:
cmd = f'SSHPASS=\'{global_pass}\' sshpass -e ' + cmd
resp, err, rc = sub_proc_exec(cmd, shell=True)
if rc != 0:
all_zero_returns = False
print(err)
return all_zero_returns
def get_ansible_hostvars(software_hosts_file_path):
"""Get Ansible generated 'hostvars' dictionary
Args:
software_hosts_file_path (str): Path to software inventory file
Returns:
dict: Ansible 'hostvars' dictionary
"""
cmd = (f'ansible-inventory --inventory {software_hosts_file_path} --list')
resp, err, rc = sub_proc_exec(cmd, shell=True)
hostvars = json.loads(resp)['_meta']['hostvars']
return hostvars
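# Sketch of how the returned mapping is consumed below (hostname is hypothetical):
#   hostvars = get_ansible_hostvars('software_hosts')
#   port = hostvars['client-node-1'].get('ansible_port', '22')
#   user = hostvars['client-node-1'].get('ansible_user')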
def get_user_and_home():
"""Get user name and home directory path
Returns the user account calling the script, *not* 'root' even
when called with 'sudo'.
Returns:
user_name, user_home_dir (tuple): User name and home dir path
Raises:
UserException: If 'getent' command fails
"""
log = logger.getlogger()
user_name = getlogin()
cmd = f'getent passwd {user_name}'
resp, err, rc = sub_proc_exec(cmd, shell=True)
if str(rc) != "0":
msg = 'getent failed:\n{}'.format(err)
log.debug(msg)
raise UserException(msg)
user_home_dir = resp.split(':')[5].rstrip()
return (user_name, user_home_dir)
def validate_software_inventory(software_hosts_file_path):
"""Validate Ansible software inventory
Args:
software_hosts_file_path (str): Path to software inventory file
Returns:
        bool: True if validation passes
"""
try:
# Validate file syntax and host count
hosts_list = _validate_inventory_count(software_hosts_file_path, 1)
# Validate installer is not in inventory
_validate_installer_is_not_client(hosts_list)
# Validate hostname resolution and network connectivity
_validate_host_list_network(hosts_list)
# Validate master node count is exactly 1
_validate_master_node_count(software_hosts_file_path, 1, 1)
# Ensure hosts keys exist in known_hosts
_check_known_hosts(hosts_list)
# Validate complete Ansible connectivity
_validate_ansible_ping(software_hosts_file_path, hosts_list)
# Validate hostnames listed in inventory match client hostnames
_validate_client_hostnames(software_hosts_file_path, hosts_list)
except UserException as exc:
print("Inventory validation error: {}".format(exc))
return False
# If no exceptions were caught validation passed
return True
def get_ansible_inventory():
log = logger.getlogger()
inventory_choice = None
dynamic_inventory_path = get_dynamic_inventory_path()
software_hosts_file_path = (
os.path.join(get_playbooks_path(), 'software_hosts'))
heading1("Software hosts inventory setup\n")
dynamic_inventory = None
# If dynamic inventory contains clients prompt user to use it
if (dynamic_inventory is not None and
len(set(_get_hosts_list(dynamic_inventory)) -
set(['deployer', 'localhost'])) > 0):
print("Ansible Dynamic Inventory found:")
print("--------------------------------")
print(_get_groups_hosts_string(dynamic_inventory))
print("--------------------------------")
validate_software_inventory(dynamic_inventory)
if click.confirm('Do you want to use this inventory?'):
print("Using Ansible Dynamic Inventory")
inventory_choice = dynamic_inventory_path
else:
print("NOT using Ansible Dynamic Inventory")
# If dynamic inventory has no hosts or user declines to use it
if inventory_choice is None:
while True:
# Check if software inventory file exists
if os.path.isfile(software_hosts_file_path):
print("Software inventory file found at '{}':"
.format(software_hosts_file_path))
# If no software inventory file exists create one using template
else:
rlinput("Press enter to create client node inventory")
_create_new_software_inventory(software_hosts_file_path)
# If still no software inventory file exists prompt user to
# exit (else start over to create one).
if not os.path.isfile(software_hosts_file_path):
print("No inventory file found at '{}'"
.format(software_hosts_file_path))
if click.confirm('Do you want to exit the program?'):
sys.exit(1)
else:
continue
            # Menu items can be modified to show validation results
continue_msg = 'Continue with current inventory'
edit_msg = 'Edit inventory file'
exit_msg = 'Exit program'
ssh_config_msg = 'Configure Client Nodes for SSH Key Access'
menu_items = []
# Validate software inventory
inv_count = len(_validate_inventory_count(software_hosts_file_path,
0))
print(f'Validating software inventory ({inv_count} nodes)...')
if validate_software_inventory(software_hosts_file_path):
print(bold("Validation passed!"))
else:
print(bold("Unable to complete validation"))
continue_msg = ("Continue with inventory as-is - "
"WARNING: Validation incomplete")
menu_items.append(ssh_config_msg)
# Prompt user
menu_items += [continue_msg, edit_msg, exit_msg]
choice, item = get_selection(menu_items)
print(f'Choice: {choice} Item: {item}')
if item == ssh_config_msg:
configure_ssh_keys(software_hosts_file_path)
elif item == continue_msg:
print("Using '{}' as inventory"
.format(software_hosts_file_path))
inventory_choice = software_hosts_file_path
break
elif item == edit_msg:
click.edit(filename=software_hosts_file_path)
elif item == exit_msg:
sys.exit(1)
if inventory_choice is None:
log.error("Software inventory file is required to continue!")
sys.exit(1)
log.debug("User software inventory choice: {}".format(inventory_choice))
return inventory_choice
if __name__ == '__main__':
logger.create()
print(get_ansible_inventory())
``` |
{
"source": "joneshf/purs-tools",
"score": 2
} |
#### File: rules_purescript/tests/rules_tests.bzl
```python
load(
"@bazel_skylib//lib:paths.bzl",
"paths",
)
load(
"@bazel_skylib//lib:unittest.bzl",
"analysistest",
"asserts",
)
load(
"//internal:rules.bzl",
"purescript_binary",
"purescript_library",
"purescript_package",
)
load(
":list_helpers.bzl",
"contains",
"find_action",
)
def _purescript_binary_works_with_only_purescript_implementation_test(ctx):
"""
Test to verify that compiled PureScript files generate the correct actions.
"""
env = analysistest.begin(ctx)
actions = analysistest.target_actions(env)
purs_compile_module_action = find_action(env, actions, "PursCompileModule")
inputs = [input.basename for input in purs_compile_module_action.inputs.to_list()]
asserts.equals(env, 2, len(inputs))
contains(env, inputs, "purs-compile-module", "Expected purs-compile-module to be an input")
contains(env, inputs, "PureScriptOnly.purs", "Expected PureScriptOnly.purs to be an input")
outputs = [output.basename for output in purs_compile_module_action.outputs.to_list()]
asserts.equals(env, 1, len(outputs))
contains(env, outputs, "index.js", "Expected index.js to be an output")
argv = purs_compile_module_action.argv
contains(env, argv, "--output-javascript-file", "Expected --output-javascript-file to be an argument")
contains(env, argv, "--purs-file", "Expected --purs-file to be an argument")
purs_bundle_action = find_action(env, actions, "PursBundle")
inputs = [input.basename for input in purs_bundle_action.inputs.to_list()]
asserts.equals(env, 2, len(inputs))
contains(env, inputs, "purs", "Expected purs to be an input")
contains(env, inputs, "index.js", "Expected index.js to be an input")
outputs = [output.basename for output in purs_bundle_action.outputs.to_list()]
asserts.equals(env, 1, len(outputs))
contains(env, outputs, "purescript_binary_works_with_only_purescript_fake_target.js", "Expected purescript_binary_works_with_only_purescript_fake_target.js to be an output")
argv = purs_bundle_action.argv
contains(env, argv, "--main", "Expected --main to be an argument")
contains(env, argv, "--module", "Expected --module to be an argument")
contains(env, argv, "--output", "Expected --output to be an argument")
return analysistest.end(env)
_purescript_binary_works_with_only_purescript_test = analysistest.make(
_purescript_binary_works_with_only_purescript_implementation_test,
)
def _purescript_binary_works_with_purescript_and_ffi_implementation_test(ctx):
"""
Test to verify that both compiled PureScript and FFI files generate the correct actions.
"""
env = analysistest.begin(ctx)
actions = analysistest.target_actions(env)
purs_compile_module_action = find_action(env, actions, "PursCompileModule")
inputs = [input.basename for input in purs_compile_module_action.inputs.to_list()]
asserts.equals(env, 3, len(inputs))
contains(env, inputs, "purs-compile-module", "Expected purs-compile-module to be an input")
contains(env, inputs, "PureScriptAndFFI.js", "Expected PureScriptAndFFI.js to be an input")
contains(env, inputs, "PureScriptAndFFI.purs", "Expected PureScriptAndFFI.purs to be an input")
outputs = [output.basename for output in purs_compile_module_action.outputs.to_list()]
asserts.equals(env, 2, len(outputs))
contains(env, outputs, "foreign.js", "Expected foreign.js to be an output")
contains(env, outputs, "index.js", "Expected index.js to be an output")
argv = purs_compile_module_action.argv
contains(env, argv, "--input-ffi-file", "Expected --input-ffi-file to be an argument")
contains(env, argv, "--output-ffi-file", "Expected --output-ffi-file to be an argument")
contains(env, argv, "--output-javascript-file", "Expected --output-javascript-file to be an argument")
contains(env, argv, "--purs-file", "Expected --purs-file to be an argument")
purs_bundle_action = find_action(env, actions, "PursBundle")
inputs = [input.basename for input in purs_bundle_action.inputs.to_list()]
asserts.equals(env, 3, len(inputs))
contains(env, inputs, "purs", "Expected purs to be an input")
contains(env, inputs, "foreign.js", "Expected foreign.js to be an input")
contains(env, inputs, "index.js", "Expected index.js to be an input")
outputs = [output.basename for output in purs_bundle_action.outputs.to_list()]
asserts.equals(env, 1, len(outputs))
contains(env, outputs, "purescript_binary_works_with_purescript_and_ffi_fake_target.js", "Expected purescript_binary_works_with_purescript_and_ffi_fake_target.js to be an output")
argv = purs_bundle_action.argv
contains(env, argv, "--main", "Expected --main to be an argument")
contains(env, argv, "--module", "Expected --module to be an argument")
contains(env, argv, "--output", "Expected --output to be an argument")
return analysistest.end(env)
_purescript_binary_works_with_purescript_and_ffi_test = analysistest.make(
_purescript_binary_works_with_purescript_and_ffi_implementation_test,
)
def _purescript_binary_works_with_dependencies_implementation_test(ctx):
"""
Test to verify that compiled PureScript files generate the correct actions.
"""
env = analysistest.begin(ctx)
actions = analysistest.target_actions(env)
purs_compile_module_action = find_action(env, actions, "PursCompileModule")
inputs = [input.basename for input in purs_compile_module_action.inputs.to_list()]
asserts.equals(env, 3, len(inputs))
contains(env, inputs, "purs-compile-module", "Expected purs-compile-module to be an input")
contains(env, inputs, "Foo.purs", "Expected Foo.purs to be an input")
contains(env, inputs, "signature-externs.cbor", "Expected signature-externs.cbor to be an input")
outputs = [output.basename for output in purs_compile_module_action.outputs.to_list()]
asserts.equals(env, 1, len(outputs))
contains(env, outputs, "index.js", "Expected index.js to be an output")
argv = purs_compile_module_action.argv
contains(env, argv, "--output-javascript-file", "Expected --output-javascript-file to be an argument")
contains(env, argv, "--purs-file", "Expected --purs-file to be an argument")
purs_bundle_action = find_action(env, actions, "PursBundle")
inputs = []
for input in purs_bundle_action.inputs.to_list():
inputs.append(paths.join(paths.basename(input.dirname), input.basename))
asserts.equals(env, 3, len(inputs))
# The repository can change depending on where the tests are run.
# Only check the binary name.
    contains(env, [input.basename for input in purs_bundle_action.inputs.to_list()], "purs", "Expected purs to be an input")
contains(env, inputs, "Foo/index.js", "Expected Foo/index.js to be an input")
contains(env, inputs, "Bar/index.js", "Expected Bar/index.js to be an input")
outputs = [output.basename for output in purs_bundle_action.outputs.to_list()]
asserts.equals(env, 1, len(outputs))
contains(env, outputs, "purescript_binary_works_with_dependencies_foo_fake_target.js", "Expected purescript_binary_works_with_dependencies_foo_fake_target.js to be an output")
argv = purs_bundle_action.argv
contains(env, argv, "--main", "Expected --main to be an argument")
contains(env, argv, "--module", "Expected --module to be an argument")
contains(env, argv, "--output", "Expected --output to be an argument")
return analysistest.end(env)
_purescript_binary_works_with_dependencies_test = analysistest.make(
_purescript_binary_works_with_dependencies_implementation_test,
)
def _purescript_library_works_with_only_purescript_implementation_test(ctx):
"""
Test to verify that compiled PureScript files generate the correct actions.
"""
env = analysistest.begin(ctx)
actions = analysistest.target_actions(env)
purs_compile_module_action = find_action(env, actions, "PursCompileModule")
inputs = [input.basename for input in purs_compile_module_action.inputs.to_list()]
asserts.equals(env, 2, len(inputs))
contains(env, inputs, "purs-compile-module", "Expected purs-compile-module to be an input")
contains(env, inputs, "PureScriptOnly.purs", "Expected PureScriptOnly.purs to be an input")
outputs = [output.basename for output in purs_compile_module_action.outputs.to_list()]
asserts.equals(env, 3, len(outputs))
contains(env, outputs, "index.js", "Expected index.js to be an output")
contains(env, outputs, "signature-externs.cbor", "Expected signature-externs.cbor to be an output")
contains(env, outputs, "standard-externs.cbor", "Expected standard-externs.cbor to be an output")
argv = purs_compile_module_action.argv
contains(env, argv, "--output-javascript-file", "Expected --output-javascript-file to be an argument")
contains(env, argv, "--output-signature-externs-file", "Expected --output-signature-externs-file to be an argument")
contains(env, argv, "--output-standard-externs-file", "Expected --output-standard-externs-file to be an argument")
contains(env, argv, "--purs-file", "Expected --purs-file to be an argument")
return analysistest.end(env)
_purescript_library_works_with_only_purescript_test = analysistest.make(
_purescript_library_works_with_only_purescript_implementation_test,
)
def _purescript_library_works_with_purescript_and_ffi_implementation_test(ctx):
"""
Test to verify that both compiled PureScript and FFI files generate the correct actions.
"""
env = analysistest.begin(ctx)
actions = analysistest.target_actions(env)
purs_compile_module_action = find_action(env, actions, "PursCompileModule")
inputs = [input.basename for input in purs_compile_module_action.inputs.to_list()]
asserts.equals(env, 3, len(inputs))
contains(env, inputs, "purs-compile-module", "Expected purs-compile-module to be an input")
contains(env, inputs, "PureScriptAndFFI.js", "Expected PureScriptAndFFI.js to be an input")
contains(env, inputs, "PureScriptAndFFI.purs", "Expected PureScriptAndFFI.purs to be an input")
outputs = [output.basename for output in purs_compile_module_action.outputs.to_list()]
asserts.equals(env, 4, len(outputs))
contains(env, outputs, "foreign.js", "Expected foreign.js to be an output")
contains(env, outputs, "index.js", "Expected index.js to be an output")
contains(env, outputs, "signature-externs.cbor", "Expected signature-externs.cbor to be an output")
contains(env, outputs, "standard-externs.cbor", "Expected standard-externs.cbor to be an output")
argv = purs_compile_module_action.argv
contains(env, argv, "--input-ffi-file", "Expected --input-ffi-file to be an argument")
contains(env, argv, "--output-ffi-file", "Expected --output-ffi-file to be an argument")
contains(env, argv, "--output-javascript-file", "Expected --output-javascript-file to be an argument")
contains(env, argv, "--output-signature-externs-file", "Expected --output-signature-externs-file to be an argument")
contains(env, argv, "--output-standard-externs-file", "Expected --output-standard-externs-file to be an argument")
contains(env, argv, "--purs-file", "Expected --purs-file to be an argument")
return analysistest.end(env)
_purescript_library_works_with_purescript_and_ffi_test = analysistest.make(
_purescript_library_works_with_purescript_and_ffi_implementation_test,
)
def _purescript_library_works_with_dependencies_implementation_test(ctx):
"""
Test to verify that compiled PureScript files generate the correct actions.
"""
env = analysistest.begin(ctx)
actions = analysistest.target_actions(env)
purs_compile_module_action = find_action(env, actions, "PursCompileModule")
inputs = [input.basename for input in purs_compile_module_action.inputs.to_list()]
asserts.equals(env, 3, len(inputs))
contains(env, inputs, "purs-compile-module", "Expected purs-compile-module to be an input")
contains(env, inputs, "Foo.purs", "Expected Foo.purs to be an input")
contains(env, inputs, "signature-externs.cbor", "Expected signature-externs.cbor to be an input")
outputs = [output.basename for output in purs_compile_module_action.outputs.to_list()]
asserts.equals(env, 3, len(outputs))
contains(env, outputs, "index.js", "Expected index.js to be an output")
contains(env, outputs, "signature-externs.cbor", "Expected signature-externs.cbor to be an output")
contains(env, outputs, "standard-externs.cbor", "Expected standard-externs.cbor to be an output")
argv = purs_compile_module_action.argv
contains(env, argv, "--output-javascript-file", "Expected --output-javascript-file to be an argument")
contains(env, argv, "--output-signature-externs-file", "Expected --output-signature-externs-file to be an argument")
contains(env, argv, "--output-standard-externs-file", "Expected --output-standard-externs-file to be an argument")
contains(env, argv, "--purs-file", "Expected --purs-file to be an argument")
return analysistest.end(env)
_purescript_library_works_with_dependencies_test = analysistest.make(
_purescript_library_works_with_dependencies_implementation_test,
)
def _purescript_package_works_with_only_purescript_implementation_test(ctx):
"""
Test to verify that compiled PureScript files generate the correct actions.
"""
env = analysistest.begin(ctx)
actions = analysistest.target_actions(env)
purs_compile_action = find_action(env, actions, "PursCompile")
inputs = [input.basename for input in purs_compile_action.inputs.to_list()]
asserts.equals(env, 2, len(inputs))
contains(env, inputs, "purs-compile", "Expected purs-compile to be an input")
contains(env, inputs, "PureScriptOnly.purs", "Expected PureScriptOnly.purs to be an input")
outputs = [output.basename for output in purs_compile_action.outputs.to_list()]
asserts.equals(env, 1, len(outputs))
contains(env, outputs, "output-purescript_package_works_with_only_purescript_fake_target", "Expected output-purescript_package_works_with_only_purescript_fake_target to be an output")
argv = purs_compile_action.argv
contains(env, argv, "--output", "Expected --output to be an argument")
return analysistest.end(env)
_purescript_package_works_with_only_purescript_test = analysistest.make(
_purescript_package_works_with_only_purescript_implementation_test,
)
def _purescript_package_works_with_purescript_and_ffi_implementation_test(ctx):
"""
Test to verify that both compiled PureScript and FFI files generate the correct actions.
"""
env = analysistest.begin(ctx)
actions = analysistest.target_actions(env)
purs_compile_action = find_action(env, actions, "PursCompile")
inputs = [input.basename for input in purs_compile_action.inputs.to_list()]
asserts.equals(env, 3, len(inputs))
contains(env, inputs, "purs-compile", "Expected purs-compile to be an input")
contains(env, inputs, "PureScriptAndFFI.js", "Expected PureScriptAndFFI.js to be an input")
contains(env, inputs, "PureScriptAndFFI.purs", "Expected PureScriptAndFFI.purs to be an input")
outputs = [output.basename for output in purs_compile_action.outputs.to_list()]
asserts.equals(env, 1, len(outputs))
contains(env, outputs, "output-purescript_package_works_with_purescript_and_ffi_fake_target", "Expected output-purescript_package_works_with_purescript_and_ffi_fake_target to be an output")
argv = purs_compile_action.argv
contains(env, argv, "--output", "Expected --output to be an argument")
return analysistest.end(env)
_purescript_package_works_with_purescript_and_ffi_test = analysistest.make(
_purescript_package_works_with_purescript_and_ffi_implementation_test,
)
def _purescript_package_works_with_dependencies_implementation_test(ctx):
"""
Test to verify that compiled PureScript files generate the correct actions.
"""
env = analysistest.begin(ctx)
actions = analysistest.target_actions(env)
purs_compile_action = find_action(env, actions, "PursCompile")
inputs = [input.basename for input in purs_compile_action.inputs.to_list()]
asserts.equals(env, 3, len(inputs))
contains(env, inputs, "purs-compile", "Expected purs-compile to be an input")
contains(env, inputs, "Foo.purs", "Expected Foo.purs to be an input")
contains(env, inputs, "output-purescript_package_works_with_dependencies_bar_fake_target", "Expected output-purescript_package_works_with_dependencies_bar_fake_target to be an input")
outputs = [output.basename for output in purs_compile_action.outputs.to_list()]
asserts.equals(env, 1, len(outputs))
contains(env, outputs, "output-purescript_package_works_with_dependencies_foo_fake_target", "Expected output-purescript_package_works_with_dependencies_foo_fake_target to be an output")
argv = purs_compile_action.argv
contains(env, argv, "--output", "Expected --output to be an argument")
contains(env, argv, "--include", "Expected --include to be an argument")
return analysistest.end(env)
_purescript_package_works_with_dependencies_test = analysistest.make(
_purescript_package_works_with_dependencies_implementation_test,
)
def purescript_binary_tests_suite(name):
"""
A suite of tests around purescript_binary.
Args:
name: A unique name for this target.
"""
_purescript_binary_works_with_only_purescript_test(
name = "purescript_binary_works_with_only_purescript_test",
target_under_test = ":purescript_binary_works_with_only_purescript_fake_target",
)
purescript_binary(
name = "purescript_binary_works_with_only_purescript_fake_target",
module = "PureScriptOnly",
src = "PureScriptOnly.purs",
tags = [
"manual",
],
)
_purescript_binary_works_with_purescript_and_ffi_test(
name = "purescript_binary_works_with_purescript_and_ffi_test",
target_under_test = ":purescript_binary_works_with_purescript_and_ffi_fake_target",
)
purescript_binary(
name = "purescript_binary_works_with_purescript_and_ffi_fake_target",
ffi = "PureScriptAndFFI.js",
module = "PureScriptAndFFI",
src = "PureScriptAndFFI.purs",
tags = [
"manual",
],
)
_purescript_binary_works_with_dependencies_test(
name = "purescript_binary_works_with_dependencies_test",
target_under_test = ":purescript_binary_works_with_dependencies_foo_fake_target",
)
purescript_binary(
name = "purescript_binary_works_with_dependencies_foo_fake_target",
module = "Foo",
src = "Foo.purs",
deps = [
":purescript_binary_works_with_dependencies_bar_fake_target",
],
tags = [
"manual",
],
)
purescript_library(
name = "purescript_binary_works_with_dependencies_bar_fake_target",
module = "Bar",
src = "Bar.purs",
tags = [
"manual",
],
)
def purescript_library_tests_suite(name):
"""
A suite of tests around purescript_library.
Args:
name: A unique name for this target.
"""
_purescript_library_works_with_only_purescript_test(
name = "purescript_library_works_with_only_purescript_test",
target_under_test = ":purescript_library_works_with_only_purescript_fake_target",
)
purescript_library(
name = "purescript_library_works_with_only_purescript_fake_target",
module = "PureScriptOnly",
src = "PureScriptOnly.purs",
tags = [
"manual",
],
)
_purescript_library_works_with_purescript_and_ffi_test(
name = "purescript_library_works_with_purescript_and_ffi_test",
target_under_test = ":purescript_library_works_with_purescript_and_ffi_fake_target",
)
purescript_library(
name = "purescript_library_works_with_purescript_and_ffi_fake_target",
ffi = "PureScriptAndFFI.js",
module = "PureScriptAndFFI",
src = "PureScriptAndFFI.purs",
tags = [
"manual",
],
)
_purescript_library_works_with_dependencies_test(
name = "purescript_library_works_with_dependencies_test",
target_under_test = ":purescript_library_works_with_dependencies_foo_fake_target",
)
purescript_library(
name = "purescript_library_works_with_dependencies_foo_fake_target",
module = "Foo",
src = "Foo.purs",
deps = [
":purescript_library_works_with_dependencies_bar_fake_target",
],
tags = [
"manual",
],
)
purescript_library(
name = "purescript_library_works_with_dependencies_bar_fake_target",
module = "Bar",
src = "Bar.purs",
tags = [
"manual",
],
)
def purescript_package_tests_suite(name):
"""
A suite of tests around purescript_package.
Args:
name: A unique name for this target.
"""
_purescript_package_works_with_only_purescript_test(
name = "purescript_package_works_with_only_purescript_test",
target_under_test = ":purescript_package_works_with_only_purescript_fake_target",
)
purescript_package(
name = "purescript_package_works_with_only_purescript_fake_target",
srcs = [
"PureScriptOnly.purs",
],
tags = [
"manual",
],
)
_purescript_package_works_with_purescript_and_ffi_test(
name = "purescript_package_works_with_purescript_and_ffi_test",
target_under_test = ":purescript_package_works_with_purescript_and_ffi_fake_target",
)
purescript_package(
name = "purescript_package_works_with_purescript_and_ffi_fake_target",
ffis = [
"PureScriptAndFFI.js",
],
srcs = [
"PureScriptAndFFI.purs",
],
tags = [
"manual",
],
)
_purescript_package_works_with_dependencies_test(
name = "purescript_package_works_with_dependencies_test",
target_under_test = ":purescript_package_works_with_dependencies_foo_fake_target",
)
purescript_package(
name = "purescript_package_works_with_dependencies_foo_fake_target",
srcs = [
"Foo.purs",
],
deps = [
":purescript_package_works_with_dependencies_bar_fake_target",
],
tags = [
"manual",
],
)
purescript_package(
name = "purescript_package_works_with_dependencies_bar_fake_target",
srcs = [
"Bar.purs",
],
tags = [
"manual",
],
)
``` |
{
"source": "joneshf/rules_haskell",
"score": 2
} |
#### File: rules_haskell/tools/repositories.bzl
```python
load("@rules_haskell//haskell:cabal.bzl", "stack_snapshot")
def rules_haskell_worker_dependencies(**stack_kwargs):
"""Provide all repositories that are necessary for `rules_haskell`'s tools to
function.
"""
excludes = native.existing_rules().keys()
if "rules_haskell_worker_dependencies" not in excludes:
stack_snapshot(
name = "rules_haskell_worker_dependencies",
packages = [
"base",
"bytestring",
"filepath",
"ghc",
"ghc-paths",
"microlens",
"process",
"proto-lens",
"proto-lens-runtime",
"text",
"vector",
],
snapshot = "lts-14.1",
**stack_kwargs
)
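# A typical WORKSPACE usage would be to load and call this macro after setting
# up rules_haskell itself (sketch; the load label is assumed from this file's path):
#   load("@rules_haskell//tools:repositories.bzl", "rules_haskell_worker_dependencies")
#   rules_haskell_worker_dependencies()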
``` |
{
"source": "joneshf/rules_purescript",
"score": 2
} |
#### File: rules_purescript/purescript/purescript.bzl
```python
run_template = """
#!/usr/bin/env bash
set -o errexit
node -e "require('./{target_path}/{entry_module}/index.js').{entry_function}({entry_params})"
"""
compile_trans_template = "cp -R {path}/* {output}"
def _purescript_compile(ctx):
srcs = ctx.files.srcs + ctx.files.deps
target = ctx.actions.declare_file(ctx.outputs.target.basename)
purs = ctx.executable.purs
flags = " ".join(ctx.attr.compiler_flags)
cmd = "\n".join(
[ "set -o errexit"
, """mkdir "$2" """
, """ "$1" compile """ + flags + """ --output "$2" "${@:3}" """
]
)
ctx.actions.run_shell(
inputs = srcs + [purs],
outputs = [target],
command = cmd,
arguments = [purs.path, target.path] +
[src.path for src in srcs if src.extension == "purs"],
)
return target
def _purescript_tar(ctx):
target = _purescript_compile(ctx)
tar = ctx.actions.declare_file(ctx.outputs.tar.basename)
ctx.actions.run_shell(
inputs = [target],
outputs = [tar],
command = """
set -o errexit
tar --create --file "$1" --directory "$2" .
""",
arguments = [tar.path, target.path],
)
def _purescript_app(ctx):
target = _purescript_compile(ctx)
entry_params = ",".join([
'\\"{entry}\\"'.format(entry=e) for e in ctx.attr.entry_parameters
])
script = ctx.actions.declare_file(ctx.label.name)
script_content = run_template.format(
target_path = target.short_path,
entry_module = getattr(ctx.attr, "entry_module"),
entry_function = getattr(ctx.attr, "entry_function"),
entry_params = entry_params,
)
ctx.actions.write(script, script_content, is_executable = True)
runfiles = ctx.runfiles(files = [target])
return [DefaultInfo(executable = script, runfiles = runfiles)]
purescript_app = rule(
implementation = _purescript_app,
attrs = {
"srcs": attr.label_list(
allow_files = True,
),
"deps": attr.label_list(
default = [],
),
"purs": attr.label(
allow_single_file = True,
executable = True,
cfg = "host",
default = "@purs",
),
"compiler_flags": attr.string_list(
default = []
),
"entry_module": attr.string(
default = "Main",
),
"entry_function": attr.string(
default = "main",
),
"entry_parameters": attr.string_list(
default = [],
),
},
outputs = {
"target": "target",
},
executable = True,
)
def _purescript_lib(ctx):
_purescript_compile(ctx)
purescript_lib = rule(
implementation = _purescript_lib,
attrs = {
"srcs": attr.label_list(
allow_files = True,
),
"deps": attr.label_list(
default = [],
),
"purs": attr.label(
allow_single_file = True,
executable = True,
cfg = "host",
default = "@purs",
),
"compiler_flags": attr.string_list(
default = []
),
},
outputs = {
#"tar": "%{name}.tar",
"target": "target",
},
)
test_template = """
err=0
node -e "require('./{target_path}/{test_file}/index.js').{entry_function}()" || err=1
echo
"""
def _run_test(target_path, entry_module, entry_function):
return test_template.format(
target_path = target_path,
test_file = entry_module,
entry_function = entry_function,
)
def _purescript_test(ctx):
target = _purescript_compile(ctx)
script = "\n".join(
["""
#!/usr/bin/env bash
err=0
""" , _run_test(target.short_path, ctx.attr.main_module, ctx.attr.main_function)
, "exit $err"
],
)
ctx.actions.write(
output = ctx.outputs.executable,
content = script,
)
runfiles = ctx.runfiles(files = [target])
return [DefaultInfo(runfiles = runfiles)]
purescript_test = rule(
implementation = _purescript_test,
attrs = {
"srcs": attr.label_list(allow_files = True),
"deps": attr.label_list(),
"main_module": attr.string(
default = "Test.Main",
),
"main_function": attr.string(
default = "main",
),
"purs": attr.label(
allow_single_file = True,
executable = True,
cfg = "host",
default = "@purs",
),
"compiler_flags": attr.string_list(
default = []
),
},
outputs = {
"target": "test-target",
},
test = True,
)
_default_purs_pkg_url = \
"https://github.com/purescript/purescript/releases/download/v0.12.0/linux64.tar.gz"
_default_purs_pkg_sha256 = \
"ccd777d9350c2e238d5be26419d3f54e2a335940b82c0baed040698c7cb1c7f1"
_default_purs_pkg_strip_prefix = \
"purescript"
def purescript_toolchain(url=_default_purs_pkg_url, sha256=_default_purs_pkg_sha256, strip_prefix=_default_purs_pkg_strip_prefix):
native.new_http_archive(
name = "purs",
urls = [url],
sha256 = sha256,
strip_prefix = strip_prefix,
build_file_content = """exports_files(["purs"])""",
)
_purescript_dep_build_content = """
filegroup(
name = "pkg",
srcs = glob(["src/**/*.purs", "src/**/*.js"]),
visibility = ["//visibility:public"],
)
"""
def purescript_dep(name, url, sha256, strip_prefix):
native.new_http_archive(
name = name,
urls = [url],
sha256 = sha256,
strip_prefix = strip_prefix,
build_file_content = _purescript_dep_build_content,
)
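# Hypothetical WORKSPACE usage of the two macros above (the URL, sha256 and
# strip_prefix values are placeholders, not real releases):
#   purescript_toolchain()
#   purescript_dep(
#       name = "purescript_prelude",
#       url = "https://example.com/purescript-prelude-vX.Y.Z.tar.gz",
#       sha256 = "<sha256 of the archive>",
#       strip_prefix = "purescript-prelude-X.Y.Z",
#   )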
``` |
{
"source": "jonesholger/lbann",
"score": 2
} |
#### File: applications/ATOM/dataset.py
```python
import os
import numpy as np
import json
#@todo, get rid of json and pass all variables here
# the idea here is to use the same code with arbitrary sets of data
with open(os.environ['DATA_CONFIG'], 'rb') as handle:
config = json.load(handle)
pad_index = config['pad_index']
max_seq_len = int(os.environ['MAX_SEQ_LEN'])
samples = np.load(os.environ['DATA_PATH'], allow_pickle=True)
# Sample access functions
def get_sample(index):
sample = samples[index]
if len(sample) < max_seq_len:
sample = np.concatenate((sample, np.full(max_seq_len-len(sample), pad_index)))
else:
sample = np.resize(sample, max_seq_len)
return sample
def num_samples():
return samples.shape[0]
def sample_dims():
return [max_seq_len]
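# get_sample / num_samples / sample_dims are the functions an LBANN Python data
# reader is typically pointed at in this module.  A quick local sanity check
# (assuming DATA_CONFIG, DATA_PATH and MAX_SEQ_LEN are set) might be:
#   print(num_samples(), sample_dims(), get_sample(0).shape)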
```
#### File: ATOM/utils/build_sample_lists.py
```python
import sys
import random
def usage() :
    print('usage:', sys.argv[0], 'input_file output_file_base_name [random_seed]')
    print('''
    where: input_filename contains:
      1st line: num_trainers
      2nd line: data base directory
      remaining lines: smiles_csv_filename n_samples_total n_samples_per_trainer reuse
        where:
          'n_samples_total' is the number of samples (SMILES strings) in the csv file
          'n_samples_per_trainer' is the number of samples from the csv file to be used for each trainer
          'reuse' is '1' or '0'; if '1' then the same set of samples (sample_ids) will be used for each trainer; else, the sets will be unique
''')
if len(sys.argv) < 3 :
usage()
exit(9)
if len(sys.argv) == 4 :
random.seed( int(sys.argv[3]) )
output_base = sys.argv[2]
a = open(sys.argv[1]).readlines()
idx = 0
while a[idx][0] == '#' :
idx += 1
t = a[idx].split()
idx += 1
num_trainers = int(t[0])
t = a[idx].split()
idx += 1
base_data_dir = t[0]
inputs = {}
for j in range(idx, len(a)) :
t = a[j].split()
#total samples, num_to_use, reuse
inputs[t[0]] = (int(t[1]), int(t[2]), int(t[3]))
# deal with subsets common to all sample lists
common = {}
for fn in inputs.keys() :
if inputs[fn][2] == 1 :
        print('using a common set of', inputs[fn][1], 'random indices from', fn, 'for all sample lists')
indices = set()
while len(indices) < inputs[fn][1] :
indices.add(random.randint(0, inputs[fn][0]-1))
common[fn] = indices
inputs[fn] = None
#total num samples in each sample list
num_samples = 0
for fn in common :
num_samples += len(common[fn])
for fn in inputs :
if inputs[fn] != None :
num_samples += inputs[fn][1]
        print('using a unique set of', inputs[fn][1], 'random indices from', fn, 'for each sample_list')
print('\nnum samples per sample_list:', num_samples)
# Generate the lists
used = {}
for fn in inputs :
used[fn] = set()
for k in range(num_trainers) :
fn = output_base + '_' + str(k) + '.txt'
    print('writing:', fn)
out = open(fn, 'w')
out.write('CONDUIT_HDF5_INCLUSION\n')
out.write(str(num_samples) + ' 0 ' + str(len(inputs)) + '\n')
    out.write(base_data_dir + '\n')
for fn in inputs.keys() :
if inputs[fn] == None :
out.write(fn + ' ' + str(len(common[fn])) + ' 0')
for x in common[fn] :
out.write(' ' + str(x))
out.write('\n')
else :
num_to_use = inputs[fn][1]
total = inputs[fn][0]
out.write(fn + ' ' + str(num_to_use) + ' 0')
useme = set()
            print('selecting', num_to_use, 'random indices from', total, 'indices')
while len(useme) < num_to_use :
r = random.randint(0, total-1)
if r not in used[fn] :
useme.add(r)
used[fn].add(r)
for id in useme :
out.write(' ' + str(id))
out.write('\n')
out.close()
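# Example invocation (file names are placeholders):
#   python build_sample_lists.py inputs.txt sample_list 42
# which writes sample_list_0.txt ... sample_list_<num_trainers-1>.txt, one
# CONDUIT_HDF5_INCLUSION sample list per trainer.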
```
#### File: ATOM/utils/gather_atom_metrics.py
```python
import sys
import numpy as np
import re
#tag = sys.argv[len(sys.argv)-1]
def summarize_metrics(trainer_metrics):
total_time = 0
total_train_time = 0
results = {}
partial_results = {}
total_train_times = []
total_train_times_not_first_epoch = []
# For each epoch, gather data from all trainers
for e, data in trainer_metrics.items():
train_times = []
test_recons = []
test_times = []
train_mb_times = []
# train_recon = []
num_trainers = len(data)
# For each trainer in the epoch
for k, v in data.items():
            # Make sure that both keys of each pair are present so that we don't grab
            # partial results
if 'train_time' in v and 'train_mb_time' in v:
train_times.append(float(v['train_time']))
train_mb_times.append(float(v['train_mb_time']))
if 'test_recon' in v and 'test_time' in v:
test_recons.append(float(v['test_recon']))
test_times.append(float(v['test_time']))
if num_trainers != len(train_times):
if np.array(train_times):
partial_mean_epoch_train_time = np.mean(np.array(train_times))
else:
partial_mean_epoch_train_time = 0
if np.array(test_times):
partial_mean_epoch_test_time = np.mean(np.array(test_times))
else:
partial_mean_epoch_test_time = 0
partial_total_time = (partial_mean_epoch_train_time + partial_mean_epoch_test_time)
partial_results[e] = { 'epoch' : e,
'total_time' : total_time + partial_total_time,
'total_train_time' : total_train_time,
'num_trainers': len(train_times)}
if np.array(train_times):
partial_results[e].update({ 'mean_train_time' : partial_mean_epoch_train_time,
'std_train_time' : np.std(np.array(train_times)),
'min_train_time' : np.amin(np.array(train_times)),
'max_train_time' : np.amax(np.array(train_times)),
'mean_train_mb_time' : np.mean(np.array(train_mb_times)),
'std_train_mb_time' : np.std(np.array(train_mb_times))})
if np.array(test_recons):
partial_results[e].update({ 'recon_min' : np.amin(np.array(test_recons)),
'recon_max' : np.amax(np.array(test_recons)),
'recon_mean' : np.mean(np.array(test_recons)),
'recon_std' : np.std(np.array(test_recons)),
'mean_test_time' : partial_mean_epoch_test_time,
'std_test_time' : np.std(np.array(test_times))})
# 'train_recon_min' : np.amin(np.array(train_recons)),
# 'train_recon_max' : np.amax(np.array(train_recons)),
# 'train_recon_mean' : np.mean(np.array(train_recons)),
# 'train_recon_std' : np.std(np.array(train_recons))
continue
else:
total_train_times.append(train_times)
if e != '0':
total_train_times_not_first_epoch.append(train_times)
if np.array(train_times):
mean_epoch_train_time = np.mean(np.array(train_times))
else:
mean_epoch_train_time = 0
if np.array(test_times):
mean_epoch_test_time = np.mean(np.array(test_times))
else:
mean_epoch_test_time = 0
total_time += (mean_epoch_train_time + mean_epoch_test_time)
total_train_time += mean_epoch_train_time
results[e] = { 'epoch' : e,
'total_time' : total_time,
'total_train_time' : total_train_time,
'num_trainers': len(train_times)}
# 'train_recon_min' : np.amin(np.array(train_recons)),
# 'train_recon_max' : np.amax(np.array(train_recons)),
# 'train_recon_mean' : np.mean(np.array(train_recons)),
# 'train_recon_std' : np.std(np.array(train_recons))
if np.array(train_times):
results[e].update({ 'mean_train_time' : mean_epoch_train_time,
'std_train_time' : np.std(np.array(train_times)),
'min_train_time' : np.amin(np.array(train_times)),
'max_train_time' : np.amax(np.array(train_times)),
'mean_train_mb_time' : np.mean(np.array(train_mb_times)),
'std_train_mb_time' : np.std(np.array(train_mb_times))})
if np.array(test_recons):
results[e].update({ 'recon_min' : np.amin(np.array(test_recons)),
'recon_max' : np.amax(np.array(test_recons)),
'recon_mean' : np.mean(np.array(test_recons)),
'recon_std' : np.std(np.array(test_recons)),
'mean_test_time' : mean_epoch_test_time,
'std_test_time' : np.std(np.array(test_times))})
return results, partial_results, total_train_times, total_train_times_not_first_epoch
def print_results(results, partial_results, total_train_times, total_train_times_not_first_epoch):
for e in sorted(results.keys()):
r = results[e]
msg = 'Epoch ' + r['epoch']
msg += ' {:7.1f}s'.format(r['total_time'])
msg += ' / {:7.1f}s'.format(r['total_train_time'])
if 'mean_train_time' in r and 'std_train_time' in r and 'min_train_time' in r and 'max_train_time' in r:
msg += ' training = {:6.2f}s +- {:3.2f} / min = {:6.3f} / max = {:6.3f}'.format(
r['mean_train_time'], r['std_train_time'], r['min_train_time'], r['max_train_time'])
if 'recon_min' in r and 'recon_max' in r and 'recon_mean' in r and 'recon_std' in r:
msg += ' :: reconstruction min = {:6.3f} / max = {:6.3f} / avg = {:6.3f} +- {:3.2f}'.format(
r['recon_min'], r['recon_max'], r['recon_mean'], r['recon_std'])
if 'mean_test_time' in r:
msg += ' :: test time = {:6.3f}s +- {:3.2f}'.format(r['mean_test_time'], r['std_test_time'])
msg += ' :: train MB time = {:5.3f}s +- {:3.2f}'.format(r['mean_train_mb_time'], r['std_train_mb_time'])
msg += ' :: ' + str(r['num_trainers']) + ' trainers'
# + ' :: train reconstruction min = {:6.3f} / max = {:6.3f} / avg = {:6.3f} +- {:3.2f}'.
# format(r['train_recon_min'], r['train_recon_max'], r['train_recon_mean'], r['train_recon_std']))
print(msg)
if len(total_train_times) != 0:
print("All epochs (including 0) epoch time : mean="
+ '{:7.2f}s'.format(np.mean(np.array(total_train_times)))
+ ' +- {:3.2f}'.format(np.std(np.array(total_train_times)))
+ ' min={:6.2f}s'.format(np.amin(np.array(total_train_times)))
+ ' max={:6.2f}s'.format(np.amax(np.array(total_train_times))))
if len(total_train_times_not_first_epoch) != 0:
print("All epochs (except 0) epoch time : mean="
+ '{:7.2f}s'.format(np.mean(np.array(total_train_times_not_first_epoch)))
+ ' +- {:3.2f}'.format(np.std(np.array(total_train_times_not_first_epoch)))
+ ' min={:6.2f}s'.format(np.amin(np.array(total_train_times_not_first_epoch)))
+ ' max={:6.2f}s'.format(np.amax(np.array(total_train_times_not_first_epoch))))
else:
print("WARNING: Training failed - No epochs completed")
print('--------------------------------------------------------------------------------')
print('Time to load data:')
for k,v in ds_times.items():
print('Loading {:12s}'.format(k) + ' data set with {:9d} samples'.format(v['samples']) + ' took {:6.2f}s'.format(v['load_time']))
print('Time to synchronize the trainers: {:12.6f}s'.format(sync_time))
for e in sorted(partial_results.keys()):
r = partial_results[e]
print('--------------------------------------------------------------------------------')
print('Results for epochs with only some trainers reporting')
print('Epoch ' + r['epoch']
+ ' {:7.1f}s'.format(r['total_time'])
+ ' training = {:6.2f}s +- {:3.2f} / min = {:6.3f} / max = {:6.3f}'.format(
r['mean_train_time'], r['std_train_time'], r['min_train_time'], r['max_train_time'])
+ ' :: reconstruction min = {:6.3f} / max = {:6.3f} / avg = {:6.3f} +- {:3.2f}'.format(
r['recon_min'], r['recon_max'], r['recon_mean'], r['recon_std'])
+ ' :: test time = {:6.3f}s +- {:3.2f}'.format(r['mean_test_time'], r['std_test_time'])
+ ' :: train MB time = {:5.3f}s +- {:3.2f}'.format(r['mean_train_mb_time'], r['std_train_mb_time'])
+ ' :: ' + str(r['num_trainers']) + ' trainers')
# + ' :: train reconstruction min = {:6.3f} / max = {:6.3f} / avg = {:6.3f} +- {:3.2f}'.
# format(r['train_recon_min'], r['train_recon_max'], r['train_recon_mean'], r['train_recon_std']))
#for each log file
for num in range(len(sys.argv)-1):
inp = sys.argv[num+1]
print('################################################################################')
print("File#", num , " ", inp)
print('################################################################################')
run_stats = dict()
trainer_metrics = dict()
current_epoch = {} # Dict for each trainer to track the current epoch
ds_times = {}
    active_mode = ''
sync_time = 0
# Patterns for key metrics
p_trainers = re.compile('\s+Trainers\s+: ([0-9.]+)')
p_ppt = re.compile('\s+Processes per trainer\s+: ([0-9.]+)')
p_ppn = re.compile('\s+Processes on node\s+: ([0-9.]+)')
p_procs = re.compile('\s+Total number of processes\s+: ([0-9.]+)')
p_omp = re.compile('\s+OpenMP threads per process\s+: ([0-9.]+)')
p_mb = re.compile('\s+mini_batch_size:\s+([0-9.]+)')
# Patterns for key metrics
p_train_time = re.compile('\w+\s+\(instance ([0-9]*)\) training epoch ([0-9]*) run time : ([0-9.]+)')
p_test_time = re.compile('\w+\s+\(instance ([0-9]*)\) test run time : ([0-9.]+)')
p_test_recon = re.compile('\w+\s+\(instance ([0-9]*)\) test recon : ([0-9.]+)')
# Patterns for secondary metrics
p_train_mb_time = re.compile('\w+\s+\(instance ([0-9]*)\) training epoch ([0-9]*) mini-batch time statistics : ([0-9.]+)s mean')
# p_train_recon = re.compile('\w+\s+\(instance ([0-9]*)\) training epoch ([0-9]*) recon : ([0-9.]+)')
# Capture the time required to load the data
p_preload_data_store_mode = re.compile('starting do_preload_data_store.*num indices:\s+([0-9,]+) for role: (\w+)')
p_preload_data_store_time = re.compile('\s+do_preload_data_store time:\s+([0-9.]+)')
# Find the line with time to synchronize trainers
p_sync_time = re.compile('synchronizing trainers... ([0-9.]+)s')
with open(inp) as ifile1:
for line in ifile1:
m_trainers = p_trainers.match(line)
if (m_trainers):
run_stats['num_trainers'] = m_trainers.group(1)
m_ppt = p_ppt.match(line)
if (m_ppt):
run_stats['procs_per_trainer'] = m_ppt.group(1)
m_ppn = p_ppn.match(line)
if (m_ppn):
run_stats['procs_per_node'] = m_ppn.group(1)
m_procs = p_procs.match(line)
if (m_procs):
run_stats['num_processes'] = m_procs.group(1)
m_omp = p_omp.match(line)
if (m_omp):
run_stats['num_omp_threads'] = m_omp.group(1)
m_mb = p_mb.match(line)
if (m_mb):
run_stats['minibatch_size'] = m_mb.group(1)
m_time = p_train_time.match(line)
if (m_time):
tid = m_time.group(1)
e = m_time.group(2)
current_epoch[tid] = e # track the current epoch for each trainer
t = m_time.group(3)
if not trainer_metrics :
trainer_metrics = { e : { tid : { 'train_time' : t } } }
else:
if e in trainer_metrics :
if tid in trainer_metrics[e]:
trainer_metrics[e][tid]['train_time'] = t
else:
trainer_metrics[e][tid] = { 'train_time' : t }
else:
trainer_metrics[e] = { tid : { 'train_time' : t } }
m_test_recon = p_test_recon.match(line)
if (m_test_recon):
tid = m_test_recon.group(1)
e = current_epoch[tid]
r = m_test_recon.group(2)
if not 'test_recon' in trainer_metrics[e][tid].keys():
trainer_metrics[e][tid]['test_recon'] = r
else:
print('@epoch ' + e
+ ' - duplicate test reconstruction metric found - existing = '
+ trainer_metrics[e][tid]['test_recon']
+ ' discarding ' + r + ' (ran test twice???)')
m_test_time = p_test_time.match(line)
if (m_test_time):
tid = m_test_time.group(1)
e = current_epoch[tid]
r = m_test_time.group(2)
if not 'test_time' in trainer_metrics[e][tid].keys():
trainer_metrics[e][tid]['test_time'] = r
else:
print('@epoch ' + e
+ ' - duplicate test time found - existing = '
+ trainer_metrics[e][tid]['test_time']
+ ' discarding ' + r + ' (ran test twice???)')
m_train_mb_time = p_train_mb_time.match(line)
if (m_train_mb_time):
tid = m_train_mb_time.group(1)
e = current_epoch[tid]
if not e == m_train_mb_time.group(2):
assert('Epoch mismatch')
r = m_train_mb_time.group(3)
if not 'train_mb_time' in trainer_metrics[e][tid].keys():
trainer_metrics[e][tid]['train_mb_time'] = r
else:
print('@epoch ' + e
+ ' - duplicate train mb time found - existing = '
+ trainer_metrics[e][tid]['train_mb_time']
+ ' discarding ' + r + ' (abort)')
exit(-1)
m_ds_mode = p_preload_data_store_mode.match(line)
if (m_ds_mode):
active_mode = m_ds_mode.group(2)
samples = int(m_ds_mode.group(1).replace(',', ''))
ds_times[active_mode] = {'samples' : samples }
m_ds_time = p_preload_data_store_time.match(line)
if (m_ds_time):
time = float(m_ds_time.group(1))
ds_times[active_mode]['load_time'] = time
m_sync_time = p_sync_time.match(line)
if (m_sync_time):
sync_time = float(m_sync_time.group(1))
# m_train_recon = p_train_recon.match(line)
# if (m_train_recon):
# tid = m_train_recon.group(1)
# e = current_epoch[tid]
# if not e == m_train_recon.group(2):
# assert('Epoch mismatch')
# r = m_train_recon.group(3)
# trainer_metrics[e][tid]['train_recon'] = r
print(f"Trainers : {run_stats['num_trainers']}")
print(f"Procs per trainer : {run_stats['procs_per_trainer']}")
print(f"Procs per node : {run_stats['procs_per_node']}")
print(f"Total num. Processes : {run_stats['num_processes']}")
print(f"Num. OpenMP Threads : {run_stats['num_omp_threads']}")
print(f"Mini-batch Size : {run_stats['minibatch_size']}")
results, partial_results, total_train_times, total_train_times_not_first_epoch = summarize_metrics(trainer_metrics)
print_results(results, partial_results, total_train_times, total_train_times_not_first_epoch)
ifile1.close()
#table = pd.DataFrame(results)
#table = pd.DataFrame(all_metrics)
#met_file = "gb_metrics" +str(datetime.date.today())+'.csv'
#print("Saving computed metrics to ", met_file)
#table.to_csv(met_file, index=False)
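# Typical invocation (log file names are placeholders): pass one or more LBANN
# run logs on the command line and a per-epoch summary is printed for each:
#   python gather_atom_metrics.py atom_run_0.log atom_run_1.log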
```
#### File: communityGAN/model/gan.py
```python
import lbann
import model.generator
import model.discriminator
from util import str_list
class CommunityGAN(lbann.modules.Module):
def __init__(
self,
num_vertices,
motif_size,
embed_dim,
learn_rate,
):
super().__init__()
self.num_vertices = num_vertices
self.embed_dim = embed_dim
self.learn_rate = learn_rate
# Construct generator and discriminator
self.generator = model.generator.Generator(
num_vertices,
embed_dim,
learn_rate,
)
self.discriminator = model.discriminator.Discriminator(
num_vertices,
motif_size,
embed_dim,
learn_rate,
)
def forward(
self,
motif_indices,
motif_size,
walk_indices,
walk_length,
):
# Apply generator
fake_motif_indices, gen_prob, gen_log_prob = self.generator(
walk_length,
walk_indices,
motif_size,
)
# Get discriminator embeddings in log-space
all_motif_indices = lbann.Concatenation(motif_indices, fake_motif_indices)
all_motif_log_embeddings = self.discriminator.get_log_embeddings(all_motif_indices)
all_motif_log_embeddings = lbann.Slice(
all_motif_log_embeddings,
slice_points=str_list([0, motif_size, 2*motif_size]),
)
real_motif_log_embeddings = lbann.Identity(all_motif_log_embeddings)
fake_motif_log_embeddings = lbann.Identity(all_motif_log_embeddings)
# Apply discriminator
real_disc_prob, real_disc_log_not_prob \
= self.discriminator(motif_size, real_motif_log_embeddings)
fake_disc_prob, fake_disc_log_not_prob \
= self.discriminator(motif_size, fake_motif_log_embeddings)
# Loss function
# L_disc = - log(D(real)) - log(1-D(fake))
# L_gen = - log(G) * stop_gradient(log(1-D(fake)))
real_disc_log_prob \
= lbann.Log(lbann.Clamp(real_disc_prob, min=1e-37, max=1))
disc_loss = lbann.WeightedSum(
real_disc_log_prob,
fake_disc_log_not_prob,
scaling_factors=str_list([-1,-1]),
)
gen_loss = lbann.Multiply(
gen_log_prob,
lbann.StopGradient(fake_disc_log_not_prob),
)
loss = lbann.Add(disc_loss, gen_loss)
return loss, real_disc_prob, fake_disc_prob, gen_prob
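# Sketch of wiring this module into a model (sizes and lengths are illustrative;
# 'motif_indices' and 'walk_indices' would be LBANN layers from the data reader):
#   gan = CommunityGAN(num_vertices, motif_size=4, embed_dim=128, learn_rate=1e-3)
#   loss, real_p, fake_p, gen_p = gan(motif_indices, 4, walk_indices, walk_length=80)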
```
#### File: data/Synthetic/Synthetic_Dense_Edge.py
```python
import numpy as np
import pickle
class Synthetic_Dense_Edge(object):
"""docstring for Synthetic_Dense_Edge"""
def __init__(self,
num_samples,
num_nodes,
node_features,
edge_features,
use_cached=True,
cache_data=True,
cached_file=None):
super(Synthetic_Dense_Edge, self).__init__()
self.num_samples = num_samples
self.num_nodes = num_nodes
self.node_features = node_features
self.edge_features = edge_features
self.num_edges = num_nodes **2
self.cache_data = cache_data
node_features_size = self.num_nodes * self.node_features
node_tensor_size = self.num_edges * self.node_features
edge_tensor_size = self.num_edges * self.edge_features
        adj_tensor_size = self.num_nodes * self.num_nodes
target_size = 1
self.sample_dim = node_features_size + node_tensor_size + edge_tensor_size + \
            adj_tensor_size + target_size
self.dataset = None
if (use_cached):
print("Using cached data")
if (cached_file):
self.dataset = np.load(cached_file)
else:
file_string = "/p/vast1/zaman2/synth_dense_graphs_{}_{}_{}_{}.p".format(num_samples,
num_nodes,
node_features,
edge_features)
try:
with open(file_string, 'rb') as f:
self.dataset = pickle.load(f)
except IOError:
print("File not found. Generating dataset")
self.generate_data()
else:
self.generate_data()
def generate_data(self):
self.dataset = np.random.random((self.num_samples, self.sample_dim))
_file_string = "/p/vast1/zaman2/synth_dense_graphs_{}_{}_{}_{}.p".format(self.num_samples,
self.num_nodes,
self.node_features,
self.edge_features)
with open(_file_string, 'wb') as f:
pickle.dump(self.dataset, f, protocol=pickle.HIGHEST_PROTOCOL)
def get_sample(self, i):
return self.dataset[i]
number_samples = 10000
number_nodes = 100
number_node_features = 10
number_edge_features = 1
dataset = Synthetic_Dense_Edge(number_samples,
number_nodes,
number_node_features,
number_edge_features)
def get_sample_func(index):
_data = dataset.get_sample(index)
_data = np.float32(_data)
return _data
def num_samples_func():
return number_samples
def sample_dims_func():
return (dataset.sample_dim,)
if __name__ == '__main__':
print(dataset.sample_dim)
```
#### File: data/Synthetic/Synthetic_Sparse_Edge.py
```python
import numpy as np
import os.path as osp
import configparser
import os
cur_dir = osp.dirname(osp.realpath(__file__))
data_dir = osp.dirname(cur_dir)
config_dir = osp.dirname(data_dir)
config = configparser.ConfigParser()
_file_name = os.environ['SYNTH_TEST_CONFIG_FILE']
conf_file = osp.join(config_dir, _file_name)
print("Initializing using: ", conf_file)
config.read(conf_file)
num_nodes = int(config['Graph']['num_nodes'])
max_edges = int(config['Graph']['num_edges'])
number_samples = 10000
number_node_features = 9
number_edge_features = 3
def sample_dims_func():
node_feature_size = num_nodes * number_node_features
edge_indices_size = max_edges * 2
edge_features_size = max_edges * number_edge_features
return (node_feature_size + edge_indices_size + edge_features_size + num_nodes + 1,)
dataset = np.random.randint(2, size=(number_samples, sample_dims_func()[0]))
def get_sample_func(index):
_data = np.float32(dataset[index])
return _data
def num_samples_func():
return number_samples
if __name__ == '__main__':
print(num_samples_func())
print(sample_dims_func())
print(get_sample_func(0).shape)
```
#### File: motif/model/autoencoder.py
```python
import lbann
import lbann.modules
class FullyConnectedAutoencoder(lbann.modules.Module):
"""Multilayer perceptron autoencoder."""
global_count = 0 # Static counter, used for default names
def __init__(
self,
data_dim,
latent_dim,
encoder_hidden_dims=[],
decoder_hidden_dims=[],
activation=lbann.Relu,
data_layout='data_parallel',
name=None,
):
super().__init__()
FullyConnectedAutoencoder.global_count += 1
# Module name
self.name = name
if not self.name:
self.name = f'fcautoencoder{FullyConnectedAutoencoder.global_count}'
# Encoder
self.encoder = []
for i, dim in enumerate(encoder_hidden_dims):
self.encoder.append(
lbann.modules.FullyConnectedModule(
size=dim,
bias=False,
activation=activation,
name=f'{self.name}_encoder{i}',
data_layout=data_layout,
)
)
self.encoder.append(
lbann.modules.FullyConnectedModule(
size=latent_dim,
bias=False,
activation=activation,
name=f'{self.name}_encoder{len(self.encoder)}',
data_layout=data_layout,
)
)
# Decoder
self.decoder = []
for i, dim in enumerate(decoder_hidden_dims):
self.decoder.append(
lbann.modules.FullyConnectedModule(
size=dim,
bias=False,
activation=activation,
name=f'{self.name}_decoder{i}',
data_layout=data_layout,
)
)
self.decoder.append(
lbann.modules.FullyConnectedModule(
size=data_dim,
bias=False,
activation=activation,
name=f'{self.name}_decoder{len(self.decoder)}',
data_layout=data_layout,
)
)
def forward(self, x):
for l in self.encoder:
x = l(x)
for l in self.decoder:
x = l(x)
return x
```
#### File: node2vec/data/data_readers.py
```python
import os.path
import lbann
def make_online_data_reader(
graph_file,
epoch_size,
walk_length=80,
return_param=0.25,
inout_param=0.25,
num_negative_samples=5,
):
reader = lbann.reader_pb2.DataReader()
_reader = reader.reader.add()
_reader.name = 'node2vec'
_reader.role = 'train'
_reader.shuffle = True
_reader.percent_of_data_to_use = 1.0
_reader.node2vec.graph_file = graph_file
_reader.node2vec.epoch_size = epoch_size
_reader.node2vec.walk_length = walk_length
_reader.node2vec.return_param = return_param
_reader.node2vec.inout_param = inout_param
_reader.node2vec.num_negative_samples = num_negative_samples
return reader
def make_offline_data_reader():
reader = lbann.reader_pb2.DataReader()
_reader = reader.reader.add()
_reader.name = 'python'
_reader.role = 'train'
_reader.shuffle = True
_reader.percent_of_data_to_use = 1.0
_reader.python.module = 'offline_walks'
_reader.python.module_dir = os.path.dirname(os.path.realpath(__file__))
_reader.python.sample_function = 'get_sample'
_reader.python.num_samples_function = 'num_samples'
_reader.python.sample_dims_function = 'sample_dims'
return reader
```
#### File: MOF/test/test_integration_mof.py
```python
import functools
import operator
import os
import os.path
import re
import sys
import pytest
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
root_dir = os.path.dirname(current_dir)
sys.path.append(root_dir) # Added lbann/applications/MOF directory
import dataset
import MOFae
applications_dir = os.path.dirname(root_dir)
lbann_dir = os.path.dirname(applications_dir)
common_python_dir = os.path.join(lbann_dir, 'bamboo/common_python')# Added lbann/bamboo/common_python
sys.path.append(common_python_dir)
import tools
#Training options
num_epochs = 10
mini_batch_size = 64
num_nodes = 2
# Error
expected_MSE_range = (0.09, 0.11)
expected_mini_batch_times = {
'ray': .35,
'pascal':.35
}
def setup_experiment(lbann):
"""Construct LBANN experiment.
    Args:
lbann (module): Module for LBANN Python frontend
"""
trainer = lbann.Trainer(mini_batch_size=mini_batch_size)
model = construct_model(lbann)
reader = make_data_reader(lbann)
# No validation set
optimizer = lbann.Adam(learn_rate=0.01, beta1=0.9, beta2=0.99, eps=1e-8 )
return trainer, model, reader, optimizer
def make_data_reader(lbann):
"""Construct LBANN data reader
"""
reader = lbann.reader_pb2.DataReader()
_reader = reader.reader.add()
_reader.name = 'python'
_reader.role = 'train'
_reader.shuffle = True
_reader.percent_of_data_to_use = 1.0
_reader.python.module = 'dataset'
_reader.python.module_dir = root_dir
_reader.python.sample_function = 'get_train'
_reader.python.num_samples_function = 'num_train_samples'
_reader.python.sample_dims_function = 'sample_dims'
return reader
def construct_model(lbann):
latent_dim = 2048
number_of_atoms = 11
layers, img_loss, metrics = MOFae.gen_layers(latent_dim, number_of_atoms)
callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]
return lbann.Model(num_epochs,
layers = layers,
objective_function = img_loss,
metrics = metrics,
callbacks = callbacks
)
# ==============================================
# Setup PyTest
# ==============================================
def augment_test_func(test_func):
"""Augment test function to parse log files.
`tools.create_tests` creates functions that run an LBANN
experiment. This function creates augmented functions that parse
the log files after LBANN finishes running, e.g. to check metrics
or runtimes.
Note: The naive approach is to define the augmented test functions
in a loop. However, Python closures are late binding. In other
words, the function would be overwritten every time we define it.
We get around this overwriting problem by defining the augmented
function in the local scope of another function.
Args:
test_func (function): Test function created by
`tools.create_tests`.
Returns:
function: Test that can interact with PyTest.
"""
test_name = test_func.__name__
# Define test function
def func(cluster, exes, dirname):
# Run LBANN experiment
experiment_output = test_func(cluster, exes, dirname)
# Parse LBANN log file
train_accuracy = None
test_accuracy = None
mini_batch_times = []
with open(experiment_output['stdout_log_file']) as f:
for line in f:
match = re.search('training epoch [0-9]+ recon_error : ([0-9.]+)', line)
if match:
train_accuracy = float(match.group(1))
match = re.search('training epoch [0-9]+ mini-batch time statistics : ([0-9.]+)s mean', line)
if match:
mini_batch_times.append(float(match.group(1)))
# Check if training accuracy is within expected range
assert (expected_MSE_range[0]
< train_accuracy
<expected_MSE_range[1]), \
'train accuracy is outside expected range'
        # Only tested on Ray. Skip the mini-batch time check on other clusters until expected values are available for them.
if (cluster == 'ray'):
# Check if mini-batch time is within expected range
# Note: Skip first epoch since its runtime is usually an outlier
mini_batch_times = mini_batch_times[1:]
mini_batch_time = sum(mini_batch_times) / len(mini_batch_times)
assert (0.75 * expected_mini_batch_times[cluster]
< mini_batch_time
< 1.25 * expected_mini_batch_times[cluster]), \
'average mini-batch time is outside expected range'
# Return test function from factory function
func.__name__ = test_name
return func
# Create test functions that can interact with PyTest
for _test_func in tools.create_tests(setup_experiment,
__file__,
nodes=num_nodes):
globals()[_test_func.__name__] = augment_test_func(_test_func)
```
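The late-binding closure pitfall mentioned in the `augment_test_func` docstring above is easy to reproduce in isolation. A minimal sketch (not part of the test suite) of the problem and of the factory-function fix it describes:
```python
# Functions defined in a loop capture the loop variable itself, not its value,
# so by the time they are called they all see the final value.
funcs = [lambda: i for i in range(3)]
print([f() for f in funcs])  # prints [2, 2, 2]

# Binding the value in a new local scope (here via a factory function, as
# augment_test_func does) captures the value current at definition time.
def make(i):
    return lambda: i

funcs = [make(i) for i in range(3)]
print([f() for f in funcs])  # prints [0, 1, 2]
```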
#### File: nlp/rnn/dataset.py
```python
import os.path
import sys
# Local imports
current_file = os.path.realpath(__file__)
root_dir = os.path.dirname(os.path.dirname(current_file))
sys.path.append(root_dir)
import utils.gutenberg
# Options
text_name = 'frankenstein'
sequence_length = 10
# Download and tokenize text data, if needed
data_url = utils.gutenberg.get_url(text_name)
data_dir = os.path.join(root_dir, 'data', text_name)
corpus = utils.gutenberg.GutenbergCorpus(data_dir, data_url)
# Sample access functions
def get_sample(index):
return corpus[index:index+sequence_length]
def num_samples():
return len(corpus) - sequence_length + 1
def sample_dims():
return (sequence_length,)
```
#### File: cosmology/ExaGAN/dataset3D.py
```python
import numpy as np
import os
input_width = int(os.environ['INPUT_WIDTH'])
data_dir = os.environ['DATA_DIR']
assert input_width in [64,128, 256, 512]
w = [input_width]*3
w.insert(0,1)
dims = np.prod(w)
#Total sample is 101251 X 1 64 64 64
nsamples = 11000 #for 128^3
samples = None
# Sample access functions
def get_sample(index):
global samples
if samples is None:
samples = np.load(data_dir, mmap_mode='r', allow_pickle=True)[:nsamples]
return samples[index].flatten()
def num_samples():
return nsamples
def sample_dims():
return [dims]
```
#### File: segmentation/unet3d/unet3d.py
```python
import argparse
import lbann
import lbann.models
import lbann.contrib.args
import lbann.contrib.launcher
import lbann.modules as lm
from lbann.core.util import get_parallel_strategy_args
class UNet3D(lm.Module):
"""The 3D U-Net.
See:
\"{O}zg\"{u}n \c{C}i\c{c}ek, <NAME>, <NAME>,
<NAME>, and <NAME>. "3D U-Net: learning dense volumetric
segmentation from sparse annotation." In International conference
on medical image computing and computer-assisted intervention,
pp. 424-432, 2016.
Note that this model assumes the same spatial input/output sizes with
extra padding to simplify the implementation.
"""
global_count = 0 # Static counter, used for default names
def __init__(self, name=None):
"""Initialize 3D U-Net.
Args:
name (str, optional): Module name
                (default: 'unet3d_module<index>').
"""
UNet3D.global_count += 1
self.instance = 0
self.name = (name if name
else "unet3d_module{0}".format(UNet3D.global_count))
# The list of ([down-conv filters], [up-conv filters], deconv filters)
self.BLOCKS = [
([32, 64], [64, 64], 128), # bconv1_down, bconv3_up, deconv3
([64, 128], [128, 128], 256), # bconv2_down, bconv2_up, deconv2
([128, 256], [256, 256], 512), # bconv3_down, bconv1_up, deconv1
]
# The list of the number of filters of the "bottom" convolution block
self.BOTTOM_BLOCK = [256, 512]
# The number of pooling/deconvolution layers
self.NUM_LEVELS = len(self.BLOCKS)
# Whether PARTITIONED_LEVELS-th pooling/deconvolution is partitioned
self.PARTITION_INCLUDE_POOL = True
# Deconvolution should have the same number of input/output channels
assert self.BLOCKS[-1][2] == self.BOTTOM_BLOCK[1]
assert all([self.BLOCKS[x][2] == self.BLOCKS[x+1][1][-1]
for x in range(self.NUM_LEVELS-1)])
# Building blocks
self.downconvs = []
self.upconvs = []
self.deconvs = []
for i, blocks in enumerate(self.BLOCKS):
downBlock, upBlock, deconv = blocks
self.downconvs.append(UNet3DConvBlock(
downBlock, name="{}_bconv{}_down".format(self.name, i+1)))
ui = self.NUM_LEVELS-1-i
self.upconvs.insert(0, UNet3DConvBlock(
upBlock, name="{}_bconv{}_up".format(self.name, ui+1)))
self.deconvs.insert(0, Deconvolution3dModule(
deconv, 2, stride=2, padding=0, activation=None,
bias=False,
name="{}_deconv{}".format(self.name, ui+1)))
# The bottom convolution
self.bottomconv = UNet3DConvBlock(
self.BOTTOM_BLOCK, name="{}_bconv_bottom".format(self.name))
# The last convolution
self.lastconv = lm.Convolution3dModule(
3, 1, stride=1, padding=0, activation=None,
bias=False,
name="{}_lconv".format(self.name))
def forward(self, x):
self.instance += 1
x_concat = []
for i in range(self.NUM_LEVELS):
x = self.downconvs[i](x)
x_concat.append(x)
x = lbann.Pooling(
x, num_dims=3, has_vectors=False,
pool_dims_i=2, pool_pads_i=0, pool_strides_i=2,
pool_mode="max",
name="{}_pool{}_instance{}".format(
self.name, i+1, self.instance))
x = self.bottomconv(x)
for i in range(self.NUM_LEVELS):
x = self.deconvs[i](x)
x = self.upconvs[i](x, x_concat=x_concat[self.NUM_LEVELS-1-i])
x = self.lastconv(x)
x = lbann.Softmax(
x,
softmax_mode="channel")
return x
class UNet3DConvBlock(lm.Module):
"""Basic block of an optional concatenation layer and
a list of 3D convolutional layers.
"""
def __init__(self, out_channels_list, name):
super().__init__()
self.name = name
self.instance = 0
assert len(out_channels_list) == 2
self.convs = []
for i, channels in enumerate(out_channels_list):
self.convs.append(Convolution3dBNModule(
channels,
3,
stride=1,
padding=1,
activation=lbann.Relu,
bias=False,
name="{}_conv_block_{}".format(self.name, i+1)))
def forward(self, x, x_concat=None):
self.instance += 1
if x_concat is not None:
x = lbann.Concatenation(
[x, x_concat],
axis=0)
for c in self.convs:
x = c(x)
return x
class Convolution3dBNModule(lm.Module):
"""Basic block of a batch-normalization layer, 3D convolutional
layer, and an optional activation layer.
"""
def __init__(self, *args, **kwargs):
super().__init__()
self.name = kwargs["name"]
self.activation = None if "activation" not in kwargs.keys() \
else kwargs["activation"]
kwargs["activation"] = None
self.conv = lm.Convolution3dModule(*args, **kwargs)
bn_scale = lbann.Weights(
initializer=lbann.ConstantInitializer(value=1.0),
name="{}_bn_scale".format(self.name))
bn_bias = lbann.Weights(
initializer=lbann.ConstantInitializer(value=0.0),
name="{}_bn_bias".format(self.name))
self.bn_weights = [bn_scale, bn_bias]
self.instance = 0
def forward(self, x):
self.instance += 1
x = self.conv(x)
x = lbann.BatchNormalization(
x,
weights=self.bn_weights,
statistics_group_size=-1,
name="{}_bn_instance{}".format(
self.name,
self.instance))
if self.activation is not None:
x = self.activation(x)
return x
class Deconvolution3dModule(lm.ConvolutionModule):
"""Basic block for 3D deconvolutional neural networks.
Applies a deconvolution and a nonlinear activation function.
This is a wrapper class for ConvolutionModule.
"""
def __init__(self, *args, **kwargs):
super().__init__(3, transpose=True, *args, **kwargs)
def create_unet3d_data_reader(train_dir, test_dir):
readers = []
for role, shuffle, role_dir in [
("train", True, train_dir),
("test", False, test_dir)]:
if role_dir is None:
continue
readers.append(lbann.reader_pb2.Reader(
name="hdf5",
role=role,
shuffle=shuffle,
data_file_pattern="{}/*.hdf5".format(role_dir),
validation_percent=0,
percent_of_data_to_use=1.0,
scaling_factor_int16=1.0,
hdf5_key_data="volume",
hdf5_key_labels="segmentation",
hdf5_hyperslab_labels=True,
disable_labels=False,
disable_responses=True,
))
return lbann.reader_pb2.DataReader(reader=readers)
def create_unet3d_optimizer(learn_rate):
    # TODO: This is a temporary optimizer copied from CosmoFlow.
adam = lbann.Adam(
learn_rate=learn_rate,
beta1=0.9,
beta2=0.999,
eps=1e-8)
return adam
if __name__ == '__main__':
    desc = ('Construct and run the 3D U-Net on a 3D segmentation dataset. '
'Running the experiment is only supported on LC systems.')
parser = argparse.ArgumentParser(description=desc)
lbann.contrib.args.add_scheduler_arguments(parser)
# General arguments
parser.add_argument(
'--job-name', action='store', default='lbann_unet3d', type=str,
help='scheduler job name (default: lbann_unet3d)')
parser.add_argument(
'--mini-batch-size', action='store', default=1, type=int,
help='mini-batch size (default: 1)', metavar='NUM')
parser.add_argument(
'--num-epochs', action='store', default=5, type=int,
        help='number of epochs (default: 5)', metavar='NUM')
# Model specific arguments
parser.add_argument(
'--learning-rate', action='store', default=0.001, type=float,
help='the initial learning rate (default: 0.001)')
parser.add_argument(
'--partition-level', action='store', default=4, type=int,
help='the spatial partition level (default: 4)')
parser.add_argument(
'--depth-groups', action='store', type=int, default=4,
help='the number of processes for the depth dimension (default: 4)')
default_lc_dataset = '/p/gpfs1/brainusr/datasets/LiTS/hdf5_dim128_float'
default_train_dir = '{}/train'.format(default_lc_dataset)
default_test_dir = '{}/test'.format(default_lc_dataset)
parser.add_argument(
'--train-dir', action='store', type=str, default=default_train_dir,
help='the directory of the training dataset (default: \'{}\')'
.format(default_train_dir))
parser.add_argument(
'--test-dir', action='store', type=str, default=default_test_dir,
help='the directory of the test dataset (default: \'{}\')'
.format(default_test_dir))
parser.add_argument(
'--dynamically-reclaim-error-signals', action='store_true',
help='Allow LBANN to reclaim error signals buffers (default: False)')
parser.add_argument(
'--batch-job', action='store_true',
help='Run as a batch job (default: false)')
lbann.contrib.args.add_optimizer_arguments(
parser,
default_optimizer="adam",
default_learning_rate=0.001,
)
args = parser.parse_args()
parallel_strategy = get_parallel_strategy_args(
sample_groups=args.mini_batch_size,
depth_groups=args.depth_groups)
# Construct layer graph
input = lbann.Input(
target_mode='label_reconstruction')
volume = lbann.Identity(input)
output = UNet3D()(volume)
segmentation = lbann.Identity(input)
ce = lbann.CrossEntropy(
[output, segmentation],
use_labels=True)
obj = lbann.ObjectiveFunction([ce])
layers = list(lbann.traverse_layer_graph(input))
for l in layers:
l.parallel_strategy = parallel_strategy
# Setup model
metrics = [lbann.Metric(ce, name='CE', unit='')]
callbacks = [
lbann.CallbackPrint(),
lbann.CallbackTimer(),
lbann.CallbackGPUMemoryUsage(),
lbann.CallbackProfiler(skip_init=True),
]
# # TODO: Use polynomial learning rate decay (https://github.com/LLNL/lbann/issues/1581)
# callbacks.append(
# lbann.CallbackPolyLearningRate(
# power=1.0,
# num_epochs=100,
# end_lr=1e-5))
model = lbann.Model(
epochs=args.num_epochs,
layers=layers,
objective_function=obj,
callbacks=callbacks,
)
# Setup optimizer
optimizer = lbann.contrib.args.create_optimizer(args)
# Setup data reader
data_reader = create_unet3d_data_reader(
train_dir=args.train_dir,
test_dir=args.test_dir)
# Setup trainer
trainer = lbann.Trainer(mini_batch_size=args.mini_batch_size)
# Runtime parameters/arguments
environment = lbann.contrib.args.get_distconv_environment(
num_io_partitions=args.depth_groups)
if args.dynamically_reclaim_error_signals:
environment['LBANN_KEEP_ERROR_SIGNALS'] = 0
else:
environment['LBANN_KEEP_ERROR_SIGNALS'] = 1
lbann_args = ['--use_data_store']
# Run experiment
kwargs = lbann.contrib.args.get_scheduler_kwargs(args)
lbann.contrib.launcher.run(
trainer, model, data_reader, optimizer,
job_name=args.job_name,
environment=environment,
lbann_args=lbann_args,
batch_job=args.batch_job,
**kwargs)
```
#### File: selfsupervised/patch_generator/chroma_blur.py
```python
import numpy as np
import scipy.ndimage.filters
import cv2
def chroma_blur(img):
"""Blur chroma channels to hide chromatic aberration.
Convert to CIE Lab format and apply box filter to a and b
channels.
"""
img = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
img[:,:,1] = scipy.ndimage.filters.uniform_filter(img[:,:,1], 13)
img[:,:,2] = scipy.ndimage.filters.uniform_filter(img[:,:,2], 13)
img = cv2.cvtColor(img, cv2.COLOR_Lab2BGR)
return img
```
#### File: selfsupervised/patch_generator/__init__.py
```python
import functools
import operator
import os.path
import random
import sys
import cv2
import numpy as np
from .extract_patches import extract_patches
from .patterns import patterns_2patch, patterns_3patch, patterns_4patch, patterns_5patch
from .chroma_blur import chroma_blur
# Data paths
label_file = '/p/lscratchh/brainusr/ILSVRC2012/labels/train.txt'
data_dir = '/p/lscratchh/brainusr/ILSVRC2012/original/train'
# Read label files
samples = []
with open(label_file) as f:
for line in f:
line = line.split(' ')
samples.append((line[0], int(line[1])))
# Get sample function
def get_sample_2patch(index):
return get_sample(index, 2)
def get_sample_3patch(index):
return get_sample(index, 3)
def get_sample_4patch(index):
return get_sample(index, 4)
def get_sample_5patch(index):
return get_sample(index, 5)
def get_sample(index, num_patches):
"""Generate data sample.
Extract patches and apply preprocessing tricks.
"""
# Read image from file
file_name, _ = samples[index]
file_name = os.path.join(data_dir, file_name)
img = cv2.imdecode(np.fromfile(file_name, dtype=np.uint8),
cv2.IMREAD_COLOR)
# Crop to get square image
size = min(img.shape[0], img.shape[1])
y = (img.shape[0] - size) // 2
x = (img.shape[1] - size) // 2
img = img[y:y+size, x:x+size, :]
# Extract patches
patterns = None
if num_patches == 2:
patterns = patterns_2patch
if num_patches == 3:
patterns = patterns_3patch
if num_patches == 4:
patterns = patterns_4patch
if num_patches == 5:
patterns = patterns_5patch
patches, label = extract_patches(img, patterns)
# Randomly rotate patches
rotate_type = random.randint(0, 3)
for i, patch in enumerate(patches):
patch = np.rot90(patch, rotate_type, axes=(0,1))
patches[i] = patch
label = label + rotate_type * len(patterns)
# Convert patch to float32
for i, patch in enumerate(patches):
if patch.dtype == np.uint8:
patches[i] = patch.astype(np.float32) / 255
# Chroma blur
for i, patch in enumerate(patches):
patches[i] = chroma_blur(patch)
# Transform to CHW format and normalize
for i, patch in enumerate(patches):
patch = np.transpose(patch, axes=(2, 0, 1))
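        # ImageNet channel means/stdevs, listed in BGR order to match
        # OpenCV's BGR channel layout.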
means = np.array([0.406, 0.456, 0.485]).reshape((3,1,1))
stdevs = np.array([0.225, 0.224, 0.229]).reshape((3,1,1))
patch -= means
patch /= stdevs
patches[i] = patch
# Random aperture
for i, patch in enumerate(patches):
if i == 0:
continue
size = random.randint(64, 96)
y = random.randint(0, 96-size)
x = random.randint(0, 96-size)
new_patch = np.zeros((3, 96, 96), dtype=np.float32)
new_patch[:, y:y+size, x:x+size] = patch[:, y:y+size, x:x+size]
patches[i] = new_patch
# Construct one-hot label vector
label_vec = np.zeros(num_labels(num_patches), dtype=np.float32)
label_vec[label] = 1
# Return flattened data tensors
flat_data = []
for patch in patches:
flat_data.append(patch.reshape(-1))
flat_data.append(label_vec)
return np.concatenate(flat_data)
# Get sample dims functions
patch_dims = (3, 96, 96)
def num_labels(num_patches):
num_patterns = 0
if num_patches == 2:
num_patterns = len(patterns_2patch)
if num_patches == 3:
num_patterns = len(patterns_3patch)
if num_patches == 4:
num_patterns = len(patterns_4patch)
if num_patches == 5:
num_patterns = len(patterns_5patch)
return 4 * num_patterns
def sample_dims(num_patches):
patch_size = functools.reduce(operator.mul, patch_dims)
return (num_patches*patch_size + num_labels(num_patches),)
def sample_dims_2patch():
return sample_dims(2)
def sample_dims_3patch():
return sample_dims(3)
def sample_dims_4patch():
return sample_dims(4)
def sample_dims_5patch():
return sample_dims(5)
# Get num samples function
def num_samples():
return len(samples)
```
#### File: bamboo/compiler_tests/test_compiler.py
```python
import sys
sys.path.insert(0, '../common_python')
import tools
import pytest
import os, re
import shutil
def test_compiler_build_script(cluster, dirname):
bamboo_base_dir = os.path.join(dirname, 'bamboo', 'compiler_tests')
output_file_name = os.path.join(bamboo_base_dir, 'output', 'build_script_output.txt')
error_file_name = os.path.join(bamboo_base_dir, 'error', 'build_script_error.txt')
# Get environment variables
BAMBOO_AGENT = os.getenv('bamboo_agentId')
common_cmd = '%s/scripts/build_lbann.sh -d -l bamboo-%s --test --clean-build -j $(($(nproc)+2)) -- +deterministic +vision +numpy' % (dirname, BAMBOO_AGENT)
if cluster in ['lassen', 'pascal', 'ray']:
command = '%s +cuda +half +fft > %s 2> %s' % (common_cmd, output_file_name, error_file_name)
elif cluster in ['corona']:
command = '%s +rocm > %s 2> %s' % (common_cmd, output_file_name, error_file_name)
elif cluster in ['catalyst']:
command = '%s +onednn +half +fft > %s 2> %s' % (common_cmd, output_file_name, error_file_name)
else:
e = 'test_compiler_build_script: Unsupported Cluster %s' % cluster
print('Skip - ' + e)
pytest.skip(e)
return_code = os.system(command)
artifact_dir = os.path.join(bamboo_base_dir, 'output')
with os.scandir(dirname) as it:
for entry in it:
if entry.is_file() and re.match(r'spack-.*txt', entry.name):
(base, ext) = os.path.splitext(entry.name)
new_file_name = base + '_output' + ext
shutil.copyfile(entry.path, os.path.join(artifact_dir, new_file_name))
tools.assert_success(return_code, error_file_name)
```
#### File: bamboo/unit_tests/test_unit_layer_channelwise_gru_cell.py
```python
import functools
import operator
import os
import os.path
import sys
import numpy as np
import scipy.special
import pytest
# Bamboo utilities
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.
# Data
np.random.seed(20200909)
_num_samples = 16
_num_channels = 9
_input_size = 5
_hidden_size = 7
_sample_size = _num_channels*_input_size + _num_channels *_hidden_size
_samples = np.random.uniform(low=-1, high=1, size=(_num_samples,_sample_size))
_samples = _samples.astype(np.float32)
# Sample access functions
def get_sample(index):
return _samples[index,:]
def num_samples():
return _num_samples
def sample_dims():
return (_sample_size,)
# ==============================================
# NumPy implementation
# ==============================================
def numpy_gru_cell(x, h, w):
#
# This implements a 2 dimensional analogue of the PyTorch.nn.GRUCell
# See here for more details:
# https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html#torch.nn.GRUCell
#
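    # Per-channel GRU cell equations (matching torch.nn.GRUCell), with
    # w = [W_ih, b_ih, W_hh, b_hh] and the r/z/n blocks stacked along the
    # first dimension of each weight matrix and bias vector:
    #   r  = sigmoid(W_ir x + b_ir + W_hr h + b_hr)
    #   z  = sigmoid(W_iz x + b_iz + W_hz h + b_hz)
    #   n  = tanh(W_in x + b_in + r * (W_hn h + b_hn))
    #   h' = (1 - z) * n + z * h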
# Dimensions
input_size = x[0].size
hidden_size = h[0].size
# Unroll GRU
for sample in range(x.shape[0]):
ih = np.matmul(w[0], x[sample]) + w[1]
hh = np.matmul(w[2], h[sample]) + w[3]
r = scipy.special.expit(ih[:hidden_size] + hh[:hidden_size])
z = scipy.special.expit(ih[hidden_size:2*hidden_size] + hh[hidden_size:2*hidden_size])
n = np.tanh(ih[2*hidden_size:] + r*hh[2*hidden_size:])
h[sample] = (1-z)*n + z*h[sample]
return h
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann):
"""Construct LBANN experiment.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Skip test on non-GPU systems
# Note: Test requires cuDNN (on GPU) or oneDNN (on CPU).
### @todo Assume LBANN has been built with oneDNN?
if not tools.gpus_per_node(lbann):
message = f'{os.path.basename(__file__)} requires cuDNN or oneDNN'
print('Skip - ' + message)
pytest.skip(message)
mini_batch_size = num_samples() // 2
trainer = lbann.Trainer(mini_batch_size)
model = construct_model(lbann)
data_reader = construct_data_reader(lbann)
optimizer = lbann.SGD()
return trainer, model, data_reader, optimizer
def construct_model(lbann):
"""Construct LBANN model.
Args:
lbann (module): Module for LBANN Python frontend
"""
from lbann.modules.rnn import ChannelwiseGRU
# Input data
# Note: Sum with a weights layer so that gradient checking will
# verify that error signals are correct.
x_weights = lbann.Weights(initializer=lbann.ConstantInitializer(value=0.0),
name='input')
h_weights = lbann.Weights(initializer=lbann.ConstantInitializer(value=0.0),
                              name='initial_hidden')
input_ = lbann.Input(data_field='samples')
input_slice = lbann.Slice(
input_,
slice_points=tools.str_list([0, _num_channels*_input_size, _sample_size]),
)
x = lbann.Reshape(input_slice, dims=tools.str_list([_num_channels,_input_size]), name="input_reshape")
x = lbann.Sum(x, lbann.WeightsLayer(weights=x_weights, dims=tools.str_list([_num_channels,_input_size])), name="input_sum")
h = lbann.Reshape(input_slice, dims=tools.str_list([_num_channels,_hidden_size]),name="hidden_reshape")
h = lbann.Sum(h, lbann.WeightsLayer(weights=h_weights, dims=tools.str_list([_num_channels,_hidden_size])), name="input_hidden_sum")
x_lbann = x
h_lbann = h
# Objects for LBANN model
obj = []
metrics = []
callbacks = []
# Weights
rnn_weights_numpy = []
ih_matrix = np.random.uniform(
low=-1,
high=1,
size=(3*_hidden_size,_input_size),
)
hh_matrix = np.random.uniform(
low=-1,
high=1,
size=(3*_hidden_size,_hidden_size),
)
ih_bias = np.random.uniform(low=-1, high=1, size=(3*_hidden_size,))
hh_bias = np.random.uniform(low=-1, high=1, size=(3*_hidden_size,))
rnn_weights_numpy.extend([ih_matrix, ih_bias, hh_matrix, hh_bias])
rnn_weights_lbann = [
lbann.Weights(
initializer=lbann.ValueInitializer(
values=tools.str_list(np.nditer(w, order='F'))))
for w in rnn_weights_numpy
]
# LBANN implementation
x = x_lbann
h = h_lbann
channelwise_GRU_cell = ChannelwiseGRU(num_channels=_num_channels,
size=_hidden_size,
weights=rnn_weights_lbann)
y = channelwise_GRU_cell(x, h)
z = lbann.L2Norm2(y)
obj.append(z)
metrics.append(lbann.Metric(z, name="Multi-channel, Unidirectional, GRU Cell"))
# NumPy implementation
vals = []
for i in range(num_samples()):
input_ = get_sample(i).astype(np.float64)
x = input_[:_num_channels*_input_size].reshape((_num_channels,_input_size))
h = input_[_num_channels*_input_size:].reshape((_num_channels,_hidden_size))
y = numpy_gru_cell(x, h, rnn_weights_numpy)
z = tools.numpy_l2norm2(y)
vals.append(z)
val = np.mean(vals)
tol = 8 * val * np.finfo(np.float32).eps
callbacks.append(lbann.CallbackPrintModelDescription())
callbacks.append(lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test'))
# ------------------------------------------
# Gradient checking
# ------------------------------------------
callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))
# ------------------------------------------
# Construct model
# ------------------------------------------
num_epochs = 0
return lbann.Model(num_epochs,
layers=lbann.traverse_layer_graph(x_lbann),
objective_function=obj,
metrics=metrics,
callbacks=callbacks)
def construct_data_reader(lbann):
"""Construct Protobuf message for Python data reader.
The Python data reader will import the current Python file to
access the sample access functions.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Note: The training data reader should be removed when
# https://github.com/LLNL/lbann/issues/1098 is resolved.
message = lbann.reader_pb2.DataReader()
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'train'
)
])
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'test'
)
])
return message
# ==============================================
# Setup PyTest
# ==============================================
# Create test functions that can interact with PyTest
for _test_func in tools.create_tests(setup_experiment, __file__):
globals()[_test_func.__name__] = _test_func
```
#### File: model_zoo/cosmoflow/cosmoflow.py
```python
import argparse
import os.path
import google.protobuf.text_format as txtf
import lbann
import lbann.contrib.lc.launcher
import lbann.modules as lm
import lbann.proto as lp
from lbann.weights import Weights
import numpy as np
# ----------------------------------
# The CosmoFlow module
# ----------------------------------
class CosmoFlow(lm.Module):
"""
CosmoFlow neural network.
See:
<NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, Prabhat, and <NAME>.
"Cosmoflow: Using deep learning to learn the universe at scale."
Proceedings of the International Conference for High Performance
Computing, Networking, Storage, and Analysis, SC'18, pp. 65:1-65:11,
2018.
    Note that this implementation is somewhat different from the model described in the paper.
"""
global_count = 0 # Static counter, used for default names
def __init__(self, output_size,
input_width,
name=None):
"""Initialize CosmFlow.
Args:
output_size (int): Size of output tensor.
input_width (int): Width of input tensor.
name (str, optional): Module name
(default: 'cosmoflow_module<index>').
"""
CosmoFlow.global_count += 1
self.instance = 0
self.name = (name if name
else 'cosmoflow_module{0}'.format(CosmoFlow.global_count))
self.input_width = input_width
assert self.input_width in [128, 256, 512]
self.layer_params = [
{"type": "conv", "out_channels": 16, "kernel_size": 3, "stride": 1},
{"type": "pool"},
{"type": "conv", "out_channels": 32, "kernel_size": 3, "stride": 1},
{"type": "pool"},
{"type": "conv", "out_channels": 64, "kernel_size": 3, "stride": 1},
{"type": "pool"},
{"type": "conv", "out_channels": 128, "kernel_size": 3, "stride": 2},
{"type": "pool"},
{"type": "conv", "out_channels": 256, "kernel_size": 3, "stride": 1},
{"type": "pool"},
{"type": "conv", "out_channels": 256, "kernel_size": 3, "stride": 1},
{"type": "conv", "out_channels": 256, "kernel_size": 3, "stride": 1},
]
for p in self.layer_params:
if p["type"] == "conv":
p["padding"] = int((p["kernel_size"]-1)/2)
additional_pools = []
if self.input_width == 256:
additional_pools = [6]
elif self.input_width == 512:
additional_pools = [6, 7]
for i in additional_pools:
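            # Locate the i-th convolution layer in layer_params and insert
            # an extra pooling layer immediately after it.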
conv_idx = list(np.cumsum([1 if x["type"] == "conv" else 0 for x in self.layer_params])).index(i)
self.layer_params.insert(conv_idx+1, {"type": "pool"})
width = self.input_width
for p in self.layer_params:
if p["type"] == "conv":
output_width = int(width / p["stride"])
else:
output_width = int(width / 2)
p["width"] = output_width
width = output_width
assert width > 0
for i, param in enumerate(filter(lambda x: x["type"] == "conv", self.layer_params)):
conv_name ="conv"+str(i+1)
conv_weights = [Weights(initializer=lbann.GlorotUniformInitializer())]
param_actual = dict(param)
param_actual.pop("type", None)
param_actual.pop("width", None)
conv = lm.Convolution3dModule(
**param_actual,
activation=lbann.LeakyRelu,
name=self.name+"_"+conv_name,
bias=False,
weights=conv_weights)
setattr(self, conv_name, conv)
# Create fully-connected layers
fc_params = [
{"size": 2048},
{"size": 256},
{"size": output_size},
]
for i, param in enumerate(fc_params):
fc_name ="fc"+str(i+1)
fc = lm.FullyConnectedModule(
**param,
activation=lbann.LeakyRelu if i < len(fc_params)-1 else None,
name=self.name+"_"+fc_name,
weights=[Weights(initializer=lbann.GlorotUniformInitializer()),
Weights(initializer=lbann.ConstantInitializer(value=0.1))],
)
setattr(self, fc_name, fc)
def forward(self, x):
self.instance += 1
def create_pooling(x, i, w):
return lbann.Pooling(
x, num_dims=3, has_vectors=False,
pool_dims_i=3,
pool_pads_i=1,
pool_strides_i=2,
pool_mode='average',
name='{0}_pool{1}_instance{2}'.format(self.name,i,self.instance))
def create_dropout(x, i):
return lbann.Dropout(x, keep_prob=0.8,
name='{0}_drop{1}_instance{2}'.format(self.name,i,self.instance))
# Convolutional network
i_conv = 1
i_pool = 1
for param in self.layer_params:
if param["type"] == "conv":
x = getattr(self, "conv{}".format(i_conv))(x)
i_conv += 1
else:
x = create_pooling(x, i_pool, param["width"])
i_pool += 1
# Fully-connected layers
x = create_dropout(x,1)
x = self.fc1(x)
x = create_dropout(x,2)
x = self.fc2(x)
x = create_dropout(x,3)
x = self.fc3(x)
return x
def create_data_reader(train_path, val_path, test_path):
readerArgs = []
for role, data_filename in [("train", train_path),
("validate", val_path),
("test", test_path)]:
        if data_filename is not None:
readerArgs.append({"role": role, "data_filename": data_filename})
readers = []
for readerArg in readerArgs:
reader = lp.lbann_pb2.Reader(
name="numpy_npz_conduit_reader",
shuffle=True,
validation_percent=0,
absolute_sample_count=0,
percent_of_data_to_use=1.0,
scaling_factor_int16=1.0,
**readerArg)
readers.append(reader)
return lp.lbann_pb2.DataReader(reader=readers)
# ----------------------------------
# Command-line arguments
# ----------------------------------
desc = ('Construct and run the CosmoFlow network. '
'Running the experiment is only supported on LC systems.')
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'--partition', action='store', type=str,
help='scheduler partition', metavar='NAME')
parser.add_argument(
'--account', action='store', type=str,
help='scheduler account', metavar='NAME')
parser.add_argument(
'--experiment-dir', action='store', type=str,
help='experiment directory', metavar='NAME')
parser.add_argument(
"--learn-rate", action="store", default=0.0005, type=float,
help="The initial learning-rate")
parser.add_argument(
"--nodes", action="store", default=32, type=int,
help="The number of nodes")
parser.add_argument(
"--mini-batch-size", action="store", default=128, type=int,
help="The mini-batch size")
parser.add_argument(
"--epochs", action="store", default=130, type=int,
help="The number of epochs")
parser.add_argument(
"--output-size", action="store", default=4, type=int,
help="Size of output tensor")
parser.add_argument(
"--input-width", action="store", default=256, type=int,
help="Width of input tensor")
for role, label, required in [("train", "training", True),
("val", "validation", False),
("test", "test", False)]:
parser.add_argument(
"--{}-path".format(role), type=str, required=required,
help="Path to {} dataset".format(label), default=None)
args = parser.parse_args()
# ----------------------------------
# Construct layer graph
# ----------------------------------
# Input data
input = lbann.Input(io_buffer='partitioned',
target_mode='regression')
universes = lbann.Identity(input)
secrets = lbann.Identity(input)
# CosmoFlow
x = CosmoFlow(args.output_size,
args.input_width).forward(universes)
# Loss function
loss = lbann.MeanSquaredError([x, secrets])
# Metrics
metrics = [lbann.Metric(loss, name="MSE", unit="")]
# Callbacks
callbacks = [
lbann.CallbackPrint(),
lbann.CallbackTimer(),
lbann.CallbackPolyLearningRate(
power=1.0,
num_epochs=100, # TODO: Warn if args.epochs < 100
),
lbann.CallbackGPUMemoryUsage(),
lbann.CallbackDumpOutputs(
directory="dump_acts/",
layers="cosmoflow_module1_fc3_instance1 layer3",
execution_modes="test"
),
lbann.CallbackProfiler(skip_init=True)
]
# ----------------------------------
# Setup experiment
# ----------------------------------
# Setup model
model = lbann.Model(args.mini_batch_size,
args.epochs,
layers=lbann.traverse_layer_graph(input),
objective_function=loss,
metrics=metrics,
callbacks=callbacks)
# Setup optimizer
opt = lbann.Adam(learn_rate=args.learn_rate,
beta1=0.9,
beta2=0.99,
eps=1e-8)
# Setup data reader
data_reader_proto = create_data_reader(args.train_path,
args.val_path,
args.test_path)
# ----------------------------------
# Run experiment
# ----------------------------------
# Note: Use `lbann.run` instead for non-LC systems.
kwargs = {}
if args.partition: kwargs['partition'] = args.partition
if args.account: kwargs['account'] = args.account
if args.experiment_dir: kwargs['experiment_dir'] = args.experiment_dir
lbann.contrib.lc.launcher.run(model, data_reader_proto, opt,
lbann_args=" --use_data_store --preload_data_store",
job_name='lbann_cosmoflow',
nodes=args.nodes,
**kwargs)
```
#### File: jag_utils/python/shuffle.py
```python
import sys
import os
import random
def shuffle(fn) :
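    # Write <fn>.shuffled: keep the 3-line header as-is and emit the
    # remaining sample lines in random order.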
fn2 = fn + '.shuffled'
a = open(fn).readlines()
b = open(fn2, 'w')
b.write(a[0])
b.write(a[1])
b.write(a[2])
c = a[3:]
n = len(c)
r = set()
r_idx = []
for y in range(n) :
while True :
y = random.randint(0, n-1)
if y not in r :
r.add(y)
r_idx.append(y)
if len(r) == n :
break
for j in range(len(c)) :
b.write(c[r_idx[j]])
b.close()
    print('wrote:', fn2)
#====================================================================
if len(sys.argv) != 4 :
    print('usage:', sys.argv[0], 'base_dir num_sample_lists sample_list_base_name')
    print('example: python', sys.argv[0], '/p/lustre2/brainusr/datasets/10MJAG/1M_A/select_samples_test/another_dir 10 my_samples.txt')
exit(9)
dir = sys.argv[1]
n = int(sys.argv[2])
base_fn = sys.argv[3]
for j in range(n) :
fn = dir + '/t' + str(j) + '_' + base_fn
shuffle(fn)
```
#### File: examples/onnx/lbann2onnx.py
```python
import argparse
import re
import onnx
import os
import lbann.onnx.l2o
def parseInputShape(s):
name, shape = re.compile("^([^=]+)=([0-9,]+)$").search(s).groups()
return (name, list(map(int, shape.split(","))))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Convert a LBANN model to an ONNX model",
epilog="Usage: lbann2onnx.py model_alexnet.prototext output.onnx image=3,224,224 label=1000")
parser.add_argument("lbann_path", type=str,
help="Path to a LBANN model in .prototext")
parser.add_argument("onnx_path", type=str,
help="Path to an ONNX model")
parser.add_argument("input_shape", type=str, nargs="*",
help="Shape(s) of input tensor(s) *without* the mini-batch size in the \"NAME=N1,...,ND\" format.")
parser.add_argument("--add-value-info", dest="add_value_info", action="store_const",
const=True, default=False,
help="Embed value_info in the generated ONNX model")
args = parser.parse_args()
lbannPath = args.lbann_path
onnxPath = args.onnx_path
inputShapes = dict(map(parseInputShape, args.input_shape))
addValueInfo = args.add_value_info
model, miniBatchSize = lbann.onnx.l2o.parseLbannModelPB(os.path.expanduser(lbannPath),
inputShapes,
addValueInfo=addValueInfo)
onnx.save(model, os.path.expanduser(onnxPath))
```
#### File: lbann/core/callback.py
```python
import abc
from lbann import callbacks_pb2
import lbann.core.util
class Callback(abc.ABC):
"""Callback for neural network training."""
def __init__(self):
pass
def export_proto(self):
"""Construct and return a protobuf message."""
return callbacks_pb2.Callback()
# Generate Callback sub-classes from lbann.proto
# Note: The list of skip fields must be updated if any new fields are
# added to the Callback message in lbann.proto
if callbacks_pb2:
classes = lbann.core.util.generate_classes_from_protobuf_message(
callbacks_pb2.Callback,
base_class = Callback,
base_has_export_proto = True)
for c in classes:
globals()[c.__name__] = c
class ImageSelectionStrategy(abc.ABC):
"""Image selection strategy for summarize images callback."""
def __init__(self):
pass
def export_proto(self):
"""Construct and return a protobuf message."""
return callbacks_pb2.Callback.CallbackSummarizeImages.SelectionStrategy()
# Build all subclasses
if callbacks_pb2:
classes = lbann.core.util.generate_classes_from_protobuf_message(
callbacks_pb2.Callback.CallbackSummarizeImages.SelectionStrategy,
base_class = ImageSelectionStrategy,
base_has_export_proto = True)
for c in classes:
globals()[c.__name__] = c
```
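Because the concrete callback classes are generated from the protobuf message at import time, they are used exactly like hand-written classes. A minimal usage sketch based on the callbacks that appear in the model scripts above (keyword arguments are intended to map onto the corresponding protobuf fields):
```python
import lbann

# CallbackPrint and CallbackTimer are sub-classes generated from the
# Callback protobuf message at import time.
callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]
```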
#### File: lbann/core/optimizer.py
```python
import abc
from lbann import optimizers_pb2
import lbann.core.util
class Optimizer(abc.ABC):
"""Optimization algorithm for a neural network's parameters."""
def export_proto(self):
"""Construct and return a protobuf message."""
return optimizers_pb2.Optimizer()
# Generate Optimizer sub-classes from lbann.proto
# Note: The list of skip fields must be updated if any new fields are
# added to the Optimizer message in lbann.proto
if optimizers_pb2:
classes = lbann.core.util.generate_classes_from_protobuf_message(
optimizers_pb2.Optimizer,
base_class = Optimizer,
base_has_export_proto = True)
for c in classes:
globals()[c.__name__] = c
```
#### File: lbann/models/alexnet.py
```python
import lbann
import lbann.modules
class AlexNet(lbann.modules.Module):
"""AlexNet neural network.
Assumes image data in NCHW format.
See:
<NAME>, <NAME>, and <NAME>. "ImageNet classification with deep convolutional
neural networks." In Advances in Neural Information Processing
Systems, pp. 1097-1105. 2012.
Note that there is very little consistency in the implementation of
AlexNet across frameworks. If a particular variant is needed, you should
implement it yourself.
"""
global_count = 0 # Static counter, used for default names
def __init__(self, output_size, name=None):
"""Initialize AlexNet.
Args:
output_size (int): Size of output tensor.
name (str, optional): Module name
(default: 'alexnet_module<index>').
"""
AlexNet.global_count += 1
self.instance = 0
self.name = (name if name
else 'alexnet_module{0}'.format(AlexNet.global_count))
conv = lbann.modules.Convolution2dModule
fc = lbann.modules.FullyConnectedModule
self.conv1 = conv(96, 11, stride=4, activation=lbann.Relu,
name=self.name+'_conv1')
self.conv2 = conv(256, 5, padding=2, activation=lbann.Relu,
name=self.name+'_conv2')
self.conv3 = conv(384, 3, padding=1, activation=lbann.Relu,
name=self.name+'_conv3')
self.conv4 = conv(384, 3, padding=1, activation=lbann.Relu,
name=self.name+'_conv4')
self.conv5 = conv(256, 3, padding=1, activation=lbann.Relu,
name=self.name+'_conv5')
self.fc6 = fc(4096, activation=lbann.Relu, name=self.name+'_fc6')
self.fc7 = fc(4096, activation=lbann.Relu, name=self.name+'_fc7')
self.fc8 = fc(output_size, name=self.name+'_fc8')
def forward(self, x):
self.instance += 1
# Convolutional network
x = self.conv1(x)
x = lbann.LocalResponseNormalization(
x, window_width=5, lrn_alpha=0.0001, lrn_beta=0.75, lrn_k=2,
name='{0}_norm1_instance{1}'.format(self.name,self.instance))
x = lbann.Pooling(x, num_dims=2, has_vectors=False,
pool_dims_i=3, pool_pads_i=0, pool_strides_i=2,
pool_mode='max',
name='{0}_pool1_instance{1}'.format(self.name,self.instance))
x = self.conv2(x)
x = lbann.LocalResponseNormalization(
x, window_width=5, lrn_alpha=0.0001, lrn_beta=0.75, lrn_k=2,
name='{0}_norm2_instance{1}'.format(self.name,self.instance))
x = lbann.Pooling(x, num_dims=2, has_vectors=False,
pool_dims_i=3, pool_pads_i=0, pool_strides_i=2,
pool_mode='max',
name='{0}_pool2_instance{1}'.format(self.name,self.instance))
x = self.conv5(self.conv4(self.conv3(x)))
x = lbann.Pooling(x, num_dims=2, has_vectors=False,
pool_dims_i=3, pool_pads_i=0, pool_strides_i=2,
pool_mode='max',
name='{0}_pool5_instance{1}'.format(self.name,self.instance))
# Fully-connected network
x = self.fc6(x)
x = lbann.Dropout(x, keep_prob=0.5,
name='{0}_drop6_instance{1}'.format(self.name,self.instance))
x = self.fc7(x)
x = lbann.Dropout(x, keep_prob=0.5,
name='{0}_drop7_instance{1}'.format(self.name,self.instance))
return self.fc8(x)
```
#### File: graph/dense/DenseGraphConv.py
```python
import lbann
from lbann.modules import Module
from lbann.util import str_list
import math
class DenseGraphConv(Module):
global_count = 0
def __init__(self, input_channels, output_channels, name=None):
super().__init__()
self.name = (name if name else 'DenseGraph_{}'.format(DenseGraphConv.global_count))
DenseGraphConv.global_count+=1
bounds = math.sqrt(6.0/(input_channels + output_channels))
self.weights_1 = lbann.Weights(initializer = lbann.UniformInitializer(min=-bounds, max=bounds),
name=self.name+'_Weights_1')
self.weights_2 = lbann.Weights(initializer = lbann.UniformInitializer(min=-bounds, max=bounds),
name=self.name+'_Weights_2')
self.W1 = lbann.WeightsLayer(dims = str_list([input_channels, output_channels]),
name=self.name+'_param_1',
weights = self.weights_1)
self.W2 = lbann.WeightsLayer(dims = str_list([input_channels, output_channels]),
name=self.name+'_param_2',
weights = self.weights_2)
def forward(self, X, A):
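        # Dense graph convolution: out = X W1 + A (X W2), i.e. a linear
        # transform of each node's own features plus messages aggregated
        # from its neighbors via the dense adjacency matrix A.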
messages = lbann.MatMul(X, self.W2, name=self.name+'_w2_mult')
messages = lbann.MatMul(A,messages,name=self.name+'_adj_mult')
ident = lbann.MatMul(X, self.W1, name=self.name+'_w1_mult')
out = lbann.Sum(ident, messages, name=self.name+'_sum_id')
return out
```
#### File: l2o/layers/regularizers.py
```python
from lbann.onnx.parserDescriptor import parserDescriptor
from lbann.onnx.l2o.layers import LbannLayerParser
@parserDescriptor(["BatchNormalization"])
class LbannLayerParser_batch_normalization(LbannLayerParser):
def parse(self):
params = self.l.batch_normalization
outputs, paramNames = self.appendOperator("BatchNormalization",
paramCount=4,
attrs={"epsilon": params.epsilon,
"momentum": params.decay,
"spatial": 1})
for p in paramNames:
self.appendParam(p, [self.inputShapes[0][1]])
@parserDescriptor(["LRN"])
class LbannLayerParser_local_response_normalization(LbannLayerParser):
def parse(self):
params = self.l.local_response_normalization
self.appendOperator("LRN",
attrs={"alpha":params.lrn_alpha,
"beta": params.lrn_beta,
"bias": params.lrn_k,
"size": params.window_width})
@parserDescriptor(["Dropout"])
class LbannLayerParser_dropout(LbannLayerParser):
def parse(self):
self.appendOperator("Dropout",
attrs={"ratio": 1-self.l.dropout.keep_prob})
```
#### File: l2o/layers/transforms.py
```python
import lbann.onnx
from lbann.onnx.parserDescriptor import parserDescriptor
from lbann.onnx.util import getNodeAttributeByName
from lbann.onnx.l2o.util import parseSpatialAttributes
from lbann.onnx.l2o.layers import LbannLayerParser
import onnx
import numpy as np
@parserDescriptor(["MaxPool", "AveragePool"])
class LbannLayerParser_pooling(LbannLayerParser):
def parse(self):
params = self.l.pooling
self.appendOperator({"max": "MaxPool",
"average": "AveragePool"}[params.pool_mode],
attrs=parseSpatialAttributes(params, "pool", False))
@parserDescriptor(["MaxUnpool"])
class LbannLayerParser_unpooling(LbannLayerParser):
def parse(self):
unpoolNode = list(filter(lambda x: x.name == self.l.unpooling.pooling_layer,
self.knownNodes))
assert len(unpoolNode) == 1
self.appendOperator("MaxUnpool",
attrs=dict(map(lambda x: (x, getNodeAttributeByName(unpoolNode[0], x)),
["kernel_shape", "pads", "strides"])))
@parserDescriptor(["Split"])
class LbannLayerParser_slice(LbannLayerParser):
def parse(self):
params = self.l.slice
offsets = list(map(int, params.slice_points.split(" ")))
sizes = list(map(lambda x: offsets[x+1]-offsets[x], range(len(offsets)-1)))
self.appendOperator("Split",
attrs={"axis": params.axis,
"split": sizes})
@parserDescriptor(["Concat"])
class LbannLayerParser_concatenation(LbannLayerParser):
def parse(self):
self.appendOperator("Concat",
attrs={"axis": self.l.concatenation.axis})
@parserDescriptor(["RandomNormal"])
class LbannLayerParser_gaussian(LbannLayerParser):
def parse(self):
params = self.l.gaussian
# mean, stdev, neuron_dims
self.appendOperator("RandomNormal",
attrs={"dtype": lbann.onnx.ELEM_TYPE,
"mean": params.mean,
"scale": params.stdev,
"shape": params.neuron_dims if isinstance(params.neuron_dims, list) \
else list(map(int, params.neuron_dims.split(" ")))})
@parserDescriptor(["Reshape"])
class LbannLayerParser_reshape(LbannLayerParser):
def parse(self):
shape = list(map(int, self.l.reshape.dims.split(" ")))
h = self.createHiddenTensorName()
self.appendOperator("Reshape", {}, 0, [self.getLbannInputNames()[0], h])
self.appendParamWithInit(h, np.array(shape, dtype=np.int64))
@parserDescriptor(["ReduceSum", "ReduceMean"])
class LbannLayerParser_reduction(LbannLayerParser):
def parse(self):
self.appendOperator({"sum": "ReduceSum",
"average": "ReduceMean"}[self.l.reduction.mode],
attrs={"keepdims": 0})
##
## Dummy parsers
##
@parserDescriptor(stub=True)
class LbannLayerParser_evaluation(LbannLayerParser):
def parse(self):
self.appendOperator("LbannEvaluation")
```
#### File: onnx/o2l/__init__.py
```python
import sys
import numpy as np
import onnx
import onnx.numpy_helper
import google.protobuf.text_format as txtf
from lbann import lbann_pb2
import lbann.onnx.util
from lbann.onnx.o2l.layers import PARSERS
from lbann.onnx.l2o import getStaticTensorShapes
def getTensorInitial(name, graph):
for init in graph.initializer:
if name == init.name:
return onnx.numpy_helper.to_array(init)
return None
def getNodeName(i_op):
i, op = i_op
return "{}_{}".format(op.op_type, i)
def onnxToLbannLayers(o, lbannInputNames, l2oInputMap, dataLayout="auto"):
"""
Parses a given ONNX model and returns the equivalent LBANN model.
Args:
o (onnx.ModelProto): An ONNX model.
lbannInputNames (list): Names of input data.
l2oInputMap (dict): A map from the names of the input data to those of the ONNX tensors.
This map is used to tie each input data tensor to ONNX input tensor,
since the order and names of input tensors of the ONNX model might not be the same to
those of the equivalent LBANN model.
dataLayout (str): If this is "auto", data_layout of the converted layers is set to "model_parallel" if
the layer is fully_connected otherwise "data_parallel".
Returns:
list of lbann_pb2.Layer: The converted layers.
"""
graph = o.graph
tensorShapes = getStaticTensorShapes(o)
opNames = list(map(getNodeName, enumerate(graph.node)))
producers = {}
for op, opName in zip(graph.node, opNames):
for opt in op.output:
assert not opt in producers.keys()
producers[opt] = opName
inputLayerName = "data"
assert dataLayout == "auto"
layers = []
layers.append(lbann_pb2.Layer(name=inputLayerName,
children=lbann.onnx.util.list2LbannList(lbannInputNames),
data_layout="data_parallel",
input=lbann_pb2.Input()))
for i in lbannInputNames:
layers.append(lbann_pb2.Layer(name=i,
parents=lbann.onnx.util.list2LbannList([inputLayerName]),
data_layout="data_parallel",
split=lbann_pb2.Split()))
producers[i] = i
if i in l2oInputMap.keys():
producers[l2oInputMap[i]] = i
for op, opName in zip(graph.node, opNames):
inputShapes = list(map(lambda x: tensorShapes[x], op.input))
outputShapes = list(map(lambda x: tensorShapes[x] if x in tensorShapes.keys() else None, op.output)) # Dropout's mask shape might be unknown
inits = list(map(lambda x: getTensorInitial(x, graph), op.input))
parents = list(map(lambda x: producers[x] if x in producers.keys() else None, op.input))
layers.append(onnxNodeToLbannLayer(op, opName, inputShapes, outputShapes, inits, parents, dataLayout="auto"))
return layers
def onnxNodeToLbannLayer(op, opName, inputShapes, outputShapes, inits, parents, dataLayout):
opType = op.op_type
if not opType in PARSERS.keys():
print(lbann.onnx.util.printWarning("op_type \"{}\" is not supported.".format(opType)))
assert False
dic = PARSERS[opType](op, inputShapes, outputShapes, inits).parse()
validParents = []
hitInvalid = False
for i, p in enumerate(parents):
if p is not None:
assert not hitInvalid
validParents.append(p)
else:
hitInvalid = True
assert dataLayout == "auto"
l = lbann_pb2.Layer(name=opName,
parents=lbann.onnx.util.list2LbannList(validParents),
data_layout=("model_parallel" if "fully_connected" in dic.keys() else "data_parallel"),
**dic)
return l
```
#### File: o2l/layers/math.py
```python
from lbann.onnx.o2l.layers import OnnxLayerParser
from lbann.onnx.parserDescriptor import parserDescriptor
from lbann import lbann_pb2
@parserDescriptor(["relu"])
class parse_Relu(OnnxLayerParser):
def parse(self):
return {"relu": lbann_pb2.Relu()}
@parserDescriptor(["softmax"])
class parse_Softmax(OnnxLayerParser):
def parse(self):
return {"softmax": lbann_pb2.Softmax()}
@parserDescriptor(["concatenation"])
class parse_Concat(OnnxLayerParser):
def parse(self):
return {"concatenation": lbann_pb2.Concatenation(axis = self.getNodeAttribute("axis"))}
@parserDescriptor(["sum"])
class parse_Sum(OnnxLayerParser):
def parse(self):
return {"sum": lbann_pb2.Sum()}
@parserDescriptor(["add"])
class parse_Add(OnnxLayerParser):
def parse(self):
return {"add": lbann_pb2.Add()}
```
#### File: o2l/layers/regularizers.py
```python
from lbann.onnx.o2l.layers import OnnxLayerParser
from lbann.onnx.parserDescriptor import parserDescriptor
from lbann import lbann_pb2
@parserDescriptor(["local_response_normalization"])
class parse_LRN(OnnxLayerParser):
def parse(self):
local_response_normalization = lbann_pb2.LocalResponseNormalization(
lrn_alpha = self.getNodeAttribute("alpha"),
lrn_beta = self.getNodeAttribute("beta"),
lrn_k = self.getNodeAttribute("bias"),
window_width = self.getNodeAttribute("size"),
)
return {"local_response_normalization": local_response_normalization}
@parserDescriptor(["batch_normalization"])
class parse_BatchNormalization(OnnxLayerParser):
def parse(self):
batch_normalization = lbann_pb2.BatchNormalization(
epsilon = self.getNodeAttribute("epsilon", 1e-5),
decay = self.getNodeAttribute("momentum", 0.9),
)
return {"batch_normalization": batch_normalization}
@parserDescriptor(["dropout"])
class parse_Dropout(OnnxLayerParser):
def parse(self):
return {"dropout": lbann_pb2.Dropout(keep_prob = 1.0-self.getNodeAttribute("ratio"))}
```
#### File: onnx/tests/onnx2lbann_layer_test.py
```python
import unittest
import onnx
import re
import lbann.onnx
import lbann.onnx.o2l
from lbann.onnx.util import lbannList2List
from lbann.onnx.tests.util import getLbannVectorField
import lbann
def makeFloatTensorVI(name, shape):
return onnx.helper.make_tensor_value_info(
name=name,
elem_type=lbann.onnx.ELEM_TYPE,
shape=shape
)
def makeOnnxModel(node, inputs):
inputs = list(map(lambda x: makeFloatTensorVI(*x), inputs.items()))
return onnx.helper.make_model(onnx.helper.make_graph(
[node],
name="",
inputs=inputs,
outputs=[])
)
def convertOnnxNode(node, inputs, params):
o = makeOnnxModel(node, {**inputs, **params})
layers = lbann.onnx.o2l.onnxToLbannLayers(
o,
inputs.keys(),
dict(zip(inputs.keys(), inputs.keys()))
)
return layers[-1]
class TestOnnx2LbannLayer(unittest.TestCase):
def _assertFields(self, lbannFields, onnxFields):
hasVectors = False
if "has_vectors" in lbannFields.get_field_names():
assert hasattr(onnxFields, "has_vectors")
hasVectors = True
for fieldName in lbannFields.get_field_names():
if fieldName == "has_vectors":
continue
if re.compile("_i$").search(fieldName):
continue
lbannField = getattr(lbannFields, fieldName)
onnxField = getattr(onnxFields, fieldName)
if hasVectors and "{}_i".format(fieldName) in lbannFields.get_field_names():
lbannField = lbannList2List(getLbannVectorField(lbannFields, fieldName))
onnxField = lbannList2List(getLbannVectorField(onnxFields, fieldName))
if len(lbannField) < len(onnxField):
assert len(lbannField) == 1
lbannField *= len(onnxField)
if len(onnxField) < len(lbannField):
assert len(onnxField) == 1
onnxField *= len(lbannField)
if lbannField is None:
continue
if hasVectors and re.compile("_i$").search(fieldName):
continue
assertFunc = self.assertEqual
if isinstance(lbannField, float) and isinstance(onnxField, float):
assertFunc = self.assertAlmostEqual
assertFunc(
lbannField,
onnxField,
msg=fieldName
)
def _test_o2l_layer_Gemm(self, hasBias):
M, N, K = (100, 200, 300)
lbannFC = lbann.FullyConnected(
lbann.Input(),
num_neurons=N,
has_bias=hasBias
)
inputShapes = {"x": [M,K]}
paramShapes = {"W": [N,K]}
if hasBias:
paramShapes["b"] = [N]
node = onnx.helper.make_node(
"Gemm",
inputs=["x","W"] + (["b"] if hasBias else []),
outputs=["y"],
transB=1
)
onnxFC = convertOnnxNode(
node,
inputShapes,
paramShapes
).fully_connected
self._assertFields(lbannFC, onnxFC)
def test_o2l_layer_Gemm_bias(self):
self._test_o2l_layer_Gemm(hasBias=True)
def test_o2l_layer_Gemm_no_bias(self):
self._test_o2l_layer_Gemm(hasBias=False)
def _test_o2l_layer_Conv(self, numDims, hasBias):
N, C_in, H = (256, 3, 224)
C_out = 64
K, P, S, D = (3, 1, 1, 1)
G = 1
lbannConv = lbann.Convolution(
lbann.Input(),
num_dims=numDims,
num_output_channels=C_out,
has_vectors=False,
conv_dims_i=K,
conv_pads_i=P,
conv_strides_i=S,
conv_dilations_i=D,
num_groups=G,
has_bias=hasBias
)
inputShapes = {"x": [N, C_in] + [H]*numDims}
paramShapes = {"W": [C_out, C_in] + [K]*numDims}
if hasBias:
paramShapes["b"] = [C_out]
node = onnx.helper.make_node(
"Conv",
inputs=["x","W"] + (["b"] if hasBias else []),
outputs=["y"],
kernel_shape=[K]*numDims,
pads=[P]*(numDims*2),
strides=[S]*numDims,
dilations=[D]*numDims,
group=G
)
onnxConv = convertOnnxNode(
node,
inputShapes,
paramShapes
).convolution
self._assertFields(lbannConv, onnxConv)
def test_o2l_layer_Conv_bias(self):
self._test_o2l_layer_Conv(numDims=2, hasBias=True)
def test_o2l_layer_Conv_no_bias(self):
self._test_o2l_layer_Conv(numDims=2, hasBias=False)
def test_o2l_layer_Conv_3D_bias(self):
self._test_o2l_layer_Conv(numDims=3, hasBias=True)
def test_o2l_layer_Conv_3D_no_bias(self):
self._test_o2l_layer_Conv(numDims=3, hasBias=False)
def _test_o2l_layer_Pool(self, numDims, poolMode, onnxOp):
N, C, H = (256, 3, 224)
K, P, S = (3, 1, 1)
lbannPooling = lbann.Pooling(
lbann.Input(),
num_dims=numDims,
has_vectors=False,
pool_dims_i=K,
pool_pads_i=P,
pool_strides_i=S,
pool_mode=poolMode
)
inputShapes = {"x": [N, C] + [H]*numDims}
node = onnx.helper.make_node(
onnxOp,
inputs=["x"],
outputs=["y"],
kernel_shape=[K]*numDims,
pads=[P]*(numDims*2),
strides=[S]*numDims,
)
onnxPooling = convertOnnxNode(
node,
inputShapes,
{}
).pooling
self._assertFields(lbannPooling, onnxPooling)
def test_o2l_layer_MaxPool(self):
self._test_o2l_layer_Pool(numDims=2, poolMode="max", onnxOp="MaxPool")
def test_o2l_layer_AveragePool(self):
self._test_o2l_layer_Pool(numDims=2, poolMode="average", onnxOp="AveragePool")
def test_o2l_layer_MaxPool_3D(self):
self._test_o2l_layer_Pool(numDims=3, poolMode="max", onnxOp="MaxPool")
def test_o2l_layer_AveragePool_3D(self):
self._test_o2l_layer_Pool(numDims=3, poolMode="average", onnxOp="AveragePool")
def test_o2l_layer_BatchNormalization(self):
N, C, H, W = (100,200,300,400)
decay = 0.95
epsilon = 1e-6
lbannBN = lbann.BatchNormalization(
lbann.Input(),
decay=decay, epsilon=epsilon,
)
inputShapes = {"x": [N,C,H,W]}
paramShapes = {"scale": [C],
"B" : [C],
"mean" : [C],
"var" : [C]}
node = onnx.helper.make_node(
"BatchNormalization",
inputs=["x", "scale", "B", "mean", "var"],
outputs=["y"],
epsilon=epsilon,
momentum=decay
)
onnxBN = convertOnnxNode(
node,
inputShapes,
paramShapes
).batch_normalization
self._assertFields(lbannBN, onnxBN)
def test_o2l_layer_Relu(self):
N, C, H, W = (100,200,300,400)
lbannRelu = lbann.Relu(
lbann.Input(),
)
node = onnx.helper.make_node(
"Relu",
inputs=["x"],
outputs=["y"],
)
onnxRelu = convertOnnxNode(
node,
{"x": [N,C,H,W]},
{}
).relu
self._assertFields(lbannRelu, onnxRelu)
if __name__ == "__main__":
unittest.main()
```
#### File: onnx/tests/util.py
```python
import os
import unittest
from lbann.onnx.util import parseBoolEnvVar, list2LbannList
DUMPED_MODELS_DIR = "dumped_models"
def isModelDumpEnabled():
return parseBoolEnvVar("LBANN_ONNX_DUMP_MODELS", False)
def createAndGetDumpedModelsDir():
if not os.path.exists(DUMPED_MODELS_DIR):
os.mkdir(DUMPED_MODELS_DIR)
return DUMPED_MODELS_DIR
def getLbannVectorField(fields, name):
if fields.has_vectors:
return getattr(fields, name)
else:
return list2LbannList([getattr(fields, "{}_i".format(name))])
```
#### File: scripts/onnx/create_support_status.py
```python
import os
import re
import unittest
from lbann.onnx.tests.onnx2lbann_layer_test import TestOnnx2LbannLayer
from lbann.onnx.tests.lbann2onnx_layer_test import TestLbann2OnnxLayer
from lbann.onnx.o2l.layers import PARSERS as PARSERS_o2l
from lbann.onnx.l2o.layers import PARSERS as PARSERS_l2o
def getTestedO2LLayers():
return set(map(lambda x: x.group(1),
filter(lambda x: x is not None,
map(lambda x: re.compile("test_o2l_layer_(.+)").match(x),
dir(TestOnnx2LbannLayer())))))
def getTestedL2OLayers():
return set(map(lambda x: x.group(1),
filter(lambda x: x is not None,
map(lambda x: re.compile("test_l2o_layer_(.+)").match(x),
dir(TestLbann2OnnxLayer())))))
def addLinkToOnnxOperator(op):
return "[{}](https://github.com/onnx/onnx/blob/master/docs/Operators.md#{})".format(op, op)
def createTable(o2l):
onnxOp = "ONNX Operator"
lbannOp = "LBANN Layer"
lines = [
"## {}".format("ONNX to LBANN" if o2l else "LBANN to ONNX"),
"| {} | Converted {} | Supported | Tested | Bijective |".format(
onnxOp if o2l else lbannOp,
lbannOp if o2l else onnxOp,
),
"|---|---|:-:|:-:|:-:|",
]
bijections = getBijections(PARSERS_o2l if o2l else PARSERS_l2o,
PARSERS_l2o if o2l else PARSERS_o2l)
testedParsers = getTestedO2LLayers() if o2l else getTestedL2OLayers()
for l, p in (PARSERS_o2l if o2l else PARSERS_l2o).items():
converted = ", ".join(list(map(lambda x: x if o2l else addLinkToOnnxOperator(x),
p.convertedLayers)))
if p.arithmetic:
converted = "[Multiple operators]"
tested = any([re.compile(l).match(x) is not None for x in testedParsers])
lines.append(
"| {} | {} | {} | {} | {} | ".format(
addLinkToOnnxOperator(l) if o2l else l,
converted,
"✔" if not p.stub else "",
"✔" if tested else "",
"✔" if l in bijections else "",
)
)
return lines
def getSurjectives(parsers):
ret = []
for l, p in parsers.items():
if not p.arithmetic and not p.stub and len(p.convertedLayers) == 1:
ret.append(l)
return set(ret)
def getBijections(parsers1, parsers2):
sur1 = getSurjectives(parsers1)
sur2 = set(map(lambda x: parsers2[x].convertedLayers[0],
getSurjectives(parsers2)))
return sur1 & sur2
if __name__ == "__main__":
lines = ["# lbann-onnx Support Status",
"This file is automatically generated by `scripts/create_support_status.py`."]
lines.extend(createTable(False))
lines.extend(createTable(True))
print("\n".join(lines))
```
#### File: scripts/plotting/plot_comp_times.py
```python
import sys
from collections import OrderedDict
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn
import load_events
def plot_times(files, names, output_name):
"""Plot comparisons of run times.
This only plots for model 0.
"""
results = []
for f in files:
results.append(load_events.load_values(
f,
event_names=['objective_time', 'objective_evaluation_time',
'objective_differentiation_time', 'metric_evaluation_time'],
layer_event_names=['fp_time', 'bp_time', 'update_time', 'imcomm_time', 'opt_time'],
model=0))
fig, ax = plt.subplots(1, 1)
bar_width = 0.35
labels = ['{0}'.format(x) for x in results[0]['fp_time'].keys()]
labels += ['obj', 'metrics']
starts = np.arange(len(labels))*(bar_width*len(files)+0.3) + 1
for i in range(len(results)):
result = results[i]
l = len(result['fp_time'].keys())
fp_tot = OrderedDict()
bp_tot = OrderedDict()
update_tot = OrderedDict()
imcomm_tot = OrderedDict()
opt_tot = OrderedDict()
for layer in result['fp_time'].keys():
fp_tot[layer] = np.sum(result['fp_time'][layer])
bp_tot[layer] = np.sum(result['bp_time'][layer])
update_tot[layer] = np.sum(result['update_time'][layer])
if 'imcomm_time' in result and layer in result['imcomm_time']:
imcomm_tot[layer] = np.sum(result['imcomm_time'][layer])
else:
imcomm_tot[layer] = 0
if 'opt_time' in result and layer in result['opt_time']:
opt_tot[layer] = np.sum(result['opt_time'][layer])
else:
opt_tot[layer] = 0
obj_val_tot = 0.0
obj_grad_tot = 0.0
if 'objective_time' in result:
obj_val_tot = np.sum(result['objective_time'])
if 'objective_evaluation_time' in result:
obj_val_tot = np.sum(result['objective_evaluation_time'])
obj_grad_tot = np.sum(result['objective_differentiation_time'])
metric_tot = 0.0
if 'metric_evaluation_time' in result:
metric_tot = np.sum(result['metric_evaluation_time'])
fp_tot = np.array(list(fp_tot.values()) + [obj_val_tot, metric_tot])
bp_tot = np.array(list(bp_tot.values()) + [obj_grad_tot, 0.0])
update_tot = np.array(list(update_tot.values()) + [0.0, 0.0])
imcomm_tot = np.array(list(imcomm_tot.values()) + [0.0, 0.0])
opt_tot = np.array(list(opt_tot.values()) + [0.0, 0.0])
ax.bar(starts + i*bar_width, fp_tot, bar_width, color='blue')
ax.bar(starts + i*bar_width, bp_tot, bar_width, bottom=fp_tot,
color='green')
ax.bar(starts + i*bar_width, update_tot, bar_width, bottom=fp_tot+bp_tot,
color='yellow')
ax.bar(starts + i*bar_width, opt_tot, bar_width,
bottom=fp_tot+bp_tot+update_tot, color='magenta')
rects = ax.bar(starts + i*bar_width, imcomm_tot, bar_width,
bottom=fp_tot+bp_tot+update_tot+opt_tot, color='red')
# Add the name to this bar.
ax.text(rects[0].get_x() + rects[0].get_width() / 2,
rects[0].get_y() + rects[0].get_height() + 1,
names[i],
ha='center', va='bottom', rotation='vertical', fontsize=4)
ax.set_ylabel('Time (s)')
ax.set_xticks(starts + bar_width*(len(results)/2))
ax.set_xticklabels(labels, rotation='vertical')
if len(fp_tot) > 35:
for label in ax.xaxis.get_ticklabels():
label.set_fontsize(3)
#for label in ax.xaxis.get_ticklabels()[::2]:
# label.set_visible(False)
ax.set_title('Per-layer runtime breakdown')
ax.legend(('FP', 'BP', 'Update', 'Opt', 'Imcomm'))
plt.tight_layout()
plt.savefig(output_name + '.pdf')
if __name__ == '__main__':
if len(sys.argv) < 4:
print('plot_comp_times.py: [events] [names] output')
sys.exit(1)
num = (len(sys.argv) - 2) // 2
plot_times(sys.argv[1:num+1], sys.argv[num+1:2*num+1], sys.argv[-1])
```
#### File: tools/mnist/mnist_to_npy_and_npz.py
```python
import numpy as np
import argparse
import os
IMAGE_WIDTH = 28
def convert_mnist_to_np_and_npz(imagePath, labelPath,
imageMagicNumber, labelMagicNumber,
out, int16):
with open(imagePath, "rb") as f:
imageBin = f.read()
assert imageMagicNumber == np.frombuffer(imageBin[ 0: 4], dtype=">u4")[0]
imageCount = np.frombuffer(imageBin[ 4: 8], dtype=">u4")[0]
assert IMAGE_WIDTH == np.frombuffer(imageBin[ 8:12], dtype=">u4")[0]
assert IMAGE_WIDTH == np.frombuffer(imageBin[12:16], dtype=">u4")[0]
pixels = np.frombuffer(imageBin[16:], dtype=">u1") \
.reshape([imageCount, IMAGE_WIDTH*IMAGE_WIDTH])
with open(labelPath, "rb") as f:
labelBin = f.read()
assert labelMagicNumber == np.frombuffer(labelBin[ 0: 4], dtype=">u4")[0]
assert imageCount == np.frombuffer(labelBin[ 4: 8], dtype=">u4")[0]
labels = np.frombuffer(labelBin[8:], dtype=">u1") \
.reshape([imageCount, 1])
pixels = pixels.astype(np.float32) / 255.0
labels = labels.astype(np.int32)
npy = np.concatenate((pixels, labels.astype(np.float32)), axis=1)
if int16:
pixels = (pixels * 0x7FFF).astype(np.int16)
np.save("{}.npy".format(out), npy)
np.savez(
"{}{}.npz".format(out, "_int16" if int16 else ""),
data=pixels,
labels=labels)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Convert the MNIST training/test datasets into .npy and .npz files.",
epilog="Usage: ./mnist_to_npy_and_npz.py path/to/mnist/directory")
parser.add_argument(
"mnist_dir", type=str,
help="Path to a directory containing the MNIST dataset (decompressed binary files)")
parser.add_argument(
"--int16",
dest="int16", action="store_const",
const=True, default=False,
help="Convert the image data into int16 (each pixel is multiplied by 0x7FFF)")
args = parser.parse_args()
convert_mnist_to_np_and_npz(
os.path.join(args.mnist_dir, "train-images-idx3-ubyte"),
os.path.join(args.mnist_dir, "train-labels-idx1-ubyte"),
2051, 2049,
"train",
args.int16)
convert_mnist_to_np_and_npz(
os.path.join(args.mnist_dir, "t10k-images-idx3-ubyte"),
os.path.join(args.mnist_dir, "t10k-labels-idx1-ubyte"),
2051, 2049,
"test",
args.int16)
``` |
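A quick way to sanity-check the converter's output is to load the files it writes back with NumPy. This is an illustrative sketch only: it assumes the script above has already been run in the current directory, and the 60000-row shape applies to the standard MNIST training set.
```python
import numpy as np

# "train.npy" holds the flattened pixels with the label appended as a final column.
train = np.load("train.npy")
print(train.shape)  # (60000, 785) = 60000 images x (28*28 pixels + 1 label column)

# "train.npz" (or "train_int16.npz" when --int16 is used) stores pixels and labels separately.
npz = np.load("train.npz")
print(npz["data"].shape, npz["labels"].shape)  # (60000, 784) (60000, 1)
```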
{
"source": "jonesim/ajax-helpers",
"score": 2
} |
#### File: django_examples/ajax_examples/views.py
```python
import base64
import datetime
import os
from io import BytesIO
from django import forms
from django.http import HttpResponse
from django.urls import reverse
from django.views.generic import TemplateView
from django_menus.menu import MenuMixin
from openpyxl import Workbook
from show_src_code.view_mixins import DemoViewMixin
from ajax_helpers.mixins import AjaxHelpers, ReceiveForm, AjaxFileUploadMixin
from ajax_helpers.utils import ajax_command
class MainMenu(DemoViewMixin, MenuMixin, TemplateView):
def setup_menu(self):
super().setup_menu()
self.add_menu('main_menu').add_items(
('ajax_main', 'General'),
('timer_examples', 'Timers'),
('download_examples', 'File Downloads'),
('dragdrop_upload', 'File Uploads'),
('event_example', 'Event'),
'help',
)
class TestForm(forms.Form):
text_entry = forms.CharField(max_length=100)
class ToolTip(TemplateView):
template_name = 'ajax_examples/tooltip_template.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.request.GET['django'] == 'today':
context['time'] = datetime.datetime.now().strftime('%m/%d/%Y, %H:%M:%S')
context['day'] = 'Today'
else:
context['time'] = (datetime.datetime.now()-datetime.timedelta(days=1)).strftime('%m/%d/%Y, %H:%M:%S')
context['day'] = 'Yesterday'
return context
class Example1(ReceiveForm, AjaxHelpers, MainMenu):
template_name = 'ajax_examples/main.html'
def button_redirect(self):
self.add_command('message', text='Will redirect after this message')
self.add_command('redirect', url=reverse('redirect'))
return self.command_response()
def button_test_ajax(self):
return self.command_response('message', text='From Django View')
def button_test_html(self):
return self.command_response('html', selector='#html_test', html='From Django View')
def form_form_id(self, **kwargs):
return self.command_response('message', text=f'From Django View - field {kwargs["field1"]}')
def form_django_form_id(self, **kwargs):
a = TestForm(kwargs)
if a.is_valid():
return self.command_response('html', selector='#django_form_id', html='thank you', parent=True)
else:
return self.command_response('html', selector='#django_form_id', html=a.as_p())
def button_append(self):
self.add_command('append_to', html='<div>New div</div>', selector='#append-to')
return self.command_response()
def button_css(self):
self.add_command('set_css', selector='#css-test', prop='width', val='800px')
return self.command_response('set_css', selector='#css-test', prop='background-color', val='yellow')
def button_null(self):
return self.command_response('null')
def button_count(self):
return self.command_response('element_count', selector='div', data={'ajax': 'count_response'})
def ajax_count_response(self, **kwargs):
return self.command_response('message', text='Number of divs ' + str(kwargs['count']))
def button_get_attr(self):
return self.command_response('get_attr', selector='#test-attr-div', attr='class',
data={'ajax': 'attr_response'})
def ajax_attr_response(self, **kwargs):
return self.command_response('message', text='Div classes ' + str(kwargs['val']))
def file_upload(self, file):
return self.command_response('message', text=f'Received {file.name} size {file.size}')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['form'] = TestForm()
return context
class Example2(Example1):
template_name = 'ajax_examples/redirect.html'
class TimerExample(Example1):
template_name = 'ajax_examples/timer.html'
def timer_test(self, **kwargs):
return self.command_response('html', selector='#time_div',
html=datetime.datetime.now().strftime('%d/%m/%Y %H:%M:%S'))
def get_context_data(self, **kwargs):
self.add_page_command(
'timeout', commands=[ajax_command('message', text='Message appears after 4 seconds')], time=4000
)
self.add_page_command(
'timer', commands=[ajax_command('ajax_post', data={'timer': 'test'})], interval=2000
)
return super().get_context_data(**kwargs)
class DownloadExamples(AjaxHelpers, MainMenu):
template_name = 'ajax_examples/file_downloads.html'
@staticmethod
def create_excel_file():
workbook = Workbook()
sheet = workbook.active
sheet.append(['hello', 'world'])
output = BytesIO()
workbook.save(output)
output.seek(0)
return output
def button_redirect_download(self, download=None):
return self.command_response('redirect', url=reverse('download_examples') + f'?download={download}')
@staticmethod
def button_download_text():
response = HttpResponse(content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename="text.txt"'
response.write('text file data')
return response
def button_download_blob(self):
filename = 'test.xlsx'
excel_file = self.create_excel_file()
response = HttpResponse(content_type='application/ms-excel')
# Providing extra download information for the user's browser.
response['Content-Disposition'] = 'attachment; filename="%s"' % filename
response.write(excel_file.read())
return response
def button_download_base64(self, **kwargs):
excel_file = self.create_excel_file()
filename = 'test_base64.xlsx'
return self.command_response('save_file', data=base64.b64encode(excel_file.read()).decode('ascii'),
filename=filename)
def button_download_pdf(self):
f = open('/app/sample.pdf', 'rb')
return self.command_response('save_file', data=base64.b64encode(f.read()).decode('ascii'),
filename='sample.pdf', type='application/pdf')
def get(self, request, *args, **kwargs):
download = request.GET.get('download')
if download in ['1', '2']:
download_type = 'attachment' if download == '1' else 'inline'
f = open('/app/sample.pdf', 'rb')
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = f"{download_type}; filename=test.pdf"
response.write(f.read())
f.close()
return response
return super().get(request, *args, **kwargs)
class DragDropUpload(AjaxFileUploadMixin, AjaxHelpers, MainMenu):
template_name = 'ajax_examples/dragdrop_upload.html'
@staticmethod
def upload_files(filename, _size, file, **kwargs):
path = '/media/' + filename
with open(path, 'wb+') as destination:
destination.write(file.read())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['files'] = os.listdir('/media/')
return context
class EventExample(ReceiveForm, AjaxHelpers, MainMenu):
template_name = 'ajax_examples/event.html'
def form_test_form(self, **kwargs):
if kwargs.get('from_event') == 'keyup':
response = ('<span class="text-danger">Not enough chars</span>' if len(kwargs['text_entry']) < 6
else '<span class="text-success">All good now</span>')
return self.command_response('html', selector='#message', html=response)
def get_context_data(self, **kwargs):
self.add_page_command('on', event='keyup', selector='input', commands=[
ajax_command('send_form', form_id='test_form', from_event='keyup')
])
context = super().get_context_data(**kwargs)
context['form'] = TestForm()
return context
class Help(MainMenu):
template_name = 'ajax_examples/help.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['commands'] = {
'ajax_post': 'data, (url)',
'append_to': 'selector, html',
'delay': 'time',
'element_count': 'selector, data, (url)',
'get_attr': 'selector, attr, data, (url)',
'html': 'selector, (parent), html',
'message': 'text',
'null': '',
'on': 'selector, event, commands',
'onload': 'commands',
'reload': '',
'redirect': 'url',
'save_file': 'filename, data',
'send_form': 'form_id',
'set_attr': 'selector, attr, val',
'set_css': 'selector, prop, val',
'set_prop': 'selector, prop, val',
'set_value': 'selector, val',
'timeout': 'commands, time',
'timer': 'commands, interval',
'upload_file': '',
}
return context
``` |
{
"source": "jonesim/django-2fa",
"score": 2
} |
#### File: django_examples/examples/customise.py
```python
from modal_2fa.customise import CustomiseAuth
from django.utils.safestring import mark_safe
from django_modals.form_helpers import SmallHelper
class ExampleCustomise(CustomiseAuth):
@staticmethod
def customise_view(view):
view.center_header = True
view.size = 'md'
view.helper_class = SmallHelper
if hasattr(view, 'modal_title'):
image = '<img src="/static/HTML5.svg" width="28">'
view.modal_title = mark_safe(
f'<span class="m-auto">{image}<span class="ml-2">{view.modal_title}</span></span>'
)
view.no_parent_template = 'blank_page_img.html'
@staticmethod
def allowed_remember(user):
if user.username == 'ian5':
return False
return True
@staticmethod
def user_2fa_optional(user):
return True
```
#### File: django-2fa/modal_2fa/backends.py
```python
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth import get_user_model
from django_otp import user_has_device
from .models import RememberDeviceCookie
from .utils import get_custom_auth
UserModel = get_user_model()
class CookieBackend(ModelBackend):
part_login_key = 'part_login'
def __init__(self):
super().__init__()
self.customisation_class = get_custom_auth()
@staticmethod
def get_part_login(request):
return request.session.get(CookieBackend.part_login_key)
@staticmethod
def set_part_login(request, username):
request.session[CookieBackend.part_login_key] = username
@staticmethod
def delete_part_login(request):
request.session.pop(CookieBackend.part_login_key, None)
@staticmethod
def get_part_login_user(request):
# noinspection PyProtectedMember
return UserModel._default_manager.get_by_natural_key(CookieBackend.get_part_login(request))
def authenticate(self, request, username=None, password=None, device=None, token=None, **kwargs):
if device is None:
user = super().authenticate(request, username, password, **kwargs)
if not user:
return
if RememberDeviceCookie.cookie_object(request, user, active=True):
request.session['authentication_method'] = 'cookie'
return user
elif not user_has_device(user) and self.customisation_class.user_2fa_optional(user):
return user
elif user:
self.set_part_login(request, user.username)
else:
if device.verify_token(token):
request.session['authentication_method'] = '2fa'
if request.user.is_authenticated:
user = request.user
else:
user = self.get_part_login_user(request)
self.delete_part_login(request)
return user
```
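For the backend above to be consulted during login, a project would need to list it in Django's `AUTHENTICATION_BACKENDS` setting. A minimal, hypothetical settings fragment follows; the dotted path simply mirrors the `modal_2fa/backends.py` layout shown here and should be adjusted to the actual project.
```python
# Hypothetical settings.py fragment; the dotted path is an assumption based on the file layout above.
AUTHENTICATION_BACKENDS = [
    'modal_2fa.backends.CookieBackend',
]
```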
#### File: django-2fa/modal_2fa/customise.py
```python
from django.contrib.auth.views import PasswordResetView
from .urls import make_url_patterns, pattern_dict
from .auth import Modal2FA
from .models import RememberDeviceCookie
class CustomiseAuth:
invite_email_template = 'modal_2fa/emails/invite.html'
invite_txt_email_template = 'modal_2fa/emails/invite_txt.html'
invite_subject_template = 'modal_2fa/emails/invite_subject.txt'
reset_password_email_template = None
reset_password_txt_email_template = 'modal_2fa/emails/password_reset_email.html'
reset_password_subject_template = 'modal_2fa/emails/password_reset_subject.txt'
@staticmethod
def override_views():
return {}
@classmethod
def paths(cls, include_admin=False):
patterns = pattern_dict
if include_admin:
from modal_2fa.user_admin import UserAdminModal
pattern_dict['user_admin_modal'] = ('user-admin-modal/', UserAdminModal)
patterns.update(cls.override_views())
return make_url_patterns(patterns)
@staticmethod
def customise_view(view):
return
def set_attribute(self, attribute_name):
if hasattr(self, attribute_name):
setattr(self.view, attribute_name, getattr(self, attribute_name))
def __init__(self, view):
self.view = view
if isinstance(view, PasswordResetView):
self.set_attribute('email_template_name')
self.set_attribute('subject_template_name')
if isinstance(view, Modal2FA):
self.set_attribute('allowed_remember')
@staticmethod
def user_2fa_optional(user):
return True
@staticmethod
def allowed_remember(user):
return True
@staticmethod
def manage_max_cookies(user, max_number):
cookies = RememberDeviceCookie.objects.filter(user=user).order_by('last_used')
if len(cookies) >= max_number:
cookies[0].delete()
@staticmethod
def max_cookies(_user):
return 2
@classmethod
def max_cookies_already(cls, user):
if RememberDeviceCookie.objects.filter(user=user).count() >= cls.max_cookies(user):
return True
``` |
{
"source": "jonesim/django-datatables",
"score": 2
} |
#### File: django-datatables/django_datatables/helpers.py
```python
from django.urls import reverse
DUMMY_ID = 999999
simple_table = {
'dom': 't',
'no_col_search': True,
'no_footer': True,
'pageLength': 400,
'stateSave': False
}
def row_button(command, button_text, *, function='Html', button_classes='btn btn-sm', **kwargs):
rb = {
'html': (f'<button data-command="{command}" onclick="django_datatables.b_r(this)" '
f'class="{button_classes}">{button_text}</button>'),
'function': function,
}
rb.update(kwargs)
return rb
def render_replace(*, var='%1%', **kwargs):
return dict(var=var, function='Replace', **kwargs)
def get_url(url_name):
if type(url_name) == tuple:
return reverse(url_name[0], args=[*url_name[1:]])
else:
if url_name.find(str(DUMMY_ID)) == -1:
return reverse(url_name, args=[DUMMY_ID])
return url_name
def row_link(url_name, column_id):
return [render_replace(column=column_id, html=get_url(url_name), var=str(DUMMY_ID))]
```
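The helpers above build plain dictionaries and HTML strings, so their output can be inspected without a running Django project. A small sketch; the command name `edit` and the column index are illustrative values, not taken from the package:
```python
from django_datatables.helpers import row_button, render_replace

# Builds the HTML for a per-row button carrying data-command="edit",
# wired to the django_datatables.b_r click handler.
edit_button = row_button('edit', 'Edit', button_classes='btn btn-sm btn-primary')
print(edit_button['function'])  # 'Html'

# Render definition that substitutes the value of column 0 into an HTML template.
bold_name = render_replace(column=0, html='<b>%1%</b>')
print(bold_name)  # {'var': '%1%', 'function': 'Replace', 'column': 0, 'html': '<b>%1%</b>'}
```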
#### File: django_datatables/plugins/colour_rows.py
```python
from django.template.loader import render_to_string
class ColourRows:
def __init__(self, datatable, colour_dict):
if isinstance(colour_dict, list):
self.colour_dict = colour_dict
else:
self.colour_dict = [colour_dict]
for c in self.colour_dict:
c['column'] = datatable.find_column(c['column'])[1]
self.datatable = datatable
def render(self):
return render_to_string('datatables/plugins/colour_rows.html',
{'datatable': self.datatable, 'colour_dict': self.colour_dict})
``` |
{
"source": "jonesim/django-gdrive-backup",
"score": 2
} |
#### File: django-gdrive-backup/gdrive_backup/tasks.py
```python
from celery import shared_task
from .backup import Backup
@shared_task
def backup():
Backup().backup_db_and_folders()
``` |
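The shared task above can be triggered on a schedule with Celery beat. A hypothetical beat entry is sketched below; the entry name and nightly timing are examples only, and the task name follows Celery's default module.function naming for a `shared_task`.
```python
# Hypothetical Celery beat configuration; timing and entry name are illustrative.
from celery.schedules import crontab

CELERY_BEAT_SCHEDULE = {
    'nightly-gdrive-backup': {
        'task': 'gdrive_backup.tasks.backup',
        'schedule': crontab(hour=2, minute=0),  # run every day at 02:00
    },
}
```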
{
"source": "jonesim/django-menus",
"score": 2
} |
#### File: menu_examples/templatetags/example_tags.py
```python
from django import template
from menu_examples.views import setup_main_menu
register = template.Library()
@register.simple_tag(takes_context=True)
def main_menu(context):
return setup_main_menu(context['request']).render()
```
#### File: django-menus/django_menus/modal_item.py
```python
from django_modals.helper import show_modal
from django_menus.menu import MenuItem
class ModalMenuItem(MenuItem):
def __init__(self, modal_name, menu_display=None, modal_slug_args=None, **kwargs):
if not modal_slug_args:
modal_slug_args = []
elif not isinstance(modal_slug_args, list):
modal_slug_args = [modal_slug_args]
super().__init__(show_modal(modal_name, *modal_slug_args),
menu_display,
link_type=MenuItem.JAVASCRIPT,
**kwargs)
``` |
{
"source": "jonesim/django-modals",
"score": 2
} |
#### File: management/commands/import_modal_data.py
```python
from django.apps import apps
from django.core.management.base import BaseCommand
from modal_examples.import_data import import_data
class Command(BaseCommand):
def handle(self, *args, **options):
import_data(apps.get_app_config('modal_examples').path)
```
#### File: django_examples/modal_examples/tasks.py
```python
from time import sleep
from celery import shared_task
from tempfile import NamedTemporaryFile
from ajax_helpers.utils import ajax_command
from django.urls import reverse
@shared_task(bind=True)
def DemoTask(self, config=False, **kwargs):
message = 'initial'
if config:
return {'progress': True, 'message': message, 'title': 'Processing....'}
for i in range(100):
if i > 66:
message = 'last part'
elif i > 33:
message = 'middle third'
self.update_state(state='PROGRESS', meta={'current': i, 'total': 100, 'message': message, 'kwargs': kwargs})
sleep(0.01)
f = NamedTemporaryFile(delete=False)
pdf = open('/app/sample.pdf', 'rb')
f.write(pdf.read())
f.close()
return {
'download': f.name,
'commands': [ajax_command('message', text='Completed Task'),
ajax_command('close'),
ajax_command('redirect', url=reverse('get_task_file', args=(self.request.id, )))],
'type': 'application/pdf',
'filename': 'test1.pdf',
'attachment': kwargs['slug'].get('attachment') == 'True'}
```
#### File: modal_examples/views/validation.py
```python
from django.core.exceptions import ValidationError
from modal_examples.models import Company
from show_src_code.modals import ModelFormModal
from django_modals.processes import PROCESS_EDIT
from .views import MainMenuTemplateView
class ValidationExamples(MainMenuTemplateView):
template_name = 'example_views/validation.html'
class ValidationClean(ModelFormModal):
model = Company
form_fields = ['name', 'active']
process = PROCESS_EDIT
def clean(self, form, cleaned_data):
if cleaned_data['name'] == 'NA':
raise ValidationError("Can't enter NA")
```
#### File: django-modals/django_modals/helper.py
```python
import json
from base64 import urlsafe_b64encode
from ajax_helpers.templatetags.ajax_helpers import button_javascript
from django.urls import reverse, resolve, NoReverseMatch
from django.template.loader import render_to_string
from crispy_forms.layout import HTML, Div
from django.utils.safestring import mark_safe
DUMMY_SLUG = 'DUMMY-SLUG'
modal_buttons = {
'edit': '<i class="fas fa-edit"></i>',
'add': '<i class="fas fa-plus-circle p-1"></i>',
'delete': '<i class="fas fa-trash"></i>',
}
progress_bar_html = '''
<div class="progress" style="margin-top: 5px;">
<div id='file_progress_bar{}' class="progress-bar{}" role="progressbar" aria-valuenow="0"
aria-valuemin="0" aria-valuemax="100" style="width: 0%">
</div>
</div>
'''
def progress_bar(progress_id=None, css=''):
if progress_id is not None:
progress_id = '_' + str(progress_id)
else:
progress_id = ''
if css:
css = ' ' + css
return progress_bar_html.format(progress_id, css)
def make_slug(*args, make_pk=False):
slug = ''.join([str(a) for a in args])
if make_pk and '-' not in slug:
slug = 'pk-' + slug
return slug
def show_modal(modal_name, *args, base64=False, datatable=False, href=False, button=None,
button_classes='btn btn-primary mx-1', row=False, font_awesome=None):
try:
javascript = f"django_modal.show_modal('{reverse(modal_name, args=[DUMMY_SLUG])}')"
except NoReverseMatch:
javascript = f"django_modal.show_modal('{reverse(modal_name)}')"
if base64:
slug = urlsafe_b64encode(json.dumps(base64).encode('utf8')).decode('ascii')
else:
slug = make_slug(*args)
if datatable:
if base64:
slug = '%ref%'
else:
if slug:
slug += '-'
slug += 'pk-%ref%'
if row:
slug += '-row-%row%'
if href:
javascript = 'javascript:' + javascript
if button is not None:
button_text = modal_buttons.get(button, button)
if font_awesome:
button_text = f'<i class="{font_awesome}"></i> {button_text}'
javascript = f'<a {css_classes(button_classes)} href="javascript:{javascript}">{button_text}</a>'
if not slug:
slug = '-'
return javascript.replace(DUMMY_SLUG, slug)
def render_modal(template_name='django_modals/modal_base.html', **kwargs):
if 'request' in kwargs and 'modal_url' not in kwargs:
kwargs['modal_url'] = kwargs['request'].get_full_path()
button_kwargs = {a: kwargs[a] for a in ['button_group_class', 'button_container_class'] if a in kwargs}
kwargs['contents'] = mark_safe(kwargs.get('contents', '') + modal_button_group(kwargs.get('modal_buttons', None),
**button_kwargs))
return render_to_string(template_name, kwargs)
def css_classes(classes):
return f' class="{classes}"' if classes else ''
def crispy_modal_link(modal_name, text, div=False, div_classes='', button_classes=''):
link = HTML(show_modal(modal_name, button=text, button_classes=button_classes))
if div:
link = Div(link, css_class=div_classes)
return link
def modal_button(title, commands, css_class='btn-primary'):
if type(commands) == str:
params = [{'function': commands}]
elif type(commands) == dict:
params = [commands]
else:
params = commands
return mark_safe(f'''<button onclick='django_modal.process_commands_lock({json.dumps(params)})'
class="btn {css_class}">{title}</button>''')
def modal_button_method(title, method_name, css_class='btn-primary', **kwargs):
return modal_button(title, dict(function='post_modal', button=dict(button=method_name, **kwargs)), css_class)
def modal_button_group(buttons=None, button_container_class=None, button_group_class='btn-group'):
group_class = f'form-buttons{" " + button_container_class if button_container_class else ""}'
if type(buttons) == str:
return f'<div class="{group_class}"><div class="{button_group_class}">{buttons}</div></div>'
if buttons:
return (f'<div class="{group_class}">'
f'<div class="{button_group_class}">{"".join(buttons)}</div></div>')
return ''
def modal_delete_javascript(url_name, pk):
return mark_safe(button_javascript('delete', url_name=url_name, url_args=[pk]).replace('"', "'"))
def reverse_modal(modal_name, slug='-', base64=None):
if base64:
slug = urlsafe_b64encode(json.dumps(base64).encode('utf8')).decode('ascii')
try:
return reverse(modal_name, args=[slug])
except NoReverseMatch:
if slug == '-':
return reverse(modal_name)
else:
raise NoReverseMatch
def ajax_modal_redirect(modal_name, slug='-', base64=None):
return [{'function': 'close'}, {'function': 'show_modal', 'modal': reverse_modal(modal_name, slug=slug,
base64=base64)}]
def ajax_modal_replace(request, modal_name=None, modal_class=None, slug='-', ajax_function='overwrite_modal', **kwargs):
request.method = 'get'
if modal_class:
view_class = modal_class
else:
request.path = reverse_modal(modal_name, slug)
view_class = resolve(request.path).func.view_class
return {'function': ajax_function, 'html': view_class.as_view()(request, slug=slug, **kwargs).rendered_content}
```
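Most of the helpers above are plain string builders, so they can be exercised directly. An illustrative sketch; the `confirm_delete` method name is a placeholder for a `button_...` handler on a modal view, not an API taken from the package:
```python
from django_modals.helper import make_slug, modal_button, modal_button_method, modal_button_group

slug = make_slug('pk', 42)  # 'pk42' - positional args joined into a slug string
ok = modal_button('OK', 'close', 'btn-success')  # button that runs the close command
delete = modal_button_method('Delete', 'confirm_delete', 'btn-danger')  # posts button=confirm_delete
buttons_html = modal_button_group([ok, delete])  # wraps both buttons in the form-buttons div
```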
#### File: django-modals/django_modals/mixins.py
```python
from django.template.loader import render_to_string
class ScratchPad:
def button_scratchpad(self, *, scratchpad):
if scratchpad == 0:
return self.command_response(
'append_to', html=render_to_string('django_modals/scratchpad.html'),
check_id='scratchpad', selector='body'
)
else:
return self.command_response('null')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['scratchpad'] = True
return context
```
#### File: django-modals/django_modals/modals.py
```python
import base64
import json
import inspect
from django.forms.fields import Field
from django.forms.models import modelform_factory, fields_for_model
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic.base import TemplateResponseMixin, TemplateView
from django.views.generic.edit import BaseFormView
from django.views.generic.detail import SingleObjectMixin
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.decorators import method_decorator
from ajax_helpers.mixins import AjaxHelpers
from . import processes
from .forms import ModelCrispyForm
from .helper import render_modal, modal_button, modal_button_group, ajax_modal_redirect, modal_button_method, \
ajax_modal_replace
class ModalException(Exception):
pass
@method_decorator(ensure_csrf_cookie, name='dispatch')
class BaseModalMixin(AjaxHelpers):
kwargs: dict
button_group_class = None
button_container_class = None
menu_config = {'href_format': "javascript:django_modal.show_modal('{}')"}
ajax_commands = ['button', 'select2', 'ajax']
button_group_css = None
size = 'lg'
no_parent_template = 'django_modals/blank_page_form.html'
def __init__(self):
super().__init__()
if not hasattr(self, 'modal_mode'):
self.modal_mode = True
self.slug = {}
def get_context_data(self, **kwargs):
# noinspection PyUnresolvedReferences
context = super().get_context_data(**kwargs) if hasattr(super(), 'get_context_data') else {}
context.update({'request': self.request, 'slug': self.slug})
context['modal_url'] = kwargs.get('modal_url', self.request.get_full_path())
context['no_header_x'] = getattr(self, 'no_header_x', None)
context['center_header'] = kwargs.get('center_header', getattr(self, 'center_header', None))
context['size'] = kwargs.get('size', self.size)
context['modal_type'] = self.kwargs.get('modal_type')
return context
def split_slug(self, kwargs):
if 'slug' in kwargs and kwargs['slug'] != '-':
s = kwargs['slug'].split('-')
if len(s) == 1:
self.slug['pk'] = s[0]
else:
self.slug.update({s[k]: s[k+1] for k in range(0, int(len(s)-1), 2)})
if 'pk' in self.slug:
self.kwargs['pk'] = self.slug['pk']
def process_slug_kwargs(self):
return True
def split_base64(self, kwargs):
if 'base64' in kwargs:
base64_data = json.loads(base64.urlsafe_b64decode(self.kwargs['base64']))
if not isinstance(base64_data, dict):
base64_data = {'base64': base64_data}
self.slug.update(base64_data)
def dispatch(self, request, *args, **kwargs):
self.split_slug(kwargs)
self.split_base64(kwargs)
if self.process_slug_kwargs():
# noinspection PyUnresolvedReferences
return super().dispatch(request, *args, **self.kwargs)
else:
raise ModalException('User does not have permission')
def button_refresh_modal(self, **_kwargs):
return self.command_response(ajax_modal_replace(self.request, modal_class=self.__class__,
slug=self.kwargs.get('slug', '-')))
def button_group(self):
button_kwargs = {
'button_group_class': self.kwargs.get('button_group_class', self.button_group_class),
'button_container_class': self.kwargs.get('button_container_class', self.button_container_class)
}
button_kwargs = {k: v for k, v in button_kwargs.items() if v}
return modal_button_group(self.buttons, **button_kwargs)
def check_for_background_page(self, context):
if not self.request.is_ajax() and self.modal_mode:
context['modal_type'] = 'no-parent'
context['no_header_x'] = True
context['form'] = render_modal(template_name=self.template_name, **context)
# noinspection PyAttributeOutsideInit
self.template_name = self.no_parent_template
def modal_replace(self, modal_name=None, modal_class=None, slug='-', **kwargs):
return self.command_response(ajax_modal_replace(self.request, modal_name, slug=slug,
modal_class=modal_class, **kwargs))
def message(self, message, title=None, **modal_kwargs):
if title is not None:
modal_kwargs['modal_title'] = title
return self.modal_replace(modal_class=Modal, message=message, ajax_function='modal_html', **modal_kwargs)
def confirm(self, message, title=None, button_group_type='confirm', **kwargs):
return self.message(message, title=title, button_group_type=button_group_type, **kwargs)
def modal_redirect(self, modal_name, slug='-'):
return self.command_response(ajax_modal_redirect(modal_name, slug))
class BaseModal(BaseModalMixin, TemplateView):
template_name = 'django_modals/modal_base.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['header_title'] = kwargs.get('modal_title', getattr(self, 'modal_title', None))
self.check_for_background_page(context)
return context
class Modal(BaseModal):
def modal_content(self):
return self.kwargs.get('message', '')
def get_modal_buttons(self):
if 'buttons' in self.kwargs:
return self.kwargs['buttons']
button_group_type = self.kwargs.get('button_group_type')
if button_group_type == 'confirm':
return [
modal_button_method('Confirm', self.kwargs.get('button_function', 'confirm'), 'btn-success'),
modal_button('Cancel', 'close', 'btn-secondary')
]
elif button_group_type == 'yes_cancel':
return [
modal_button_method('Yes', self.kwargs.get('button_function', 'confirm'), 'btn-danger'),
modal_button('Cancel', 'close', 'btn-success')
]
else:
return [modal_button('OK', 'close', 'btn-success')]
@property
def extra_context(self):
if not self._extra_content:
modal_content = self.modal_content()
if not self.buttons:
self.buttons = self.get_modal_buttons()
self._extra_content = {'form': mark_safe(modal_content + self.button_group())}
return self._extra_content
def __init__(self):
if not hasattr(self, 'buttons'):
self.buttons = []
self._extra_content = None
super().__init__()
class TemplateModal(Modal):
modal_template = None
def modal_context(self):
context = self.kwargs.get('context', {})
return context
def modal_content(self):
return render_to_string(self.modal_template, self.modal_context())
def __init__(self, modal_template=None, modal_title=None, size=None, **kwargs):
# These kwargs will be overwritten if called as_view()
self.kwargs = kwargs
if size:
self.size = size
if modal_title:
self.modal_title = modal_title
if modal_template:
self.modal_template = modal_template
super().__init__()
def modal_html(self, request):
self.request = request
context = self.get_context_data()
if 'message' in self.kwargs:
context['message'] = self.kwargs['message']
return render_to_string(self.template_name, context)
class FormModalMixin(BaseModalMixin):
template_name = 'django_modals/modal_base.html'
def form_invalid(self, form):
if self.request.GET.get('formonly', False):
form = self.get_form()
return HttpResponse(str(form))
return self.refresh_form(form)
def post_save(self, created):
pass
def form_valid(self, form):
org_id = self.object.id if hasattr(self, 'object') else None
save_function = getattr(form, 'save', None)
if save_function:
save_function()
self.post_save(created=org_id is None)
if not self.response_commands:
self.add_command('reload')
return self.command_response()
def refresh_form(self, form):
self.add_command('html', selector=f'#{form.helper.form_id}', parent=True, html=str(form))
return self.command_response('modal_refresh_trigger', selector=f'#{form.helper.form_id}')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['css'] = 'modal'
if context['form']:
context['header_title'] = context['form'].get_title()
else:
context['form'] = kwargs['message']
self.check_for_background_page(context)
return context
def __init__(self, *args, **kwargs):
if not hasattr(self, 'process'):
self.process = None
# noinspection PyArgumentList
super().__init__(*args, **kwargs)
def button_make_edit(self, **_kwargs):
self.slug['modal'] = 'editdelete'
new_slug = '-'.join([f'{k}-{v}' for k, v in self.slug.items()])
self.request.method = 'GET'
self.process = processes.PROCESS_EDIT_DELETE
self.request.path = reverse(self.request.resolver_match.url_name, kwargs={'slug': new_slug})
return self.command_response('overwrite_modal',
html=render_to_string(self.template_name, self.get_context_data()))
def button_refresh_modal(self, **kwargs):
if self.slug.get('readonly') or kwargs.get('whole_modal'):
return super().button_refresh_modal()
else:
form = self.get_form()
form.clear_errors()
return self.form_invalid(form)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['request_user'] = self.request.user
kwargs['no_buttons'] = self.request.GET.get('no_buttons')
if hasattr(self, 'form_setup') and callable(self.form_setup):
kwargs['form_setup'] = self.form_setup
if hasattr(self, 'clean') and callable(self.clean):
kwargs['clean'] = self.clean
kwargs.update({k: getattr(self, k, None) for k in ['modal_title', 'slug']})
if hasattr(self, 'helper_class'):
kwargs['helper_class'] = self.helper_class
kwargs['process'] = self.process
return kwargs
class FormModal(FormModalMixin, TemplateResponseMixin, BaseFormView):
pass
class ProcessFormFields:
def __init__(self, form_fields, widgets=None, field_classes=None, labels=None, help_texts=None,
error_messages=None):
self.fields = []
self.widgets = widgets if widgets else {}
self.labels = labels if labels else {}
self.help_texts = help_texts if help_texts else {}
self.error_messages = error_messages if error_messages else {}
self.field_classes = field_classes if field_classes else {}
self.layout_field_classes = {}
self.layout_field_params = {}
for f in form_fields:
if type(f) == tuple:
self.fields.append(f[0])
param_dict = dict(f[1])
for k in f[1]:
if k == 'widget':
self.widgets[f[0]] = param_dict.pop(k)
if k == 'label':
self.labels[f[0]] = param_dict.pop(k)
if k == 'help_text':
self.help_texts[f[0]] = param_dict.pop(k)
if k == 'error_messages':
self.error_messages[f[0]] = param_dict.pop(k)
if k == 'layout_field_class':
self.layout_field_classes[f[0]] = param_dict.pop(k)
if param_dict:
self.layout_field_params[f[0]] = param_dict
else:
self.fields.append(f)
def form_init_kwargs(self):
return {f: getattr(self, f) for f in ['layout_field_classes', 'layout_field_params'] if getattr(self, f, None)}
def extra_kwargs(self):
return {f: getattr(self, f) for f in ['widgets', 'field_classes', 'labels', 'help_texts',
'error_messages'] if getattr(self, f, None)}
class ModelFormModal(SingleObjectMixin, FormModal):
form_fields = []
template_name = 'django_modals/modal_base.html'
base_form = ModelCrispyForm
delete_message = 'Are you sure you want to delete?'
delete_title = 'Warning'
field_classes = None
permission_delete = processes.PERMISSION_DISABLE
permission_edit = processes.PERMISSION_OFF
permission_view = processes.PERMISSION_OFF
permission_create = processes.PERMISSION_OFF
@staticmethod
def formfield_callback(f, **kwargs):
form_class = kwargs.get('form_class')
if isinstance(form_class, Field):
if hasattr(form_class, 'field_setup'):
# noinspection PyCallingNonCallable
form_class.field_setup(f)
return form_class
elif form_class:
return form_class(**kwargs)
return f.formfield(**kwargs)
def get_form_class(self):
if not self.form_class:
processed_form_fields = ProcessFormFields(self.form_fields, widgets=getattr(self, 'widgets', None),
field_classes=getattr(self, 'field_classes', None),
labels=getattr(self, 'labels', None),
help_texts=getattr(self, 'help_texts', None),
error_messages=getattr(self, 'error_messages', None))
self.form_init_args = processed_form_fields.form_init_kwargs()
self.form_class = modelform_factory(self.model, form=self.base_form, fields=processed_form_fields.fields,
formfield_callback=self.formfield_callback,
**processed_form_fields.extra_kwargs())
return self.form_class
def __init__(self, *args, **kwargs):
self.form_init_args = {}
super().__init__(*args, **kwargs)
self.object = None
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
if hasattr(self, 'object'):
kwargs.update({'instance': self.object})
kwargs.update(self.form_init_args)
return kwargs
def object_delete(self):
pass
def button_confirm_delete(self, **_kwargs):
if self.process in [processes.PROCESS_DELETE, processes.PROCESS_EDIT_DELETE]:
self.object.delete()
self.object_delete()
if not self.response_commands:
self.add_command('close', no_refresh=True)
self.add_command('reload')
return self.command_response()
def button_delete(self, **_kwargs):
return self.confirm(self.delete_message, self.delete_title, button_function='confirm_delete',
button_group_type='yes_cancel', size='md')
@staticmethod
def user_has_perm(cls_or_instance, user, process):
permission_type = getattr(cls_or_instance, processes.process_data[process].class_attribute)
if permission_type == processes.PERMISSION_METHOD:
# If permission method is not a staticmethod and function is called by class rather than instance
# send None instead of self
if inspect.isclass(cls_or_instance) and len(inspect.signature(cls_or_instance.permission).parameters) == 3:
permission = cls_or_instance.permission(None, user, process)
else:
permission = cls_or_instance.permission(user, process)
elif permission_type == processes.PERMISSION_OFF:
permission = True
elif permission_type == processes.PERMISSION_DISABLE:
permission = False
elif permission_type == processes.PERMISSION_AUTHENTICATED:
permission = user.is_authenticated
elif permission_type == processes.PERMISSION_STAFF:
permission = user.is_staff or user.is_superuser
else:
# noinspection PyProtectedMember
perms = [f'{cls_or_instance.model._meta.app_label}.{p}_{cls_or_instance.model._meta.model_name}'
for p in processes.process_data[process].django_permission]
permission = user.has_perms(perms)
return permission
def get_process(self, user, process):
while True:
permission = self.user_has_perm(self, user, process)
if permission:
break
process = processes.process_data[process].fallback
if not process:
break
return permission, process
def get_model(self):
pass
def get_queryset(self):
query = super().get_queryset()
if hasattr(self.model, 'query_filter'):
return self.model.query_filter(query, self.request, modal=self)
return query
def process_slug_kwargs(self):
if 'pk' not in self.slug:
self.process = processes.PROCESS_CREATE
elif 'modal' in self.slug:
self.process = processes.modal_url_type[self.slug['modal']]
else:
if self.process is None:
self.process = processes.PROCESS_EDIT_DELETE
if self.model is None:
if self.form_class:
self.model = self.form_class.get_model(self.slug)
else:
self.model = self.get_model()
if 'pk' in self.kwargs:
self.object = self.get_object()
else:
self.object = self.model()
# noinspection PyProtectedMember
fields = self.model._meta.get_fields()
field_dict = {}
for f in fields:
field_dict[f.name.lower()] = f
for i in self.slug:
if i in field_dict and field_dict[i].many_to_many:
self.initial[i] = [self.slug[i]]
else:
setattr(self.object, i, self.slug[i])
has_perm, self.process = self.get_process(self.request.user, self.process)
return has_perm
def select2_ajax_search(self, page_len=10, filter_field=None, filter_search='istartswith', search=None, page=None,
extra_filter=None, **_kwargs):
field_name = inspect.stack()[1][3][len('select2_'):]
field = fields_for_model(self.model, field_classes=self.field_classes, fields=[field_name],
formfield_callback=self.formfield_callback)[field_name]
if filter_field and search:
query_filter = {f'{filter_field}__{filter_search}': search}
else:
query_filter = {}
if extra_filter:
query_filter.update(extra_filter)
if hasattr(field, 'model'):
# noinspection PyUnresolvedReferences
choices = field.model.objects.filter(**query_filter)
else:
choices = field.choices.queryset.filter(**query_filter)
if page:
choices = choices[page_len * (page - 1): page_len * page + 1]
if hasattr(field, 'select_str'):
# noinspection PyCallingNonCallable
results = [{'id': str(c.id), 'text': field.select_str(c)} for c in choices[:page_len]]
else:
results = [{'id': str(c.id), 'text': str(c)} for c in choices[:page_len]]
return JsonResponse({'results': results, 'pagination': {'more': len(choices) > len(results)}})
class MultiForm:
def __init__(self, model, fields, form_id=None, initial=None, widgets=None, **kwargs):
self.model = model
self.fields = fields
self.kwargs = kwargs
self.form_id = form_id
self.initial = initial if initial else {}
self.widgets = widgets if widgets else {}
def make_form_id(self, used_ids):
if not self.form_id:
self.form_id = self.model.__name__ + 'Form'
if self.form_id in used_ids:
self.form_id += '_{}'
count = 1
while self.form_id.format(count) in used_ids:
count += 1
self.form_id = self.form_id.format(count)
used_ids.append(self.form_id)
def get_kwargs(self):
kwargs = {'form_id': self.form_id, 'initial': self.initial, 'no_buttons': True}
kwargs.update(self.kwargs)
return kwargs
class MultiFormModal(BaseModal):
template_name = 'django_modals/multi_form.html'
modal_title = ''
base_form = ModelCrispyForm
forms = []
menu_config = {'href_format': "javascript:django_modal.show_modal('{}')"}
def get_form_classes(self):
for f in self.forms:
processed_form_fields = ProcessFormFields(f.fields, widgets=f.widgets)
self.form_setup_args.append({
'form_class': modelform_factory(f.model, form=self.base_form, fields=processed_form_fields.fields,
**processed_form_fields.extra_kwargs()),
'processed_form_fields': processed_form_fields
})
def __init__(self, *args, **kwargs):
# noinspection PyArgumentList
super().__init__(*args, **kwargs)
self.form_setup_args = []
def get_form_kwargs(self):
all_kwargs = []
used_ids = []
if self.request.method in ('POST', 'PUT'):
form_data = json.loads(self.request.body)
else:
form_data = {}
for f in self.forms:
f.make_form_id(used_ids)
kwargs = f.get_kwargs()
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': form_data[f.form_id],
# 'files': self.request.FILES,
})
if hasattr(self, 'form_setup') and callable(self.form_setup):
kwargs['form_setup'] = self.form_setup
all_kwargs.append(kwargs)
all_kwargs[-1]['no_buttons'] = False
return all_kwargs
def get_forms(self):
self.get_form_classes()
form_kwargs = self.get_form_kwargs()
forms = []
for c, s in enumerate(self.form_setup_args):
kwargs = form_kwargs[c]
kwargs.update(s['processed_form_fields'].form_init_kwargs())
form = s['form_class'](**kwargs)
for field_name, field in form.fields.items():
field.widget.attrs.update({'id': f'id_{c}_{field_name}'})
forms.append(form)
return forms
def get_context_data(self, **kwargs):
self.extra_context = {
'forms': self.get_forms(),
'header_title': self.modal_title
}
context = super().get_context_data(**kwargs)
return context
def refresh_form(self, forms):
self.add_command('html', selector=f'#{forms[0].form_id}', parent=True,
html=' '.join([str(f) for f in forms]))
return self.command_response('modal_refresh_trigger', selector=f'#{forms[0].form_id}')
def forms_valid(self, forms):
pass
def post(self, request, *args, **kwargs):
post_response = super().post(request, *args, **kwargs)
if post_response:
return post_response
forms = self.get_forms()
for f in forms:
if not f.is_valid():
return self.refresh_form(forms)
return self.forms_valid({f.helper.form_id: f for f in forms})
``` |
{
"source": "jonesim/show-src-code",
"score": 2
} |
#### File: show-src-code/show_src_code/modals.py
```python
import django_modals.modals as modals
from django.utils.safestring import mark_safe
from django.utils.module_loading import import_string
from django_modals.modals import Modal
from .source_code import template_source, html_code
class BaseSourceCodeModal(Modal):
size = 'xl'
modal_title = 'Source Code'
code = {'template_src': 'crud'}
def modal_content(self):
code = ''
if 'pk' in self.slug:
# Try to get callable from dictionary
function_class = self.code.get(self.slug['pk'])
if callable(function_class):
code = html_code(function_class)
else:
a = import_string(self.kwargs['pk'])
code = html_code(a)
if 'template' in self.slug:
if 'templateSection' in self.slug:
code += template_source(self.slug['template'].replace(':', '/'), self.slug['templateSection'])
else:
code += template_source(self.slug['template'].replace(':', '/'))
return code
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['script'] = mark_safe('hljs.highlightAll();')
return context
class CodeMixin:
def get_context_data(self, **kwargs):
# noinspection PyUnresolvedReferences
context = super().get_context_data(**kwargs)
context['footer'] = mark_safe('''<div class="p-1" style="text-align:right;background-color:#efefef">
<button class='btn btn-sm btn-outline-secondary' onclick='django_modal.send_inputs({"button": "code"})'>
<i class="fab fa-python"></i> Source Code</button></div>''')
return context
def button_code(self, **_kwargs):
code = ''
try:
if hasattr(self, 'form_class') and self.form_class is not None:
code = html_code(self.form_class)
except OSError:
pass
if isinstance(self, TemplateModal):
code += template_source(self.modal_template)
code += html_code(self.__class__)
# noinspection PyUnresolvedReferences
return self.message(message=code, size='xl', title='Source Code', script=mark_safe('hljs.highlightAll();'))
class FormModal(CodeMixin, modals.FormModal):
pass
class ModelFormModal(CodeMixin, modals.ModelFormModal):
pass
class MultiFormModal(CodeMixin, modals.MultiFormModal):
pass
class BaseModal(CodeMixin, modals.BaseModal):
pass
class Modal(CodeMixin, modals.Modal):
pass
class TemplateModal(CodeMixin, modals.TemplateModal):
pass
```
#### File: show_src_code/templatetags/src_code.py
```python
from django import template
from django.templatetags.static import static
from django.utils.safestring import mark_safe
from django_modals.helper import show_modal as show_modal_helper, make_slug
register = template.Library()
@register.simple_tag(takes_context=True)
def show_src_code(context, modal, *args, button='Source Code', button_classes='btn btn-sm btn-outline-secondary',
**kwargs):
slug = make_slug(*args, make_pk=True)
slug += '-template-' + context.template.name.replace('/', ':')
return mark_safe(show_modal_helper(modal, slug, button=button, button_classes=button_classes, **kwargs))
@register.simple_tag
def highlightjs_includes():
return mark_safe(
f'<link rel="stylesheet" href={static("show_src_code/highlightjs.css")}>'
f'<script src="{static("show_src_code/highlightjs.js")}"></script>'
)
@register.simple_tag
def highlightjs_includes_cdn():
return mark_safe(
f'<link rel="stylesheet" href="https://unpkg.com/@highlightjs/[email protected]/styles/default.min.css">'
f'<script src="https://unpkg.com/@highlightjs/[email protected]/highlight.min.js"></script>'
)
``` |
{
"source": "jonesinator/crabigator",
"score": 3
} |
#### File: crabigator/tests/test_wanikani.py
```python
from __future__ import print_function
import os
from unittest import TestCase
from crabigator.wanikani import WaniKani, WaniKaniError
# TestCase exposes too many public methods. Disable the pylint warning for it.
# pylint: disable=too-many-public-methods
class TestWaniKani(TestCase):
"""Unit test cases for the WaniKani API wrapper."""
@classmethod
def test_wanikani(cls):
"""Test all public methods in crabigator.wanikani."""
wanikani = WaniKani(os.environ['WANIKANI_API_KEY'])
print(wanikani.user_information)
print(wanikani.study_queue)
print(wanikani.level_progression)
print(wanikani.srs_distribution)
print(wanikani.recent_unlocks)
print(wanikani.get_recent_unlocks(3))
print(wanikani.critical_items)
print(wanikani.get_recent_unlocks(65))
print(wanikani.radicals)
print(wanikani.get_radicals([1, 2]))
print(wanikani.kanji)
print(wanikani.get_kanji([1, 2]))
print(wanikani.vocabulary)
print(wanikani.get_vocabulary([1, 2]))
try:
wanikani.get_vocabulary([9999])
except WaniKaniError as ex:
print(ex)
``` |
{
"source": "joneslxj/SEM_segmenation",
"score": 2
} |
#### File: forWeb_SEM/itools/forWeb_image_process.py
```python
import os
import cv2
import collections
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from scipy import signal
# from multiprocessing import process
# from win64api import GetSystemMetrics
#-----------------------------------------------------------------------------------------------------------------------
learning_rate = 0.0001 # estimator works,better than cnn. on 20180711
batch_size = 80
display_step = 100
n_hidden_1 = 256
n_hidden_2 = 128
num_classes = 3
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
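# Note on the threshold argument used throughout this module: it is a dict keyed 1..4,
# mapping each phase to an inclusive [low, high] grey-level range -- 1: pore, 2: organic,
# 3: matrix, 4: pyrite (e.g. the ranges quoted later in this file:
# {1: [1, 110], 2: [111, 160], 3: [161, 199], 4: [200, 255]}).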
def image_process_1(input_file,output_path,threshold):
kernel = np.ones((3, 3), np.float32) / 10
kernel[1, 1] = 0.2
# for file in os.listdir(input_path):
# if file.endswith(".tif"):
image = cv2.imread(os.path.join(input_file), cv2.IMREAD_GRAYSCALE)
# image1 = cv2.filter2D(image, -1, kernel) # smooth.
image1 = cv2.medianBlur(image, 5) # median filtering
# image1 = cv2.bilateralFilter(image,5,115,115) # bilateral filtering
kernel = np.ones((3, 3), np.uint8)
kernel[1, 1] = 0
image_por = cv2.inRange(image1, threshold[1][0]-1, threshold[1][1])
image_por[np.nonzero(image_por)] = threshold[1][0]
# image_por = cv2.morphologyEx(image_por,cv2.MORPH_CLOSE,kernel)
image_org = cv2.inRange(image1, threshold[2][0], threshold[2][1])
image_org[np.nonzero(image_org)] = threshold[2][0]
# image_org = cv2.morphologyEx(image_org,cv2.MORPH_CLOSE,kernel)
image_mat = cv2.inRange(image1, threshold[3][0], threshold[3][1])
image_mat[np.nonzero(image_mat)] = 1 #threshold[3][1]
# image_mat = cv2.morphologyEx(image_mat, cv2.MORPH_CLOSE, kernel) # remove small pixel...
image_mat1 = signal.convolve2d(image_mat, kernel, mode='same', fillvalue=1) # remove single pixel
image_mat2 = image_mat.copy()
image_mat2[np.logical_and(image_mat1 == 8, image_mat == 0)] = threshold[3][1]
image_mat = image_mat2.copy()
image_mat[np.nonzero(image_mat)] = threshold[3][1]
image_pyr = cv2.inRange(image1, threshold[4][0], threshold[4][1])
image_pyr[np.nonzero(image_pyr)] = threshold[4][1]
# image_pyr = cv2.morphologyEx(image_pyr,cv2.MORPH_CLOSE,kernel)
image_por[image_por+image_mat==np.uint8(threshold[1][0]+threshold[3][1])] = 0
image_org[image_org+image_mat==np.uint8(threshold[2][0]+threshold[3][1])] = 0
image_pyr[image_pyr+image_mat==np.uint8(threshold[4][1]+threshold[3][1])] = 0
image2 = image_por + image_org + image_mat + image_pyr
fig1 = plt.figure(1) # orginal image
plt.imshow(image1, cmap='gray')
fig1.suptitle('Input image')
fig2 = plt.figure(2) # Pyrite image
plt.imshow(image_pyr, cmap='gray')
fig2.suptitle('Pyrite')
fig3 = plt.figure(3) # Matrix image
plt.imshow(image_mat, cmap='gray')
fig3.suptitle('Matrix')
fig4 = plt.figure(4) # Organic image
plt.imshow(image_org, cmap='gray')
fig4.suptitle('Organic')
fig5 = plt.figure(5) # Porosity image
plt.imshow(image_por, cmap='gray')
fig5.suptitle('Pore')
fig6 = plt.figure(6)
plt.imshow(image2, cmap=plt.cm.get_cmap('jet', 4)) # jet
plt.colorbar()
fig6.suptitle('Classified')
# plt.show()
cv2.imwrite(os.path.join(output_path, 'img_RawImg.tif'), image1)
cv2.imwrite(os.path.join(output_path, 'img_Pyrite.tif'), image_pyr)
cv2.imwrite(os.path.join(output_path, 'img_Matrix.tif'), image_mat)
cv2.imwrite(os.path.join(output_path, 'img_Organic.tif'), image_org)
cv2.imwrite(os.path.join(output_path, 'img_Pore.tif'), image_por)
cv2.imwrite(os.path.join(output_path, 'img_Classfied_1.tif'), image2)
# plt.imsave(os.path.join(output_path, 'img_Classfied_1.tif'), image2, cmap=plt.cm.get_cmap('jet', 4))
pyrite_pixel = np.count_nonzero(image_pyr != 0)
matrix_pixel = np.count_nonzero(image_mat != 0)
organic_pixel = np.count_nonzero(image_org != 0)
pore_pixel = np.count_nonzero(image_por != 0)
total_pixel = np.prod(image.shape)
print('\nworking on ', input_file)
print('\nPyrite: {:2.2f}%'.format(100 * pyrite_pixel / total_pixel))
print('Matrix: {:2.2f}%'.format(100 * matrix_pixel / total_pixel))
print('Organic:{:2.2f}%'.format(100 * organic_pixel / total_pixel))
print('Pore: {:2.2f}%\n'.format(100 * pore_pixel / total_pixel))
def gen_training_dataset(input_path0,input_path1,input_pre_path,output_path,threshold,n_hw): # organic pore or matrix pore / MNIST way
# opore / organic / matrix / pyrite
n_height = n_hw[0]
n_width = n_hw[1]
temp = os.path.join(output_path, 'temp')
pred_path = os.path.join(output_path, 'predict')
inputs = __image_load(output_path, img_ext='tif')
targets = __image_load(temp, img_ext='tif')
predicts = __image_load(pred_path, img_ext='tif')
x = np.array(inputs)
x0 = np.array(predicts)
y = np.zeros(shape=(len(inputs),3),dtype=np.int)
z = np.zeros(shape=len(inputs),dtype=np.int)
for i in range(len(inputs)):
p_pyrite = np.count_nonzero(targets[i] == threshold[4][1])
p_matrix = np.count_nonzero(targets[i] == threshold[3][1]) #
p_organic = np.count_nonzero(targets[i] == threshold[2][1])
p_pore = np.count_nonzero(targets[i] == threshold[1][1])
if p_pyrite>=int(0.5*n_height*n_width): # image_mat[np.nonzero(image_mat)] = 255 np.count_nonzero(image_pyr != 0)
y[i] = [0, 0, 1]
z[i] = 2
if p_pyrite<int(0.5*n_height*n_width):
if p_matrix >= int(0.85*n_height*n_width):
y[i] = [0, 1, 0]
z[i] = 1
if p_matrix < int(0.85*n_height*n_width): # need update,
y[i] = [1, 0, 0]
z[i] = 0
threshold0 = int(0.9 * len(x))
train = (np.stack(x[:threshold0]), np.stack(y[:threshold0]), np.stack(z[:threshold0]))
evaluate = (np.stack(x[threshold0:]), np.stack(y[threshold0:]), np.stack(z[threshold0:]))
predict = (np.stack(x0[0:]),0) #tuple must have at least two, so add 0
# save like MNIST
# print('check: ',type(predict),type(evaluate),evaluate[0][0:5],predict[0][0:5])
return train,evaluate,predict
def __image_load(image_path, img_ext, is_binary=False, channels=1):
images = []
    for file in sorted(os.listdir(image_path)):  # load images in order from the folder (by Jon)
if file.endswith(img_ext):
filename = os.path.join(image_path, file)
if channels == 1:
img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
if is_binary:
img = cv2.threshold(img, 127, 1, cv2.THRESH_BINARY)[1]
else: # channels == 3
img = cv2.imread(filename, cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if img.shape[0] == img.shape[1]:
img = np.array(img).reshape(-1)#.tolist()
images.append(img)
return images
#-----------------------------------------------------------------------------------------------------------------------
def weight_variable(shape):
initial = tf.truncated_normal(shape,stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1,shape=shape)
return tf.Variable(initial)
def conv2d(x,W):
return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
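# Sketch of the graph assembled in deeplearning() below: two 7x7 conv + 2x2 max-pool
# stages (32 then 64 filters), a 1024-unit fully connected layer with dropout, and a
# 3-way softmax over the patch classes produced by gen_training_dataset().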
def deeplearning(output_path,train, evaluate, predict, iteration):
# load tensorflow
size_img = predict[0][0].shape
sess = tf.InteractiveSession()
pred_path = os.path.join(output_path, 'predict')
x = tf.placeholder(tf.float32,[None,size_img[0]])
y = tf.placeholder(tf.float32,[None,3])
keep_prob = tf.placeholder(tf.float32)
    x_image = tf.reshape(x, [-1, int(np.sqrt(size_img[0])), int(np.sqrt(size_img[0])), 1])
W_conv1 = weight_variable([7,7,1,32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image,W_conv1)+b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([7,7,32,64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1,W_conv2)+b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([16*16*64,1024])
b_fc1 = bias_variable([1024])
h_pool3 = tf.reshape(h_pool2,[-1,16*16*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool3,W_fc1)+b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)
W_fc2 = weight_variable([1024,3])
b_fc2 = bias_variable([3])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop,W_fc2)+b_fc2)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y*tf.log(y_conv),reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-5).minimize(cross_entropy) #GradientDescent
correct_prediction = tf.equal(tf.argmax(y_conv,1),tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
predict0 = tf.argmax(y_conv,1)
predict1 = tf.argmax(y,1)
tf.global_variables_initializer().run()
batch0 = (1/(1+np.exp(-evaluate[0]/255)), evaluate[1], evaluate[2]) #
batch1 = (1/(1+np.exp(-predict[0]/255)), 0) #
for i in range(iteration):
idx = np.random.randint(0, train[1].shape[0], batch_size)
# idx = np.array([7389,7390,7391,7392,7393,7394,7395,7398,7399,7400])
batch = (1 / (1 + np.exp(-train[0][idx] / 255)), train[1][idx], train[2][idx]) #
# for i in range(iteration):
# print(batch[0][0:],'\n',len(batch),batch[0].shape,batch[0][0].shape, batch[1].shape,'\n',batch)
# plt.imshow(np.reshape(batch[0][0], [64, -1]), cmap='gray')
# plt.show()
# exit()
train_step.run(feed_dict={x: batch[0], y: batch[1], keep_prob: 1})
train_accuracy = accuracy.eval(feed_dict={x: batch[0], y: batch[1], keep_prob: 1.0})
# prediction0 = y_conv.eval(feed_dict={x: batch[0], y: batch[1], keep_prob: 1.0})
if i % 200 == 0:
print('step %d,training accuracy %g ' %(i,train_accuracy))
temp0 = predict0.eval(feed_dict={x:batch0[0],keep_prob:1.0}) #,y:batch0[1]
temp1 = predict1.eval(feed_dict={y:batch0[1],keep_prob:1.0}) #x:batch0[0],
correct_prediction = np.sum(temp0 == temp1) # need check output.
accuracy = 100 * correct_prediction / temp0.shape[0]
print('Evaluation results:')
print('\nreal:\n',temp1[:200],'\npreds:\n', temp0[:200], '\ni:{}, accuracy: {}%'.format(i,accuracy))
preds = np.zeros(batch1[0].shape[0])
for i in range(16): #range(batch1[0].shape[0]):
preds[i*576:(i+1)*576] = predict0.eval(feed_dict={x:batch1[0][i*576:(i+1)*576],keep_prob:1.0}) #
return preds
# print('test accuracy %g' %accuracy.eval(feed_dict={x:mnist.test.images,y_:mnist.test.labels,keep_prob:1.0}))
#-----------------------------------------------------------------------------------------------------------------------
def neural_net(x_dict):
x = x_dict['images']
layer_1 = tf.layers.dense(x,n_hidden_1)
layer_2 = tf.layers.dense(layer_1,n_hidden_2)
out_layer = tf.layers.dense(layer_2,num_classes)
return out_layer
def model_fn(features,labels,mode):
logits = neural_net(features)
pred_classes = tf.argmax(logits,axis=1)
pred_probas = tf.nn.softmax(logits)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode,predictions=pred_classes)
loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,labels=tf.cast(labels,dtype=tf.int32)))
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op,global_step=tf.train.get_global_step())
acc_op = tf.metrics.accuracy(labels=labels,predictions=pred_classes)
estim_specs = tf.estimator.EstimatorSpec(mode=mode,
predictions=pred_classes,
loss=loss_op,
train_op=train_op,
eval_metric_ops={'accuracy':acc_op})
print('feature:', features, '\nlabel:',labels,'\nmode:', mode)
return estim_specs
def deeplearning_0(train, evaluate, predict, num_steps):
# load tensorflow
num_input = predict[0].shape
n_images = 200 # for evaluating
# idx = np.random.randint(0, train[1].shape[0], batch_size)
batch = (train[0]/255, train[1], train[2]) # 1/(1+np.exp(-train[0][idx]/255))
batch0 = (evaluate[0]/255, evaluate[1], evaluate[2]) # 1/(1+np.exp(-evaluate[0][idx]/255))
batch1 = (predict[0]/255,0) #tulip
# print(batch0[0].shape,batch1[0].shape,'\n',batch0[0][0:2],'\n',num_input) # batch1[0][0:2]
for i in range(5):
input_fn = tf.estimator.inputs.numpy_input_fn(x={'images':batch[0]},y=batch[2],batch_size=batch_size,num_epochs=None,shuffle=True)
model = tf.estimator.Estimator(model_fn)
model.train(input_fn,steps=num_steps) # training
input_fn = tf.estimator.inputs.numpy_input_fn(x={'images': batch0[0][:n_images]}, shuffle=False)
pred_eval = np.array(list(model.predict(input_fn))) # evaluating
correct_prediction = np.sum(pred_eval == batch0[2][:n_images]) # need check output.
accuracy = 100 * correct_prediction / pred_eval.shape[0]
print('Evaluation results:')
print('i:',i,'\nreal:\n', batch0[2][:n_images], '\npreds:\n', pred_eval, '\naccuracy: {}%'.format(accuracy))
input_fn = tf.estimator.inputs.numpy_input_fn(x={'images':batch1[0]},shuffle=False)
preds = np.array(list(model.predict(input_fn))) # predicting
print(preds)
return preds
def image_analysis(output_path,predict,preds,threshold):
print(predict[0].shape,type(predict))
pred_path = os.path.join(output_path, 'predict')
os.makedirs(pred_path, exist_ok=True)
size_img = predict[0].shape[0]
hw_img = predict[0].shape[1]
img_group = np.zeros(predict[0].shape)
for i in range(size_img):
img_temp = predict[0][i]
image_por = cv2.inRange(img_temp, threshold[1][0]-1, threshold[1][1])
if preds[i]==0 or preds[i]==2:
image_por[np.nonzero(image_por)] = 0
else:
image_por[np.nonzero(image_por)] = 60
image_org = cv2.inRange(img_temp, threshold[2][0], threshold[2][1])
image_org[np.nonzero(image_org)] = 110
image_mat = cv2.inRange(img_temp, threshold[3][0], threshold[3][1])
if preds[i] == 0:
image_mat[np.nonzero(image_mat)] = 160
elif preds[i] == 1:
image_mat[np.nonzero(image_mat)] = 200
image_pyr = cv2.inRange(img_temp, threshold[4][0], threshold[4][1])
image_pyr[np.nonzero(image_pyr)] = 255
image0 = image_por + image_org + image_mat + image_pyr
img_group[i,:] = np.transpose(image0)
ni = int(np.sqrt(size_img))
nhw = int(np.sqrt(hw_img))
img_group = np.reshape(img_group, [ni, ni, nhw,-1]).transpose(1,0,2,3).swapaxes(1,2).reshape(ni*nhw,-1)
cv2.imwrite(os.path.join(pred_path, 'img_Outcome.tif'), img_group)
# print(type(img_group),img_group.shape)
fig1 = plt.figure(1)
plt.imshow(img_group, cmap=plt.cm.get_cmap('jet', 256)) # jet
plt.colorbar()
fig1.suptitle('AI_Classified')
plt.show()
plt.imsave(os.path.join(pred_path, 'img_Outcome_color.tif'), img_group, cmap=plt.cm.get_cmap('jet', 256))
# @numba.jit(nopython=True,parallel=True)
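# Heuristic sketch of clay_index(): the non-matrix pixels (value 0) inside the local
# window are binned into 64 angular sectors around (x0, y0); the point is flagged as
# clay when most sectors are occupied (n_per > 0.7), occupancy is roughly symmetric
# between opposite sectors (s1/s2 > 0.099), and at least one sector holds 3+ such pixels.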
def clay_index(x0,y0,img):
n_area = 64
# n_size = img.shape
num_surround = np.zeros([n_area])
idx0 = np.argwhere(img == 0)
angle0 = np.arctan2((idx0[:, 1] - y0), (idx0[:, 0] - x0))
angle0[angle0 < 0] += 2 * np.pi
idx_group = np.floor(angle0 / (2 * np.pi / n_area))
num_count = collections.Counter(idx_group)
num_key = np.array(list(num_count.keys())).astype(int)
num_value = np.array(list(num_count.values())).astype(int)
num_surround[num_key] = num_value
num_surround1 = num_surround.copy()
num_surround1[num_surround1 != 0] = 1
n_nonzeros = np.count_nonzero(num_surround != 0)
n_per = n_nonzeros / n_area
clay_index = 0
s1 = np.dot(num_surround1, np.concatenate((num_surround1[int(n_area/2+1):], num_surround1[0:int(n_area/2+1)]), axis=0))
s2 = np.dot(num_surround1, num_surround1)
bool_clay = n_per>0.7 and s1/s2>0.099 and max(num_surround)>=3
if bool_clay: #clay
clay_index = 128
# print(s1, ' _ ', s2, ' _ ', s1 / s2)
return clay_index #n_per, c_per
def classify_clay(output_path,fn_img,threshold):
    # note: uses circular (angular) analysis to separate clay from matrix minerals. Jon, 2018-07-16
#
n_pix = 1
xy_radium = 41
image = cv2.imread(os.path.join(output_path, fn_img), cv2.IMREAD_GRAYSCALE)
img_size = image.shape
image_temp1 = cv2.inRange(image, 0, 0)
image_temp1[np.nonzero(image_temp1)] = 0 # other
image_temp2 = cv2.inRange(image, threshold[3][1], threshold[3][1])
image_temp2[np.nonzero(image_temp2)] = 255 # clay and matrix
image_mineral = image_temp1 + image_temp2
img_clay = np.zeros(img_size,np.uint8)
# grain_clay = 999*np.ones([img_size, 2])
istep = n_pix//2
y_centre = istep
while y_centre < img_size[0]:
y0 = min([y_centre, xy_radium])
        y1 = max(0, y_centre - xy_radium)
        y2 = min(y_centre + xy_radium + 1, img_size[0] + 1)
x_centre = istep
while x_centre < img_size[1]:
if image_mineral[x_centre,y_centre]==255:
# img = np.zeros([2*xy_radium+1,2*xy_radium+1]) # get surround img
x0 = min([x_centre,xy_radium])
                x1 = max(0, x_centre - xy_radium)
                x2 = min(x_centre + xy_radium + 1, img_size[1] + 1)
img = image_mineral[x1:x2,y1:y2]
# print(img[xy_radium,xy_radium],image_mineral[x_centre,y_centre])
clay_index0 = clay_index(x0,y0,img) #get clay or mineral, #,n_per, c_per,
img_clay[x_centre - istep:x_centre + istep + 1, y_centre - istep:y_centre + istep + 1] = clay_index0
x_centre += n_pix
y_centre += n_pix
if y_centre%10==0:
print('finished: {0:.2f}%'.format(100*y_centre/img_size[0]))
kernel = np.ones((5,5),np.uint8)
img_clay = cv2.dilate(img_clay,kernel,iterations=2)
img_clay = cv2.medianBlur(img_clay, 5)
img_clay = cv2.erode(img_clay,kernel,iterations=2)
# img_mat = cv2.inRange(image, threshold[3][1], threshold[3][1])
image1 = image_mineral + img_clay # 255 + 128
    image_clay = cv2.inRange(image1, 127, 127)  # 255 + 128 wraps to 127 in uint8
image_clay[np.nonzero(image_clay)] = threshold[3][0] #clay
# image2 = image_mineral + image_clay
image_grain = cv2.inRange(image1, 255, 255)
image_grain[np.nonzero(image_grain)] = threshold[3][1] #grain
# image_targe = image_targe1 + image_targe2
cv2.imwrite(os.path.join(output_path, 'img_Clay.tif'), image_clay)
cv2.imwrite(os.path.join(output_path, 'img_Grain.tif'), image_grain)
# @jit(nopython=True,parallel=True) #cuda.jit #cuda.jit('void(int32[:],int32[:],int32[:],float32[:])',device=False) # nogil=True
def test_loop(n_pix,xy_radium,img_size,image_mineral):
n_area = 64
img_clay = np.zeros(img_size,np.uint8) #img_clay = np.empty_like(image_mineral) #
istep = round(n_pix/2)
# grain_clay = np.zeros([img_size,3])
# y_centre = istep
n_xpoint = round(img_size[1] / n_pix)
n_ypoint = round(img_size[0] / n_pix)
for iy in range(n_ypoint): # while y_centre < img_size[0]: #numba.prange(istep,img_size[0],n_pix):
y_centre = iy * n_pix
# start = datetime.datetime.now(dtype=np.int64)
y0 = np.min(np.array([y_centre, xy_radium]))
y1 = np.max(np.array([0, y_centre - xy_radium])) #np.max(y1 for y1 in np.array([0, y_centre - xy_radium]) if y1 >= 0)
y2 = np.min(np.array([y_centre + xy_radium + 1, img_size[0] + 1])) #np.min(y2 for y2 in np.array([y_centre + xy_radium + 1, img_size[0] + 1]) if y2 <= img_size[0] + 1)
# x_centre = istep
for ix in range(n_xpoint):
x_centre = ix * n_pix
if image_mineral[x_centre,y_centre]==255:
# img = np.zeros([2*xy_radium+1,2*xy_radium+1]) # get surround img
x0 = np.min(np.array([x_centre,xy_radium]))
x1 = np.max(np.array([0, x_centre-xy_radium])) #np.max(x1 for x1 in [0, x_centre-xy_radium] if x1 >= 0)
x2 = np.min(np.array([x_centre+xy_radium+1, img_size[1]+1])) #np.min(x2 for x2 in [x_centre+xy_radium+1, img_size[1]+1] if x2 <= img_size[1]+1)
# print(type(x1),type(x2),type(y1),type(y2))
img = image_mineral[x1:x2,y1:y2]
idx0 = np.argwhere(img == 0)
num_surround = np.zeros(n_area)
angle0 = np.arctan2((idx0[:, 1] - y0), (idx0[:, 0] - x0))
angle0[angle0 < 0] += 2 * np.pi
idx_group = np.floor(angle0 / (2 * np.pi / n_area))
num_count = collections.Counter(idx_group)
num_key = np.array(list(num_count.keys())).astype(int)
num_value = np.array(list(num_count.values())).astype(int)
num_surround[num_key] = num_value
# print(num_surround)
n_nonzeros = np.count_nonzero(num_surround != 0)
#n_nonzeros = len(np.nonzero(num_surround)[0])
s1 = np.dot(num_surround, np.concatenate((num_surround[int(n_area / 2 + 1):], num_surround[0:int(n_area / 2 + 1)]), axis=0))
s2 = np.dot(num_surround, num_surround)
# grain_clay[i,j,0] = n_nonzeros / n_area
clay_index0 = 0
bool_clay = n_nonzeros / n_area > 0.59 and s1 / s2 > 0.099 and np.max(num_surround) >= 5
if bool_clay: # clay
clay_index0 = 255
# grain_clay[i, j, 2] = 1
# print(s1, ' _ ', s2, ' _ ', s1 / s2)
img_clay[x_centre - istep:x_centre + istep + 1, y_centre - istep:y_centre + istep + 1] = clay_index0
if y_centre%10==0:
print((100*y_centre/img_size[0]),) # 'finished: {0:.2f}%'.format
return img_clay
def classify_clay_GPU(output_path,fn_img,threshold0): #
    # note: uses circular (angular) analysis to separate clay from matrix minerals. Jon, 2018-07-16
# threshold = {1:[1, 110],2:[111, 160],3:[161, 199],4:[200, 255]}
n_pix = 7
xy_radium = 39
image = cv2.imread(os.path.join(output_path, fn_img), cv2.IMREAD_GRAYSCALE)
img_size = image.shape
image_temp1 = cv2.inRange(image, 0, 0)
image_temp1[np.nonzero(image_temp1)] = 0 # other
image_temp2 = cv2.inRange(image, 199,199) #threshold0[2,1], threshold0[2,1]
image_temp2[np.nonzero(image_temp2)] = 255 # clay and matrix
image_mineral = image_temp1 + image_temp2
img_clay = test_loop(n_pix, xy_radium, img_size, image_mineral)
# init = tf.global_variables_initializer()
# with tf.Session() as sess:
# sess.run(init)
# img_clay = sess.run(img_clay)
# img_mat = cv2.inRange(image, threshold[3][1], threshold[3][1])
# img_mat[np.nonzero(img_mat)] = 255
image1 = image_mineral + img_clay
    image_clay = cv2.inRange(image1, 254, 254)  # image1 is uint8, so 255 + 255 wraps to 254; clay pixels carry 254
image_clay[np.nonzero(image_clay)] = threshold0[2,0] #clay
# image2 = image_mineral + image_clay
image_grain = cv2.inRange(image1, 255, 255)
image_grain[np.nonzero(image_grain)] = threshold0[2,1] #grain
# image_targe = image_targe1 + image_targe2
cv2.imwrite(os.path.join(output_path, 'img_Clay.tif'), image_clay)
cv2.imwrite(os.path.join(output_path, 'img_Grain.tif'), image_grain)
def calculate_component(output_path,fn_img_pore,fn_img_organic,fn_img_grain,fn_img_clay,fn_img_pyrite,threshold):
image_por = cv2.imread(os.path.join(output_path, fn_img_pore), cv2.IMREAD_GRAYSCALE)
image_org = cv2.imread(os.path.join(output_path, fn_img_organic), cv2.IMREAD_GRAYSCALE)
image_grain = cv2.imread(os.path.join(output_path, fn_img_grain), cv2.IMREAD_GRAYSCALE)
image_clay = cv2.imread(os.path.join(output_path, fn_img_clay), cv2.IMREAD_GRAYSCALE)
image_pyr = cv2.imread(os.path.join(output_path, fn_img_pyrite), cv2.IMREAD_GRAYSCALE)
image_all = image_por + image_org + image_grain + image_clay + image_pyr
cv2.imwrite(os.path.join(output_path, 'classfied_All.tif'), image_all)
plt.imsave(os.path.join(output_path, 'classfied_All(color).tif'), image_all, cmap=plt.cm.get_cmap('jet', 256))
#
total_pixel = np.prod(image_all.shape)
pyrite_pixel = np.count_nonzero(image_pyr != 0) * 100 / total_pixel
matrix_pixel = np.count_nonzero(image_grain != 0) * 100 / total_pixel
clay_pixel = np.count_nonzero(image_clay != 0) * 100 / total_pixel
organic_pixel = np.count_nonzero(image_org != 0) * 100 / total_pixel
pore_pixel = np.count_nonzero(image_por != 0) * 100 / total_pixel
print('\nPyrite: {:2.2f}%'.format(pyrite_pixel))
print('Grain: {:2.2f}%'.format(matrix_pixel))
print('Clay: {:2.2f}%'.format(clay_pixel))
print('Organic:{:2.2f}%'.format(organic_pixel))
print('T.Pore: {:2.2f}%'.format(pore_pixel))
fid = open(os.path.join(output_path,'Results.txt'), 'w')
fid.write('Pixel Percentage:\n')
fid.write('Pyrite: {:2.2f}%\n'.format(pyrite_pixel))
fid.write('Grain: {:2.2f}%\n'.format(matrix_pixel))
fid.write('Clay: {:2.2f}%\n'.format(clay_pixel))
fid.write('Organic:{:2.2f}%\n'.format(organic_pixel))
fid.write('T.Pore: {:2.2f}%\n'.format(pore_pixel))
fid.close()
dirName = os.path.basename(output_path)
fid1 = open(os.path.join(os.path.abspath(os.path.join(output_path, '..')),'Result_all.txt'),'a')
fid1.write('Sample_Name Pyrite Grain Clay Organic T.Pore\n{:s} {:2.2f}% {:2.2f}% {:2.2f}% {:2.2f}% {:2.2f}%\n'
.format(dirName,pyrite_pixel,matrix_pixel,clay_pixel,organic_pixel,pore_pixel))
fid1.close()
``` |
{
"source": "jonesmat/goldtown",
"score": 3
} |
#### File: goldtown/ui/overworld_scene.py
```python
import pygame
from pygame import locals
from pygame import Rect
class Scene():
def __init__(self):
self.viewport_rect = Rect(0, 0, 1, 1)
self.device_rect = Rect(0, 0, 1, 1)
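    # Illustrative mapping (assumed numbers): with viewport_rect = Rect(0, 0, 1000, 1000)
    # and device_rect = Rect(0, 0, 500, 500), ratio = 500 / 1000 = 0.5, so device x = 250
    # maps to viewport x = 0 + 250 / 0.5 = 500, and viewport x = 500 maps back to
    # device x = 0 + 500 * 0.5 = 250.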
    def get_vp_x_from_dev_x(self, dev_x):
        # Map a device (screen) x coordinate into viewport (world) space.
        ratio = self.device_rect.w / self.viewport_rect.w
        device_shift = dev_x - self.device_rect.x
        vp_shift = device_shift / ratio
        return self.viewport_rect.x + vp_shift
    def get_vp_y_from_dev_y(self, dev_y):
        # Map a device (screen) y coordinate into viewport (world) space.
        ratio = self.device_rect.h / self.viewport_rect.h
        device_shift = dev_y - self.device_rect.y
        vp_shift = device_shift / ratio
        return self.viewport_rect.y + vp_shift
    def get_dev_x_from_vp_x(self, vp_x):
        # Map a viewport (world) x coordinate into device (screen) space.
        ratio = self.device_rect.w / self.viewport_rect.w
        vp_shift = vp_x - self.viewport_rect.x
        dev_shift = vp_shift * ratio
        return self.device_rect.x + dev_shift
    def get_dev_y_from_vp_y(self, vp_y):
        # Map a viewport (world) y coordinate into device (screen) space.
        ratio = self.device_rect.h / self.viewport_rect.h
        vp_shift = vp_y - self.viewport_rect.y
        dev_shift = vp_shift * ratio
        return self.device_rect.y + dev_shift
class OverworldScene(Scene):
def __init__(self):
super().__init__()
self.viewport_rect = Rect(0, 0, 1000, 1000)
def update(self, clock_delta):
pass
def draw(self, surface):
self.device_rect = surface.get_rect()
surface.fill(pygame.Color(255, 255, 255))
``` |
{
"source": "jonesmat/outbreak_z",
"score": 3
} |
#### File: entities/bloodsplat/entity.py
```python
from random import randint
import pygame
from entities.base_entity import GameEntity
import entities.bloodsplat.states as states
class BloodSplat(GameEntity):
""" When Bullet meets Zombie, splat! """
SIZE = 1 # meters wide and tall
def __init__(self, game, resource_mgr):
self.blood_splat_image = pygame.image.load('entities/bloodsplat/blood_splat.png').convert_alpha()
# Set random image rotation.
rotate = pygame.transform.rotate
rotation = randint(1, 360)
GameEntity.__init__(self, game, "bloodsplat", rotate(self.blood_splat_image, rotation), resource_mgr)
self.size = BloodSplat.SIZE
# Create an instance of each of the states
fading_state = states.BloodStateFading(self)
# Add the states to the state machine
self.brain.add_state(fading_state)
```
#### File: entities/graveyard/states.py
```python
from time import time
from entities.base_entity import State
from entities.zombie.entity import Zombie
class GraveyardStateSpawning(State):
def __init__(self, graveyard, spawn_rate, resource_mgr):
# Call the base class constructor to init the State
State.__init__(self, "spawning")
# Set the entity that this State will manipulate
self.graveyard = graveyard
self.spawn_rate = spawn_rate
self.resource_mgr = resource_mgr
self.next_spawn = None
def do_actions(self):
if time() > self.next_spawn:
zombie = Zombie(self.graveyard.game, self.resource_mgr)
zombie.location = self.graveyard.location
zombie.brain.set_state("wandering")
self.graveyard.game.add_entity(zombie)
self.next_spawn = time() + self.spawn_rate
def check_conditions(self):
pass
def entry_actions(self):
self.next_spawn = time()
def exit_actions(self):
pass
```
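The states above all share the same contract; here is a minimal illustrative sketch of that shape (the `IdleState` name is invented for demonstration, only the four hook methods are taken from the states in this repository):
```python
from entities.base_entity import State
class IdleState(State):
    """Illustrative only: the four hooks the states in this project implement."""
    def __init__(self, entity):
        State.__init__(self, "idle")  # the name used with brain.set_state("idle")
        self.entity = entity
    def do_actions(self):
        pass          # per-tick behaviour while this state is active
    def check_conditions(self):
        return None   # return another state's name to trigger a transition
    def entry_actions(self):
        pass
    def exit_actions(self):
        pass
```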
#### File: entities/survivor/states.py
```python
from random import randint
from time import time
from pygame.math import Vector2
from entities.base_entity import State
from entities.bullet.entity import Bullet
class SurvivorStateExploring(State):
""" Exploring is the default survivor state when the entity
wanders around looking for supplies """
def __init__(self, survivor):
# Call the base class constructor to init the State
State.__init__(self, "exploring")
# Set the survivor that this State will manipulate
self.survivor = survivor
def do_actions(self):
# Change direction occasionally
if randint(1, 200) == 1 or self.survivor.location == self.survivor.destination:
self.survivor.destination = self.survivor.get_random_destination()
def check_conditions(self):
# If there is a nearby zombie...
zombie = self.survivor.game.get_closest_entity("zombie", self.survivor.location)
if zombie is not None:
self.survivor.zombie_id = zombie.id
return "evading"
# If there is a nearby pile of supplies, and the survivor is low,
# switch to seeking state
if self.survivor.health < 10 or self.survivor.ammo < 10:
supplycrate = self.survivor.game.get_close_entity("supplycrate", self.survivor.location)
if supplycrate is not None:
self.survivor.supplies_id = supplycrate.id
return "seeking"
return None
def entry_actions(self):
        # Start at base speed with a random destination
self.survivor.speed = self.survivor.BASE_SPEED
self.survivor.destination = self.survivor.get_random_destination()
class SurvivorStateAttacking(State):
""" Once the survivor has a Zombie target, this state handles the
targeting and shooting. """
def __init__(self, survivor, resource_mgr):
# Call the base class constructor to init the State
State.__init__(self, "attacking")
# Set the survivor that this State will manipulate
self.survivor = survivor
self.resource_mgr = resource_mgr
def shoot_zombie(self):
""" Acquires the zombie, spawns a bullet, and decrements the ammo """
zombie = self.survivor.game.get(self.survivor.zombie_id)
if zombie is not None:
bullet = Bullet(self.survivor.game, self.resource_mgr)
bullet.location = self.survivor.location
bullet.zombie_id = zombie.id
bullet.brain.set_state("seeking")
self.survivor.game.add_entity(bullet)
self.survivor.ammo -= 1
def do_actions(self):
# Occasionally take a shot at a zombie if ammo permits.
if self.survivor.ammo > 0 and randint(1, 30) == 1:
self.shoot_zombie()
def check_conditions(self):
# If there isn't a nearby zombie, switch to exploring state
zombie = self.survivor.game.get_closest_entity("zombie", self.survivor.location)
if zombie is None:
self.survivor.zombie_id = None
return "exploring"
# If there is a zombie nearby, switch to that target.
self.survivor.zombie_id = zombie.id
# If the survivor is out of ammo, switch to evading.
if self.survivor.ammo <= 0:
return "evading"
# Roughly after a second the survivor should stop shooting and
# attempt to evade for 2 seconds
if randint(1, 50) == 1:
self.survivor.evade_until = time() + 2 # 2 seconds from now
return "evading"
return None
def entry_actions(self):
# Stop moving to fire
self.survivor.destination = self.survivor.location
class SurvivorStateEvading(State):
""" Handles the survivor evading a zombie that's gotten too close. """
def __init__(self, survivor):
# Call the base class constructor to init the State
State.__init__(self, "evading")
# Set the survivor that this State will manipulate
self.survivor = survivor
def do_actions(self):
# Occasionally make sure another zombie isn't closer.
if randint(1, 10) == 1:
self.choose_new_evade_target()
def choose_new_evade_target(self):
""" Determines where the zombie is, then sets the destination for an
area in the opposite direction. """
        # Try first to find a zombie that isn't feeding; it's a lesser threat.
zombie = self.survivor.game.get_close_entity_in_state("zombie", ["wandering", "seeking"], self.survivor.location, 25)
# If you can't find a non-feeding zombie to run from, see if a feeding one is close.
if zombie is None:
zombie = self.survivor.game.get_close_entity("zombie", self.survivor.location, 25)
# If one is found, RUN!
if zombie is not None:
self.survivor.zombie_id = zombie.id
# In order to point our destination away from the zombie, we
# must get the vector to the zombie...
vec_to_zombie = zombie.location - self.survivor.location
# ... then subtract that zombie from our current location to
# go in the opposite direction.
vec_away = self.survivor.location - vec_to_zombie
# Set the destination as a slightly random vector away that isn't
# negative and also stays a bit away from the max screen size.
w_bound = self.survivor.game.scene.viewport_rect.right
h_bound = self.survivor.game.scene.viewport_rect.bottom
x_point = abs(min([vec_away.x + randint(-20, 20), w_bound - 5]))
y_point = abs(min([vec_away.y + randint(-20, 20), h_bound - 5]))
self.survivor.destination = Vector2(x_point, y_point)
def check_conditions(self):
if self.survivor.evade_until is None:
# Attack a near zombie if we have ammo.
zombie = self.survivor.game.get_close_entity("zombie", self.survivor.location)
if zombie is not None and self.survivor.ammo > 0:
return "attacking"
# If there isn't a nearby zombie, switch to exploring state
if zombie is None:
self.survivor.zombie_id = None
return "exploring"
elif self.survivor.evade_until < time():
# evade_until timer elapsed, clear it
self.survivor.evade_until = None
return None
def entry_actions(self):
        # Start with heightened speed, heading away from the zombie.
zombie = self.survivor.game.get(self.survivor.zombie_id)
if zombie is not None:
self.survivor.speed = self.survivor.BASE_SPEED * 2
self.survivor.destination = -zombie.location
class SurvivorStateSeeking(State):
""" Once the survivor has spotted a supply crate, this state handles
the entity making his way over to collect """
def __init__(self, survivor):
# Call the base class constructor to init the State
State.__init__(self, "seeking")
# Set the survivor that this State will manipulate
self.survivor = survivor
self.supplies_id = None
def check_conditions(self):
# If there is a nearby zombie...
zombie = self.survivor.game.get_closest_entity("zombie", self.survivor.location)
if zombie is not None:
self.survivor.zombie_id = zombie.id
return "evading"
# If the supplies are gone, go back to exploring
supplycrate = self.survivor.game.get(self.survivor.supplies_id)
if supplycrate is None:
return "exploring"
# If we are next to the supplies, pick them up.
if self.survivor.location.distance_to(supplycrate.location) < 1.0:
self.survivor.game.remove_entity(supplycrate)
self.survivor.ammo = 10
self.survivor.health = 10
return "exploring"
return None
def entry_actions(self):
# Target the supplies.
supplycrate = self.survivor.game.get(self.survivor.supplies_id)
if supplycrate is not None:
self.survivor.destination = supplycrate.location
class SurvivorStateDead(State):
""" Once the survivor is down (dead) he becomes zombie food. In the
off chance that another survivor helps the entity, he can return
to exploring. """
def __init__(self, survivor, dead_image):
# Call the base class constructor to init the State
State.__init__(self, "dead")
# Set the survivor that this State will manipulate
self.survivor = survivor
self.dead_image = dead_image
def do_actions(self):
if randint(1, 10) == 1:
self.survivor.health += 5
def check_conditions(self):
if self.survivor.health >= 10:
return "exploring"
elif self.survivor.health <= -200:
self.survivor.game.turn_survivor(self.survivor)
return None
def entry_actions(self):
self.survivor.speed = 0
self.survivor.image = self.dead_image
```
#### File: entities/zombie/entity.py
```python
from random import randint
import pygame
from pygame.math import Vector2
from entities.base_entity import GameEntity
import entities.zombie.states as states
class Zombie(GameEntity):
""" The Zombie entity """
SIZE = 1 # meters wide and tall
BASE_SPEED = 1 # meters/second
def __init__(self, game, resource_mgr):
self.zombie_image = pygame.image.load('entities/zombie/zombie.png').convert_alpha()
GameEntity.__init__(self, game, 'zombie', self.zombie_image, resource_mgr)
self.size = Zombie.SIZE
# Create an instance of state
wandering_state = states.ZombieStateWandering(self)
seeking_state = states.ZombieStateSeeking(self)
feeding_state = states.ZombieStateFeeding(self, resource_mgr)
# Add the states to the state machine
self.brain.add_state(wandering_state)
self.brain.add_state(seeking_state)
self.brain.add_state(feeding_state)
self.survivor_id = 0
self.health = 3
def draw(self, surface):
""" draws the Zombie class and then any debug graphics """
# Call the draw function of the base class
GameEntity.draw(self, surface)
# Debug drawing of target survivor line.
if self.debug_mode:
if self.survivor_id:
survivor = self.game.get(self.survivor_id)
if survivor is not None:
pygame.draw.line(surface, (255, 25, 25), self.location,
survivor.location)
# blit health
surface.blit(self.resource_mgr.font.render(str(self.health), True, (0, 0, 0)),
self.location - Vector2(5, 22))
```
#### File: jonesmat/outbreak_z/game.py
```python
from pygame.math import Vector2
from entities.survivor.entity import Survivor
from entities.supplycrate.entity import SupplyCrate
from entities.zombie.entity import Zombie
class Game(object):
def __init__(self, resource_mgr, scene):
self.resource_mgr = resource_mgr
self.background = resource_mgr.background_image
self.scene = scene
self.entities = {} # Store all the entities
self.next_entity_id = 0 # Next entity id assigned
self.supply = 0.0
def add_entity(self, entity):
""" Stores the entity then advances the current id """
self.entities[self.next_entity_id] = entity
entity.id = self.next_entity_id
self.next_entity_id += 1
def remove_entity(self, entity):
""" Removes the entity from the game """
del self.entities[entity.id]
def get(self, id):
""" Find the entity, given its id """
if id in self.entities:
return self.entities[id]
else:
return None
def tick(self, time_passed):
""" Call the tick method of each GameEntity """
time_passed_seconds = time_passed / 1000.0
self.supply += time_passed_seconds / 2
local_entities = list(self.entities.values())
for entity in local_entities:
try:
entity.tick(time_passed_seconds)
except KeyError:
pass
def draw(self, surface):
self._draw_background(surface)
for entity in self.entities.values():
entity.draw(surface)
def _draw_background(self, surface):
background_width, background_height = self.background.get_size()
drawn_x = 0
while drawn_x < self.scene.device_rect.right:
drawn_y = 0
while drawn_y < self.scene.device_rect.bottom:
surface.blit(self.background, (drawn_x, drawn_y))
drawn_y += background_height
drawn_x += background_width
def get_close_entity(self, name, location: Vector2, radius=20., ignore_id=None):
""" Finds the first entity within range of a location """
for entity in self.entities.values():
# If an ignore_id is passed, ignore the entity with that id.
if ignore_id is not None and entity.id == ignore_id:
continue
if name is None or entity.name == name:
distance = location.distance_to(entity.location)
if distance < radius:
return entity
return None
def get_closest_entity(self, name, location: Vector2, radius=20.):
""" Find the closest entity within range of a location """
close_entities = []
for entity in self.entities.values():
if name is None or entity.name == name:
distance = location.distance_to(entity.location)
if distance < radius:
close_entities.append((distance, entity))
# Return the closest of the entities within range.
if len(close_entities) > 0:
close_entities = sorted(close_entities, key=lambda e: e[0])
distance, closest_entity = close_entities[0]
return closest_entity
return None
def get_close_entity_in_state(self, name, states, location: Vector2, radius=20.):
""" Find an entity within range of a location that is in one of the
states provided. """
for entity in self.entities.values():
if entity.name == name:
for state in states:
if entity.brain.active_state.name == state:
distance = location.distance_to(entity.location)
if distance < radius:
return entity
return None
def get_entity_count(self, name):
""" Gets the number of entities in the game with that name. """
count = 0
for entity in self.entities.values():
if entity.name == name:
count += 1
return count
def spawn_entity_at_device(self, entity_type, x_point, y_point):
if self.supply - entity_type.SUPPLY_COST >= 0:
if entity_type is Survivor:
survivor = Survivor(self, self.resource_mgr)
survivor.location = self.scene.get_viewport_vec_from_device_points(x_point, y_point)
survivor.brain.set_state("exploring")
self.add_entity(survivor)
self.supply -= 3
elif entity_type is SupplyCrate:
supplycrate = SupplyCrate(self, self.resource_mgr)
supplycrate.location = self.scene.get_viewport_vec_from_device_points(x_point, y_point)
self.add_entity(supplycrate)
self.supply -= 1
def set_debug_mode(self, debug_mode):
for entity in self.entities.values():
entity.debug_mode = debug_mode
def turn_survivor(self, survivor):
''' Turns a survivor into a Zombie! '''
self.remove_entity(survivor)
new_zombie = Zombie(self, self.resource_mgr)
new_zombie.location = Vector2(survivor.location.x, survivor.location.y)
new_zombie.brain.set_state("wandering")
self.add_entity(new_zombie)
```
#### File: jonesmat/outbreak_z/outbreak_z.py
```python
import pygame
from pygame.locals import QUIT, KEYDOWN, K_q, K_ESCAPE, K_BACKQUOTE, MOUSEBUTTONDOWN
from resources.resourcemgr import ResourceMgr
from scenes.game_scene import GameScene
def main():
pygame.init()
SCREEN_SIZE = (1280, 800)
surface = pygame.display.set_mode(SCREEN_SIZE, 0, 32)
clock = pygame.time.Clock()
active_scene = GameScene(ResourceMgr())
active_scene.generate_game()
while True:
for event in pygame.event.get():
if event.type == QUIT:
return
if event.type == KEYDOWN:
if event.key == K_q or event.key == K_ESCAPE:
return
if event.key == K_BACKQUOTE:
active_scene.handle_tilde_key_down()
if event.type == MOUSEBUTTONDOWN:
if event.button == 1:
active_scene.handle_mouse_left_down(pygame.mouse.get_pos())
if event.button == 3:
active_scene.handle_mouse_right_down(pygame.mouse.get_pos())
time_passed = clock.tick(30)
active_scene.tick(time_passed)
active_scene.draw(surface)
pygame.display.update()
if __name__ == '__main__':
    main()
```
#### File: outbreak_z/resources/resourcemgr.py
```python
import pygame
class ResourceMgr(object):
""" Contains all external resources used by the game. """
def __init__(self):
self.font = pygame.font.SysFont("arial", 16)
self.background_image = pygame.image.load('resources/background.jpg').convert()
self.caution_image = pygame.image.load('resources/caution.png').convert_alpha()
``` |
{
"source": "jonesmwh/business-individual-classifier",
"score": 3
} |
#### File: business-individual-classifier/test/test_utils.py
```python
from typing import List
import pytest
from cleanse_and_tokenize import run_cleanse_tokenize
from generate_raw_names import run_name_generation
from train_model import run_train_model
from utils import encode, decode, pad_token_list, pad_tokens
def test_encode_decode():
# Assert that decoder returns same input that was passed to encoder
sample_input = ["hello world", "this is", "", "a", "test"]
encoded = encode(sample_input)
decoded = decode(encoded)
assert decoded == sample_input
def test_pad_tokens():
test_cases = [[1], [1, 2], [1, 2, 3, 4, 5]]
target_len = 4
padded_cases = pad_token_list(test_cases, target_len)
print(padded_cases)
for case in padded_cases:
assert len(case) == target_len
def test_pad_encode_decode():
target_len = 10
sample_input = ["hello"]
encoded = encode(sample_input)
padded: List[List[int]] = pad_token_list(encoded, target_len)
decoded = decode(padded)
assert decoded == ["¬¬¬¬¬" + "hello"]
def integration_validate_pipeline():
run_name_generation()
run_cleanse_tokenize()
run_train_model()
```
#### File: jonesmwh/business-individual-classifier/utils.py
```python
import csv
import logging
import confuse
from typing import List
default_config_path = "config/config_default.yaml"
def load_config(path: str = default_config_path):
config = confuse.Configuration("business-individual-classifier", __name__)
config.set_file(path)
return config
def init_logger():
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
logger = init_logger()
def list_to_csv(list: List[str], output_path: str, header: str = ""):
with open(output_path, 'w', newline='') as writeFile:
writer = csv.writer(writeFile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
if header != "": writer.writerow([header])
for element in list:
writer.writerow([element])
writeFile.close()
def pad_token_list(list_tokenized: List[List[int]], total_length: int) -> List[List[int]]:
#Apply pad_tokens to list of token lists
padded = []
for item in list_tokenized:
padded.append(pad_tokens(item, total_length))
return padded
def pad_tokens(tokenized: List[int], total_length: int) -> List[int]:
# Pad/concatenate list of tokens, to equal total_length
token_count = len(tokenized)
diff = token_count - total_length
if diff == 0:
padded = tokenized
if diff < 0:
padded = [ord("¬") for blanks in range(-diff)]+tokenized
if diff > 0:
logger.warning(
f"pad_sequences function detected token sequence of {token_count} items. "
f"This is greater than max length of {total_length}.")
padded = tokenized[-total_length:]
return padded
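# Illustrative examples: pad_tokens([104, 105], 4) left-pads with ord("¬") (172) and
# returns [172, 172, 104, 105]; pad_tokens(list(range(6)), 4) keeps only the last four
# tokens ([2, 3, 4, 5]) and logs a warning.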
def decode(encoded: List[List[int]]) -> List[str]:
# decode utf8 to character
return ["".join([chr(char) for char in name]) for name in encoded]
def encode(name_list: List[str]) -> List[List[int]]:
# encode character to utf8
return [[ord(char) for char in name] for name in name_list]
``` |
{
"source": "jonesnc/deluge-client",
"score": 2
} |
#### File: deluge-client/deluge_client/rencode.py
```python
import struct
import sys
from threading import Lock
try:
from future_builtins import zip
except ImportError:
# Ignore on Py3.
pass
__version__ = ('Python', 1, 0, 4)
__all__ = ['dumps', 'loads']
py3 = sys.version_info[0] >= 3
if py3:
long = int # pylint: disable=redefined-builtin
unicode = str # pylint: disable=redefined-builtin
def int2byte(c):
return bytes([c])
else:
def int2byte(c):
return chr(c)
# Default number of bits for serialized floats, either 32 or 64 (also a parameter for dumps()).
DEFAULT_FLOAT_BITS = 32
# Maximum length of integer when written as base 10 string.
MAX_INT_LENGTH = 64
# The bencode 'typecodes' such as i, d, etc have been extended and
# relocated on the base-256 character set.
CHR_LIST = int2byte(59)
CHR_DICT = int2byte(60)
CHR_INT = int2byte(61)
CHR_INT1 = int2byte(62)
CHR_INT2 = int2byte(63)
CHR_INT4 = int2byte(64)
CHR_INT8 = int2byte(65)
CHR_FLOAT32 = int2byte(66)
CHR_FLOAT64 = int2byte(44)
CHR_TRUE = int2byte(67)
CHR_FALSE = int2byte(68)
CHR_NONE = int2byte(69)
CHR_TERM = int2byte(127)
# Positive integers with value embedded in typecode.
INT_POS_FIXED_START = 0
INT_POS_FIXED_COUNT = 44
# Dictionaries with length embedded in typecode.
DICT_FIXED_START = 102
DICT_FIXED_COUNT = 25
# Negative integers with value embedded in typecode.
INT_NEG_FIXED_START = 70
INT_NEG_FIXED_COUNT = 32
# Strings with length embedded in typecode.
STR_FIXED_START = 128
STR_FIXED_COUNT = 64
# Lists with length embedded in typecode.
LIST_FIXED_START = STR_FIXED_START + STR_FIXED_COUNT
LIST_FIXED_COUNT = 64
# Whether strings should be decoded when loading
_decode_utf8 = False
def decode_int(x, f):
f += 1
newf = x.index(CHR_TERM, f)
if newf - f >= MAX_INT_LENGTH:
raise ValueError('overflow')
try:
n = int(x[f:newf])
except (OverflowError, ValueError):
n = long(x[f:newf])
if x[f:f + 1] == '-':
if x[f + 1:f + 2] == '0':
raise ValueError
elif x[f:f + 1] == '0' and newf != f + 1:
raise ValueError
return (n, newf + 1)
def decode_intb(x, f):
f += 1
return (struct.unpack('!b', x[f:f + 1])[0], f + 1)
def decode_inth(x, f):
f += 1
return (struct.unpack('!h', x[f:f + 2])[0], f + 2)
def decode_intl(x, f):
f += 1
return (struct.unpack('!l', x[f:f + 4])[0], f + 4)
def decode_intq(x, f):
f += 1
return (struct.unpack('!q', x[f:f + 8])[0], f + 8)
def decode_float32(x, f):
f += 1
n = struct.unpack('!f', x[f:f + 4])[0]
return (n, f + 4)
def decode_float64(x, f):
f += 1
n = struct.unpack('!d', x[f:f + 8])[0]
return (n, f + 8)
def decode_string(x, f):
colon = x.index(b':', f)
try:
n = int(x[f:colon])
except (OverflowError, ValueError):
n = long(x[f:colon])
if x[f] == '0' and colon != f + 1:
raise ValueError
colon += 1
s = x[colon:colon + n]
if _decode_utf8:
s = s.decode('utf8')
return (s, colon + n)
def decode_list(x, f):
r, f = [], f + 1
while x[f:f + 1] != CHR_TERM:
v, f = decode_func[x[f:f + 1]](x, f)
r.append(v)
return (tuple(r), f + 1)
def decode_dict(x, f):
r, f = {}, f + 1
while x[f:f + 1] != CHR_TERM:
k, f = decode_func[x[f:f + 1]](x, f)
r[k], f = decode_func[x[f:f + 1]](x, f)
return (r, f + 1)
def decode_true(x, f):
return (True, f + 1)
def decode_false(x, f):
return (False, f + 1)
def decode_none(x, f):
return (None, f + 1)
decode_func = {}
decode_func[b'0'] = decode_string
decode_func[b'1'] = decode_string
decode_func[b'2'] = decode_string
decode_func[b'3'] = decode_string
decode_func[b'4'] = decode_string
decode_func[b'5'] = decode_string
decode_func[b'6'] = decode_string
decode_func[b'7'] = decode_string
decode_func[b'8'] = decode_string
decode_func[b'9'] = decode_string
decode_func[CHR_LIST] = decode_list
decode_func[CHR_DICT] = decode_dict
decode_func[CHR_INT] = decode_int
decode_func[CHR_INT1] = decode_intb
decode_func[CHR_INT2] = decode_inth
decode_func[CHR_INT4] = decode_intl
decode_func[CHR_INT8] = decode_intq
decode_func[CHR_FLOAT32] = decode_float32
decode_func[CHR_FLOAT64] = decode_float64
decode_func[CHR_TRUE] = decode_true
decode_func[CHR_FALSE] = decode_false
decode_func[CHR_NONE] = decode_none
def make_fixed_length_string_decoders():
def make_decoder(slen):
def f(x, f):
s = x[f + 1:f + 1 + slen]
if _decode_utf8:
s = s.decode('utf8')
return (s, f + 1 + slen)
return f
for i in range(STR_FIXED_COUNT):
decode_func[int2byte(STR_FIXED_START + i)] = make_decoder(i)
make_fixed_length_string_decoders()
def make_fixed_length_list_decoders():
def make_decoder(slen):
def f(x, f):
r, f = [], f + 1
for _ in range(slen):
v, f = decode_func[x[f:f + 1]](x, f)
r.append(v)
return (tuple(r), f)
return f
for i in range(LIST_FIXED_COUNT):
decode_func[int2byte(LIST_FIXED_START + i)] = make_decoder(i)
make_fixed_length_list_decoders()
def make_fixed_length_int_decoders():
def make_decoder(j):
def f(x, f):
return (j, f + 1)
return f
for i in range(INT_POS_FIXED_COUNT):
decode_func[int2byte(INT_POS_FIXED_START + i)] = make_decoder(i)
for i in range(INT_NEG_FIXED_COUNT):
decode_func[int2byte(INT_NEG_FIXED_START + i)] = make_decoder(-1 - i)
make_fixed_length_int_decoders()
def make_fixed_length_dict_decoders():
def make_decoder(slen):
def f(x, f):
r, f = {}, f + 1
for _ in range(slen):
k, f = decode_func[x[f:f + 1]](x, f)
r[k], f = decode_func[x[f:f + 1]](x, f)
return (r, f)
return f
for i in range(DICT_FIXED_COUNT):
decode_func[int2byte(DICT_FIXED_START + i)] = make_decoder(i)
make_fixed_length_dict_decoders()
def loads(x, decode_utf8=False):
global _decode_utf8
_decode_utf8 = decode_utf8
try:
r, l = decode_func[x[0:1]](x, 0)
except (IndexError, KeyError):
raise ValueError
if l != len(x):
raise ValueError
return r
def encode_int(x, r):
if 0 <= x < INT_POS_FIXED_COUNT:
r.append(int2byte(INT_POS_FIXED_START + x))
elif -INT_NEG_FIXED_COUNT <= x < 0:
r.append(int2byte(INT_NEG_FIXED_START - 1 - x))
elif -128 <= x < 128:
r.extend((CHR_INT1, struct.pack('!b', x)))
elif -32768 <= x < 32768:
r.extend((CHR_INT2, struct.pack('!h', x)))
elif -2147483648 <= x < 2147483648:
r.extend((CHR_INT4, struct.pack('!l', x)))
elif -9223372036854775808 <= x < 9223372036854775808:
r.extend((CHR_INT8, struct.pack('!q', x)))
else:
s = str(x)
if py3:
s = bytes(s, 'ascii')
if len(s) >= MAX_INT_LENGTH:
raise ValueError('overflow')
r.extend((CHR_INT, s, CHR_TERM))
def encode_float32(x, r):
r.extend((CHR_FLOAT32, struct.pack('!f', x)))
def encode_float64(x, r):
r.extend((CHR_FLOAT64, struct.pack('!d', x)))
def encode_bool(x, r):
r.append({False: CHR_FALSE, True: CHR_TRUE}[bool(x)])
def encode_none(x, r):
r.append(CHR_NONE)
def encode_string(x, r):
if len(x) < STR_FIXED_COUNT:
r.extend((int2byte(STR_FIXED_START + len(x)), x))
else:
s = str(len(x))
if py3:
s = bytes(s, 'ascii')
r.extend((s, b':', x))
def encode_unicode(x, r):
encode_string(x.encode('utf8'), r)
def encode_list(x, r):
if len(x) < LIST_FIXED_COUNT:
r.append(int2byte(LIST_FIXED_START + len(x)))
for i in x:
encode_func[type(i)](i, r)
else:
r.append(CHR_LIST)
for i in x:
encode_func[type(i)](i, r)
r.append(CHR_TERM)
def encode_dict(x, r):
if len(x) < DICT_FIXED_COUNT:
r.append(int2byte(DICT_FIXED_START + len(x)))
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
else:
r.append(CHR_DICT)
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
r.append(CHR_TERM)
encode_func = {}
encode_func[int] = encode_int
encode_func[long] = encode_int
encode_func[bytes] = encode_string
encode_func[list] = encode_list
encode_func[tuple] = encode_list
encode_func[dict] = encode_dict
encode_func[type(None)] = encode_none
encode_func[unicode] = encode_unicode
encode_func[bool] = encode_bool
lock = Lock()
def dumps(x, float_bits=DEFAULT_FLOAT_BITS):
"""
Dump data structure to str.
Here float_bits is either 32 or 64.
"""
with lock:
if float_bits == 32:
encode_func[float] = encode_float32
elif float_bits == 64:
encode_func[float] = encode_float64
else:
raise ValueError('Float bits (%d) is not 32 or 64' % float_bits)
r = []
encode_func[type(x)](x, r)
return b''.join(r)
def test():
f1 = struct.unpack('!f', struct.pack('!f', 25.5))[0]
f2 = struct.unpack('!f', struct.pack('!f', 29.3))[0]
f3 = struct.unpack('!f', struct.pack('!f', -0.6))[0]
ld = (({b'a': 15, b'bb': f1, b'ccc': f2, b'': (f3, (), False, True, b'')}, (b'a', 10**20),
tuple(range(-100000, 100000)), b'b' * 31, b'b' * 62, b'b' * 64, 2**30, 2**33, 2**62,
2**64, 2**30, 2**33, 2**62, 2**64, False, False, True, -1, 2, 0),)
assert loads(dumps(ld)) == ld
d = dict(zip(range(-100000, 100000), range(-100000, 100000)))
d.update({b'a': 20, 20: 40, 40: 41, f1: f2, f2: f3, f3: False, False: True, True: False})
ld = (d, {}, {5: 6}, {7: 7, True: 8}, {9: 10, 22: 39, 49: 50, 44: b''})
assert loads(dumps(ld)) == ld
ld = (b'', b'a' * 10, b'a' * 100, b'a' * 1000, b'a' * 10000, b'a' * 100000, b'a' * 1000000, b'a' * 10000000)
assert loads(dumps(ld)) == ld
ld = tuple([dict(zip(range(n), range(n))) for n in range(100)]) + (b'b',)
assert loads(dumps(ld)) == ld
ld = tuple([dict(zip(range(n), range(-n, 0))) for n in range(100)]) + (b'b',)
assert loads(dumps(ld)) == ld
ld = tuple([tuple(range(n)) for n in range(100)]) + (b'b',)
assert loads(dumps(ld)) == ld
ld = tuple([b'a' * n for n in range(1000)]) + (b'b',)
assert loads(dumps(ld)) == ld
ld = tuple([b'a' * n for n in range(1000)]) + (None, True, None)
assert loads(dumps(ld)) == ld
assert loads(dumps(None)) is None
assert loads(dumps({None: None})) == {None: None}
assert 1e-10 < abs(loads(dumps(1.1)) - 1.1) < 1e-6
assert 1e-10 < abs(loads(dumps(1.1, 32)) - 1.1) < 1e-6
assert abs(loads(dumps(1.1, 64)) - 1.1) < 1e-12
assert loads(dumps('Hello World!!'), decode_utf8=True)
try:
import psyco
psyco.bind(dumps)
psyco.bind(loads)
except ImportError:
pass
if __name__ == '__main__':
test()
```
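A short usage sketch (not one of the repository files; it simply illustrates the `dumps`/`loads` round trip, assuming the module is importable as `deluge_client.rencode`):
```python
from deluge_client.rencode import dumps, loads
# Encode a nested structure to compact rencode bytes and decode it back.
payload = {b'name': b'ubuntu.iso', b'size': 123456, b'done': True, b'tags': (b'linux', b'iso')}
encoded = dumps(payload)           # bytes
assert loads(encoded) == payload   # note: lists also decode back as tuples
```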
#### File: deluge-client/deluge_client/tests.py
```python
import os
import sys
import pytest
from .client import DelugeRPCClient, RemoteException
if sys.version_info > (3,):
long = int
def client_factory(**kw):
"""Create a disconnected client for test purposes."""
if sys.platform.startswith('win'):
auth_path = os.path.join(os.getenv('APPDATA'), 'deluge', 'auth')
else:
auth_path = os.path.expanduser("~/.config/deluge/auth")
with open(auth_path, 'rb') as f:
filedata = f.read().decode("utf-8").split('\n')[0].split(':')
username, password = filedata[:2]
ip = '127.0.0.1'
port = 58846
kwargs = {'decode_utf8': True}
if kw:
kwargs.update(kw)
client = DelugeRPCClient(ip, port, username, password, **kwargs)
return client
@pytest.fixture
def client(request):
client = client_factory(**getattr(request, 'param', {}))
client.connect()
yield client
try:
client.disconnect()
    except Exception:
pass
def test_connect(client):
assert client.connected
def test_call_method(client):
assert isinstance(client.call('core.get_free_space'), (int, long))
def test_call_method_arguments(client):
assert isinstance(client.call('core.get_free_space', '/'), (int, long))
@pytest.mark.parametrize('client',
[{'decode_utf8': True}, {'decode_utf8': False}],
ids=['decode_utf8_on', 'decode_utf8_off'],
indirect=True)
def test_call_method_exception(client):
with pytest.raises(RemoteException) as ex_info:
client.call('core.get_free_space', '1', '2')
assert ('takes at most 2 arguments' in str(ex_info.value) or
'takes from 1 to 2 positional arguments' in str(ex_info.value)) # deluge 2.0
def test_attr_caller(client):
assert isinstance(client.core.get_free_space(), (int, long))
assert isinstance(client.core.get_free_space('/'), (int, long))
def test_call_method_context_manager():
with client_factory() as client:
assert isinstance(client.call('core.get_free_space'), (int, long))
``` |
{
"source": "jonesnc/yearn-client",
"score": 2
} |
#### File: yearn-client/test/test_method.py
```python
from yearn import Client
sock_uri = 'scgi:///home/nathanjones/Projects/yearn-client/rtorrent/config/.local/share/rtorrent/rtorrent.sock'
client = Client(uri=sock_uri)
def test_insert_key():
name = 'event.download.finished'
key = 'new_event'
definition = 'execute = /bin/ash /config/.local/share/rtorrent/event.sh'
client.set_key(name, key)
client.set_key(name, key, definition)
keys = client.keys('event.download.finished')
print(keys)
    assert client.has_key(name, key)
for torrent in client.torrents_info():
torrent.erase()
new_torrent_hash = client.load_torrent(
'/home/nathanjones/Projects/yearn-client/test/torrent_files/KNOPPIX_V9.1CD-2021-01-25-DE.torrent',
'/config/.local/share/rtorrent/download')
client.torrents_info()[0].start()
```
#### File: yearn-client/yearn/cache.py
```python
from typing import Optional, Union
from urllib.parse import urlparse
from xmlrpc.client import Server as HTTPServerProxy
from .scgi import SCGIServerProxy
ServerProxy = Union[HTTPServerProxy, SCGIServerProxy]
class ServerCache(object):
"""Caches the server.
Subclass this for any object that needs access to the ServerProxy.
"""
_server: ServerProxy
    def __init__(self, *args, uri: Optional[str] = None,
                 server: Optional[ServerProxy] = None, **kwargs):
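        # Prefer an explicitly supplied proxy; otherwise build one from the URI
        # scheme (scgi, http or https).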
if server:
self._server = server
elif uri:
schema = urlparse(uri)[0]
if schema == 'scgi':
self._server = SCGIServerProxy(uri)
elif schema in ['http', 'https']:
self._server = HTTPServerProxy(uri)
else:
raise NotImplementedError()
else:
raise ValueError('MISSING KWARG: uri or server is required!')
super().__init__()
```
#### File: yearn-client/yearn/load.py
```python
import math
import os
from enum import Enum
from pprint import pprint
from time import time
from typing import Optional, Sequence
from xmlrpc import client
import bencodepy
from torrentool.api import Torrent as Torrentool # type: ignore
from yearn.cache import ServerCache
from yearn.torrent import Torrent
class FilePriorities(Enum):
OFF = 0
NORMAL = 1
HIGH = 2
class LoadAPIMixin(ServerCache):
def load_torrent(self, torrent_file, directory=None,
use_dir_as_base=False, perform_check_hash=False,
set_addtime=False, add_started=False, set_completed=False
) -> Optional[Torrent]:
with open(torrent_file, 'rb') as f:
raw_torrent_data = client.Binary(f.read())
load_args = ['', raw_torrent_data]
directory = directory.replace('"', '\\"')
if use_dir_as_base:
directory_cmd = f'd.directory_base.set="{directory}"'.encode('utf-8')
else:
directory_cmd = f'd.directory.set="{directory}"'.encode('utf-8')
load_args.append(directory_cmd)
if perform_check_hash:
perform_check_hash_cmd = 'd.check_hash='
load_args.append(perform_check_hash_cmd)
if set_addtime:
now = int(time())
set_custom_addtime_cmd = f'd.custom.set=addtime,{now}'
load_args.append(set_custom_addtime_cmd)
tt_class = Torrentool.from_file(torrent_file)
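        # When set_completed is requested, embed libtorrent fast-resume data in the
        # torrent so rtorrent treats every listed file as already downloaded.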
if set_completed:
chunk_size = tt_class._struct['info']['piece length']
files = []
total_size = tt_class.total_size
if 'files' in tt_class._struct['info']:
tt_files = tt_class._struct['info']['files']
else:
tt_files = [{
'path': [
tt_class._struct['info']['name']
],
'length': tt_class._struct['info']['length']
}]
for file in tt_files:
file_length = file['length']
host_torrent_file_dir = os.path.dirname(torrent_file)
local_file_path = os.path.join(
host_torrent_file_dir,
# for files in sub-dir, 'path' will have multiple
# elements
*file['path']
)
try:
stat = os.stat(local_file_path)
mtime = math.trunc(stat.st_mtime)
resume = {
'completed': math.ceil(file_length / chunk_size),
'mtime': mtime,
'priority': FilePriorities.OFF.value,
}
except FileNotFoundError:
resume = {
'completed': 0,
'mtime': 0,
'priority': FilePriorities.OFF.value,
}
files.append(resume)
bitfield = math.ceil(total_size / chunk_size)
libtorrent_resume = {
'bitfield': bitfield,
'files': files
}
torrent_file_data = tt_class._struct.copy()
torrent_file_data['libtorrent_resume'] = libtorrent_resume
raw_data_with_resume = bencodepy.encode(torrent_file_data)
load_args[1] = raw_data_with_resume
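        # load.raw_verbose adds the torrent stopped; load.raw_start_verbose adds it
        # and starts downloading immediately.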
if add_started:
self._server.load.raw_start_verbose(*load_args)
else:
self._server.load.raw_verbose(*load_args)
torrent = Torrent(
server=self._server,
torrent_hash=tt_class.info_hash)
return torrent
```
#### File: yearn/scripts/load.py
```python
import os
import re
import click
import torrentool
from torrentool.api import Torrent as Torrentool
from yearn import Client
from yearn.torrent import Torrent
def get_all_torrent_files(path):
for root, _dirs, files in os.walk(path, topdown=False):
torrent_dir = os.path.abspath(root)
for name in files:
            if name.endswith('.torrent'):
yield (torrent_dir, name)
def load(path, scgi, mount=None):
    if mount:
        host_base_dir, mount_base_dir = mount.split(':')
    else:
        host_base_dir = mount_base_dir = ''
client = Client(uri=scgi)
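    # Hashes already registered with rtorrent; used below to skip torrent files
    # that have already been loaded.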
torrent_hashes = client.info_hashes()
if os.path.isdir(path):
for dir, torrent_file in get_all_torrent_files(path):
full_host_torrent_file_path = os.path.join(dir, torrent_file)
try:
torrent = Torrentool.from_file(full_host_torrent_file_path)
except torrentool.exceptions.BencodeDecodingError:
click.echo(
f'Invalid torrent file: {full_host_torrent_file_path}')
continue
torrent_hash = torrent.info_hash.upper()
if torrent_hash not in torrent_hashes:
mount_full_path = dir.replace(host_base_dir, mount_base_dir)
click.echo(f'Adding {torrent_file}')
client.load_torrent(
torrent_file=full_host_torrent_file_path,
directory=mount_full_path,
use_dir_as_base=True,
perform_check_hash=False,
set_addtime=True,
add_started=True,
set_completed=True,
)
else:
torrent_in_rtorrent = client.get_torrent(
torrent_hash=torrent_hash)
if not torrent_in_rtorrent.complete:
print((
f'{torrent_in_rtorrent.name} '
f'({torrent_in_rtorrent.hash}) is incomplete!'))
elif os.path.isfile(path):
pass
else:
click.echo('path is a special file! Skipping.')
```
#### File: yearn/scripts/yearn.py
```python
import click
from yearn.scripts.load import load as script_load
@click.group()
def cli():
pass
@cli.command()
@click.argument('path', default='.')
@click.option('--scgi', default=None)
@click.option('--mount', default=None) # TODO: support multiple mounts
def load(path, scgi, mount):
click.echo('Loading torrent files...')
click.echo(f'Using SCGI: {scgi}')
click.echo(f'Using path: {path}')
click.echo(f'Using mount: {mount}')
script_load(path, scgi, mount)
```
#### File: yearn-client/yearn/torrent.py
```python
from datetime import datetime
from typing import List
import humanfriendly # type: ignore
from yearn.cache import ServerCache
from yearn.exceptions import XmlrpcResultTypeException
from yearn.tracker import Tracker
class Torrent(ServerCache):
def __init__(self, server, torrent_hash):
super().__init__(server=server)
self.hash: str = torrent_hash
def __str__(self):
return f'Torrent (hash={self.hash},name={self.name})'
def __repr__(self):
return self.__str__()
def open(self):
return self._server.d.open(self.hash)
def resume(self):
return self._server.d.resume(self.hash)
def close(self):
return self._server.d.close(self.hash)
def start(self):
return self._server.d.start(self.hash)
def erase(self):
return self._server.d.erase(self.hash)
@property
def name(self):
return self._server.d.name(self.hash)
@property
def base_filename(self):
return self._server.d.base_filename(self.hash)
@property
def base_path(self):
return self._server.d.base_path(self.hash)
@property
def base_directory(self):
return self._server.d.base_directory(self.hash)
@property
def base_directory_base(self):
return self._server.d.base_directory_base(self.hash)
@property
def accepting_seeders(self):
result = self._server.d.accepting_seeders(self.hash)
if isinstance(result, int):
return result == 1
raise XmlrpcResultTypeException(
f'Expected type int, got {type(result)}')
@accepting_seeders.setter
def accepting_seeders(self, accepting_seeders: bool):
if accepting_seeders:
self._server.d.accepting_seeders.enable(self.hash)
else:
self._server.d.accepting_seeders.disable(self.hash)
@property
def bitfield(self):
result = self._server.d.bitfield(self.hash)
if isinstance(result, str):
return result
else:
raise XmlrpcResultTypeException(
f'Expected type str, got {type(result)}')
@property
def size_bytes(self):
result = self._server.d.size_bytes(self.hash)
if isinstance(result, int):
return result
else:
raise XmlrpcResultTypeException(
f'Expected type int, got {type(result)}')
@property
def chunk_size(self):
result = self._server.d.chunk_size(self.hash)
if isinstance(result, int):
return humanfriendly.format_size(result)
else:
raise XmlrpcResultTypeException(
f'Expected type int, got {type(result)}')
@property
def chunk_size_bytes(self):
result = self._server.d.chunk_size(self.hash)
if isinstance(result, int):
return result
else:
raise XmlrpcResultTypeException(
f'Expected type int, got {type(result)}')
@property
def size_chunks(self):
result = self._server.d.size_chunks(self.hash)
if isinstance(result, int):
return result
else:
raise XmlrpcResultTypeException(
f'Expected type int, got {type(result)}')
@property
def bytes_done(self):
return self._server.d.bytes_done(self.hash)
@property
def bytes_left(self):
return self._server.d.left_bytes(self.hash)
@property
def timestamp_start(self):
time_result = self._server.d.timestamp.started(self.hash)
if isinstance(time_result, int):
datetime_time = datetime.fromtimestamp(float(time_result))
return datetime_time
return None
@property
def is_paused(self) -> bool:
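        # rtorrent has no single "paused" flag: treat a torrent as paused when it
        # is open but neither active nor started (d.state == 0).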
return (
self._server.d.is_open(self.hash) == 1 and
not self._server.d.is_active(self.hash) == 1 and
not self._server.d.state(self.hash) == 1)
@property
def complete(self) -> bool:
return self._server.d.complete(self.hash) == 1
@property
def is_hash_checked(self) -> bool:
return self._server.d.is_hash_checked(self.hash) == 1
@property
def is_hash_checking(self) -> bool:
return self._server.d.is_hash_checking(self.hash) == 1
@property
def is_multi_file(self) -> bool:
return self._server.d.is_multi_file(self.hash) == 1
@property
def trackers(self) -> List[Tracker]:
trackers_results = self._server.t.multicall(self.hash, '', 't.url=')
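        # t.multicall returns one result row per tracker; the list position is
        # reused as the tracker index passed to Tracker.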
tracker_list = []
if isinstance(trackers_results, list):
for idx, tracker_url in enumerate(trackers_results):
tracker = Tracker(self._server, self.hash, tracker_url, idx)
tracker_list.append(tracker)
return tracker_list
else:
raise XmlrpcResultTypeException(
f'Expected type list, got {type(trackers_results)}')
``` |
{
"source": "jonesPD/solaredge_setapp",
"score": 3
} |
#### File: solaredge_setapp/solaredge_setapp/maintenance.py
```python
import solaredge_setapp
import solaredge_setapp.maintenance_pb2
import datetime
class Maintenance:
    def parse_protobuf(self, data):
parsed = {}
try:
proto = solaredge_setapp.maintenance_pb2.Maintenance()
            proto.ParseFromString(data)
parsed = {
"serial": str(proto.header.id),
"timestamp": int(proto.header.timestamp),
"standby": bool(proto.standby.activated.value),
"utc_offset": int(proto.date_and_time.gmt_offset.value),
"ntp_server": str(proto.date_and_time.ntp.value),
"afci": {
"enabled": bool(proto.afci.enable.value),
"manual_reconnect": bool(proto.afci.manual_reconnect.value),
"test_result": solaredge_setapp.AfciTestResult(int(proto.afci.test.result)).name
},
}
parsed["inverters"] = []
for inverter in proto.diagnostics.inverters.primary, proto.diagnostics.inverters.left, proto.diagnostics.inverters.right:
if inverter.inv_sn.value:
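                    # r_iso and alpha are reported as value/scaling pairs; fall back
                    # to the raw value when no scaling factor is given.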
if inverter.isolation.r_iso.scaling:
inverter_isolation_r_iso = float(inverter.isolation.r_iso.value / inverter.isolation.r_iso.scaling)
else:
inverter_isolation_r_iso = float(inverter.isolation.r_iso.value)
if inverter.isolation.alpha.scaling:
inverter_isolation_alpha = float(inverter.isolation.alpha.value / inverter.isolation.alpha.scaling)
else:
inverter_isolation_alpha = float(inverter.isolation.alpha.value)
parsed["inverters"].append({
"serial": str(inverter.inv_sn.value),
"isolation": {
"fault_location": int(inverter.isolation.fault_location.value),
"r_iso": inverter_isolation_r_iso,
"alpha": inverter_isolation_alpha
},
"optimizers_status": {
"total": int(inverter.optimizers_status.enabled.value),
"online": int(inverter.optimizers_status.connected.value)
},
"optimizers": [{
"serial": str(po.sn.value),
"online": bool(po.reports.value),
"po_voltage": int(po.output_v.value),
"po_power": int(po.energy.value),
"module_voltage": int(po.input_v.value),
"module_current": int(po.input_c.value),
"temperature": int(po.temperature.value.value),
"timestamp": 0 if not bool(po.reports.value) else int(datetime.datetime.strptime(
"{year} {month} {day} {hour} {minutes} {seconds}".format(
year=po.date.year.value,
month=po.date.month.value,
day=po.date.day.value,
hour=po.date.hour.value,
minutes=po.date.minute.value,
seconds=po.date.second.value
), "%Y %m %d %H %M %S").timestamp())
} for po in inverter.optimizer]
})
except AttributeError as e:
print(f"AttributeError: {e}")
return parsed
``` |
{
"source": "jonespm/course-inventory",
"score": 3
} |
#### File: course-inventory/db/db_creator.py
```python
from __future__ import annotations
# standard libraries
import logging, os
from typing import Dict, List, Sequence, Union
from urllib.parse import quote_plus
# third-party libraries
from sqlalchemy.engine import create_engine, Engine
from yoyo import get_backend, read_migrations
# Initialize settings and global variables
logger = logging.getLogger(__name__)
PARENT_PATH = os.path.dirname(os.path.abspath(__file__))
MIGRATIONS_PATH = os.path.join(PARENT_PATH, 'migrations')
class DBCreator:
'''
Utility class for managing the application's database. Leverages SQLAlchemy
and yoyo-migrations. The migrate, drop_records, and reset_database methods can be
used fluently, i.e. with method chaining (see reset_database for an example).
'''
def __init__(self, db_params: Dict[str, str]) -> None:
'''
Sets the database name; sets the connection string; uses the connection string
to create a SQLAlchemy engine object.
'''
self.db_name: str = db_params['dbname']
self.conn_str: str = (
'mysql+mysqldb' +
f"://{db_params['user']}" +
f":{quote_plus(db_params['password'])}" +
f"@{db_params['host']}" +
f":{db_params['port']}" +
f"/{db_params['dbname']}?charset=utf8&ssl=true"
)
self.engine: Engine = create_engine(self.conn_str)
def get_table_names(self) -> List[str]:
'''
Gets table names using the SQLAlchemy Engine object.
'''
logger.debug('get_table_names')
return self.engine.table_names()
def migrate(self) -> DBCreator:
'''
Updates database schema using yoyo-migrations and the migration files in the
migrations directory collocated with this file (db_creator.py).
'''
logger.debug('migrate')
backend = get_backend(self.conn_str)
migrations = read_migrations(MIGRATIONS_PATH)
with backend.lock():
backend.apply_migrations(backend.to_apply(migrations))
return self
def drop_records(self, spec_table_names: Union[Sequence[str], None] = None) -> DBCreator:
'''
Drops records from either the specified database tables or all of the
application-managed database tables; tables managed by yoyo-migrations are ignored.
'''
logger.debug('drop_records')
app_table_names = [
table_name for table_name in self.get_table_names() if 'yoyo' not in table_name
]
logger.debug(f'app_table_names: {app_table_names}')
if spec_table_names is None:
# Drop all non-yoyo tables
logger.info('Dropping all application (non-yoyo) tables')
drop_table_names = app_table_names
else:
# Drop specified table names if they're valid (i.e. an application-managed table)
logger.info('Dropping specific provided tables if they are valid')
drop_table_names = []
for spec_table_name in spec_table_names:
if spec_table_name in app_table_names:
drop_table_names.append(spec_table_name)
else:
logger.error(f'Invalid table name was provided: {spec_table_name}')
conn = self.engine.connect()
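        # Disable foreign key checks so tables can be cleared in any order, then
        # re-enable them once the deletes are done.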
conn.execute('SET FOREIGN_KEY_CHECKS=0;')
for drop_table_name in drop_table_names:
logger.debug(f'Table Name: {drop_table_name}')
conn.execute(f'DELETE FROM {drop_table_name};')
logger.info(f'Dropped records in {drop_table_name} in {self.db_name}')
conn.execute('SET FOREIGN_KEY_CHECKS=1;')
return self
def reset_database(self) -> DBCreator:
'''
Drops records in application-managed tables and applies outstanding migrations
'''
self.drop_records().migrate()
return self
def get_pk_values(self, table_name: str, primary_key: str) -> List[Union[int, None]]:
'''
Retrieves primary key values from the table. Only works with one primary key.
'''
pk_values = []
conn = self.engine.connect()
rs = conn.execute(f"SELECT {primary_key} FROM {table_name}")
for row in rs:
pk_values.append(row[0])
return pk_values
``` |
{
"source": "jonespm/student-dashboard-django",
"score": 2
} |
#### File: student-dashboard-django/dashboard/cron.py
```python
from datetime import datetime
import logging
from collections import namedtuple
from typing import Any, Dict, List, Union
import hjson
import pandas as pd
import pytz
import pangres
from django.conf import settings
from django.db import connections as conns, models
from django.db.models import QuerySet
from django_cron import CronJobBase, Schedule
from google.cloud import bigquery
from sqlalchemy import create_engine, types
from sqlalchemy.engine import ResultProxy
from dashboard.common import db_util, utils
from dashboard.models import Course, Resource, AcademicTerms, ResourceAccess
logger = logging.getLogger(__name__)
db_name = settings.DATABASES['default']['NAME']
db_user = settings.DATABASES['default']['USER']
db_password = settings.DATABASES['default']['PASSWORD']
db_host = settings.DATABASES['default']['HOST']
db_port = settings.DATABASES['default']['PORT']
logger.debug("db-name:" + db_name)
logger.debug("db-user:" + db_user)
engine = create_engine("mysql+mysqldb://{user}:{password}@{host}:{port}/{db}?charset=utf8mb4"
.format(db=db_name, # your mysql database name
user=db_user, # your mysql user for the database
                               password=db_password,  # password for user
host=db_host,
port=db_port))
# Set up queries array from configuration file
CRON_QUERY_FILE = settings.CRON_QUERY_FILE
logger.info(CRON_QUERY_FILE)
try:
with open(CRON_QUERY_FILE) as cron_query_file:
queries = hjson.load(cron_query_file)
except FileNotFoundError:
logger.error(
f'Cannot find cron queries file "{CRON_QUERY_FILE}".')
queries = dict()
# Split a list into *size* shorter pieces
def split_list(a_list: list, size: int = 20):
return [a_list[i:i + size] for i in range(0, len(a_list), size)]
# Run a query against the data warehouse and append the resulting rows to the given MySQL table
def util_function(data_warehouse_course_id, sql_string, mysql_table, table_identifier=None, param_object=None):
df = pd.read_sql(sql_string, conns['DATA_WAREHOUSE'], params=param_object)
logger.debug(df)
    # The SQL returns a boolean value, so attach the course id to it so the result can be stored in the DB table.
if table_identifier == 'weight' and data_warehouse_course_id:
df['course_id'] = data_warehouse_course_id
df.columns = ['consider_weight', 'course_id']
# drop duplicates
df = df.drop_duplicates(keep='first')
logger.debug(" table: " + mysql_table + " insert size: " + str(df.shape[0]))
# write to MySQL
try:
df.to_sql(con=engine, name=mysql_table, if_exists='append', index=False)
except Exception as e:
logger.exception(f"Error running to_sql on table {mysql_table}")
raise
# returns the row size of dataframe
return f"{str(df.shape[0])} {mysql_table} : {data_warehouse_course_id}\n"
# execute database query
def execute_db_query(query: str, params: List = None) -> ResultProxy:
with engine.connect() as connection:
connection.detach()
if params:
return connection.execute(query, params)
else:
return connection.execute(query)
# remove all records inside the specified table
def delete_all_records_in_table(table_name: str, where_clause: str = "", where_params: List = None):
# delete all records in the table first, can have an optional where clause
result_proxy = execute_db_query(f"delete from {table_name} {where_clause}", where_params)
return(f"\n{result_proxy.rowcount} rows deleted from {table_name}\n")
def soft_update_datetime_field(
model_inst: models.Model,
field_name: str,
warehouse_field_value: Union[datetime, None],
) -> List[str]:
"""
Uses Django ORM to update DateTime field of model instance if the field value is null and the warehouse data is non-null.
"""
model_name: str = model_inst.__class__.__name__
current_field_value: Union[datetime, None] = getattr(model_inst, field_name)
# Skipping update if the field already has a value, provided by a previous cron run or administrator
if current_field_value is not None:
logger.info(
f'Skipped update of {field_name} for {model_name} instance ({model_inst.id}); existing value was found')
else:
if warehouse_field_value:
warehouse_field_value = warehouse_field_value.replace(tzinfo=pytz.UTC)
setattr(model_inst, field_name, warehouse_field_value)
logger.info(f'Updated {field_name} for {model_name} instance ({model_inst.id})')
return [field_name]
return []
# cron job to populate course and user tables
class DashboardCronJob(CronJobBase):
schedule = Schedule(run_at_times=settings.RUN_AT_TIMES)
code = 'dashboard.DashboardCronJob' # a unique code
def __init__(self) -> None:
"""Constructor to be used to declare valid_locked_course_ids instance variable."""
super().__init__()
self.valid_locked_course_ids: List[int]
# verify whether course ids are valid
def verify_course_ids(self):
# whether all course ids are valid ids
invalid_course_id_list = []
course_dfs = []
logger.debug("in checking course")
# loop through multiple course ids
for course_id in Course.objects.get_supported_courses():
logger.debug(course_id)
# select course based on course id
course_sql = queries['course'].format(course_id=course_id)
logger.debug(course_sql)
course_df = pd.read_sql(course_sql, conns['DATA_WAREHOUSE'])
logger.debug(course_df)
# error out when course id is invalid, otherwise add DataFrame to list
if course_df.empty:
logger.error(f"""Course {course_id} don't have the entry in data warehouse yet. """)
invalid_course_id_list.append(course_id)
else:
course_dfs.append(course_df)
if len(course_dfs) > 0:
courses_data = pd.concat(course_dfs).reset_index()
else:
logger.info("No course records were found in the database.")
courses_data = pd.DataFrame(
columns=["id", "canvas_id", "enrollment_term_id", "name", "start_at", "conclude_at"])
CourseVerification = namedtuple("CourseVerification", ["invalid_course_ids", "course_data"])
return CourseVerification(invalid_course_id_list, courses_data)
# update USER records from DATA_WAREHOUSE
def update_user(self):
# cron status
status = ""
logger.debug("in update with data warehouse user")
# delete all records in the table first
status += delete_all_records_in_table("user")
# loop through multiple course ids
for course_id in self.valid_locked_course_ids:
# select all student registered for the course
user_sql = queries['user'].format(
course_id=course_id, canvas_data_id_increment=settings.CANVAS_DATA_ID_INCREMENT)
logger.debug(user_sql)
status += util_function(course_id, user_sql, 'user')
return status
# update unizin metadata from DATA_WAREHOUSE
def update_unizin_metadata(self):
# cron status
status = ""
logger.debug("in update unizin metadata")
# delete all records in the table first
status += delete_all_records_in_table("unizin_metadata")
# select all student registered for the course
metadata_sql = "select key as pkey, value as pvalue from unizin_metadata"
logger.debug(metadata_sql)
status += util_function("", metadata_sql, 'unizin_metadata')
return status
# update file records from Canvas that don't have names provided
def update_canvas_resource(self):
# cron status
status = ""
logger.debug("in update canvas resource")
# Select all the files for these courses
# convert int array to str array
course_ids = list(map(str, self.valid_locked_course_ids))
file_sql = queries['resource']
logger.debug(file_sql)
df_attach = pd.read_sql(file_sql, conns['DATA_WAREHOUSE'], params={'course_ids': tuple(course_ids)})
logger.debug(df_attach)
# Update these back again based on the dataframe
# Remove any rows where file_state is not available!
for row in df_attach.itertuples(index=False):
if row.file_state == 'available':
Resource.objects.filter(resource_id=row.id).update(name=row.display_name)
status += f"Row {row.id} updated to {row.display_name}\n"
else:
Resource.objects.filter(resource_id=row.id).delete()
status += f"Row {row.id} removed as it is not available\n"
return status
# update RESOURCE_ACCESS records from BigQuery or LRS data sources
def update_resource_access(self):
# cron status
status = ""
# return string with concatenated SQL insert result
return_string = ""
if settings.LRS_IS_BIGQUERY:
# Instantiates a client
bigquery_client = bigquery.Client()
# BQ Total Bytes Billed to report to status
total_bytes_billed = 0
data_last_updated = Course.objects.filter(id__in=self.valid_locked_course_ids).get_data_earliest_date()
logger.info(f"Deleting all records in resource_access after {data_last_updated}")
status += delete_all_records_in_table("resource_access", f"WHERE access_time > %s", [data_last_updated, ])
# loop through multiple course ids, 20 at a time
# (This is set by the CRON_BQ_IN_LIMIT from settings)
for data_warehouse_course_ids in split_list(self.valid_locked_course_ids, settings.CRON_BQ_IN_LIMIT):
# query to retrieve all file access events for one course
# There is no catch if this query fails, event_store.events needs to exist
final_query = []
for k, query_obj in settings.RESOURCE_ACCESS_CONFIG.items():
# concatenate the multi-line presentation of query into one single string
query = query_obj['query']
if (data_last_updated is not None):
# insert the start time parameter for query
if query_obj.get('query_data_last_updated_condition'):
query += f" {query_obj['query_data_last_updated_condition']} "
elif settings.LRS_IS_BIGQUERY:
query += " and event_time > CAST(@data_last_updated as DATETIME) "
final_query.append(query)
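            # Combine the per-source queries into a single UNION ALL statement so one
            # query fetches events for every configured resource type.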
final_query = " UNION ALL ".join(final_query)
# convert int array to string array
data_warehouse_course_ids_short = [
db_util.incremented_id_to_canvas_id(id) for id in data_warehouse_course_ids]
course_ids_short = list(map(str, data_warehouse_course_ids_short))
logger.debug(final_query)
logger.debug(data_warehouse_course_ids)
if settings.LRS_IS_BIGQUERY:
query_params = [
bigquery.ArrayQueryParameter('course_ids', 'STRING', data_warehouse_course_ids),
bigquery.ArrayQueryParameter('course_ids_short', 'STRING', course_ids_short),
bigquery.ScalarQueryParameter('canvas_data_id_increment', 'INT64',
settings.CANVAS_DATA_ID_INCREMENT)
]
if (data_last_updated is not None):
# insert the start time parameter for query
query_params.append(bigquery.ScalarQueryParameter(
'data_last_updated', 'TIMESTAMP', data_last_updated))
query_params.append(bigquery.ArrayQueryParameter(
'canvas_event_urls', 'STRING', settings.CANVAS_EVENT_URLS))
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
# Location must match that of the dataset(s) referenced in the query.
bq_job = bigquery_client.query(final_query, location='US', job_config=job_config)
# This is the call that could result in an exception
resource_access_df: pd.DataFrame = bq_job.result().to_dataframe()
total_bytes_billed += bq_job.total_bytes_billed
logger.debug(total_bytes_billed)
else:
query_params = {
'course_ids': data_warehouse_course_ids,
'course_ids_short': course_ids_short,
'canvas_data_id_increment': settings.CANVAS_DATA_ID_INCREMENT,
}
if (data_last_updated is not None):
query_params['data_last_updated'] = data_last_updated
resource_access_df = pd.read_sql(final_query, conns['LRS'], params=query_params)
resource_access_row_count = len(resource_access_df)
if resource_access_row_count == 0:
logger.info('No resource access data found. Continuing...')
continue
logger.debug('resource_access_df row count: '
f'({resource_access_row_count})')
logger.debug(f'resource_access_df:\n'
f'{resource_access_df}\n'
f'{resource_access_df.dtypes}')
if 'user_login_name' not in resource_access_df.columns:
logger.warning('Update queries in configuration file '
'to include column "user_login_name".')
else:
# process data which contains user login names, but not IDs
if -1 in resource_access_df['user_id'].values:
login_names = ','.join(
map(repr, resource_access_df['user_login_name']
.drop_duplicates().dropna().values))
logger.debug(f'login_names:\n{login_names}')
# get user ID as string because pd.merge will convert
# int64 to scientific notation; converting SN to int64
# causes Obi-Wan problems (off by one)
user_id_df = pd.read_sql(
'select sis_name as user_login_name,'
'cast(user_id as char) as user_id_str '
f'from user where sis_name in ({login_names})',
engine)
logger.debug(f'user_id_df:\n'
f'{user_id_df}\n'
f'{user_id_df.dtypes}')
# combine user login and ID data
resource_access_df = pd.merge(
resource_access_df, user_id_df,
on='user_login_name', how='outer')
# replace real user_id values for missing ones (-1)
resource_access_df.loc[
resource_access_df['user_id'] == -1,
'user_id'] = resource_access_df['user_id_str']
# drops must be in this order; especially dropna() LAST
resource_access_df = resource_access_df \
.drop(columns=['user_id_str', 'user_login_name']) \
.dropna()
logger.debug(f'resource_access_df:\n'
f'{resource_access_df}\n'
f'{resource_access_df.dtypes}')
else:
resource_access_df = resource_access_df.drop(
columns='user_login_name')
resource_access_df = resource_access_df.dropna()
# drop duplicates
resource_access_df = resource_access_df.drop_duplicates(
['resource_id', 'user_id', 'access_time'], keep='first')
logger.debug('resource_access_df row count (de-duped): '
f'({len(resource_access_df)})')
logger.debug(f'resource_access_df:\n'
f'{resource_access_df}\n'
f'{resource_access_df.dtypes}')
# Make resource data from resource_access data
resource_df = resource_access_df.filter(["resource_id", "resource_type", "name"])
resource_df = resource_df.drop_duplicates(["resource_id"])
# pangres.upsert() requires DataFrame to have index
resource_df = resource_df.set_index('resource_id')
logger.debug(f'resource_df:\n'
f'{resource_df}\n'
f'{resource_df.dtypes}')
resource_access_df = resource_access_df.drop(
columns=['resource_type', 'name'])
ra_len_before = len(resource_access_df)
# Drop rows with NA in any column
resource_access_df = resource_access_df.dropna()
logger.info(f'{ra_len_before - len(resource_access_df)} / '
f'{ra_len_before} resource_access_df rows with '
'NA values dropped')
logger.debug(f'resource_access_df:\n'
f'{resource_access_df}\n'
f'{resource_access_df.dtypes}')
# First, update resource table
try:
dtype = {'resource_id': types.VARCHAR(255)}
pangres.upsert(engine=engine, df=resource_df,
table_name='resource', if_row_exists='update',
create_schema=False, add_new_columns=False,
dtype=dtype)
except Exception as e:
logger.exception('Error running upsert on table resource')
raise
# Next, update resource_access table
try:
resource_access_df.to_sql(con=engine, name='resource_access',
if_exists='append', index=False)
except Exception as e:
logger.exception('Error running to_sql on table '
'resource_access')
raise
return_string += \
f'{len(resource_access_df)} rows for courses [' + ', '.join(
map(repr, data_warehouse_course_ids)) + ']\n'
logger.info(return_string)
if settings.LRS_IS_BIGQUERY:
total_tbytes_billed = total_bytes_billed / 1024 / 1024 / 1024 / 1024
# $5 per TB as of Feb 2019 https://cloud.google.com/bigquery/pricing
total_tbytes_price = round(5 * total_tbytes_billed, 2)
status += (f'TBytes billed for BQ: {total_tbytes_billed} = '
f'${total_tbytes_price}\n')
return status
def update_groups(self):
# cron status
status = ""
# delete all records in assignment_group table
status += delete_all_records_in_table("assignment_groups")
# update groups
        # Load the assignment groups information along with the weight/points associated with an assignment
logger.debug("update_assignment_groups(): ")
# loop through multiple course ids
for course_id in self.valid_locked_course_ids:
assignment_groups_sql = queries['assignment_groups'].format(course_id=course_id)
logger.debug(assignment_groups_sql)
status += util_function(course_id, assignment_groups_sql,
'assignment_groups')
return status
def update_assignment(self):
# Load the assignment info w.r.t to a course such as due_date, points etc
status = ""
logger.info("update_assignment(): ")
# delete all records in assignment table
status += delete_all_records_in_table("assignment")
# loop through multiple course ids
for course_id in self.valid_locked_course_ids:
assignment_sql = queries['assignment'].format(course_id=course_id)
logger.debug(assignment_sql)
status += util_function(course_id, assignment_sql,
'assignment')
return status
def submission(self):
# student submission information for assignments
# cron status
status = ""
logger.info("update_submission(): ")
# delete all records in resource_access table
status += delete_all_records_in_table("submission")
# loop through multiple course ids
# filter out not released grades (submission_dim.posted_at date is not null) and partial grades (submission_dim.workflow_state != 'graded')
for course_id in self.valid_locked_course_ids:
submission_sql = queries['submission'].format(
course_id=course_id, canvas_data_id_increment=settings.CANVAS_DATA_ID_INCREMENT)
logger.debug(submission_sql)
status += util_function(course_id, submission_sql,
'submission')
return status
def weight_consideration(self):
        # Load the assignment weight consideration information within a course; some assignments don't have weight consideration.
        # The query returns a boolean indicating whether weight is considered in the calculation or not.
status = ""
logger.info("weight_consideration()")
# delete all records in assignment_weight_consideration table
status += delete_all_records_in_table("assignment_weight_consideration")
# loop through multiple course ids
for course_id in self.valid_locked_course_ids:
is_weight_considered_sql = queries['assignment_weight'].format(course_id=course_id)
logger.debug(is_weight_considered_sql)
status += util_function(course_id, is_weight_considered_sql,
'assignment_weight_consideration', 'weight')
logger.debug(status + "\n\n")
return status
def update_term(self) -> str:
"""
Searches warehouse data for new terms and adds them while leaving existing terms as they are.
"""
status: str = ''
logger.info('update_term()')
term_sql: str = queries['term']
logger.debug(term_sql)
warehouse_term_df: pd.DataFrame = pd.read_sql(term_sql, conns['DATA_WAREHOUSE'])
existing_terms_ids: List[int] = [term.id for term in list(AcademicTerms.objects.all())]
new_term_ids: List[int] = [int(id) for id in warehouse_term_df['id'].to_list() if id not in existing_terms_ids]
if not new_term_ids:
logger.info('No new terms were found to add to the academic_terms table.')
else:
new_term_df: pd.DataFrame = warehouse_term_df.loc[warehouse_term_df['id'].isin(new_term_ids)]
try:
new_term_df.to_sql(con=engine, name='academic_terms', if_exists='append', index=False)
term_message: str = f'Added {len(new_term_df)} new records to academic_terms table: {new_term_ids}'
logger.info(term_message)
status += term_message + '\n'
except Exception as e:
logger.error(f'Error running to_sql on term table: {e}')
raise
return status
def update_course(self, warehouse_courses_data: pd.DataFrame) -> str:
"""
Updates course records with data returned from verify_course_ids, only making changes when necessary.
"""
status: str = ''
logger.debug('update_course()')
logger.debug(warehouse_courses_data.to_json(orient='records'))
courses: QuerySet = Course.objects.filter(id__in=self.valid_locked_course_ids)
courses_string: str = ', '.join([str(x) for x in self.valid_locked_course_ids])
status += f'{str(len(courses))} course(s): {courses_string}\n'
for course in courses:
updated_fields: List[str] = []
warehouse_course_dict: Dict[str, Any] = warehouse_courses_data.loc[warehouse_courses_data['id']
== course.id].iloc[0].to_dict()
warehouse_course_name: str = warehouse_course_dict['name']
if course.name != warehouse_course_name:
course.name = warehouse_course_name
logger.info(f'Name for {course.id} has been updated.')
updated_fields.append('name')
warehouse_term_id: int = int(warehouse_course_dict['enrollment_term_id'])
if (course.term is None) or (course.term.id != warehouse_term_id):
course.term = AcademicTerms.objects.get(id=warehouse_term_id)
logger.info(f'Term for {course.id} has been updated.')
updated_fields.append('term')
warehouse_date_start: Union[datetime, None] = (
warehouse_course_dict['start_at'].to_pydatetime() if pd.notna(
warehouse_course_dict['start_at']) else None
)
updated_fields += soft_update_datetime_field(course, 'date_start', warehouse_date_start)
warehouse_date_end: Union[datetime, None] = (
warehouse_course_dict['conclude_at'].to_pydatetime() if pd.notna(
warehouse_course_dict['conclude_at']) else None
)
updated_fields += soft_update_datetime_field(course, 'date_end', warehouse_date_end)
if updated_fields:
course.save()
status += f'Course {course.id}: updated {", ".join(updated_fields)}\n'
return status
def do(self):
logger.info("** MyLA cron tab")
status = ""
run_start = datetime.now(pytz.UTC)
status += f"Start cron: {str(run_start)} UTC\n"
course_verification = self.verify_course_ids()
invalid_course_id_list = course_verification.invalid_course_ids
if len(invalid_course_id_list) > 0:
# error out and stop cron job
status += f"ERROR: Those course ids are invalid: {invalid_course_id_list}\n"
status += "End cron: " + str(datetime.now()) + "\n"
logger.info("************ total status=" + status + "/n/n")
return (status,)
# Lock in valid course IDs that data will be pulled for.
self.valid_locked_course_ids = course_verification.course_data['id'].to_list()
logger.info(f'Valid locked course IDs: {self.valid_locked_course_ids}')
# continue cron tasks
logger.info("** term")
status += self.update_term()
if len(self.valid_locked_course_ids) == 0:
logger.info("Skipping course-related table updates...")
status += "Skipped course-related table updates.\n"
else:
# Update the date unless there is an exception
exception_in_run = False
logger.info("** course")
status += self.update_course(course_verification.course_data)
logger.info("** user")
status += self.update_user()
logger.info("** assignment")
status += self.update_groups()
status += self.update_assignment()
status += self.submission()
status += self.weight_consideration()
logger.info("** resources")
if 'show_resources_accessed' not in settings.VIEWS_DISABLED:
try:
status += self.update_resource_access()
status += self.update_canvas_resource()
except Exception as e:
logger.error(f"Exception running BigQuery update: {str(e)}")
status += str(e)
exception_in_run = True
if settings.DATABASES.get('DATA_WAREHOUSE', {}).get('IS_UNIZIN'):
logger.info("** informational")
status += self.update_unizin_metadata()
courses_added_during_cron: List[int] = list(
set(Course.objects.get_supported_courses()) - set(self.valid_locked_course_ids))
if courses_added_during_cron:
logger.warning(
f'During the run, users added {len(courses_added_during_cron)} course(s): {courses_added_during_cron}')
            logger.warning('No data was pulled for these courses.')
# Set all of the courses to have been updated now (this is the same set update_course runs on)
if not exception_in_run:
logger.info(f"Updating all valid courses from when this run was started at {run_start}")
Course.objects.filter(id__in=self.valid_locked_course_ids).update(data_last_updated=run_start)
else:
            logger.warning("data_last_updated not updated because of an Exception during this run")
status += "End cron: " + str(datetime.now()) + "\n"
logger.info("************ total status=" + status + "\n")
return status
``` |
{
"source": "jonespm/student_explorer",
"score": 2
} |
#### File: student_explorer/seumich/tests.py
```python
import os, re
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.test.client import Client
from django.urls import reverse
from seumich.models import (UsernameField,
Advisor,
Date,
Mentor,
Status,
Student,
Term,
SourceSystem,
AdvisorRole,
Assignment,
ClassSite,
Cohort,
EventType,
ClassSiteTerm,
StudentAdvisorRole,
StudentCohortMentor,
ClassSiteScore,
StudentClassSiteScore,
StudentClassSiteAssignment,
StudentClassSiteStatus,
WeeklyClassSiteScore,
WeeklyStudentClassSiteEvent,
WeeklyStudentClassSiteStatus,
WeeklyStudentClassSiteScore)
from seumich.mixins import SeumichDataMixin
class SeumichTest(TestCase):
settings.DEBUG = True
student = Student.objects.get(id=1)
mentor = Mentor.objects.get(id=2)
class_site = ClassSite.objects.get(id=2)
week_end_date = Date.objects.get(id=2088)
cohort = Cohort.objects.get(id=1)
assignment = Assignment.objects.get(id=13)
_due_date = Date.objects.get(id=2098)
status = Status.objects.get(id=1)
term = Term.objects.get(id=1)
fixtures = ['dev_users.json']
databases = ['default', 'seumich']
def setUp(self):
self.client = Client()
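        # Load the seumich test data by piping the SQL fixture straight into the
        # test database.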
os.system((
f"mysql -h {settings.DATABASES['default']['HOST']} "
f"-u {settings.DATABASES['default']['USER']} "
f"-p{settings.DATABASES['default']['PASSWORD']} "
f"test_student_explorer < seumich/fixtures/dev_data_drop_create_and_insert.sql"
))
def test_from_db_value(self):
obj = UsernameField()
self.assertEqual('grace', obj.from_db_value('Grace', None, None, None))
self.assertEqual('james', obj.from_db_value('JAMES', None, None, None))
def test_get_db_prep_value(self):
obj = UsernameField()
self.assertEqual('GRACE', obj.get_db_prep_value('Grace', None, None))
self.assertEqual('JAMES', obj.get_db_prep_value('james', None, None))
def test_advisor_string_representation(self):
"""
Testing whether the advisor's fetched description
matches the expected description
"""
advisor = Advisor.objects.get(id=1)
self.assertEqual(str(advisor), 'zander')
def test_date_string_representation(self):
"""
Testing whether the date's fetched description
matches the expected description
"""
date = Date.objects.get(id=2016)
self.assertEqual(str(date), '2015-07-09')
def test_cohorts(self):
"""
Testing the 'cohorts' property of Model 'Mentor'
"""
self.assertQuerysetEqual(
list(self.mentor.cohorts),
['<Cohort: Special Probation F14>',
'<Cohort: Special Probation W15>',
'<Cohort: Special Probation F15>']
)
def test_mentor_string_representation(self):
"""
Testing whether the mentor's fetched username
matches the expected username
"""
self.assertEqual(str(self.mentor), 'burl')
def test_status_string_representation(self):
"""
Testing whether the status's fetched description
matches the expected description
"""
status = Status.objects.get(id=3)
self.assertEqual(str(status), 'Red')
def test_student_email(self):
"""
Testing whether the student's fetched email address
matches the expected email address
"""
student = Student.objects.get(id=21)
self.assertEqual(student.email_address, '<EMAIL>')
def test_student_string_representation(self):
"""
Testing whether the student's fetched description
matches the expected description
"""
self.assertEqual(str(self.student), 'grace')
def test_begin_date(self):
"""
Testing whether the term's fetched begin date
matches the expected begin date
"""
self.assertEqual(self.term.begin_date,
Date.objects.get(date="2015-09-08"))
def test_end_date(self):
"""
Testing whether the term's fetched end date
matches the expected end date
"""
self.assertEqual(self.term.end_date,
Date.objects.get(date="2015-12-14"))
def test_week_end_dates(self):
"""
Testing whether the term's fetched week end dates
match the expected week end dates
"""
dates_list = [Date.objects.get(date='2015-09-12'),
Date.objects.get(date='2015-09-19'),
Date.objects.get(date='2015-09-26'),
Date.objects.get(date='2015-10-03'),
Date.objects.get(date='2015-10-10'),
Date.objects.get(date='2015-10-17'),
Date.objects.get(date='2015-10-24'),
Date.objects.get(date='2015-10-31'),
Date.objects.get(date='2015-11-07'),
Date.objects.get(date='2015-11-14'),
Date.objects.get(date='2015-11-21'),
Date.objects.get(date='2015-11-28'),
Date.objects.get(date='2015-12-05'),
Date.objects.get(date='2015-12-12')]
self.assertEqual(self.term.week_end_dates(), dates_list)
def test_term_string_representation(self):
"""
Testing whether the term's fetched description
matches the expected description
"""
self.assertEqual(str(self.term), 'Fall 2015')
def test_sourcesystem_string_representation(self):
"""
Testing whether the source system's fetched description
matches the expected description
"""
source_system = SourceSystem.objects.get(code='CNVS')
self.assertEqual(str(source_system), 'Canvas')
def test_advisor_role_string_representation(self):
"""
Testing whether the advisor role's fetched description
matches the expected description
"""
advisor_role = AdvisorRole.objects.get(id=1)
self.assertEqual(str(advisor_role), 'Department Advisor')
def test_assignment_string_representation(self):
"""
Testing whether the assignment's fetched description
matches the expected description
"""
assignment = Assignment.objects.get(id=15)
self.assertEqual(str(assignment), 'Quiz extra 2')
def test_class_site_string_representation(self):
"""
Testing whether the class site's fetched description
matches the expected description
"""
class_site = ClassSite.objects.get(id=1)
self.assertEqual(str(class_site), 'Math 101')
def test_cohort_string_representation(self):
"""
Testing whether the cohort's fetched description
matches the expected description
"""
cohort = Cohort.objects.get(id=3)
self.assertEqual(str(cohort), 'Special Probation F15')
def test_event_type_string_representation(self):
"""
Testing whether the event type's fetched description
matches the expected description
"""
event_type = EventType.objects.get(id=1)
self.assertEqual(str(event_type), 'session start')
def test_class_site_term_string_representation(self):
"""
Testing whether the class site term's fetched description
matches the expected description
"""
class_site_term = (ClassSiteTerm.objects.
filter(class_site=self.class_site, term=self.term))
self.assertEqual(
str(class_site_term[0]),
'Math 101 Lab was held in Fall 2015')
def test_student_advisor_role_string_representation(self):
"""
Testing whether the student advisor role's fetched description
matches the expected description
"""
student = Student.objects.get(id=4)
advisor = Advisor.objects.get(id=1)
role = AdvisorRole.objects.get(id=3)
student_advisor_role = (StudentAdvisorRole.objects.
filter(student=student, advisor=advisor,
role=role))
self.assertEqual(str(student_advisor_role[0]),
'zander advises may as Honors Advisor')
def test_student_cohort_mentor_string_representation(self):
"""
Testing whether the student cohort mentor's fetched description
matches the expected description
"""
student_cohort_mentor = (StudentCohortMentor.objects.
filter(student=self.student,
mentor=self.mentor,
cohort=self.cohort))
self.assertEqual(str(student_cohort_mentor[0]),
'grace is in the Special Probation F14 cohort')
def test_class_site_score_string_representation(self):
"""
Testing whether the class site score's fetched description
matches the expected description
"""
class_site_score = (ClassSiteScore.objects.
filter(class_site=self.class_site))
self.assertEqual(str(class_site_score[0]),
'Math 101 Lab has an average score of 81.9')
def test_student_class_site_score_string_representation(self):
"""
Testing whether the student class site score's fetched description
matches the expected description
"""
student_class_site_score = (StudentClassSiteScore.objects.
filter(student=self.student,
class_site=self.class_site))
self.assertEqual(str(student_class_site_score[0]),
'grace has an average score of 86.3 in Math 101 Lab')
def test_studentclasssiteassignment_string_representation(self):
"""
Testing whether the student class site assignment's fetched description
matches the expected description
"""
student_class_site_assignment = (StudentClassSiteAssignment.objects.
filter(student=self.student,
class_site=self.class_site,
assignment=self.assignment))
self.assertEqual(
str(student_class_site_assignment[0]),
'grace has assignment Quiz extra 1 in Math 101 Lab')
def test_studentclasssiteassignment_due_date(self):
"""
Testing whether the student class site assignment's due date
matches the expected due date
"""
student_class_site_assignment = (StudentClassSiteAssignment.objects.
filter(_due_date=self._due_date))
self.assertEqual(
student_class_site_assignment[0].due_date,
Date.objects.get(date="2015-09-29"))
student_class_site_assignment = (StudentClassSiteAssignment.objects.
filter(_due_date=None))
self.assertEqual(student_class_site_assignment[0].due_date, None)
def test_studentclasssiteassignment_percentage(self):
"""
Testing whether the student's percentage
matches the expected percentage
"""
student_class_site_assignment = (StudentClassSiteAssignment.objects.
filter(student=self.student,
class_site=self.class_site,
assignment=self.assignment))
self.assertEqual(student_class_site_assignment[0].percentage, 82.0)
def test_studentclasssiteassignment_class_percentage(self):
"""
Testing whether the class's percentage
matches the expected percentage
"""
student_class_site_assignment = (StudentClassSiteAssignment.objects.
filter(student=self.student,
class_site=self.class_site,
assignment=self.assignment))
self.assertEqual(
round(student_class_site_assignment[0].class_percentage, 2), 84.76)
def test_studentclasssiteassignment_formatted_grader_comment_with_newlines(self):
"""
Testing whether the formatted_grader_comment method
properly replaces newline literals (with or without \r) with
HTML break tags
"""
student_class_site_assignment = StudentClassSiteAssignment.objects.get(
student=Student.objects.get(id=1),
assignment=Assignment.objects.get(id=15),
class_site=ClassSite.objects.get(id=2)
)
formatted_grader_comment = student_class_site_assignment.formatted_grader_comment
self.assertEqual(formatted_grader_comment.count('<br><br />'), 4)
self.assertTrue(r'\r' not in formatted_grader_comment)
def test_studentclasssiteassignment_formatted_grader_comment_with_curly_braces(self):
"""
Testing whether the formatted_grader_comment method
properly replaces '{' and '}' with '{{' and '}}' in the
input string before passing it to format_html.
"""
# Set up
self.client.login(username='burl', password='<PASSWORD>')
student_class_site_assignment = StudentClassSiteAssignment.objects.get(
student=Student.objects.get(id=3),
assignment=Assignment.objects.get(id=10),
class_site=ClassSite.objects.get(id=2)
)
# Test that '{something}' is present in the method's output.
curly_braces_exp_pattern = re.compile(r"{[^{}]+}")
match = curly_braces_exp_pattern.search(student_class_site_assignment.formatted_grader_comment)
self.assertTrue(match)
# Test that the page with the comment properly loads.
response = self.client.get('/students/shannon/class_sites/2/')
self.assertEqual(response.status_code, 200)
def test_studentclasssiteassignment_relative_to_average(self):
"""
Testing whether the student's relative to average
matches the expected relative to average
"""
student_class_site_assignment = (StudentClassSiteAssignment.objects.
filter(student=self.student,
class_site=self.class_site,
assignment=self.assignment))
self.assertEqual(
student_class_site_assignment[0].relative_to_average, 'near')
def test_studentclasssiteassignment__percentage(self):
obj = StudentClassSiteAssignment()
self.assertEqual(75.0, obj._percentage(3.0, 4.0))
self.assertEqual(0.0, obj._percentage(0, 4.0))
self.assertEqual(None, obj._percentage(10.0, 0))
self.assertEqual(75.0, obj._percentage(3, 4))
self.assertEqual(None, obj._percentage(10, None))
self.assertEqual(None, obj._percentage(None, 10))
self.assertEqual(None, obj._percentage(None, None))
def test_studentclasssitestatus_string_representation(self):
"""
Testing whether the student class site status's fetched
description matches the expected description
"""
student_class_site_status = (StudentClassSiteStatus.
objects.filter(student=self.student,
class_site=self.class_site,
status=self.status))
self.assertEqual(
str(student_class_site_status[0]),
'grace has status Green in Math 101 Lab')
def test_weeklyclasssitescore_string_representation(self):
"""
Testing whether the weekly class site score's fetched description
matches the expected description
"""
weekly_class_site_score = (WeeklyClassSiteScore.objects.
filter(class_site=self.class_site,
week_end_date=self.week_end_date))
self.assertEqual(
str(weekly_class_site_score[0]),
'Average score is 81 in Math 101 Lab on 2015-09-19')
def test_WeeklyClassSiteScore(self):
c = ClassSite.objects.get(pk=1)
w = WeeklyClassSiteScore.objects.filter(
class_site=c)
self.assertEqual('Math 101', str(w[0].class_site))
self.assertEqual('0', str(w[0].score))
self.assertEqual('2015-09-12', str(w[0].week_end_date))
def test_WeeklyClassSiteScore_date_range(self):
c = ClassSite.objects.get(pk=1)
t = Term.objects.get(id=1)
w = WeeklyClassSiteScore.objects.filter(
class_site=c,
week_end_date__gte=t.begin_date,
week_end_date__lte=t.end_date)
self.assertEqual(8, len(w))
def test_weeklystudentclasssiteevent_string_representation(self):
"""
Testing whether the weekly student class site event's fetched
description matches the expected description
"""
weekly_student_class_site_event = (WeeklyStudentClassSiteEvent.
objects.filter(student=self.
student,
class_site=self.
class_site,
week_end_date=self.
week_end_date))
self.assertEqual(
str(weekly_student_class_site_event[0]),
'grace in Math 101 Lab on 2015-09-19 had 3 events (0.68 %ile)')
def test_WeeklyStudentClassSiteEvent(self):
events = WeeklyStudentClassSiteEvent.objects.filter(
student=self.student, class_site=self.class_site)
self.assertEqual(8, len(events))
self.assertEqual('Math 101 Lab', str(events[1].class_site))
self.assertEqual('grace', str(events[1].student))
self.assertEqual(3, int(events[1].event_count))
self.assertEqual('2015-09-19', str(events[1].week_end_date))
def test_weeklystudentclasssitescore_string_representation(self):
"""
Testing whether the weekly student class site score's fetched
description matches the expected description
"""
weekly_student_class_site_score = (WeeklyStudentClassSiteScore.
objects.filter(student=self.
student,
class_site=self.
class_site,
week_end_date=self.
week_end_date))
self.assertEqual(
str(weekly_student_class_site_score[0]),
'grace has score 62 in Math 101 Lab on 2015-09-19')
def test_WeeklyStudentClassSiteScore(self):
w = WeeklyStudentClassSiteScore.objects.filter(
student=self.student, class_site=self.class_site)
self.assertEqual('Math 101 Lab', str(w[0].class_site))
self.assertEqual('grace', str(w[0].student))
self.assertEqual('62', str(w[0].score))
self.assertEqual('2015-09-12', str(w[0].week_end_date))
def test_weeklystudentclasssitestatus_string_representation(self):
"""
Testing whether the weekly student class site status's fetched
description matches the expected description
"""
status = Status.objects.get(id=2)
weekly_student_class_site_status = (WeeklyStudentClassSiteStatus.
objects.filter(student=self.
student,
class_site=self.
class_site,
week_end_date=self.
week_end_date,
status=status))
self.assertEqual(
str(weekly_student_class_site_status[0]),
'grace has status Yellow in Math 101 Lab on 2015-09-19')
def test_WeeklyStudentClassSiteStatus(self):
w = WeeklyStudentClassSiteStatus.objects.filter(
student=self.student, class_site=self.class_site)
self.assertEqual('Math 101 Lab', str(w[0].class_site))
self.assertEqual('grace', str(w[0].student))
self.assertEqual('Not Applicable', str(w[0].status))
self.assertEqual('2015-09-12', str(w[0].week_end_date))
def test_index_view(self):
self.client.login(username='zander', password='<PASSWORD>')
response = self.client.get(reverse('seumich:index'))
self.assertRedirects(response, reverse('seumich:advisor',
kwargs={'advisor': 'zander'}))
def test_advisor_list_view_redirect(self):
url = reverse('seumich:advisors_list')
response = self.client.get(url)
url = "/accounts/login/?next=%s" % url
self.assertRedirects(response, url)
def test_advisor_list_view(self):
self.client.login(username='burl', password='<PASSWORD>')
response = self.client.get(reverse('seumich:advisors_list'))
self.assertQuerysetEqual(response.context['advisors'],
[
'<Mentor: zander>',
'<Mentor: burl>',
'<Mentor: lavera>',
'<Mentor: smrech>'], ordered=False)
def test_student_list_view_redirect(self):
url = reverse('seumich:students_list')
response = self.client.get(url)
url = "/accounts/login/?next=%s" % url
self.assertRedirects(response, url)
def test_student_list_view(self):
self.client.login(username='burl', password='<PASSWORD>')
url = "%s?search=grace" % reverse('seumich:students_list')
response = self.client.get(url)
self.assertContains(response, 'grace')
url = "%s?search=foxx" % reverse('seumich:students_list')
response = self.client.get(url)
self.assertContains(response, 'desmond')
url = "%s?search=10000023" % reverse('seumich:students_list')
response = self.client.get(url)
self.assertContains(response, 'nocourses')
url = "%s?univ_id=10000001" % reverse('seumich:students_list')
response = self.client.get(url)
self.assertRedirects(response, '/students/grace/')
def test_advisor_view_redirect(self):
url = reverse('seumich:advisor', kwargs={'advisor': 'burl'})
response = self.client.get(url)
url = "/accounts/login/?next=%s" % url
self.assertRedirects(response, url)
def test_advisor_view(self):
url = reverse('seumich:advisor', kwargs={'advisor': 'lavera'})
self.client.login(username='burl', password='<PASSWORD>')
response = self.client.get(url)
self.assertContains(response, 'gianna')
self.assertContains(response, 'deirdre')
self.assertContains(response, 'gabriela')
self.assertContains(response, 'james')
self.assertNotContains(response, 'grace')
def test_advisor_view_user_without_mentor(self):
# Set up
user_ebenezer = get_user_model().objects.create(
username="ebenezer", first_name="Ebenezer", last_name="Scrooge"
)
user_ebenezer.set_password("<PASSWORD>")
user_ebenezer.save()
# Test
self.client.login(username="ebenezer", password="<PASSWORD>")
url = reverse("seumich:advisor", kwargs={"advisor": "ebenezer"})
response = self.client.get(url)
full_name = response.context["studentListHeader"]
self.assertEqual(full_name, "<NAME>")
self.assertEqual(len(response.context["students"]), 0)
# Clean up
user_ebenezer.delete()
def test_advisor_view_mentor_without_user(self):
# Set up
expected_students = [
{'username': 'james', 'first_name': 'James', 'last_name': 'Bond'},
{'username': 'gianna', 'first_name': 'Gianna', 'last_name': 'Fekete'},
{'username': 'deirdre', 'first_name': 'Deirdre', 'last_name': 'Haupt'},
{'username': 'gabriela', 'first_name': 'Gabriela', 'last_name': 'Rea'}
]
user_lavera = get_user_model().objects.get(username="lavera")
user_lavera.delete()
# Test
self.client.login(username='burl', password='<PASSWORD>')
url = reverse('seumich:advisor', kwargs={'advisor': 'lavera'})
response = self.client.get(url)
students = list(response.context['students'].values('username', 'first_name', 'last_name'))
self.assertCountEqual(expected_students, students)
# Clean up
user_lavera.save()
def test_student_view_redirect(self):
url = reverse('seumich:student', kwargs={'student': 'james'})
response = self.client.get(url)
url = "/accounts/login/?next=%s" % url
self.assertRedirects(response, url)
def test_student_view(self):
url = reverse('seumich:student', kwargs={'student': 'grace'})
self.client.login(username='burl', password='<PASSWORD>')
response = self.client.get(url)
self.assertQuerysetEqual(response.context['advisors'],
[('<StudentCohortMentor: grace is in the '
'Special Probation F14 cohort>')])
self.assertQuerysetEqual(
list(response.context['student'].class_sites.all()), [
'<ClassSite: Math 101>', '<ClassSite: Math 101 Lab>'])
self.assertContains(response, '83.8')
self.assertContains(response, '86.3')
self.assertContains(response, '88.1')
self.assertContains(response, '81.9')
self.assertNotContains(response, '87.1')
url = reverse('seumich:student', kwargs={'student': 'james'})
response = self.client.get(url)
self.assertContains(response, '150.0')
self.assertContains(response, '100.0')
self.assertContains(response, '95.0')
self.assertContains(response, '80.0')
self.assertContains(response, '81.9')
self.assertContains(response, 'N/A')
def test_student_class_site_view_redirect(self):
url = reverse('seumich:student_class',
kwargs={'student': 'grace', 'classcode': 1})
response = self.client.get(url)
url = "/accounts/login/?next=%s" % url
self.assertRedirects(response, url)
def test_student_class_site_view(self):
url = reverse('seumich:student_class',
kwargs={'student': 'grace', 'classcode': 1})
self.client.login(username='burl', password='<PASSWORD>')
response = self.client.get(url)
self.assertQuerysetEqual(response.context['advisors'],
[('<StudentCohortMentor: grace is in the '
'Special Probation F14 cohort>')])
self.assertCountEqual(response.context['scoreData'],
[{'key': 'Student', 'values':
[[1, 0], [2, 0], [3, 0], [4, 0], [5, 65],
[6, 68], [7, 68], [8, 68], [9], [10],
[11], [12], [13], [14]], 'color': '#255c91'},
{'key': 'Class', 'values': [[1, 0],
[2, 0], [3, 0], [4, 0], [5, 58],
[6, 58], [7, 58], [8, 57], [9], [10],
[11], [12], [13], [14]], 'color': '#F0D654'}]
)
self.assertQuerysetEqual(response.context['assignments'],
[('<StudentClassSiteAssignment: grace has '
'assignment Assessment in Math 101>'),
('<StudentClassSiteAssignment: grace has '
'assignment Exam 1 in Math 101>')])
url = reverse('seumich:student_class',
kwargs={'student': 'james', 'classcode': 3})
self.client.login(username='burl', password='<PASSWORD>')
response = self.client.get(url)
self.assertCountEqual(response.context['scoreData'],
[{'key': 'Student', 'values': [],
'color': '#255c91'},
{'key': 'Class', 'values': [],
'color': '#F0D654'}]
)
def test_seumich_data_mixin(self):
seumich_data_mixin = SeumichDataMixin()
collection = StudentAdvisorRole.objects.filter(
student=Student.objects.get(
id=2),
advisor=Advisor.objects.get(id=1),
role=AdvisorRole.objects.get(id=1))
self.assertEqual(
seumich_data_mixin.aggrate_relationships(
collection, 'advisor', 'role'
), [{'roles': [AdvisorRole.objects.get(id=1)],
'advisor': Advisor.objects.get(id=1)}])
def test_cohort_view(self):
url = reverse('seumich:cohort',
kwargs={'code': 'SPPRO-W15'})
self.client.login(username='burl', password='<PASSWORD>')
response = self.client.get(url)
self.assertContains(response, 'janell')
self.assertContains(response, 'gabriela')
self.assertContains(response, 'wendi')
self.assertContains(response, 'james')
self.assertContains(response, 'mike')
self.assertContains(response, 'geneva')
self.assertContains(response, 'theo')
self.assertContains(response, 'caroyln')
self.assertNotContains(response, 'grace')
self.assertNotContains(response, 'jeana')
def test_class_site_view(self):
url = reverse('seumich:class_site',
kwargs={'class_site_id': 6})
self.client.login(username='burl', password='<PASSWORD>')
response = self.client.get(url)
self.assertContains(response, 'theo')
self.assertContains(response, 'jeana')
self.assertContains(response, 'deeanna')
self.assertContains(response, 'james')
self.assertContains(response, 'mike')
self.assertNotContains(response, 'grace')
self.assertNotContains(response, 'wendi')
def test_logout(self):
self.client.login(username='burl', password='<PASSWORD>')
response = self.client.get(reverse('auth_logout'))
self.assertEqual(response.status_code, 302)
def test_about(self):
response = self.client.get('/about')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'student_explorer/about.html')
def test_about_advisor_without_student(self):
"""
        Verify the rendered content for the new section is correct;
        the correct path {% url 'about' %} evaluates to "/about"
"""
self.client.login(username='zander', password='<PASSWORD>')
url = reverse('seumich:advisor', kwargs={'advisor': 'zander'})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, '\"/about\"')
self.assertNotContains(response, '\"/about/\"')
def test_feedback(self):
self.client.login(username='burl', password='<PASSWORD>')
response = self.client.get(reverse('feedback:feedback'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'feedback/feedback.html')
message = {
'feedback_message': 'Sample Message',
}
response = self.client.post(
reverse('feedback:feedback'),
message,
follow=True
)
self.assertContains(response,
"Thank you for submitting your feedback!")
``` |
{
"source": "JonesRobM/SAPPHIRE",
"score": 2
} |
#### File: Sapphire/CNA/FramePattern.py
```python
import sys
import numpy as np
import os
import time
from CNA import Utilities
sys.path.append("../../")
class patterns():
def __init__(self, frame = 0, System = None, Pattern_Input = None, MasterKey = None):
tick = time.time()
self.frame = frame
self.System = System
self.Pattern_Input = Pattern_Input
        if self.System is not None:
self.filename = System['base_dir']+System['movie_file_name']
self.npz_dir = System['base_dir'] + 'CNA_npz'
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write('\n')
f.write(' # '*50)
f.write('\nComputing CNA patterns for frame %s.\n'%frame)
f.close()
else:
            self.filename = 'movie.xyz'
            if not os.path.isfile(self.filename):
                # No System dictionary was supplied, so log the warning in the
                # working directory rather than in System['base_dir'].
                with open('CNA_Pattern_Info.txt', 'a') as f:
                    f.write('\nCould not find a suitable file to examine.\n')
self.script_path = os.path.dirname(os.path.realpath(__file__))+'/'
self.cwd = os.getcwd()
if MasterKey is None:
self.MasterKey = Utilities.CNA_Masterkey().Key()
else:
self.MasterKey = MasterKey
if self.Pattern_Input['APPEND_DICTIONARY'] is True:
os.chdir(self.script_path)
os.chdir('../')
self.Temp_Dict = np.load(
'CNA_npz/pattern_dictionary.npz',
allow_pickle=True)
self.Pattern_Dict = {}
for file in self.Temp_Dict.files:
self.Pattern_Dict[file] = self.Temp_Dict[file]
elif (self.Pattern_Input['NEW_DICTIONARY'] is True):
self.Pattern_Dict = {}
self.Pattern_Dict = self.pattern_dictionary_maker()
self.dictionary_saver()
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write("\nGenerating CNA Patterns took %.3f seconds.\n" %(time.time()-tick))
os.chdir(self.cwd)
def run(self):
Info = self.Pattern_Dict[self.System['movie_file_name'][:-4]+'-'+str(self.frame)]
Pats = np.zeros(len(Info))
for i, atom in enumerate(Info):
for j, val in enumerate(atom):
if val:
Pats[i] = j+1
return Pats
def pattern_CNA_Reader(self):
"""
Armand
Formatting from the npz files, gives the cna patterns found and prints them.
This isnt meant to be run normally, add it within the filename loop when you
want to inspect a FEW files. Doesn't use the masterkey, so prepare to have a
LOT of data printed at you at once.
"""
self.CNA_arrays=np.load(self.npz_dir+'/CNA_'+self.filename[:-4]+'-'+str(self.frame)+'.npz', allow_pickle=True)
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write('\nTypes of CNA bonds found with each atom:\n')
f.close()
for i in range(len(self.CNA_arrays['signature_cna_count'])):
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
                f.write('\n%d Atoms had CNA patterns (no: %d)\n'%(self.CNA_arrays['signature_cna_count'][i], i))
f.close()
non_zero_values=np.nonzero(self.CNA_arrays['signature_cna'][i])
for j in range(len(non_zero_values[0])):
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write('\n%s on %s of its bonds.\n' %(self.MasterKey[non_zero_values[0][j]],
self.CNA_arrays['signature_cna'][i][non_zero_values[0][j]]))
f.close()
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write('\nCoordination number: %s\n'%np.sum(self.CNA_arrays['signature_cna'][i]))
f.close()
def cna_pattern_master_key_maker(self):
"""
Armand
This function creates a new cna pattern masterkey by running through ALL
files within xyz_dir. This is meant for studying all cna patterns with the
variable SAVING_XYZ == True, not for Support Vector Clustering.
"""
self.CNA_arrays=np.load(self.npz_dir+'/CNA_'+self.filename[:-4]+'-'+str(self.frame)+'.npz', allow_pickle=True)
self.cna_patterns=[]
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write("\nCreating the CNA pattern master key...\n")
f.close()
#Looping over the atoms
for i in range(len(self.CNA_arrays['signature_cna_count'])):
#Creating the atomistic pattern list
self.atom_pattern=[]
#Finding the non zero CNA signatures, and looping over them
non_zero_values = np.nonzero(self.CNA_arrays['signature_cna'][i])
for j in range(len(non_zero_values[0])):
#Retrieving the CNA signature from the Master Key
cna_sign = self.MasterKey[non_zero_values[0][j]]
#Counting them
count = self.CNA_arrays['signature_cna'][i][non_zero_values[0][j]]
#Appending the tuples found within the list
self.atom_pattern.append((cna_sign,count))
#Checking if the atomic pattern is in the cna_pattern_masterkey
if self.atom_pattern not in self.cna_patterns:
self.cna_patterns.append(self.atom_pattern)
#Ordering the pattern masterkey by the Coordination Number
self.cna_pattern_array = np.asarray(self.cna_patterns)
self.cna_pattern_master_key=np.copy(self.cna_pattern_array)
a=[]
for i in range(len(self.cna_pattern_array)):
a.append(np.sum(self.cna_pattern_array[i],axis=0)[1])
l=np.asarray(a).argsort()[::-1]
for i in range(len(l)):
self.cna_pattern_master_key[i]=self.cna_pattern_array[l[i]]
#returning the pattern masterkey
return self.cna_pattern_master_key
def pattern_dictionary_maker(self):
"""
Armand
This is where the magic happens. The function first asks for a new MasterKey
or receives one from memory. The function goes over all files within xyz_dir,
and uses the npz files in npz_dir to find all of the atoms whose patterns
are in MasterKey.
"""
#READING THE MASTERKEY FROM PATTERN DICTIONARY NPZ
if (self.Pattern_Input['FROM_MEMORY'] is True):
self.Pattern_Dict['masterkey'] = np.load(
self.System['base_dir']
+ self.System['npz_dir']
+ 'pattern_dictionary.npz',
allow_pickle = True)['masterkey']
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
                f.write('\nKey CNA Patterns found in memory:\n')
f.close()
#CREATING A NEW MASTERKEY
elif (self.Pattern_Input['FROM_MEMORY'] is False):
#USING THE MASTERKEY FOR SUPPORT VECTOR CLUSTERING
if (self.Pattern_Input['BULK_MASTERKEY'] is True):
self.Pattern_Dict = Utilities.Bulk_Masterkey(self.Pattern_Dict).Key()
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write('\nUsing bulk pattern dictionary from Utilities.\n')
            #CREATING A NEW MASTERKEY FROM ALL PATTERNS FOUND WITHIN THE XYZ_DIR
            if(self.Pattern_Input['BULK_MASTERKEY'] is False):
                self.Pattern_Dict['masterkey'] = self.cna_pattern_master_key_maker()
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write('\nFound key CNA Patterns:\n')
#printing it
for key in self.Pattern_Dict['masterkey']:
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a+') as f:
f.write('\n')
f.write('\t'.join(str(item) for item in key))
f.write('\n')
f.close()
#Looping over all files again
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write('\nCalculating CNA Patterns of: '+self.System['movie_file_name']+'\n')
f.write('\n Reading CNA arrays from:\n' + self.npz_dir)
f.close()
self.CNA_arrays=np.load(self.npz_dir+'/CNA_'+self.System['movie_file_name'][:-4]+'-'+str(self.frame)+'.npz', allow_pickle=True)
#pattern_CNA_Reader(arrays_filename, MasterKey)
#Loading the CNA arrays
self.Pattern_Dict[self.System['movie_file_name'][:-4]+'-'+str(self.frame)] = np.zeros(
(len(self.CNA_arrays['particle_cnas']),
len(self.Pattern_Dict['masterkey'])),dtype=bool)
#Looping over the atoms
for i in range(len(self.CNA_arrays['particle_cnas'])):
#Creating the atomistic pattern list
self.atom_pattern=[]
#Finding the non zero CNA signatures, and looping over them
self.non_zero_values = np.nonzero(self.CNA_arrays['particle_cnas'][i])
for j in range(len(self.non_zero_values[0])):
#Retrieving the CNA signature from the Master Key
self.cna_sign = self.MasterKey[self.non_zero_values[0][j]]
#Counting them
self.count = self.CNA_arrays['particle_cnas'][i][self.non_zero_values[0][j]]
#Appending the tuples found within the list
self.atom_pattern.append((self.cna_sign, self.count))
#Checking if the atomic pattern is in the cna_pattern_masterkey
if self.atom_pattern in list(self.Pattern_Dict['masterkey']):
k = list(self.Pattern_Dict['masterkey']).index(self.atom_pattern)
self.Pattern_Dict[self.System['movie_file_name'][:-4]+'-'+str(self.frame)][i][k] = True
return self.Pattern_Dict
def dictionary_saver(self):
#Saving the created dictionary in an npz file
self.values_to_save={}
for key in self.Pattern_Dict:
#creating an argument dictionary to input in np.savez
self.values_to_save[key]=self.Pattern_Dict[key]
#saving the npz file
os.chdir(self.script_path)
os.chdir('../')
self.path_to_npz = self.System['base_dir'] + 'CNA_npz/pattern_dictionary.npz'
np.savez(self.path_to_npz, **self.values_to_save)
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write("\nPatterns saved in %s.\n"%('CNA_npz/pattern_dictionary.npz'))
f.close()
def movie_writer(self, Positions = None, Elements = None, Outfile = 'Pattern_Movie.xyz'):
if not os.path.isfile(self.System['base_dir']+Outfile):
with open(self.System['base_dir'] + Outfile, "w+") as moviefile:
moviefile.write(str(len(Elements)) + '\n')
moviefile.write("CNA Patterns \n")
XYZ = np.column_stack((Elements, Positions))
Pats = np.zeros(len(Positions))
for i, atom in enumerate(self.Pattern_Dict[self.System['movie_file_name'][:-4]+'-'+str(self.frame)]):
for j, val in enumerate(atom):
if val:
Pats[i] = j+1
Temp = np.column_stack((XYZ, Pats))
for items in Temp:
moviefile.write(' \t'.join(str(item) for item in items) + '\n')
moviefile.write(str(len(XYZ))+'\n')
moviefile.write('\n')
else:
with open(self.System['base_dir'] + Outfile, "w+") as moviefile:
XYZ = np.column_stack((Elements, Positions))
Pats = np.zeros(len(Positions))
for i, atom in enumerate(self.Pattern_Dict[self.System['movie_file_name'][:-4]+'-'+str(self.frame)]):
for j, val in enumerate(atom):
if val:
Pats[i] = j+1
Temp = np.column_stack((XYZ, Pats))
for items in Temp:
moviefile.write(' \t'.join(str(item) for item in items) + '\n')
moviefile.write(str(len(XYZ))+'\n')
moviefile.write('\n')
```
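A minimal driver for the `patterns` class above might look like the following sketch. The `System` paths, the `Pattern_Input` flags and the import path are assumptions inferred from the constructor rather than a documented Sapphire interface, and the per-frame `CNA_npz/CNA_<movie>-<frame>.npz` arrays are assumed to have been generated beforehand.
```python
# Hedged usage sketch for CNA/FramePattern.py -- names and paths are assumptions.
from CNA.FramePattern import patterns  # import path depends on how Sapphire is installed

System = {
    'base_dir': '/path/to/run/',        # must already contain a CNA_npz/ directory
    'movie_file_name': 'movie.xyz',
}
Pattern_Input = {
    'APPEND_DICTIONARY': False,  # do not extend an existing pattern dictionary
    'NEW_DICTIONARY': True,      # build and save a fresh one instead
    'FROM_MEMORY': False,        # derive the masterkey rather than loading it
    'BULK_MASTERKEY': True,      # use the bulk patterns shipped in Utilities
}

P = patterns(frame=0, System=System, Pattern_Input=Pattern_Input)
pattern_index = P.run()  # one entry per atom: 0 = unrecognised, i > 0 indexes the masterkey
```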
#### File: Sapphire/CNA/FrameSignature.py
```python
import numpy as np
import os
import networkx as nx
from Sapphire.CNA import Utilities
class CNA(object):
"""
RMJ 10/04/22
Class template structure on calculating common neighbour analysis (CNA)
signatures and patterns with the former of the form (r,s,t), and the latter
[n_i (r_i, s_i, t_i)] over all recognised local atomic signature indices i.
r is the number of nearest neighbours common to both atoms in the pair;
s is the number of bonds between shared neighbours;
    t is the length of the longest chain that can be made from the s bonds between those shared neighbours.
Parameters
----------
system : Full Sapphire calculation information regarding base directories and file composition.
    Adj : scipy sparse matrix - adjacency matrix returned from Post_Process.Adjacent.ReturnAdj()
Masterkey : tuple - The user may provide their own cna masterkey if they wish to compare
against a theoretical cna signature list
Fingerprint : boolean - Whether or not to compute the cna patterns
Type : boolean - Whether or not to write out the full cna profile to an external file
Returns
-------
numpy array
2 x m matrix where m is the number of unique recognised signatures
Will be of the form (n, (r,s,t)) where n is the number of unique counts
of the signature (r,s,t)
list
N dimensional list where N is the number of atoms considered.
Each list element will be a tuple of the CNA pattern for that given atom
"""
def __init__(self, System = None, Adj = None, Masterkey = None,
Fingerprint = True, Type = False, Frame = 0):
self.System = System
self.Type = Type
self.Frame = Frame
if Adj is not None:
try:
self.adj = Adj.todense()
except Exception as e:
pass
else:
pass
if Fingerprint:
self.Fingerprint = np.zeros(self.adj.shape[0], dtype = object)
self.Keys = np.zeros(self.adj.shape[0], dtype = object)
if Masterkey is None:
self.Masterkey = ((0,0,0),
(1,0,0),
(2,0,0),(2,1,1),
(3,0,0),(3,1,1),(3,2,2),
(4,0,0),(4,1,1),(4,2,1),(4,2,2),(4,3,3),(4,4,4),
(5,2,1),(5,2,2),(5,3,2),(5,3,3),(5,4,4),(5,5,5),
(6,6,6))
else:
self.Masterkey = Masterkey
self.Sigs = {}
for item in self.Masterkey:
self.Sigs[item] = 0
self.Pat_Key = Utilities.Pattern_Key().Key() #Calling dictionary of recognised patterns
self.Keys = list(self.Pat_Key)
self.Max_Label = max(len(str(label)) for label in self.Keys)
def ensure_dir(self, base_dir='', file_path=''):
"""
Robert:
A simple script to verify the existence of a directory
given the path to it. If it does not exist, will create it.
"""
directory = base_dir + file_path
if not os.path.exists(directory):
os.makedirs(directory)
def MakeFile(self, Attributes):
self.out = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
if not os.path.isfile(self.out):
with open(self.System['base_dir'] + Attributes['Dir'] + Attributes['File'], 'w') as out:
out.close()
else:
pass
def Ascii_Bars(self, Finger):
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a', encoding='utf-8') as f:
f.write('\nCNA Pattern distribution for full system at frame %s.\n' %self.Frame)
Temp = np.zeros(len(self.Keys), int)
for atom in Finger:
if str(atom) in self.Keys:
Temp[self.Keys.index(str(atom))] += 1
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a', encoding='utf-8') as f:
for i, count in enumerate(Temp):
bar_chunks, remainder = divmod(int(count * 8 / (len(Finger) / 50)), 8)
# First draw the full width chunks
bar = '█' * bar_chunks
# Then add the fractional part. The Unicode code points for
# block elements are (8/8), (7/8), (6/8), ... , so we need to
# work backwards.
if remainder > 0:
bar += chr(ord('█')+ (8 - remainder))
# If the bar is empty, add a left one-eighth block
bar = bar or '|'
f.write(f'{self.Keys[i].rjust(self.Max_Label)} | {count:#4d} {bar}\n')
def NN(self, atom):
"""
Parameters
----------
atom : integer
            the atomic index being considered, relative to the ordering of atoms in the frame of the trajectory
Returns
-------
self.neigh : list
indices of all atoms considered to be neighbours of the reference atom.
"""
self.neigh = []
for i, atoms in enumerate(self.adj[:,atom]):
if int(atoms) == 1:
self.neigh.append(i)
return self.neigh
def R(self, atom, friend):
"""
Parameters
----------
atom : integer
            the reference atomic index being considered, relative to the ordering of atoms in the frame of the trajectory
        friend : integer
            the neighbour atomic index being considered, relative to the ordering of atoms in the frame of the trajectory
Returns
-------
self.bonds : list
indices of all atoms which are mutually bonded to both the atom and its friend
"""
self.bonds = []
for i, x in enumerate(self.adj[:,atom]):
if int(x) == 1:
if self.adj[:,friend][i] == 1:
self.bonds.append(i)
self.r = len(self.bonds)
return self.r
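    # S(): counts the bonds formed between the common neighbours found by R(),
    # and stores each bonded pair of shared neighbours in self.perm so that T()
    # can later build a graph from them.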
def S(self):
self.s = 0
self.perm = []
for i, b in enumerate(self.bonds):
for j, c in enumerate(self.bonds[i:]):
a = int(self.adj[:,b][c])
if a == 1:
self.s += a
self.perm.append((b,c))
return self.s
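    # T(): builds a networkx graph whose nodes are the shared neighbours and whose
    # edges are the bonds counted in S(), then takes the longest simple path (in
    # bonds) or the largest cycle through them; returns 0 when no such chain exists.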
    def T(self):
self.G = nx.Graph()
for bond in self.bonds:
self.G.add_node(str(bond))
for item in self.perm:
self.G.add_edge(*(str(item[0]), str(item[1])))
chain = []
for n1 in self.bonds:
for n2 in self.bonds:
paths = nx.all_simple_paths(self.G, source=str(n1), target=str(n2))
for path in map(nx.utils.pairwise, paths):
chain.append(len(list(path)))
cycles = [len(x) for x in nx.cycle_basis(self.G)]
try:
chain.append(max(cycles))
except ValueError:
pass
try:
self.t = max(chain)
except ValueError:
self.t = 0
return self.t
def calculate(self):
for i, atom in enumerate(self.adj):
self.particle_cnas = []
self.NN(i)
for neigh in self.neigh:
sig = tuple((self.R(i,neigh), self.S(), self.T()))
try:
self.Sigs[sig]+=1
except KeyError:
self.Sigs[sig] = 1
self.particle_cnas.append(sig)
if self.Fingerprint is not False:
self.Fingerprint[i] = self.fingers()
self.write()
def fingers(self):
Temp = set(self.particle_cnas)
self.Keys.append(Temp)
return tuple((self.particle_cnas.count(x), x) for x in Temp)
def write(self):
if self.Type == 'Full':
from Sapphire.IO import OutputInfoFull as Out # Case 1
#Write object for the CoM
Attributes = getattr(Out, str('cna_sigs')) #Loads in the write information for the object
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in np.array(list(self.Sigs.values()), dtype = int)) +'\n')
if self.Fingerprint is not False:
#Write object for the homo CoM distances
Attributes = getattr(Out, str('pattern_indices')) #Loads in the write information for the object
OutFile = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Attributes['Dir'])
self.MakeFile(Attributes)
with open(OutFile, 'a') as outfile:
outfile.write(str(self.Frame) + ' ' + ' '.join(str(item) for item in self.Fingerprint) +'\n')
```
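The (r, s, t) decomposition described in the class docstring can be exercised directly on a toy adjacency matrix. The sketch below is illustrative only; the import path is assumed, and a fully bonded four-atom cluster is chosen because every bond in it carries the (2, 1, 1) signature.
```python
# Hedged sketch: the (r, s, t) signature of one bond in a fully connected 4-atom cluster.
# Every bonded pair shares the two remaining atoms (r = 2), those two atoms are bonded
# to each other (s = 1), and the longest chain through them is a single bond (t = 1).
import numpy as np
from scipy.sparse import csr_matrix
from Sapphire.CNA.FrameSignature import CNA  # import path assumed

adj = csr_matrix(np.ones((4, 4), dtype=int) - np.eye(4, dtype=int))
cna = CNA(Adj=adj, Fingerprint=True, Type=False)

neighbours = cna.NN(0)        # atoms bonded to atom 0 -> [1, 2, 3]
r = cna.R(0, neighbours[0])   # common neighbours of the 0-1 bond -> 2
s = cna.S()                   # bonds between those common neighbours -> 1
t = cna.T()                   # longest chain through them -> 1
print((r, s, t))              # -> (2, 1, 1)
```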
#### File: Sapphire/Emerald/main.py
```python
import math
import time
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt
#Input: file xyz
#Output: matrix of i atoms in row with columns of symbol, x, y, z,
#number of atoms and title
class Emerald(object):
"""
Robert:
In principle, this will serve as the class descriptor for the key actions
that Emerald purports to support.
Ideally this shouold be either a stand-alone module or to be called from
the primary Sapphire routine.
Parameters
----------
filename : TYPE
DESCRIPTION.
Returns
-------
coordinates1 : TYPE
DESCRIPTION.
"""
def __init__(self, *kwargs):
return None
def read_file(filename):
"""
Robert:
This simply returns the atoms-like object which may be supported by ase.
I do not believe that it is necessary.
"""
coordinates1 = []
xyz = open(filename)
n_atoms1 = int(xyz.readline())
title1 = xyz.readline()
for line in xyz:
atom, x, y, z = line.split()
coordinates1.append([atom, float(x), float(y), float(z)])
xyz.close()
print("filename: %s" % filename)
print("title: %s" % title1)
print("number of atoms: %d" % n_atoms1)
print("number of coordinates: %d" % len(coordinates1))
return coordinates1, n_atoms1, title1
#Input: matrix of coordinates Symbol, x, y, z
#and the number of atoms (len(coordinates)).
#Output: coordinates of center of mass
def get_coordinatesCM(coordinates, n):
"""
    This function assumes that all species are of equivalent mass!
Do not use - core Sapphire does this better.
"""
xcm = 0.
ycm = 0.
zcm = 0.
for i in range(len(coordinates)):
xcm = coordinates[i][1]+xcm
xcm1 = xcm/n
for i in range(len(coordinates)):
ycm = coordinates[i][2]+ycm
ycm1 = ycm/n
for i in range(len(coordinates)):
zcm = coordinates[i][3]+zcm
zcm1 = zcm/n
return xcm1, ycm1, zcm1
#Input: coordinates x, y, z
#Output: matrix of Euclidean distance
def Euc_Dist(coordinates):
"""
Robert:
This function already exists more efficiently in DistFuncs.
"""
Distance = []
for i in range(len(coordinates)-1):
for j in range(i+1, len(coordinates)):
Euc = get_distance(coordinates, i, j)
Distance.append(Euc)
return Distance
#Input: matrix of coordinates: Symbol, x, y, z
#the coordinates of mass center get_coordinatesCM
#Output: matrix of coordinates rescaled: Symbol, x, y, z
def riscale_coordination(coordinates, x, y, z):
"""
Robert:
I do not understand why you would want this quantity.
"""
coordinatesCM = []
for i in range(len(coordinates)):
coordinatesCM.append(
[str(coordinates[i][0]), coordinates[i][1]-x,
coordinates[i][2]-y, coordinates[i][3]-z])
return coordinatesCM
#Input: matrix of coordinates: Symbol, x, y, z
#Output: distance i j
def get_distance(coordinates, i, j):
return math.sqrt(
pow(coordinates[i][1] - coordinates[j][1], 2)
+ pow(coordinates[i][2] - coordinates[j][2], 2)
+ pow(coordinates[i][3] - coordinates[j][3], 2))
#Input: matrix of coordinates (use the riscale_coordination: Symbol, x, y, z)
#Output: radius
def get_radius(coordinates):
radius = []
for i in range(len(coordinates)):
radius.append(
math.sqrt(pow(coordinates[i][1], 2)
+ pow(coordinates[i][2], 2)
+ pow(coordinates[i][3], 2)))
return radius
#Input: coordinates and cutoff
#Output: coordination number of all atoms in coordinates
#and a matrix quali1 which contain the nearest neighbour
#index
def get_coordination_number(coordinates, cutoff1):
count1 = 0
count = []
quale = 0
quali = []
quali1 = []
for i in range(len(coordinates)):
for j in range(len(coordinates)):
if get_distance(coordinates, i, j) <= cutoff1 and \
i != j:
count1 = count1 + 1
quale = j
quali.append(quale)
quali1.append(quali)
quali = []
count.append(count1)
count1 = 0
return count, quali1
#Input: coordinates and cutoff
#Output: general coordination number of all atoms
def get_generalized_CN(coordinates, cutoff1):
count2 = 0
generalcount = []
count, quali = get_coordination_number(coordinates, cutoff1)
#For every i we use the line quali[i]
for i in range(len(coordinates)):
for j in range(len(quali[i])):
count2 = count2 + count[quali[i][j]]
#FCC 12 max CN
generalcount.append(count2 / 12)
count2 = 0
return generalcount
#Input: coordinates and cutoff
#Output: solid angle of all atoms
def get_solid_angol(coordinates, cutoff1):
temp = []
solidangle = []
rho = 0.
temp1 = 0.
count, quali = get_coordination_number(coordinates, cutoff1)
for i in range(len(coordinates)):
for j in range(len(quali[i])):
temp1 = get_distance(coordinates, i, quali[i][j])
temp.append(temp1)
m = len(temp)
for q in range(len(temp)):
rho += temp[q]
rho1 = rho/(m-2)
rho = 0.
solidangle.append(
(pow(rho1, 2)*math.pi)/pow(math.sqrt(
pow(coordinates[i][1], 2)
+ pow(coordinates[i][2], 2)
+ pow(coordinates[i][3], 2)), 2))
return solidangle
#Input: coordinates and cutoff
#Output: the list of the surface atoms and the
#bulk list as: Symbol, x, y, z, count, general count,
#radius and solid angle.
#The output is also the individual
#columns of radius, count and general count
def get_which_surface(coordinates, cutoff1):
bulk = []
surface = []
radiuss = []
countt = []
gcn = []
count, quali = get_coordination_number(coordinates, cutoff1)
radius = get_radius(coordinates)
solidangle = get_solid_angol(coordinates, cutoff1)
generalcount = get_generalized_CN(coordinates, cutoff1)
for i in range(len(coordinates)):
if count[i] <= 10:
surface.append(
[str(coordinates[i][0]), coordinates[i][1],
coordinates[i][2], coordinates[i][3],
count[i], generalcount[i],
radius[i], solidangle[i]])
radiuss.append(radius[i])
countt.append(count[i])
gcn.append(generalcount[i])
else:
bulk.append(
[str(coordinates[i][0]), coordinates[i][1],
coordinates[i][2], coordinates[i][3],
count[i], generalcount[i],
radius[i], solidangle[i]])
return surface, radiuss, countt, bulk, gcn
def get_which_bulk(coordinates, cutoff1):
bulk = []
count, quali = get_coordination_number(coordinates, cutoff1)
radius = get_radius(coordinates)
solidangle = get_solid_angol(coordinates, cutoff1)
generalcount = get_generalized_CN(coordinates, cutoff1)
for i in range(len(coordinates)):
if count[i] > 10:
bulk.append(
[str(coordinates[i][0]), coordinates[i][1],
coordinates[i][2], coordinates[i][3],
count[i], generalcount[i],
radius[i], solidangle[i]])
return bulk
#Input: coordinates, cutoff and atomic radius
#Output: the surface in Å^2
def get_surface(coordinates, cutoff1, rAT):
surface, radius, count, \
bulk, generalcount = get_which_surface(coordinates, cutoff1)
surface1 = 0.
for i in range(len(surface)):
#For FCC max 12
surface1 += (1-(generalcount[i]/12))
return surface1*4*math.pi*pow(rAT, 2)
def get_diff_surface(coordinates, cutoff1, rAT):
surfacemin = 0.
surfacemax = 0.
radiuss = get_radius(coordinates)
surface, radius, generalcount, \
bulk, generalcount = get_which_surface(coordinates, cutoff1)
rmin = min(radius)
rmax = max(radiuss)
surfacemin = 4 * math.pi*pow(rmin+rAT, 2)
surfacemax = 4 * math.pi * pow(rmax+rAT, 2)
return surfacemax, surfacemin
def get_medium_radius_surface(coordinates, cutoff1):
rad1 = 0.
r = []
deltarad1 = 0.
surface, radius, count,\
bulk, generalcount = get_which_surface(coordinates, cutoff1)
rad = 0.
for i in range(len(radius)):
rad += radius[i]
rad1 = rad/len(radius)
for i in range(len(radius)):
r.append(abs(radius[i] - rad1))
deltarad1 = max(r)
return rad1, deltarad1
def get_medium_radius_peeledsurface(coordinates, cutoff1):
rad1 = 0.
r = []
deltarad1 = 0.
surface, radius, count,\
bulk, generalcount = get_which_surface(coordinates, cutoff1)
surfacepeel, radius3, \
        generalcount3, bulk4 = get_which_surface_peeling(bulk, cutoff1)
rad = 0.
for i in range(len(surfacepeel)):
rad += radius3[i]
rad1 = rad/len(radius3)
for i in range(len(radius3)):
r.append(abs(radius3[i] - rad1))
deltarad1 = max(r)
return rad1, deltarad1
#Input: coordinates and cutoff
#Output: the list of the peeled surface atoms and the
#bulk list as: Symbol, x, y, z, count, general count,
#radius and solid angle.
#The output is also the individual
#columns of radius, count and general count
def get_which_surface_peeling(coordinates, cutoff1):
surface = []
radiuss = []
countt = []
bulk = []
count, quali = get_coordination_number(coordinates, cutoff1)
radius = get_radius(coordinates)
solidangle = get_solid_angol(coordinates, cutoff1)
generalcount = get_generalized_CN(coordinates, cutoff1)
for i in range(len(coordinates)):
if count[i] <= 10:
surface.append(
[str(coordinates[i][0]), coordinates[i][1],
coordinates[i][2], coordinates[i][3],
count[i], generalcount[i],
radius[i], solidangle[i]])
radiuss.append(radius[i])
countt.append(count[i])
else:
bulk.append(
[str(coordinates[i][0]), coordinates[i][1],
coordinates[i][2], coordinates[i][3],
count[i], generalcount[i],
radius[i], solidangle[i]])
return surface, radiuss, countt, bulk
#Input: coordinates, atomic radius of the element
# and the cutoff
#Output: the total volume of the cluster
def get_volume(coordinates, rAT, cutoff1):
r2 = []
r20 = 0.
volumemin = 0.
volumemancante = 0.
surface, radius, count,\
bulk, generalcount = get_which_surface(coordinates, cutoff1)
rmin = min(radius) - rAT
volumemin = (4/3)*math.pi*pow(rmin, 3)
volume = 0.
volumemancante = (2/3)*math.pi*pow(rAT, 3)*len(radius)
for i in range(len(radius)):
r20 = rAT-sqrt(pow(pow(pow(radius[i], 2) +
pow(rAT, 2), 0.5) - rmin, 2) -
pow(radius[i] - rmin, 2))
r2.append(r20)
for j in range(len(r2)):
volume += (1/3)*math.pi*(pow(rAT, 2) +
rAT*r2[j] + pow(r2[j], 2))*\
(radius[j] - rmin)
return volume + volumemin + volumemancante
#Input: the coordinates and the Wiener radius
#Output: the volume 4/3*pi*r^3
def get_volume_WS(coordinates, rWS1):
N = 0.
volume = 0.
N = len(coordinates)
volume = (4/3)*math.pi*N*pow(rWS1, 3)
return volume
#Input: coordinates and the radius
#Output: the count of atoms i found in the
#shell r-0.2, r+0.2
def get_RDF(coordinates, r):
count = 0.
k = []
radius = get_radius(coordinates)
for i in range(len(coordinates)):
if i not in k:
if radius[i]-0.2 < r and radius[i]+0.2 > r:
count += 1
k.append(i)
return count
#From <NAME> King's College London (UK)
#Input: coordinates and the bins, is defined out
#of the function so it will be the same for each
#cases of surface, peeled surface and core
#Output: the x and y for the plot
def get_PDDF(coordinates, bins):
distances = Euc_Dist(coordinates)
a, b = np.histogram(distances, bins)
bin_width = b[1] - b[0]
    bin_cents = [b[i] + bin_width / 2 for i in range(len(b)-1)]
return bin_cents, a
#Input: coordinates and bins
#Output: array of radius and the count for
#any radius for the histogram
def get_histo(coordinates, bins):
lunghezza = []
radius2primo = []
count5 = 0.
radius = get_radius(coordinates)
rmin = min(radius)
rmax = max(radius)
Nint = int((rmax - rmin)/bins)
rmin2 = round(rmin, 1)
for j in range(0, Nint+1):
for i in range(len(radius)):
if rmin + bins * j <= \
radius[i] and radius[i] <= \
rmin + bins * j + bins:
count5 += 1
lunghezza.append(count5)
count5 = 0.
radius2primo.append(rmin2 + (bins/2)*(2*j+1))
return radius2primo, lunghezza
def number_count(coordinates, cutoff1):
d = []
count = 0.
count2 = []
surface, radius, count1, \
bulk, generalcount = get_which_surface(coordinates, cutoff1)
for element in count1:
if element not in d:
d.append(element)
for i in range(len(d)):
for j in range(len(surface)):
if d[i] == count1[j]:
count += 1
count2.append(count)
count = 0.
return d, count2
def number_count_total(coordinates, cutoff1):
d = []
count = 0
count2 = []
count1, quali = get_coordination_number(coordinates, cutoff1)
for element in count1:
if element not in d:
d.append(element)
for i in range(len(d)):
for j in range(len(coordinates)):
if d[i] == count1[j]:
count += 1
count2.append(count)
count = 0.
return d, count2
def number_generalcount(coordinates, cutoff1):
d = []
count = 0.
count2 = []
surface, radius, count1, bulk, generalcount = get_which_surface(coordinates, cutoff1)
for element in generalcount:
if element not in d:
d.append(element)
for i in range(len(d)):
for j in range(len(surface)):
if d[i] == generalcount[j]:
count += 1
count2.append(count)
count = 0.
return d, count2
def colored_GCN(coordinates, cutoff1):
surface, radius, count1, \
bulk, generalcount = get_which_surface(coordinates, cutoff1)
d = []
c = []
total = []
for element in generalcount:
if element not in d:
d.append(element)
d.sort()
n = 0
for i in range(len(d)):
c.append(str(n))
n += 1
for i in range(len(surface)):
for j in range(len(d)):
if generalcount[i] == d[j]:
total.append([c[j], surface[i][1], surface[i][2], surface[i][3]])
return total, d
def colored_GCN2(coordinates, cutoff1):
surface, radius, count1, \
bulk, generalcount = get_which_surface(coordinates, cutoff1)
generalcount2 = get_generalized_CN(surface, cutoff1)
d = []
c = []
total = []
for element in generalcount2:
if element not in d:
d.append(element)
n = 0
for i in range(len(d)):
c.append(str(n))
n += 1
for i in range(len(surface)):
for j in range(len(d)):
if generalcount2[i] == d[j]:
total.append([c[j], surface[i][1], surface[i][2], surface[i][3]])
return total
def get_plane_colored_GCN(coordinates, cutoff1):
surface, radius, count1, \
bulk, generalcount = get_which_surface(coordinates, cutoff1)
uno = []
a = []
b = []
c = []
d = []
a1 = []
b1 = []
c1 = []
d1 = []
matrixplane = []
matrixplane1 = []
dd = []
cc = []
total = []
j = 0
m = 0
k = 0
n = 0
for element in generalcount:
if element not in dd:
dd.append(element)
dd.sort()
for i in range(len(surface)):
if generalcount[i] == dd[0]:
uno.append([surface[i][0], surface[i][1], surface[i][2], surface[i][3]])
if generalcount[i] == dd[1]:
uno.append([surface[i][0], surface[i][1], surface[i][2], surface[i][3]])
for i in range(len(uno)-2):
for j in range(i+1, len(uno)-1):
for m in range(j+1, len(uno)):
if i != j and j != m:
a.append(round((uno[j][2] - uno[i][2]) *
(uno[m][3] - uno[i][3]) -
(uno[j][3] - uno[i][3]) *
(uno[m][2] - uno[i][2]), 2))
b.append(round((uno[j][3] - uno[i][3]) *
(uno[m][1] - uno[i][1]) -
(uno[j][1] - uno[i][1]) *
(uno[m][3] - uno[i][3]), 2))
c.append(round((uno[j][1] - uno[i][1]) *
(uno[m][2] - uno[i][2]) -
(uno[j][2] - uno[i][2]) *
(uno[m][1] - uno[i][1]), 2))
                    d.append(round(-a[-1] * uno[i][1] - b[-1] *
                                   uno[i][2] - c[-1] * uno[i][3], 2))
for i in range(len(a)):
if a[i] != 0. and b[i] != 0. and c[i] != 0. and d[i] != 0.:
matrixplane.append([a[i], b[i], c[i], d[i]])
return matrixplane, uno, dd
def get_GCN_atom_plane1(coordinates, cutoff1, rAT):
matrixplane, uno, dd = get_plane_colored_GCN(coordinates, cutoff1)
surface, radius, count1,\
bulk, generalcount = get_which_surface(coordinates, cutoff1)
numero = []
quali = []
quali1 = []
quale = 0
d = 0.
n = 0
for i in range(len(matrixplane)):
for j in range(len(surface)):
d = abs(matrixplane[i][0] * round(surface[j][1], 8) +
matrixplane[i][1] * surface[j][2] +
matrixplane[i][2] * surface[j][3] +
matrixplane[i][3]) / (pow((pow(matrixplane[i][0], 2) +
pow(matrixplane[i][1], 2) +
pow(matrixplane[i][2], 2)), 0.5))
if d < 0.3*rAT:
n += 1
quale = j
quali.append(quale)
quali1.append(quali)
quali = []
numero.append(n)
n = 0
return numero, quali1
def get_thickness(coordinates, cutoff1):
r1 = 0.
r2 = 0.
surface, radius, count1, \
bulk, generalcount = get_which_surface(coordinates, cutoff1)
surfacepeel, radius3, \
        generalcount3, bulk4 = get_which_surface_peeling(bulk, cutoff1)
r1 = min(radius) - min(radius3)
r2 = max(radius) - max(radius3)
return r1, r2
#Input: coordinates and cutoff
#Output: the faceting ratio
def get_FEratio(coordinates, cutoff1):
ratio1 = 0.
surface, radius, count1, \
bulk, generalcount = get_which_surface(coordinates, cutoff1)
surfacepeel, radius3, \
        generalcount3, bulk4 = get_which_surface_peeling(bulk, cutoff1)
ratio1 = (len(surface)/len(coordinates))*100
return ratio1
```
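The Emerald helpers above call one another by bare name, so they read as plain module-level functions; the sketch below uses them that way. The import path, the input file name and the 3.0 Angstrom cutoff are placeholders, not documented defaults.
```python
# Hedged usage sketch for Emerald/main.py -- import path, file name and cutoff are assumptions.
from Sapphire.Emerald.main import (read_file, get_coordinatesCM, riscale_coordination,
                                   get_coordination_number, get_which_surface)

coords, n_atoms, title = read_file('cluster.xyz')        # Symbol, x, y, z per atom
xcm, ycm, zcm = get_coordinatesCM(coords, n_atoms)       # centre of mass (equal masses assumed)
centred = riscale_coordination(coords, xcm, ycm, zcm)    # shift the cluster onto its centre

cutoff = 3.0  # nearest-neighbour cutoff in Angstrom; system dependent
cn, neighbour_lists = get_coordination_number(centred, cutoff)
surface, radii, surface_cn, bulk, gcn = get_which_surface(centred, cutoff)
print('%d of %d atoms sit on the surface' % (len(surface), n_atoms))
```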
#### File: Sapphire/Graphing/Plot_Funcs.py
```python
import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm
import multiprocessing as mp
from functools import partial
import os
import linecache
import sys
import traceback
from inspect import getmembers, isfunction
import inspect
plt.rcParams.update({'figure.max_open_warning': 0})
def distance(a, b):
dx = abs(a[0] - b[0])
dy = abs(a[1] - b[1])
dz = abs(a[2] - b[2])
return np.sqrt(dx**2 + dy**2 + dz**2)
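# Gauss(): kernel-density estimate over a list of pairwise distances. Distances
# between 1 and 9 Angstrom are each broadened by a normal of width Band on the
# grid Space (default 2-8 Angstrom), the summed density is normalised with the
# trapezoid rule, and unless mon=True the first local minimum beyond 3 Angstrom
# is returned as the nearest-neighbour cutoff R_Cut. Returns None when ten or
# fewer distances survive the filter.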
def Gauss(Data,Band, mon=False, Space = None):
if Space is None:
Space = np.linspace(2, 8, 100)
Data = [elem for elem in Data if 1 < elem < 9]
A=[]; Data=np.asarray(Data)
if len(Data) > 10:
for i in range(len(Data)):
A.append(norm.pdf(Space, Data[i],Band))
Density = np.asarray(np.sum(A, axis=0))
Density = Density/np.trapz(Density, Space) #For normalisation purposes
if mon == False:
Min = (np.diff(np.sign(np.diff(Density))) > 0).nonzero()[0] + 1 # local min
R_Cut = Space[Min][np.where(Space[Min]>3)][0]
return Space, Density, R_Cut
elif mon == True:
return Space, Density
else:
return None
class Plot_Funcs():
def __init__(self, MetaData = None, Errors = None, Quantities = None, System = None):
if System == None:
self.System = None
self.Base = ''
self.Images = ''
self.single_file = True
else:
self.System = System
try:
self.Base = System['base_dir']
except KeyError:
self.Base = ''
try:
self.Images = System['plot_dir']
self.ensure_dir(self.Base + self.Images)
except KeyError:
self.Images = ''
if MetaData is None:
sys.exit("\nNo metadata provided for analysis.\nNow exiting.\n")
else:
self.Meta = MetaData
if Errors is None:
self.Errors = False
with open(self.Base+'Plotting_Info.txt', "a+") as f:
f.write("\nNo errors have been provided.\nHence, no errors will be plotted.\n")
else:
self.Err = Errors
self.Errors = True
if Quantities is None:
sys.exit("\nNo quantities requested.\nNow exiting.\n")
else:
self.Quantities = Quantities
self.functions_list = [o for o in getmembers(Plot_Funcs) if isfunction(o[1])]
self.Functions = {}
"""This provides a dictionary with the function names as keys and the
function itself, plus arguments following.
The reason for the arguments is so that user defined input arguments
may be identified and fed in correctly."""
for x in self.functions_list:
self.Functions[x[0]] = inspect.getfullargspec(x[1])[0][1:]
self.Q_Keys = self.Quantities.keys()
self.Meta_Keys = self.Meta.keys()
self.Plot_Dict = {}
for obj in self.Q_Keys:
for item in self.functions_list:
if obj.lower() in item[0].lower():
self.Plot_Dict[item[0]] = [item[1]]
def ensure_dir(self, file_path=''):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
def Make_Plots(self):
"""
Robert:
This is the function that calls all of the desired functions for
creating plots.
The list of function names and arguments are already pre-defined and
so this function simply parses through the user input.
Still need to make a robust sanitation of user input but that may come later.
Returns
-------
None.
"""
for x in self.Q_Keys:
if x in self.Functions:
ArgsList = []
for y in self.Functions[x]:
try:
ArgsList.append(self.Quantities[x][y])
except KeyError:
                        ArgsList.append(
                            inspect.getfullargspec(self.Plot_Dict[x][0]).defaults[self.Functions[x].index(y)]
                        )
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\nThe arguments for function %s are %s.\n"%(x,ArgsList))
getattr(self, x)(*ArgsList)
def Collect_CNA(self, Sig):
try:
Index = self.Meta["masterkey"].index( Sig )
return [ self.Meta['cna_sigs'][x][Index] for x in range(len(self.Meta['cna_sigs'])) ]
except KeyError:
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\nData not found in metadata\n")
return None
def Collect_CNA_error(self, Sig):
try:
Index = self.Meta["masterkey"].index( Sig )
return [ self.Err['cna_sigs'][x][Index] for x in range(len(self.Err['cna_sigs'])) ]
except KeyError:
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\nData not found in metadata\n")
return None
def autolabel(self, rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom', fontsize = 18)
def agcn_heat(self, Name = 'agcn_Heat.png'):
Bins = np.linspace(3,12,41)
Heat = []
try:
for frame in range( len(self.Meta['agcn']) ):
a,b = np.histogram( self.Meta['agcn'][frame], bins = Bins )
Heat.append(a)
YTicks = np.array( [ "{:.1f}".format(x) for x in np.linspace(3,12,20) ] )
try:
XTicks = np.array( [ "{:.0f}".format(t) for t in np.linspace( self.Meta['SimTime'][0], self.Meta['SimTime'][-1] ,25) ], dtype = int )
except KeyError:
XTicks = np.array( [ "{:.0f}".format(t) for t in np.linspace( self.Meta['Start'], self.Meta['End'] ,25) ], dtype = int )
Heat = ( np.asanyarray(Heat) ).transpose()
ax = sns.heatmap(Heat, cmap = 'hot')
ax.set_xlabel("Frame", fontsize = 14)
ax.set_ylabel("AGCN", fontsize =14)
ax.set_xticklabels(XTicks)
ax.set_yticklabels(YTicks)
plt.savefig(self.Base+self.Images+'/'+Name, dpi = 100, bbox_inches='tight')
plt.close()
except KeyError:
print("\nThis quantity does not exist in the metadata.\n")
return None
def prdf_plot(self, Names = None, Frames = [], He = False, Ho = None, Errors = False):
Frames = list(Frames)
if self.Errors is True:
Errors = True
"""
Name: str 'pdf' 'rdf'
Frames: list frames to be reviewed
He: bool Whether to look for hetero quantities default is False
Homo: List of atomic species to be considered as homo pairs only - default is empty list
Parameters
----------
Name : TYPE
DESCRIPTION.
Frames : TYPE, optional
DESCRIPTION. The default is [].
He : TYPE, optional
DESCRIPTION. The default is None.
Homo : TYPE, optional
DESCRIPTION. The default is None.
Returns
-------
None.
"""
for Name in Names:
for frame in Frames:
fig, ax = plt.subplots()
fig.set_size_inches((9,3))
try:
ax.plot(self.Meta[Name][Frames.index(frame)][0], self.Meta[Name][Frames.index(frame)][1],
color='k', linestyle = 'solid', linewidth = 4, label = "Full system")
if Errors is True:
ax.fill_between(self.Meta[Name][Frames.index(frame)][0],
self.Meta[Name][Frames.index(frame)][1] + self.Err[Name][Frames.index(frame)][1],
self.Meta[Name][Frames.index(frame)][1] - self.Err[Name][Frames.index(frame)][1],
color='k', alpha = 0.25)
fig.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
ncol=2, mode="expand", borderaxespad=0. ,fontsize = 14)
except KeyError:
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\n%s was not found in the provided metadata.\n"%Name)
if He is False:
pass
else:
try:
ax.plot(self.Meta['He'+Name.upper()][Frames.index(frame)][0], self.Meta['He'+Name.upper()][Frames.index(frame)][1],
color='r', linestyle = 'dashed', linewidth = 4, label = "Pair different only")
if Errors is True:
ax.fill_between(self.Meta['He'+Name.upper()][Frames.index(frame)][0],
self.Meta['He'+Name.upper()][Frames.index(frame)][1] + self.Err['He'+Name.upper()][Frames.index(frame)][1],
self.Meta['He'+Name.upper()][Frames.index(frame)][1] - self.Err['He'+Name.upper()][Frames.index(frame)][1],
color='r', alpha = 0.25)
except KeyError:
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\n%s was not found in the metadata.\n"%Name)
if Ho is None:
pass
elif type(Ho) is list:
for ele in Ho:
ax.plot(self.Meta['Ho'+Name.upper()+ele][Frames.index(frame)][0],
self.Meta['Ho'+Name.upper()+ele][Frames.index(frame)][1],
linestyle = 'dashdot', linewidth = 4,
label ="%s only"%ele)
if Errors is True:
ax.fill_between(self.Meta['Ho'+Name.upper()+ele][Frames.index(frame)][0],
self.Meta['Ho'+Name.upper()+ele][Frames.index(frame)][1] + self.Err['Ho'+Name.upper()+ele][Frames.index(frame)][1],
self.Meta['Ho'+Name.upper()+ele][Frames.index(frame)][1] - self.Err['Ho'+Name.upper()+ele][Frames.index(frame)][1],
alpha = 0.25)
else:
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\nError in input arguments for the prdf_plot.\n")
ax.tick_params(axis = 'both', which = 'major', labelsize = 12)
ax.set_xlabel(r"Distance (Angstrom)", fontsize = 12)
ax.set_ylabel(Name, fontsize = 12)
fig.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
ncol=2, mode="expand", borderaxespad=0. ,fontsize = 14)
try:
FrameTemp = "{:.1f}".format(self.Meta['Temp'][int(frame)])
ax.text(1.4*np.amin(self.Meta[Name][Frames.index(frame)][0]), 0.9*np.amax(self.Meta[Name][Frames.index(frame)][1]),
"Time: %sps\nTemp: %sK" %(self.Meta['SimTime'][int(frame)],
FrameTemp), fontsize=13)
except KeyError:
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\n%s threw an error when using the prdf_plot function.\n"%frame)
if He is False:
if Ho is None:
plt.savefig(self.Base + self.Images + '/' + Name.upper() + str(frame)+'.png',
dpi = 100, bbox_inches='tight')
elif type(Ho) is list:
plt.savefig(self.Base + self.Images + '/' + Name.upper() + '_Ho_' + ''.join(map(str, Ho)) +'_' + str(frame)+'.png',
dpi = 100, bbox_inches='tight')
else:
plt.savefig(self.Base + self.Images + '/' + Name.upper() + str(frame)+'.png',
dpi = 100, bbox_inches='tight')
else:
if Ho is None:
plt.savefig(self.Base + self.Images + '/' + Name.upper() +'_He_' + str(frame)+'.png',
dpi = 100, bbox_inches='tight')
elif type(Ho) is list:
plt.savefig(self.Base + self.Images + '/' + Name.upper() +'_He_' + '_Ho_' + ''.join(map(str, Ho)) +'_' + str(frame)+'.png',
dpi = 100, bbox_inches='tight')
else:
plt.savefig(self.Base + self.Images + '/' + Name.upper() +'_He_' + str(frame)+'.png',
dpi = 100, bbox_inches='tight')
plt.close()
def plot_stats(self, Stats = [], Species = None, Quants = [], Temp = False, Errors = False, Frames = None):
if self.Errors is True:
Errors = True
if Frames is None:
try:
TimeAxis = range(int(self.Meta['Start']),
int(self.Meta['SimTime'][-1]),
int(int(self.Meta['Skip']) * int(self.Meta['SimTime'][-1]) / int(self.Meta['End'])))
except KeyError:
TimeAxis = range(int(self.Meta['Start']),
int(self.Meta['End']),
int(self.Meta['Step']))
else:
TimeAxis = Frames
for Stat in Stats:
fig,ax = plt.subplots()
fig.set_size_inches((9,3))
for Quant in Quants:
try:
ax.plot(TimeAxis,
self.Meta[Stat+Quant.lower()],
label = Quant.lower())
if Errors is True:
ax.fill_between(TimeAxis,
self.Meta[Stat+Quant.lower()] - self.Err[Stat+Quant.lower()],
self.Meta[Stat+Quant.lower()] + self.Err[Stat+Quant.lower()],
alpha = 0.25)
except KeyError:
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\nNo %s found in metadata.\n"%(Stat+Quant.lower()))
try:
ax2=ax.twinx()
ax2.scatter(TimeAxis,
self.Meta['R_Cut'],
linewidths=4, label = 'R_Cut', color='g')
if Species is not None:
for x in Species:
ax2.scatter(TimeAxis,
self.Meta['Cut' + x],
linewidths=4, label = 'R_Cut' + x)
if Errors is True:
ax2.errorbar(TimeAxis, self.Meta['R_Cut'],
self.Err['R_Cut'], color='g',
capsize = 5, capthick = 3)
if Species is not None:
for x in Species:
ax2.errorbar(TimeAxis,self.Meta['Cut' + x],
self.Err['Cut' + x],
capsize = 5, capthick = 3)
except KeyError:
pass
fig.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
ncol=3, mode="expand", borderaxespad=0. ,fontsize = 12)
ax.set_xlabel('Time (ps)')
ax.set_ylabel(Stat.upper())
if Temp is True:
ax3 = ax.twiny()
ax1Ticks = ax.get_xticks()
ax3Ticks = ax1Ticks
                ax3.set_xticks(ax3Ticks)
                ax3.set_xbound(ax.get_xbound())
                ax3.set_xticklabels(self.tick_function(ax3Ticks))
ax3.set_xlabel('Temperature (K)')
plt.savefig(self.Base + self.Images + '/' + str(Stat) + '.png' , dpi = 100, bbox_inches='tight')
plt.close()
def tick_function(self, X):
try:
inc = (max(self.Meta['Temp']) - min(self.Meta['Temp']))/( 10*len(self.Meta['Temp']) )
V = min(self.Meta['Temp']) + X*inc
return ["%.3f" % z for z in V]
except KeyError:
return None
def com_plot_bi(self, Dists = None, Species = None, Frames = [0], Errors = False):
if self.Errors is True:
Errors = True
if Dists is None:
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\nNo distributions requested.\n")
return None
elif type(Dists) is list:
for Dist in Dists:
if Dist is "MidCoMDist":
D = "Cluster Centre"
elif Dist is "CoMDist":
D = "Sub-cluster Centre"
else:
raise KeyError("Invalid distribution.\n")
if Species is None:
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\nNo chemical species requested.\n")
elif type(Species) is list:
for Specie in Species:
for frame in Frames:
try:
fig,ax = plt.subplots()
fig.set_size_inches(9,3)
ax.plot(self.Meta['CoMSpace'], self.Meta[Dist + Specie][frame], color= 'k', linewidth = 4)
if Errors is True:
ax.fill_between(self.Meta['CoMSpace'],
self.Meta[Dist + Specie][frame] + self.Err[Dist + Specie][frame],
self.Meta[Dist + Specie][frame] - self.Err[Dist + Specie][frame],
color = 'k', alpha = 0.25)
ax.set_xlabel('Distance (Angstrom)')
ax.set_ylabel('Probability')
try:
ax.text(self.Meta['CoMSpace'][5], 0.65*max(self.Meta[Dist + Specie][frame]), "%s to %s\nTime: %sps\nTemp: %sK"
%(Specie, D, self.Meta['SimTime'][frame], "{:.1f}".format(self.Meta['Temp'][frame])))
except KeyError:
pass
plt.savefig(self.Base + self.Images + '/' + Dist+Specie+str(frame) + '.png',
dpi = 100, bbox_inches='tight')
plt.close()
except KeyError:
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\nThere was an error trying to plot %s.\n" %(Dist+Specie))
pass
def cna_plot(self, Name = 'CNA_Time', Frames = [], Errors = False):
if self.Errors is True:
Errors = True
for Frame in Frames:
try:
X_CNA = [ str(a) for a in self.Meta['masterkey'] ] # Create a set of ticks for the x-axis
fig = plt.figure(figsize = (9,3) )
if Errors is True:
ax = plt.bar( X_CNA, self.Meta['cna_sigs'][Frame], yerr = self.Err['cna_sigs'][Frame], tick_label = X_CNA )
else:
ax = plt.bar( X_CNA, self.Meta['cna_sigs'][Frame], tick_label = X_CNA)
plt.xlabel("CNA Signature", fontsize = 14)
plt.ylabel("Probability", fontsize = 14)
plt.xticks(rotation=90,fontsize = 14)
try:
plt.text( X_CNA[-7], 0.8*np.amax(self.Meta['cna_sigs'][Frame]),
'Time: %sps\nTemp: %sK' %(self.Meta["SimTime"][Frame],
"{:.1f}".format(self.Meta['Temp'][Frame])), fontsize = 14 )
except KeyError:
pass
plt.savefig(self.Base+self.Images+'/'+Name+str(Frame)+'.png', dpi = 100, bbox_inches = 'tight')
plt.close()
except KeyError:
with open(self.Base+'Plotting_Info.txt', "a") as f:
f.write("\nThis quantitiy, cna, does not exist in the metadata.\n")
return None
def agcn_histo(self, Frames = [], Errors = False):
for Frame in Frames:
fig, ax = plt.subplots()
fig.set_size_inches(9,3)
y,binEdges = np.histogram(self.Meta['agcn'][Frame], bins = 40)
bincenters = 0.5*(binEdges[1:]+binEdges[:-1])
ax.bar(bincenters, y, color='r')
try:
ax.text(bincenters[4], 0.7*np.amax(y), "Time : %sps\nTemp : %sK"%(self.Meta['SimTime'][Frame], "{:.1f}".format(self.Meta['Temp'][Frame])) )
plt.savefig(self.Base + self.Images + '/'+ 'AGCNDist'+str(self.Meta['SimTime'][Frame])+'.png', dpi = 100, bbox_inches='tight')
except KeyError:
plt.savefig(self.Base + self.Images + '/'+ 'AGCNDist.png', dpi = 100, bbox_inches='tight')
plt.close()
def com_full_plot(self, Frames = [], Errors = False):
if self.Errors is True:
Errors = True
for Frame in Frames:
fig, ax = plt.subplots()
fig.set_size_inches(9,3)
ax.plot(self.Meta['CoMSpace'], self.Meta['CoMDist'][Frame], color='k')
if Errors is True:
ax.fill_between(self.Meta['CoMSpace'] ,
self.Meta['CoMDist'][Frame] + self.Err['CoMDist'][Frame],
self.Meta['CoMDist'][Frame] - self.Err['CoMDist'][Frame],
color='k', alpha = 0.25)
ax.set_xlabel('Distance (Angstrom)')
ax.set_ylabel('RDF')
try:
ax.text(self.Meta['CoMSpace'][5], 0.65*max(self.Meta['CoMDist'][Frame]), "Full System\nTime: %sps\nTemp: %sK" %(self.Meta['SimTime'][Frame], "{:.1f}".format(self.Meta['Temp'][Frame])))
plt.savefig(self.Base + self.Images + '/'+ 'FullCoM'+str(self.Meta['SimTime'][Frame])+'.png',
dpi = 100, bbox_inches='tight')
except KeyError:
plt.savefig(self.Base + self.Images + '/'+ 'FullCoM.png', dpi = 100, bbox_inches='tight')
plt.close()
def Mass(self,r):
return ((4/3) * np.pi * r**3 )
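    # Note: Mass(r) is simply the volume of a sphere of radius r; any constant density
    # cancels when cum_com() below normalises by Mass(R), so it acts as the uniform
    # 'spherical mass distribution' reference curve.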
def cum_com(self, Frames):
fig,ax = plt.subplots()
fig.set_size_inches(9,3)
for Frame in Frames:
Int = [ np.trapz(self.Meta['CoMDist'][Frame][:x], self.Meta['CoMSpace'][:x]) for x in range(100) ]
try:
ax.plot(self.Meta['CoMSpace'], Int, label = '%sps' %(self.Meta['SimTime'][Frame]))
except KeyError:
ax.plot(self.Meta['CoMSpace'], Int, label = str(Frame))
ax.plot(self.Meta['CoMSpace'], self.Mass(self.Meta['CoMSpace'])/max(self.Mass(self.Meta['CoMSpace'])), label = 'Spherical mass distribution', linestyle = 'dashed')
ax.set_xlabel('Distance from centre (Angstrom)')
ax.set_ylabel('M(r) / M(R)')
fig.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
ncol=3, mode="expand", borderaxespad=0. ,fontsize = 12)
try:
plt.savefig(self.Base + self.Images + '/'+ 'Cum_CoM'+str(self.Meta['SimTime'][Frame])+'.png',
dpi = 100, bbox_inches='tight')
except KeyError:
plt.savefig(self.Base + self.Images + '/'+ 'Cum_CoM.png',
dpi = 100, bbox_inches='tight')
plt.close()
def cna_traj(self, Sigs = [], Errors = False):
if self.Errors is True:
Errors = True
try:
Time = self.Meta['SimTime']
except KeyError:
Time = range(len(self.Meta['cna_sigs']))
fig,ax = plt.subplots()
fig.set_size_inches(9,3)
for x in Sigs:
try:
ax.plot(Time, self.Collect_CNA(x), label = x)
if Errors is True:
ax.fill_between(Time,
np.asarray(self.Collect_CNA(x)) + np.asarray(self.Collect_CNA_error(x)),
np.asarray(self.Collect_CNA(x)) - np.asarray(self.Collect_CNA_error(x)),
alpha = 0.25)
except ValueError:
print(x, type(x))
with open(self.Base+'Plotting_Info.txt', "a") as f:
                    f.write("\nSignature, '{0}', not in metadata.\n".format(x))
ax.set_xlabel('Time (ps)')
ax.set_ylabel('Probability')
fig.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
ncol=3, mode="expand", borderaxespad=0. ,fontsize = 12)
plt.savefig(self.Base + self.Images + '/'+ 'CNA_Traj'+'.png',
dpi = 100, bbox_inches='tight')
plt.close()
def h_c(self, Errors = False):
if self.Errors is True:
Errors = True
Time = self.Meta['SimTime']
fig,ax = plt.subplots()
fig.set_size_inches(9,3)
ax.plot(Time, self.Meta['h'], label = 'Collectivity')
ax.plot(Time, self.Meta['c'], label = 'Concertedness')
if Errors is True:
ax.fill_between(Time[1:],
self.Meta['h']+self.Err['h'],
self.Meta['h']-self.Err['h'],
alpha = 0.25)
ax.fill_between(Time[2:-1],
self.Meta['c']+self.Err['c'],
self.Meta['c']-self.Err['c'],
alpha = 0.25)
ax.set_xlabel('Time (ps)')
ax.set_ylabel(' H / C')
fig.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left',
ncol=3, mode="expand", borderaxespad=0. ,fontsize = 12)
plt.savefig(self.Base + self.Images + '/'+ 'HC_Stats'+'.png',
dpi = 100, bbox_inches='tight')
plt.close()
def pair_plot(Data, System):
try:
HeAdj = Data['HeAdj']
NewHe = []
except KeyError:
sys.exit()
for x in range(len(HeAdj)):
try:
NewHe.append(sum(HeAdj[x][1]))
except TypeError:
pass
fig,ax = plt.subplots()
fig.set_size_inches(9,3)
ax.plot(Data['SimTime'], [sum(Data['HoAdjPt'][x]) for x in range(len(Data['HoAdjPt']))], 'orange', label='Pt only')
ax2 = ax.twinx()
ax2.plot(Data['SimTime'], [sum(Data['HoAdjAu'][x]) for x in range(len(Data['HoAdjAu']))] , 'blue', label = 'Au only')
ax3 = ax.twiny()
ax3.plot(NewHe, label = 'Hetero pairs only', color='red')
ax2.axes.yaxis.set_visible(False)
ax3.axes.xaxis.set_visible(False)
labels = [item.get_text() for item in ax.get_yticklabels()]
empty_string_labels = ['']*len(labels)
ax.set_yticklabels(empty_string_labels)
ax.set_xlabel('Time (ps)')
ax.set_ylabel('Number of pairs')
fig.legend(bbox_to_anchor=(0, 1.0, 1., 0), loc='lower left',
ncol=3, mode="expand", borderaxespad=0. ,fontsize = 12)
plt.savefig(System['base_dir']+System['plot_dir'] + '/Pairs.png', dpi = 100, bbox_inches='tight')
def All_CNA_Traj(System, Pipeline, outfile):
CNA = []
for x in System['iter_dir']:
for y in [(4,2,2), (4,2,1), (3,1,1)]:
Index = Pipeline.BigMeta[x]['cna'][0][0].index(y)
Temp = [ Pipeline.BigMeta[x]['cna'][i][1][Index] for i in range(len(Pipeline.BigMeta[x]['cna'])) ]
CNA.append(Temp)
x = Pipeline.BigMeta[System['iter_dir'][0]]['SimTime']
fig, axs = plt.subplots(2, 2, sharex='col', sharey='row')
fig.set_size_inches(9,3)
(ax1, ax2), (ax3, ax4) = axs
ax1.plot(x, CNA[0], label = '(4 2 2)')
ax1.plot(x, CNA[1], label = '(4 2 1)')
ax1.plot(x, CNA[2], label = '(3 1 1)')
ax2.plot(x, CNA[3])
ax2.plot(x, CNA[4])
ax2.plot(x, CNA[5])
ax3.plot(x, CNA[6])
ax3.plot(x, CNA[7])
ax3.plot(x, CNA[8])
ax4.plot(x, CNA[9])
ax4.plot(x, CNA[10])
ax4.plot(x, CNA[11])
for ax in axs.flat:
ax.label_outer()
ax.set_ylim(0, 0.7)
fig.legend( loc='upper center', ncol=3, fontsize = 10)
plt.savefig(outfile, dpi = 100, bbox_inches='tight')
"""
##########################################################################
The following are old functions with little utility but may be
reintroduced if there is demand for such things.
def AGCN_Excess():
Excess = []
for i in range( len( AverageMeta['agcn'] ) ):
Temp = [ a>12 for a in AverageMeta['agcn'][i] ]
Excess.append(np.sum(Temp))
return Excess
def Strange_CNA():
Indices = [ 14, 15, 24, 25, 38 ] #37 and on to the end are all odd
CNA = AverageMeta['cna'] # All of the heights
Strange_Dict = {}
for Index in Indices:
Strange_Dict[AverageMeta['masterkey'][Index]] = np.zeros((len(CNA)), dtype = np.float64)
for Key in AverageMeta['masterkey'][Indices[-1]:]:
Strange_Dict[Key] = np.zeros((len(CNA)), dtype = np.float64)
Key = list(Strange_Dict.keys())
Mast = AverageMeta['masterkey']
for frame in range(len(CNA)):
for Sig in CNA[frame]:
for obj in Key:
if list(CNA[frame]).index(Sig) == Mast.index(obj):
if Sig > 0:
Strange_Dict[obj][frame] = 1
Bar_Heights = []
for Item in Strange_Dict:
Bar_Heights.append( np.sum(Strange_Dict[Item]) )
return (Strange_Dict.keys(), Bar_Heights)
fig, ax = plt.subplots()
fig.set_size_inches((21,7))
ax.plot(New, label = '(4,5,5)', color='k')
Ticks = range(0,1500,50)
for tick in Ticks:
ax.vlines(tick, ymin=0, ymax = 1.1*np.amax(New), color='r', linestyle = '--')
ax2 = ax.twinx()
ax2.scatter(Ticks, AverageMeta['R_Cut'], linewidths = 6, color='g')
ax.tick_params(axis = 'both', which = 'major', labelsize = 20)
ax2.tick_params(axis = 'both', which = 'major', labelsize = 20)
ax2.set_ylabel("Nearest neighbour cutoff (Angstrom)", fontsize = 20)
ax.set_xlabel("Time (ps)", fontsize = 20)
ax.set_ylabel("Probability", fontsize = 20)
fig, ax = plt.subplots()
fig.set_size_inches((21,7))
rect =ax.bar(X_Key, B, tick_label = X_Key)
plt.xticks(rotation = 90)
ax.tick_params(axis='both', which='major', labelsize=20)
ax.set_xlabel("CNA signature", fontsize = 20)
ax.set_ylabel("Number of frames", fontsize=20)
autolabel(rect)
def cna_plotter(Frame):
X_CNA = [ str(a) for a in AverageMeta['masterkey'][:36] ] # Create a set of ticks for the x-axis
fig = plt.figure(figsize = (9,3) )
ax = plt.bar( X_CNA, AverageMeta['cna'][Frame][:36], tick_label = X_CNA )
plt.xlabel("CNA Signature", fontsize = 12)
plt.ylabel("Probability", fontsize = 12)
plt.xticks(rotation=90,fontsize = 14)
plt.text( X_CNA[20], 0.8*np.amax(AverageMeta['cna'][Frame]), 'Time: %sps\nTemp: %sK' %(AverageMeta["SimTime"][Frame], AverageMeta['Temp'][Frame]) )
plt.savefig(path + 'Images/'+ 'CNA'+str(Frame)+'.png', dpi = 100, bbox_inches='tight')
##########################################################################
"""
```
#### File: Sapphire/Graphing/Read_Plot.py
```python
import pickle
from ase.io import read
import numpy as np
import matplotlib.pyplot as plt
def distance(a, b):
dx = abs(a[0] - b[0])
dy = abs(a[1] - b[1])
dz = abs(a[2] - b[2])
return np.sqrt(dx**2 + dy**2 + dz**2)
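# Worked example: distance((0, 0, 0), (1, 2, 2)) = sqrt(1 + 4 + 4) = 3.0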
def Collect_CNA(Data, Sig):
"""
Parameters
----------
Data : TYPE - List
Must be of the form
Read_Data = Reader(...)... etc...
Data = Read_Data[2][Key] for whichever simulation
you wish to extract the cna signatures from.
Note that the index '2' corresponds to the extracted cna sigs.
Sig : TYPE - Tuple
Will be of the form (r, s, t) EXACTLY
where r,s,t are the triplet of the desired signature
Returns
-------
list
        A list of the normalised frequency of occurrence of the given
        signature in the given simulation.
"""
try:
Index = Data[0][1].index( Sig )
#This will pull the index of the desired signature from the
#first frame of the data.
return [ Data[x][0][Index] for x in range(len(Data)) ]
except Exception as e:
print(e)
return None
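# Usage sketch, following the docstring above (the key and signature are illustrative):
#   Read_Data = Reader(T, Seed, Struts, Sims)
#   freq_421 = Collect_CNA(Read_Data[2]['Co/Sim-1345/'], (4, 2, 1))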
Sims = ['Sim-1345/', 'Sim-2783/', 'Sim-3987/', 'Sim-4009/']
Struts = ['Co/', 'Ih/']
def New_File(path, new_movie='Quantity_movie.xyz', Quantities = []):
Reference = read(path, index = ':')
"""
Robert:
    At the moment this function only supports appending the aGCN to the
    new xyz file, but it can easily be extended as needs dictate.
"""
with open(new_movie, 'w+') as movie:
movie.write(str(len(Reference[0])) +'\n')
movie.write('\t' + "This was made by Jones' post-processing code." + '\n')
for i in range(len(Reference)):
Ele = Reference[i].get_chemical_symbols()
Pos = Reference[i].positions
items = np.column_stack(( Ele, Pos))
for obj in Quantities:
items = np.column_stack((items, obj[i]))
for atom in items:
movie.write(' \t'.join(str(item) for item in atom) +'\n')
movie.write(str(len(Ele)) + '\n')
movie.write('\n')
def hebond(data):
a = np.array([ sum(data[t][1]) for t in range(len(data)) ])
b = np.array([ np.concatenate((data[t][0], data[t][1]), axis = 0) for t in range(len(data)) ])
c = [ [0] * len(data[0][0]) ]; d = [ [0] * len(data[0][1]) ]
for t in range(len(data)-1):
        c.append( [ data[t+1][0][x] - data[t][0][x] for x in range(len(data[t][0])) ] )
        d.append( [ data[t+1][1][x] - data[t][1][x] for x in range(len(data[t][1])) ] )
e = []
f = []
for t in range(len(data)):
e.append(np.concatenate((c[t], d[t])))
e = np.array(e)
c = [0]
for t in range(len(data)-1):
c.append( sum(data[t+1][1]) - sum(data[t][1]))
c = np.array(c)
d = np.array([data[t][1] for t in range(len(data)) ])
return a,b,e,c,d
def Relative(headj, nn):
return [ headj[t][1] / nn[t][309:] for t in range(len(nn)) ]
def Init():
"""
    Returns
    -------
    tuple of dict
        Empty dictionaries, in order: edelta, comspace, cna_sigs, com, comdist,
        surf_atoms, comAu, comPt, hoadjAu, hoadjPt, comdistAu, comdistPt,
        midcomdistAu, midcomdistPt, surf_atomsPt, headj, mix, nn, PtAu, PtOnly,
        AvgCoPt, Gyration, GyrationPt. Reader() later fills each one, keyed by
        'Strut+Sim'.
"""
edelta = {}; comspace = {}; cna_sigs = {}
com = {}; comdist = {}; surf_atoms = {}
comAu = {}; comPt = {}; hoadjAu = {}; hoadjPt = {}
comdistAu = {}; comdistPt = {}; midcomdistPt = {} ; nn = {}
midcomdistAu = {}; surf_atomsPt = {}; headj = {}; mix = {}
PtAu = {}; PtOnly = {}; AvgCoPt = {}; GyrationPt = {}; Gyration = {}
return (edelta, comspace, cna_sigs, com, comdist,
surf_atoms, comAu, comPt, hoadjAu, hoadjPt, comdistAu,
comdistPt, midcomdistAu, midcomdistPt, surf_atomsPt,
headj, mix, nn, PtAu, PtOnly, AvgCoPt, Gyration, GyrationPt)
def Reader(T, Seed, Struts, Sims):
"""
    Read the pickled 'Metadata.csv' for every Strut/Sim combination under
    T+Seed and populate the dictionaries created by Init().
    Parameters
    ----------
    T, Seed : str
        Path components locating this batch of simulations.
    Struts, Sims : list of str
        Sub-directories to iterate over, e.g. ['Co/', 'Ih/'] and ['Sim-1345/', ...].
    Returns
    -------
    init : tuple of dict
        The Init() tuple, populated with one entry per 'Strut+Sim' key.
"""
init = Init()
for Strut in Struts:
for Sim in Sims:
try:
with open(T+Seed+Strut+Sim+'Metadata.csv', 'rb') as infile:
Temp = pickle.load(infile)
init[0][Strut+Sim] = Temp['edelta'] #t-type: number
init[1][Strut+Sim] = Temp['comspace'] #t-type: array
init[2][Strut+Sim] = Temp['cna_sigs'] #t-type: number
init[3][Strut+Sim] = Temp['com'] #t-type: array
init[4][Strut+Sim] = Temp['comdist'] #t-type: array
init[5][Strut+Sim] = Temp['surf_atoms'] #t-type: number
init[6][Strut+Sim] = Temp['comAu'] #t-type: array
init[7][Strut+Sim] = Temp['comPt'] #t-type: array
hoadjAu = Temp['hoadjAu']
init[8][Strut+Sim] = np.array([ x for x in hoadjAu ] ) #t-type: list
hoadjPt = Temp['hoadjPt']
init[9][Strut+Sim] = np.array([ x for x in hoadjPt ] ) #t-type: list
init[10][Strut+Sim] = Temp['comdistAu'] #t-type: array
init[11][Strut+Sim] = Temp['comdistPt'] #t-type: array
init[12][Strut+Sim] = Temp['midcomdistPt'] #t-type: array
init[13][Strut+Sim] = Temp['midcomdistAu'] #t-type: array
init[14][Strut+Sim] = Temp['surf_atomsPt'] #t-type: number
headj = Temp['headj'] #t-type: tuple #######
PtAuTemp = []
PtOnlyTemp = []
for t in range(len(headj)):
Temp1 = [ x for x in headj[t][1] if x == 0 ]
Temp2 = [ x for x in headj[t][1] if x > 9 ]
PtAuTemp.append(len(Temp2))
PtOnlyTemp.append(len(Temp1)/55)
init[15][Strut+Sim] = PtAuTemp
init[16][Strut+Sim] = PtOnlyTemp
init[17][Strut+Sim] = Temp['mix'] #t-type: number
Spare = []
for t in range(len(headj)):
c = np.average([ Temp['hoadjPt'][t][i] + headj[t][0][i] for i in range(len(Temp['hoadjPt'][t])) ])
Spare.append(c)
init[18][Strut+Sim] = Spare #t-type: number
init[19][Strut+Sim] = Temp['gyrationPt'] #t-type: number
init[20][Strut+Sim] = Temp['gyration'] #t-type: number
init[21][Strut+Sim] = headj
del(Temp)
print(Strut+Sim)
except Exception as e:
print(e)
return init
def clean(data, strut):
System = {
        'edelta' : np.zeros(len(data[0]), dtype = float),
'comspace' : np.zeros(len(data[1]), dtype = float),
'421' : np.zeros(len(data[2]), dtype = float),
'422' : np.zeros(len(data[2]), dtype = float),
'555' : np.zeros(len(data[2]), dtype = float),
'com' : np.zeros(len(data[3]), dtype = object),
'comdist' : np.zeros(len(data[4]), dtype = object),
'surf_atoms' : np.zeros(len(data[5]), dtype = float),
'comAu' : np.zeros(len(data[6]), dtype = object),
'comPt' : np.zeros(len(data[7]), dtype = object),
'hoadjAu' : np.zeros(len(data[8]), dtype = object),
'hoadjPt' : np.zeros(len(data[9]), dtype = object),
'comdistAu' : np.zeros(len(data[10]), dtype = object),
'comdistPt' : np.zeros(len(data[11]), dtype = object),
'midcomdistAu' : np.zeros(len(data[13]), dtype = object),
'midcomdistPt' : np.zeros(len(data[12]), dtype = object),
'surf_atomsPt' : np.zeros(len(data[14]), dtype = float),
'mix' : np.zeros(len(data[16]), dtype = float),
'headj' : np.zeros(len(data[15]), dtype = object),
'atombonds' : np.zeros(len(data[15]), dtype = object),
'deltaatoms' : np.zeros(len(data[15]), dtype = object),
'deltabonds' : np.zeros(len(data[15]), dtype = object),
'nnadj' : np.zeros(len(data[15]), dtype = object),
'nn' : np.zeros(len(data[15]), dtype = object),
'PtOnly' : np.zeros(len(data[15]), dtype = object),
'PtAu' : np.zeros(len(data[15]), dtype = object),
'GyrPt' : np.zeros(len(data[15]), dtype = object),
'Gyr' : np.zeros(len(data[15]), dtype = object),
'AvgCoPt' : np.zeros(len(data[15]), dtype = object)
}
Keys = data[0].keys()
print(Keys)
Tempedelta = []; Tempcomspace = []; Temp421 = []; Temp422 = []; Temp555 = []
Tempcom = []; Tempcomdist = []; Tempsurf_atoms = []
TempcomAu = []; TempcomPt = []; TemphoadjAu = []; TemphoadjPt = []
TempcomdistAu = []; TempcomdistPt = []; TempmidcomdistPt = []
TempmidcomdistAu = []; Tempsurf_atomsPt = []; Tempmix = []
Tempheadj = []; Tempatombonds = []; Tempdeltaatoms = []; Tempdeltabonds = []
Tempnnadj = []; Tempnn = []; TempPtOnly = []; TempPtAu = []
TempGyrPt = []; TempGyr = []; TempAvgCoPt = []
for Key in Keys:
try:
Tempedelta.append(data[0][Key])
Tempcomspace.append(data[1][Key])
Temp421.append( Collect_CNA( data[2][Key], (4, 2, 1) ) )
Temp422.append( Collect_CNA( data[2][Key], (4, 2, 2) ) )
Temp555.append( Collect_CNA( data[2][Key], (5, 5, 5) ) )
Tempcom.append(data[3][Key])
Tempcomdist.append(data[4][Key])
Tempsurf_atoms.append(data[5][Key])
TempcomAu.append(data[6][Key])
TempcomPt.append(data[7][Key])
TemphoadjAu.append(data[8][Key])
TemphoadjPt.append(data[9][Key])
TempcomdistAu.append(data[10][Key])
TempcomdistPt.append(data[11][Key])
TempmidcomdistAu.append(data[13][Key])
TempmidcomdistPt.append(data[12][Key])
Tempsurf_atomsPt.append(data[14][Key])
TempPtAu.append(data[15][Key])
TempPtOnly.append(data[16][Key])
Tempmix.append(data[17][Key])
TempAvgCoPt.append(data[18][Key])
TempGyrPt.append(data[19][Key])
TempGyr.append(data[20][Key])
HeAdj = hebond(data[21][Key])
Tempheadj.append(HeAdj[0])
Tempatombonds.append(HeAdj[1])
Tempdeltaatoms.append(HeAdj[2])
Tempdeltabonds.append(HeAdj[3])
Tempnnadj.append(HeAdj[4])
#New_File(Key+'NewMovie.xyz', new_movie=Key+'Quantity_movie.xyz', Quantities = [HeAdj[1], HeAdj[2]])
except Exception as e:
print(e)
    System['edelta'] = np.average(Tempedelta, axis = 0)
System['comspace'] = np.average(Tempcomspace, axis = 0)
System['421'] = np.average(Temp421, axis = 0)
System['422'] = np.average(Temp422, axis = 0)
System['555'] = np.average(Temp555, axis = 0)
System['com'] = np.average(Tempcom, axis = 0)
System['comdist'] = np.average(Tempcomdist, axis = 0)
System['surf_atoms'] = np.average(Tempsurf_atoms, axis = 0)
System['comAu'] = np.average(TempcomAu, axis = 0)
System['comPt'] = np.average(TempcomPt, axis = 0)
System['hoadjAu'] = np.average(TemphoadjAu, axis = 0)
System['hoadjPt'] = np.average(TemphoadjPt, axis = 0)
System['comdistAu'] = np.average(TempcomdistAu, axis = 0)
System['comdistPt'] = np.average(TempcomdistPt, axis = 0)
System['midcomdistAu'] = np.average(TempmidcomdistAu, axis = 0)
System['midcomdistPt'] = np.average(TempmidcomdistPt, axis = 0)
System['surf_atomsPt'] = np.average(Tempsurf_atomsPt, axis = 0)
System['mix'] = np.average(Tempmix, axis = 0)
System['headj'] = np.average(Tempheadj, axis = 0)
System['atombonds'] = np.average(Tempatombonds, axis = 0)
System['deltaatoms'] = np.average(Tempdeltaatoms, axis = 0)
System['deltabonds'] = np.average(Tempdeltabonds, axis = 0)
System['nnadj'] = np.average(Tempnnadj, axis = 0)
System['PtAu'] = np.average(TempPtAu, 0)
System['PtOnly'] = np.average(TempPtOnly, 0)
System['nn'] = np.average(Tempnn, 0)
System['GyrPt'] = np.average(TempGyrPt, 0)
System['Gyr'] = np.average(TempGyr, 0)
System['AvgCoPt'] = np.average(TempAvgCoPt, 0)
return System
class Plot():
def __init__(self):
return None
def plotting(self, System, Strut, T):
self.System = System
self.Strut = Strut
self.T = T
        self.Time = np.linspace(0, 500, len(self.System['edelta'])) #time in ns
def cna(self, name = 'CNA_Sigs'):
fig, ax = plt.subplots()
fig.set_size_inches(8,3)
ax.plot(self.Time, self.System['421'])
ax.plot(self.Time, self.System['422'])
ax2 = ax.twinx()
x_filt = self.Time[self.System['555'] > 0]
y_filt = self.System['555'][self.System['555'] > 0]
ax2.scatter(x_filt, y_filt, color= 'g')
ax2.set_ylim(0,max(y_filt))
ax2.set_yticklabels([])
ax.set_xlabel('Time (ns)', fontsize = 14)
plt.subplots_adjust(right=0.8)
scale = max(ax.get_yticks())/max(ax2.get_yticks())
line_labels = ["(4 2 1)", "(4 2 2)", r"(5 5 5)$\times$%s" %int(scale)]
fig.legend(labels=line_labels,loc = 'center right', title = 'CNA Sigs')
plt.savefig(name+self.T+self.Strut+'.png', dpi=400, bbox_inches='tight')
def hebonds(self, name = 'Bonds'):
fig, ax = plt.subplots()
fig.set_size_inches(8,3)
ax.plot(self.Time,
[ self.System['headj'][t]/ self.System['headj'][0] for t in range(len(self.Time))],
label = '#Hetero Bonds')
ax.set_xlabel('Time (ns)', fontsize = 14)
ax2 = ax.twinx()
ax2.plot(self.Time,
[ self.System['surf_atomsPt'][t]/ self.System['surf_atomsPt'][0] for t in range(len(self.Time))],
color='k')
ax2.set_ylabel('#Surf(t) / #Surf(0)')
ax.set_ylabel('#He(t) / #He(0)')
labels = ['Hetero bonds', 'Pt Surface']
fig.legend(labels = labels, loc = 'center', fontsize = 14)
plt.savefig(name+self.T+self.Strut+'.png', dpi=400, bbox_inches='tight')
def comtraj(self, name = 'Distance'):
fig, ax = plt.subplots()
fig.set_size_inches(8,3)
        A = np.array([ distance(self.System['comPt'][t], self.System['comAu'][t]) for t in range(len(self.Time)) ])
ax.plot(self.Time, max(A) - A)
ax2 = ax.twinx()
ax2.plot(self.Time, [ self.System['surf_atomsPt'][t]/self.System['surf_atomsPt'][0] for t in range(len(self.Time))], color='k')
ax.set_ylabel(r'$\Delta$|CoM(Pt), CoM(Au)|')
ax2.set_ylabel('#Surf(t) / #Surf(0)')
ax.set_xlabel('Time (ns)', fontsize = 14)
labels = ['Distance', 'Pt Surface']
fig.legend(labels = labels, loc = 'center', fontsize = 14)
plt.savefig(name+self.T+self.Strut+'.png', dpi=400, bbox_inches='tight')
def midPtDist(self, name = 'MidPtDist'):
imin = []; imax = []
for t in range(len(self.Time)):
s = np.flatnonzero(self.System['midcomdistPt'][t] > 0)
imin.append(s[0])
imax.append(s[-1])
fig, ax = plt.subplots()
fig.set_size_inches(8,3)
ax.scatter(self.Time, self.System['comspace'][imin]/10, color= 'r', label = 'Min')
ax.scatter(self.Time, self.System['comspace'][imax]/10, color= 'g', label = 'Max')
fig.legend(loc = 'center')
ax.set_xlabel('Time (ns)', fontsize = 14)
ax.set_ylabel('Distance (nm)')
plt.savefig(name+self.T+self.Strut+'.png', dpi=400, bbox_inches='tight')
def PtDist(self, name = 'PtDist'):
imin = []; imax = []
for t in range(len(self.Time)):
s = np.flatnonzero(self.System['comdistPt'][t] > 0)
imin.append(s[0])
imax.append(s[-1])
fig, ax = plt.subplots()
fig.set_size_inches(8,3)
ax.scatter(self.Time, self.System['comspace'][imin]/10, color= 'r', label = 'Min')
ax.scatter(self.Time, self.System['comspace'][imax]/10, color= 'g', label = 'Max')
fig.legend(loc = 'center')
ax.set_xlabel('Time (ns)', fontsize = 14)
ax.set_ylabel('Distance (nm)')
plt.savefig(name+self.T+self.Strut+'.png', dpi=400, bbox_inches='tight')
def AuExpand(self, name = 'AuVolume'):
Vol = []; imax = []
for t in range(len(self.Time)):
s = np.flatnonzero(self.System['comdistAu'][t] > 0)
imax.append(s[-1])
v1 = np.trapz(self.System['comdistAu'][t][:s[-1]],
self.System['comspace'][:s[-1]], dx = 0.05)
v2 = np.trapz(self.System['comdistAu'][t][:imax[0]],
self.System['comspace'][:imax[0]], dx = 0.05)
Vol.append(abs(v1-v2))
fig,ax = plt.subplots()
fig.set_size_inches(8,3)
ax.plot(self.Time, Vol, color = 'g')
ax.set_ylabel('% Volume')
ax.set_xlabel('Time (ns)', fontsize = 14)
plt.savefig(name+self.T+self.Strut+'.png', dpi = 400, bbox_inches='tight')
def AuNeigh(self, name = 'AuNN'):
fig,ax = plt.subplots()
fig.set_size_inches(10,6)
for a in range(2,9):
B = []
for t in range(len(self.Time)):
temp = [ x for x in self.System['nnadj'][t] if x == a]
B.append(len(temp))
ax.plot(self.Time,B, label = 'NN $\geq$ %s' %(a))
ax.set_xlabel('Time (ns)')
ax.set_ylabel('Au neighbours')
fig.legend(loc = 'center right', fontsize = 13)
plt.subplots_adjust(right=0.825)
plt.savefig(name+self.T+self.Strut+'.png', dpi = 400, bbox_inches='tight')
def CompBars(self, name = 'CompBars'):
fig,ax = plt.subplots()
fig.set_size_inches(10,6)
for t in range(0,len(self.Time),10):
bins = np.arange(0, 1.05, 0.05)
a,b = np.histogram(self.System['PtOnly'][t], bins)
bin_width = b[1]-b[0]
bin_cents = [ b[i]+ bin_width for i in range(len(b)-1) ]
fig,ax = plt.subplots()
fig.set_size_inches(8,5)
ax.bar(bin_cents, a, width= 0.05, color = 'k')
ax.set_xlabel('NN$_{Au}$ / NN$_{Tpt}$', fontsize = 16)
ax.set_ylabel('Frequency', fontsize = 16)
ax.text(0.4, 0.5*max(a), 'Time | {:d} ns'.format(int(self.Time[t])), fontsize = 14)
ax.set_xlim(0,1)
            plt.savefig(name+self.T+self.Strut+'%s.png'%t, dpi = 400, bbox_inches='tight')
            plt.show()
def Panels(self, name = 'MultiPanels'):
fig,axs = plt.subplots(2,1)
fig.set_size_inches(14,6)
ax1,ax2 = axs
ax1.plot(self.Time, self.System['AvgCoPt'], label =r'$\langle$ NN$_{Pt}$ $\rangle$', color = 'k')
ax1.set_ylabel(r'$\langle$ NN$_{Pt}$ $\rangle$', fontsize = 16)
ax1.set_xticklabels([])
ax12 = ax1.twinx()
ax12.plot(self.Time, [self.System['PtOnly'][t]/55 for t in range(len(self.Time))], label = 'Pt | No Au NN', color='r')
ax12.plot(self.Time, self.System['PtAu'], label = r'NN(Pt)|$_{Au>10}$', color = 'g')
ax12.set_ylabel('#Pt', fontsize = 16)
ax2.plot(self.Time, self.System['mix'], label = r'$\mu$', color = 'y')
ax2.set_ylabel('Mixing', fontsize = 16)
ax22 = ax2.twinx()
ax22.plot(self.Time, [ self.System['GyrPt'][t] / self.System['GyrPt'][0] for t in range(len(self.Time))], label = 'Pt Radius of gyration', color = 'c')
ax22.plot(self.Time, [ self.System['Gyr'][t] / self.System['Gyr'][0] for t in range(len(self.Time))], label = 'Radius of gyration', color = 'm')
ax22.set_ylabel('RoG(t) / RoG(0)', fontsize = 16)
ax2.set_xlabel('Time (ns)', fontsize = 16)
lines_labels = [ax.get_legend_handles_labels() for ax in fig.axes]
lines, labels = [sum(lol, []) for lol in zip(*lines_labels)]
fig.legend(lines,labels,loc = 'upper center', fontsize = 14, ncol = 6)
plt.savefig(name+self.T+self.Strut+'.png', dpi = 400, bbox_inches='tight')
```
#### File: Sapphire/IO/Output.py
```python
import os
import sys
import warnings
from inspect import getmembers, isfunction
import inspect
import numpy as np
from ase.io import read
import scipy.sparse as sp
from Utilities import Initial
no_dir_template = "\nThere does not exist a suitable directory in which to place these " \
                  "quantities.\n\nInstead, we shall generate one at '%s'.\n"
no_file_template = "\nThere does not exist a file in which to write the quantity %s.\n" \
"\nInstead, we shall create the file '%s' at location '%s'."
AttrErr = "Unable to find a write object for {0}:\n"\
"\nException traceback:\n{1}.\n"
class Writer():
"""
Robert:
This class object has been written with the purpose of handling the
creation and distribution of Sapphire Output.
    In version 0.10.1, the pickle function is inadequate to handle
    the entirety of the metadata.
    In principle, all output handling should take place out of
    sight of the user.
"""
def __init__(self, System, Metadata):
self.output_info_file = System['base_dir']+'Output_Info.txt'
self.output_error_file = System['base_dir']+'Output_Errors.txt'
self.Quants = {
'Dir': 'Time_Dependent/', 'File': 'R_Cut', 'Iterate': False, 'Bool': False,
'Skip': True, 'Energy': False, 'Homo': False, 'Hetero': False, 'xyz': False
}
self.Metadata = Metadata # This is the data provided to the user by Sapphire after post processing
self.System = System # Significant system information regarding I/O streams
self.Logo = Initial.Logo().Logo()
with open(self.output_info_file, 'w') as outfile:
outfile.write(self.Logo)
outfile.write('\n')
with open(self.output_error_file, 'w') as outfile:
outfile.write(self.Logo)
outfile.write('\n')
"""
        This provides a dictionary with the function names as keys and the
        functions themselves as values, giving a one-to-one mapping between
        the output quantities and the routines that write them.
"""
self.functions_list = [o for o in getmembers(Writer) if isfunction(o[1])]
self.Functions = {}
for x in self.functions_list:
if x in self.Quants.keys():
self.Functions[x[0]] = inspect.getfullargspec(x[1])[0][1:]
def ensure_dir(self, base_dir='', file_path=''):
"""
Robert:
A simple script to verify the existence of a directory
given the path to it. If it does not exist, will create it.
"""
directory = base_dir + file_path
if not os.path.exists(directory):
os.makedirs(directory)
            with open(self.output_info_file, 'a') as outfile:
outfile.write(no_dir_template % (base_dir+file_path))
def MakeFile(self, Attributes):
self.out = self.System['base_dir'] + Attributes['Dir'] + Attributes['File']
if not os.path.isfile(self.out):
with open(self.System['base_dir'] + Attributes['Dir'] + Attributes['File'], 'w') as out:
out.close()
else:
pass
def Masterkey(self, Quantity):
try:
with open(self.out, 'w') as f:
for item in self.Metadata[self.x]:
f.write(str(item)+'\n')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
                outfile.write(AttrErr.format(self.x, e))
def Adj(self, Quantity):
self.out = self.System['base_dir'] + Quantity['Dir'] + Quantity['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Quantity['Dir'])
for i, t in enumerate(self.Metadata[self.x]):
try:
self.filename = self.System['base_dir'] + Quantity['Dir'] + 'File%s' % i
self.Mat = sp.csr_matrix.todense(t)
with open(self.filename, 'w') as f:
for line in self.Mat:
np.savetxt(f, line, fmt='%d')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
                    outfile.write(AttrErr.format(self.x, e))
def Ele(self, Quantity):
self.out = self.System['base_dir'] + Quantity['Dir'] + Quantity['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Quantity['Dir'])
with open(self.out, 'w') as file:
for i, t in enumerate(self.Metadata[self.x]):
try:
self.filename = self.System['base_dir'] + Quantity['Dir'] + 'File%s' % i
file.write('\t|\t'.join(str(item) for item in t[0])+'\n')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
                        outfile.write(AttrErr.format(self.x, e))
def HeAdj(self, Quantity):
self.Homo = self.System['Homo']
for Ele in self.Homo:
if len(self.Metadata[self.x]) > 1:
Temp = np.column_stack((
self.Metadata[self.x][0][self.Homo.index(Ele)],
self.Metadata[self.x][1][self.Homo.index(Ele)]
))
for t in range(2, len(self.Metadata[self.x])):
Temp = np.column_stack((
Temp, np.array(self.Metadata[self.x][t][self.Homo.index(Ele)], int)
))
np.savetxt(
self.System['base_dir'] + Quantity['Dir'] + Quantity['File']+Ele,
Temp.transpose(), fmt='%d')
else:
np.savetxt(
self.System['base_dir'] + Quantity['Dir'] + Quantity['File']+Ele,
np.array(self.Metadata[self.x][0][self.Homo.index(Ele)]).transpose(),
fmt='%d')
def Write_Homo(self, Quantity):
# self.MakeFile(Quantity) #See if the file already exists
for Ele in self.System['Homo']:
File = str(self.x)[:-2]+Ele
self.out = self.System['base_dir'] + Quantity['Dir'] + Quantity['File']+Ele
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Quantity['Dir'])
try:
if not Quantity['Iterate'] and not Quantity['Bool'] and not Quantity['array']:
try:
np.savetxt(self.out, self.Metadata[File], fmt='%s')
except Exception as e:
with open(self.output_error_file, 'a') as error:
error.write(AttrErr.format(str(File), str(e)))
try:
with open(self.out, 'a') as CurrentOut:
CurrentOut.write(str(File)+str(self.Metadata[File]))
CurrentOut.write('\n')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
                            outfile.write(AttrErr.format(File, e))
elif Quantity['Iterate'] and Quantity['array']:
try:
if len(self.Metadata[File]) > 1:
Temp = np.column_stack((self.Metadata[File][0], self.Metadata[File][1]))
for t in range(2, len(self.Metadata[File])):
Temp = np.column_stack((Temp, self.Metadata[File][t]))
np.savetxt(self.out, Temp.transpose(), fmt='%f')
else:
np.savetxt(
self.out,
np.array(self.Metadata[File][0]).transpose(),
fmt='%f')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
                            outfile.write(AttrErr.format(File, e))
elif Quantity['Iterate'] and not Quantity['array']:
try:
np.savetxt(self.out, np.array(self.Metadata[File], dtype=float).transpose(), fmt='%f')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
                            outfile.write(AttrErr.format(File, e))
except Exception as e:
with open(self.output_error_file, 'a') as error:
error.write(AttrErr.format(str(File), str(e)))
def Write(self, Quantity):
self.out = self.System['base_dir'] + Quantity['Dir'] + Quantity['File']
self.ensure_dir(base_dir=self.System['base_dir'], file_path=Quantity['Dir']) # See if the directory already exists
# self.MakeFile(Quantity) #See if the file already exists
if Quantity['Exec']:
try:
with open(self.out, 'a') as CurrentOut:
CurrentOut.write(str(self.x)+'\t|\t'+str(self.Metadata[self.x]))
CurrentOut.write('\n')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
                    outfile.write(AttrErr.format(self.x, e))
else:
try:
if Quantity['Bool']:
try:
with open(self.out, 'a') as CurrentOut:
CurrentOut.write(str(self.x) + '\t|\t' + str(self.Metadata[self.x]))
CurrentOut.write('\n')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
                            outfile.write(AttrErr.format(self.x, e))
elif not Quantity['Iterate'] and not Quantity['Bool'] and not Quantity['array']:
try:
np.savetxt(self.out, self.Metadata[self.x], fmt='%s')
except Exception as e:
with open(self.output_error_file, 'a') as error:
error.write(AttrErr.format(str(self.x), str(e)))
try:
with open(self.out, 'a') as CurrentOut:
CurrentOut.write(str(self.x)+str(self.Metadata[self.x]))
CurrentOut.write('\n')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
                            outfile.write(AttrErr.format(self.x, e))
elif Quantity['Iterate'] and Quantity['array']:
try:
if len(self.Metadata[self.x]) > 1:
Temp = np.column_stack((self.Metadata[self.x][0], self.Metadata[self.x][1]))
for t in range(2, len(self.Metadata[self.x])):
Temp = np.column_stack((Temp, self.Metadata[self.x][t]))
np.savetxt(self.out, Temp.transpose(), fmt='%f')
else:
np.savetxt(
self.out,
np.array(self.Metadata[self.x][0]).transpose(),
fmt='%f')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
                            outfile.write(AttrErr.format(self.x, e))
elif Quantity['Iterate'] and not Quantity['array']:
try:
np.savetxt(self.out, np.array(self.Metadata[self.x], dtype=float).transpose(), fmt='%f')
except Exception as e:
with open(self.output_error_file, 'a') as outfile:
                            outfile.write(AttrErr.format(self.x, e))
except Exception as e:
with open(self.output_error_file, 'a') as error:
error.write(AttrErr.format(str(self.x), str(e)))
def Run(self, Output_Type):
"""
Robert.
This will need to be handled internally delicately so as to not confuse
the user.
I would like to be able to determine whether or not to call a given
output file type based on it being part of the Full, Homo, or Hetero
sub-systems.
In principle, the User is at liberty (not now, but soon) to pre-select their
own output parameters. Though deviating from the defaults could be dangerous.
At present, one of three string-types can be assigned to the 'Output_Type'
free variable:
Full - Loads in the OutputInfoFull.py file for its attributes to be read.
Homo - Loads in the OutputInfoHomo.py file for its attributes to be read.
Hetero - Loads in the OutputInfoHetero.py file for its attributes to be read.
"""
if Output_Type == 'Full':
from Utilities import OutputInfoFull as Out # Case 1
elif Output_Type == 'Homo':
from Utilities import OutputInfoHomo as Out # Case 2
elif Output_Type == 'Hetero':
from Utilities import OutputInfoHetero as Out # Case 3
self.Write_List = []
for self.x in self.Metadata.keys(): # Things such as: 'pdf', 'R_Cut', ...
try:
if Output_Type == 'Homo' and self.x.startswith('ho'):
Attributes = getattr(Out, str(self.x[:-2])) # Pulls dictionaries with names corresponding to x as above
with open(self.output_info_file, 'a') as outfile:
outfile.write('Working now with %s and placing it in %s with file name %s.\n' % (self.x, Attributes['Dir'], Attributes['File']))
try:
self.Write_Homo(Attributes)
except Exception as e:
with open(self.output_error_file, 'a') as error:
error.write(AttrErr.format(str(self.x), str(e)))
else:
Attributes = getattr(Out, str(self.x)) # Pulls dictionaries with names corresponding to x as above
if self.x == 'adj':
try:
self.Adj(Attributes)
except Exception as e:
with open(self.output_error_file, 'a') as error:
error.write(AttrErr.format(str(self.x), str(e)))
elif self.x == 'Elements':
try:
self.Ele(Attributes)
except Exception as e:
with open(self.output_error_file, 'a') as error:
error.write(AttrErr.format(str(self.x), str(e)))
elif self.x == 'headj':
try:
self.HeAdj(Attributes)
except Exception as e:
with open(self.output_error_file, 'a') as error:
error.write(AttrErr.format(str(self.x), str(e)))
elif self.x == 'master':
try:
self.Masterkey(Attributes)
except Exception as e:
with open(self.output_error_file, 'a') as error:
error.write(AttrErr.format(str(self.x), str(e)))
else:
self.Write(Attributes)
with open(self.output_info_file, 'a') as outfile:
outfile.write('Working now with %s and placing it in %s with file name %s.\n' % (self.x, Attributes['Dir'], Attributes['File']))
except Exception as e:
with open(self.output_error_file, 'a') as error:
error.write(AttrErr.format(str(self.x), str(e)))
try:
from CNA.Utilities import Pattern_Key as PK
self.pattern_key = PK().Key()
with open(self.System['base_dir'] + 'RecognisedPatterns.txt', 'w') as outfile:
for i, thing in enumerate(self.pattern_key.keys()):
outfile.write(str(i) + ')\t' + str(thing)+':\t')
for item in self.pattern_key[thing]:
outfile.write(str(item) + ':\t' + str(self.pattern_key[thing][item])+'\t|\t')
outfile.write('\n\n')
except Exception as e:
with open(self.output_error_file, 'a') as error:
error.write(AttrErr.format('CNA_Patterns', e))
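# Usage sketch (assumed driver code; Sapphire normally builds System and Metadata during
# post-processing before handing them to Writer):
#   writer = Writer(System, Metadata)
#   for kind in ('Full', 'Homo', 'Hetero'):
#       writer.Run(kind)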
```
#### File: Sapphire/IO/OutWrite.py
```python
from ase.io import read
import numpy as np
import pickle
def ExtendXYZ(Traj, Metadata, Quants, Names):
with open('Extend.xyz', 'w') as movie:
movie.write(str(Metadata['NAtoms'][0]) +'\n')
movie.write('Extra columns are | \t')
for name in Names:
movie.write(str(name) + '\t')
movie.write('\n')
for i, Frame in enumerate(range(len(Metadata['agcn']))):
items = np.column_stack(( Traj[i].get_chemical_symbols(),
Traj[i].positions ))
for obj in Quants:
items = np.column_stack((items, obj[i]))
for atom in items:
movie.write(' \t'.join(str(item) for item in atom) +'\n')
movie.write(str(Metadata['NAtoms'][i]) + '\n')
movie.write('\n')
def count(Input, Value):
return(len([x for x in Input if x == Value]))
def PtInfo(Metadata):
AvgPt = []
Mat = np.zeros((len(Metadata['nn']),13))
for t in range(len(Metadata['nn'])):
for a in range(13):
Mat[t][a] += count(Metadata['headj'][t][1], a)
AvgPt.append(np.average(Metadata['nn'][t][-55:],0))
N = np.column_stack((Mat,AvgPt))
np.savetxt('PtInfo.dat', N)
def AuNN(Metadata):
Temp = []
for t in range(len(Metadata['headj'])):
Temp.append(Metadata['hoadjAu'][t] + Metadata['headj'][t][1])
return Temp
def PtNN(Metadata):
Temp = []
for t in range(len(Metadata['headj'])):
Temp.append(Metadata['headj'][t][0] + Metadata['hoadjPt'][t])
return Temp
def Output():
Traj = read('NewMovie.xyz', index = ':')
with open('Metadata.csv', 'rb') as file:
Metadata = pickle.load(file)
AuNeigh = AuNN(Metadata); PtNeigh = PtNN(Metadata); NN = Metadata['nn']
ExtendXYZ(Traj, Metadata, [NN, AuNeigh, PtNeigh], ['Coordination', 'Au Neighbours', 'Pt Neighbours'])
PtInfo(Metadata)
np.savetxt('Gyration_Mix.dat', np.column_stack((np.column_stack((
Metadata['gyration'], Metadata['gyrationPt'])), Metadata['mix'])))
if __name__ == '__main__':
Output()
```
#### File: Sapphire/Post_Process/Mass_Activity.py
```python
from ase import Atoms
from ase.io import read
import numpy as np
from matplotlib import pyplot as plt
import time
from scipy.interpolate import interp1d
import seaborn as sns
import pickle
from itertools import groupby
from collections import namedtuple
from scipy.ndimage import gaussian_filter1d
import math
"""CONSTANTS TO BE UPDATED AND INSERTED MANUALLY BY THE USER !!!"""
def beta(T):
return 1/((8.6173303E-5)*(T))
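# beta(T) = 1/(kB*T) with kB = 8.6173303e-5 eV/K, e.g. beta(300) is roughly 38.7 per eV.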
sigma=2 #how much you want to smoothen out your functions?
applied_V=1.1
U_bins = 1299 #binning of the voltages (v)
#Change the temperature at which you want the catalytic activities to be performed.
r=1.28*10**(-9) #atomic radius of your atoms (in cm)?
mass_cu = 1.0552e-19 #mass of your atoms (in mg)?
C = 12.56 # constant in equation, to be calculated from initial conditions
def heatmap(agcn):
occ_long=[]
for i in range(len(agcn)):
(n, bins, patches)=plt.hist(agcn[i], bins=90, density=True)
occ_long.append(n)
occ_long=np.asarray(occ_long)
corrected_occ=np.zeros((90, len(agcn)))
for i in range(90):
corrected_occ[-i-1]=np.asarray(occ_long[:, i])
sns.heatmap(corrected_occ, vmax=1, xticklabels=False, yticklabels=False, cbar=False)
    #plt.savefig(str(folder)+'occurrency.png', bbox_inches='tight', dpi=400)
#plt.show()
#plt.close()
return corrected_occ
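# heatmap() returns an array of shape (90, n_frames): column j holds the 90-bin aGCN
# histogram of frame j, with the bin order reversed so the highest-aGCN bin sits in row 0.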
def catalytic_analysis(filename, agcn, T=300):
    # NOTE: the temperature argument T (in K; 300 is only an assumed placeholder) has been
    # added so that beta(T) can be evaluated in the site-current expressions below.
corrected_occ = heatmap(agcn)
traj = read(filename, index = ':')
current=[]; mass_activity=[]; y=[]
for j in range(0, len(agcn)):
surf_area=0
for i in range(len(agcn[j])):
once = 4*np.pi*r**2*(1-agcn[j][i]/12)
surf_area = surf_area + once
y.append(surf_area)
site=np.zeros((U_bins))
for h in range(U_bins):
spec=0
for m in range(len(corrected_occ)):
                if m<31:
                    sitecurrent = C*np.exp(((0.162 * m/10 - 1.11)-(h*0.001))*beta(T))*m/10*corrected_occ[m][j]/len(agcn[j])
                elif 31<=m<81:
                    sitecurrent = C*np.exp(((-0.067 * m/10 - 0.416)-(h*0.001))*beta(T))*m/10*corrected_occ[m][j]/len(agcn[j])
                else:
                    sitecurrent = C*np.exp(((-0.222 * m/10 + 0.849)-(h*0.001))*beta(T))*m/10*corrected_occ[m][j]/len(agcn[j])
spec=spec+sitecurrent
site[h]=spec
current.append(site)
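        # NOTE: mass_NP (the nanoparticle mass in mg) is not defined in this file; like the
        # constants above it must be supplied by the user, e.g. the mass per atom times the
        # number of atoms in the frame.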
mass_activity.append(-site[int(1299-applied_V*1000)]*surf_area/mass_NP)
"""Plotting the current density at different applied potentials for different
time steps."""
potentials = np.linspace(-1.299, 0, U_bins)
plt.plot(potentials, current[int(len(traj)-1)], color='orange',lw=3, label='final')
plt.plot(potentials, current[int(len(traj)/2)-1], color='purple', label='middle')
plt.plot(potentials, current[0], color='r', label='initial')
plt.xlabel('V vs RHE')
plt.ylabel('j (mA/cm^2)')
plt.savefig('current densities.png', bbox_inches='tight', dpi=200)
plt.show()
plt.close()
"""Mass activity plots at desired applied_V."""
plt.xlabel('Time (ns)')
plt.ylabel('MA (mA/mg)')
plt.plot(mass_activity, linestyle='dashed', color='k')
plt.plot(gaussian_filter1d(mass_activity, sigma), linestyle='solid', color='k')
plt.savefig('mass_activity.png', bbox_inches='tight', dpi=200)
plt.show()
plt.close()
```
#### File: Sapphire/Utilities/Supported.py
```python
class Supported(object):
def __init__(self):
return None
def Full(self):
self.Supported_Full=[
'rdf', 'cna_sigs', 'cna_patterns', 'adj', 'pdf', 'agcn', 'nn', 'com',
'comdist', 'moi', 'gyration', 'stat_radius', 'surf_area', 'surf_atoms',
'concert', 'collect', 'pair_distance'
]
return self.Supported_Full
def Homo(self):
self.Supported_Homo=[
'hopdf', 'hordf', 'hocom', 'hoadj', 'hocomdist', 'homidcomdist', 'hopair_distance',
'euc', 'hocna_sigs', 'hocna_patterns', 'hogyration', 'hosurf_area', 'hosurf_atoms', 'homobonds'
]
return self.Supported_Homo
def Hetero(self):
self.Supported_Hetero = [
'hepdf', 'herdf', 'headj', 'he_pair_distance', 'mix', 'lae', 'ele_nn',
'heterobonds'
]
return self.Supported_Hetero
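# Usage sketch (this is exactly the check suggested by the unsupported_template message
# in Utilities/System_Clean.py):
#   from Utilities.Supported import Supported
#   print(Supported().Full(), Supported().Homo(), Supported().Hetero())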
```
#### File: Sapphire/Utilities/System_Clean.py
```python
import os
import sys
import warnings
from inspect import getmembers, isfunction
from ase.io import read
unsupported_template = '\nProperty "%s" not available. Please verify which features ' \
'in Sapphire are supported first by calling\n' \
'\nfrom Utilities.Supported import Supported\n' \
'print(Supported().Full(), Supported().Homo(), Supported().Hetero())\n'
none_template = '\nSystem property "%s" is bad. Typically, this is because the ' \
    'required information has not been provided by the user or is given incorrectly.\n' \
'Reverting to System default "%s".\n'
class _Clean_System(object):
def __init__(self, System={}):
self.System = System
self.file = 'Sapphire_Info.txt'
self.Default = {
'base_dir': '',
'movie_file_name': 'movie.xyz',
'energy_file_name': None,
'extend_xyz': None,
'Homo': None,
'Hetero': None,
'Start': 0, 'End': None, 'Step': 1, 'Skip': 50,
'UniformPDF': False, 'Band': 0.05
}
self.Keys = list(self.Default.keys())
self.FunkList = [o for o in getmembers(_Clean_System) if isfunction(o[1])]
self.Funks = [x[0] for x in self.FunkList if not x[0].startswith('_')]
for x in self.Funks:
getattr(self, x)()
def Abase_dir(self):
def _no_base():
self.System['base_dir'] = ''
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % (self.System['base_dir'], self.Default['base_dir']))
try:
self.System['base_dir']
if type(self.System['base_dir']) is not str:
                _no_base()
else:
if self.System['base_dir'] == '':
pass
else:
if not os.path.isdir(self.System['base_dir']):
_no_base()
except KeyError:
_no_base()
with open(self.System['base_dir']+self.file, "a") as f:
f.write("\nInitialising...\n")
def Bmovie_file_name(self):
def _exit():
try:
if not os.path.isfile(self.System['base_dir']+self.System['movie_file_name']):
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write("\nNo trajectory file can be found at the specified location.\n"
"Please check your local directories and re-write your input file.\n"
"Sapphire will now terminate.\n")
raise SystemExit("No trajectory found at '%s'.\n" % (
self.System['base_dir']+self.System['movie_file_name']))
_exit()
except Exception as e:
                sys.exit('\nCannot find this file.\nExiting now due to error raised as:\n%s' % e)
try:
_exit()
if type(self.System['movie_file_name']) is not str:
self.System['movie_file_name'] = self.Default['movie_file_name']
_exit()
warnings.warn(none_template % ('movie_file_name', self.Default['movie_file_name']))
with open(self.System['movie_file_name']+self.file, 'a') as warn:
warn.write(none_template % ('movie_file_name', self.Default['movie_file_name']))
_exit()
else:
if not os.path.isfile(self.System['base_dir']+self.System['movie_file_name']):
self.System['movie_file_name'] = self.Default['movie_file_name']
warnings.warn(none_template % ('movie_file_name', self.Default['movie_file_name']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('movie_file_name', self.Default['movie_file_name']))
_exit()
except Exception as e:
self.System['movie_file_name'] = self.Default['movie_file_name']
warnings.warn(none_template % ('movie_file_name', self.Default['movie_file_name']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(
none_template % (
self.System['movie_file_name'], self.Default['movie_file_name']
)
)
_exit()
with open(self.System['base_dir']+self.file, "a") as f:
f.write('\nReading from the %s file.\n' % (self.System['base_dir']+self.System['movie_file_name']))
"""
    def Cenergy_file_name(self):
        '''
        Please note that this command has since been removed due to being obsolete.
        '''
def _no_file():
self.System['energy_file_name'] = self.Default['energy_file_name']
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write("\nNo energy file can be found at the specified location.\n'%s'\n"
"Please check your local directories and re-write your input file if you want energetic analysis.\n"
% (self.System['base_dir']))
try:
if type(self.System['energy_file_name']) is not str:
self.System['energy_file_name'] = self.Default['energy_file_name']
warnings.warn(none_template % ('energy_file_name', self.Default['energy_file_name']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('energy_file_name', self.Default['energy_file_name']))
_no_file()
else:
if not os.path.isfile(self.System['base_dir']+self.System['energy_file_name']):
_no_file()
except Exception as e:
_no_file()
"""
def Dextend_xyz(self):
try:
if type(self.System['extend_xyz']) is not list:
self.System['extend_xyz'] = self.Default['extend_xyz']
warnings.warn(none_template % ('extend_xyz', self.Default['extend_xyz']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('extend_xyz', self.Default['extend_xyz']))
else:
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write("Will attempt to write the following quantities into an extended xyz file:\n")
for x in self.System['extend_xyz']:
warn.write("%s\n" % x)
except KeyError:
self.System['extend_xyz'] = self.Default['extend_xyz']
warnings.warn(none_template % ('extend_xyz', self.Default['extend_xyz']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('extend_xyz', self.Default['extend_xyz']))
def _no_homo(self):
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write("\nNo specie-specific properties for homo species will be calculated in this run.\n")
self.System['Homo'] = self.Default['Homo']
def EHomo(self):
with open(self.System['base_dir']+self.file, "a") as f:
f.write("\nChecking user input for calculating homo properties in this run.\n")
try:
self.System['Homo']
if self.System['Homo'] is None:
self._no_homo()
elif type(self.System['Homo']) is list:
Temp = read(
self.System['base_dir']+self.System['movie_file_name'],
index=0).get_chemical_symbols()
used = set()
Species = [x for x in Temp
if x not in used and (used.add(x) or True)]
Temp = []
for x in self.System['Homo']:
if x not in Species:
with open(self.System['base_dir']+self.file, "a") as f:
f.write("\nChemical specie %s not present in the trajectory."
"Consequently, this shall be discarded from Homo.\n" % x)
else:
Temp.append(x)
with open(self.System['base_dir']+self.file, "a") as f:
f.write("\nSpecies being considered are:\n"+'\t'.join(str(x) for x in Temp))
self.System['Homo'] = Temp
except Exception as e:
self._no_homo()
def _no_hetero(self):
with open(self.System['base_dir']+self.file, 'a') as warn:
            warn.write("\nNo specie-specific properties for hetero species will be calculated in this run.\n")
self.System['Hetero'] = self.Default['Hetero']
def GHetero(self):
with open(self.System['base_dir']+self.file, "a") as f:
            f.write("\nChecking user input for calculating hetero properties in this run.\n")
try:
self.System['Hetero']
if self.System['Hetero'] is None:
self._no_hetero()
except KeyError:
self._no_hetero()
def IStart(self):
try:
self.System['Start']
if type(self.System['Start']) is not int or self.System['Start'] < 0:
self.System['Start'] = 0
warnings.warn(none_template % ('Start', self.Default['Start']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('Start', self.Default['Start']))
else:
with open(self.System['base_dir']+self.file, 'a') as file:
file.write("\nInitial frame has been set to %s.\n" % self.System['Start'])
except KeyError:
self.System['Start'] = 0
warnings.warn(none_template % ('Start', self.Default['Start']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('Start', self.Default['Start']))
def JEnd(self):
try:
if not type(self.System['End']) is int or self.System['End'] < self.System['Start']:
Temp = read(self.System['base_dir']+self.System['movie_file_name'], index=':')
self.Default['End'] = len(Temp)
self.System['End'] = len(Temp)
del(Temp)
warnings.warn(none_template % ('End', self.Default['End']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % (self.System['End'], self.Default['End']))
elif self.System['End'] < self.System['Start']:
Temp = read(self.System['base_dir']+self.System['movie_file_name'], index=':')
self.Default['End'] = len(Temp)
self.System['End'] = len(Temp)
del(Temp)
warnings.warn(none_template % ('End', self.Default['End']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('End', self.Default['End']))
else:
with open(self.System['base_dir']+self.file, 'a') as file:
file.write("\nFinal frame has been set to %s.\n" % self.System['End'])
except KeyError:
Temp = read(self.System['base_dir']+self.System['movie_file_name'], index=':')
self.Default['End'] = len(Temp)
self.System['End'] = len(Temp)
del(Temp)
warnings.warn(none_template % ('End', self.Default['End']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('End', self.Default['End']))
def KStep(self):
try:
if not type(self.System['Step']) is int or self.System['Step'] < 1:
self.System['Step'] = self.Default['Step']
warnings.warn(none_template % ('Step', self.Default['Step']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('Step', self.Default['Step']))
except KeyError:
self.System['Step'] = self.Default['Step']
warnings.warn(none_template % ('Step', self.Default['Step']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('Step', self.Default['Step']))
def LSkip(self):
try:
if not type(self.System['Skip']) is int or self.System['Skip'] < 1:
                self.Default['Skip'] = int((self.System['End'] - self.System['Start'])/25.0)
if self.Default['Skip'] < 1:
self.Default['Skip'] = 1
warnings.warn(none_template % ('Skip', self.Default['Skip']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('Skip', self.Default['Skip']))
self.System['Skip'] = self.Default['Skip']
except KeyError:
            self.Default['Skip'] = int((self.System['End'] - self.System['Start'])/25.0)
            if self.Default['Skip'] < 1:
                self.Default['Skip'] = 1
            warnings.warn(none_template % ('Skip', self.Default['Skip']))
            with open(self.System['base_dir']+self.file, 'a') as warn:
                warn.write(none_template % ('Skip', self.Default['Skip']))
self.System['Skip'] = self.Default['Skip']
def MUniformPDF(self):
try:
if type(self.System['UniformPDF']) is not bool:
warnings.warn(none_template % ('UniformPDF', self.Default['UniformPDF']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('UniformPDF', self.Default['UniformPDF']))
self.System['UniformPDF'] = self.Default['UniformPDF']
except KeyError:
warnings.warn(none_template % ('UniformPDF', self.Default['UniformPDF']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('UniformPDF', self.Default['UniformPDF']))
self.System['UniformPDF'] = self.Default['UniformPDF']
def NBand(self):
try:
if type(self.System['Band']) is not float:
self.Default['Band'] = self.Default['Band']
warnings.warn(none_template % ('Band', self.Default['Band']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('Band', self.Default['Band']))
self.System['Band'] = self.Default['Band']
except KeyError:
warnings.warn(none_template % ('Band', self.Default['Band']))
with open(self.System['base_dir']+self.file, 'a') as warn:
warn.write(none_template % ('Band', self.Default['Band']))
self.System['Band'] = self.Default['Band']
``` |
{
"source": "jonestcharles/physics-simulation",
"score": 3
} |
#### File: physics-simulation/Assignments/Binary.py
```python
import Simulation
import matplotlib.pyplot as plt
class App(object):
def __init__(self):
self.time = 4.
def run(self):
self.sim_1()
def sim_1(self):
sim = Simulation.BinarySim(1.,4.,2.,0.5)
sim.advance(self.time)
time, bodies = sim.get_results()
self.plorbits(bodies)
def plorbits(self,bodies):
x0= [b[0].position.x for b in bodies]
y0= [b[0].position.y for b in bodies]
x1= [b[1].position.x for b in bodies]
y1= [b[1].position.y for b in bodies]
plt.figure()
plt.plot(x0,y0,'.',x1,y1,'.')
plt.title('Binary Orbits')
plt.xlabel('X axis [AU]')
plt.ylabel('Y axis [AU]')
plt.legend(['One','Two'])
if __name__=="__main__":
app = App()
app.run()
```
#### File: physics-simulation/Assignments/cooling.py
```python
import matplotlib.pyplot as plt
import Body
import Solver
import Simulation
def main():
coffee = Body.ThermalBody(500)
solver = Solver.Euler(0.5)
def stop_condition(coffee):
return coffee.temperature > sim.Ta*1.1
sim = Simulation.CoolingSim(stop_condition,solver,293,0.05,coffee)
t,T = sim.get_results()
plt.plot(t,T)
coffee = Body.ThermalBody(500)
solver = Solver.RK2(0.5)
def stop_condition(coffee):
return coffee.temperature > sim.Ta*1.1
sim = Simulation.CoolingSim(stop_condition,solver,293,0.05,coffee)
t,T = sim.get_results()
plt.plot(t,T)
plt.title("OOP Cooling Curves")
plt.xlabel("Time [s]")
plt.ylabel("Temperature[K]")
plt.legend(["Euler Curve","RK2 Cooling Curve"])
if __name__ == "__main__": main()
```
#### File: physics-simulation/Assignments/cooling_thomas.py
```python
import numpy as np
import matplotlib.pyplot as plt
import math
def main():
k=0.1
dt=5
t=np.arange(0,math.log(10)/k,0.05)
T=np.exp(-k*t)
plt.plot(t,T)
t=np.arange(0,math.log(10)/k,dt)
plt.plot(t,cooling_euler(k,t,dt))
plt.title('Temperature')
plt.xlabel('Time'); plt.ylabel('Temperature (as proportion of T0 - Ta)')
plt.plot(t,cooling_rk2(cooling_diffeq,t,dt,1))
plt.legend(['Exact','Euler','RK2'])
'''
dt=np.arange(.2,4.2,.2)
error=np.exp(-k*dt) + k*dt -1
plt.figure()
plt.plot(dt,error)
plt.title('Error with step size')
plt.xlabel('step size'); plt.ylabel('Error')
return
'''
def cooling_rk2(G,t,dt,T0):
'''rk2 for cooling function
Parameters
----------
G:function
cooling_diffeq gives the derivative at T
t:array,float
time/independent variable
dt:float
time step
T0:float
initial temperature
Returns
-------
T:array,float
rk2 values for T
'''
T= 0*t
T[0]=1
for i in range(1,len(T)):
k1 = G(T[i-1],t)
k2 = G(T[i-1]+k1*dt*0.5,t)
T[i] = T[i-1] + k2*dt
return np.array(T)
def cooling_diffeq(T,t):
'''Provides the slope of the cooling function
Parameters
---------
T: float
initial temperature
t: array, float
time/independent variable
Returns
-------
slope: float
derivative at a certain T
'''
k=0.1
slope = -k*T
return slope
def cooling_euler(k,t,dt):
''' Approximates the cooling function exp(-kt), using Euler's method
Parameters
----------
k:float
the cooling constant
t:array,float
time/independent variable
dt:float
time step
Returns
-------
T:array,float
'''
T=0*t
T[0]=1
for i in range(1,len(T)):
T[i]=T[i-1]-k*T[i-1]*dt
return T
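# Note: the update above is T[i] = T[i-1]*(1 - k*dt), one explicit Euler step for
# dT/dt = -k*T, so it tracks exp(-k*t) well only when k*dt is small.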
def cooling_taylor(k,t,t_0,n):
''' Gives the nth order Talor polynomial for the cooling function exp(-kt)
Parameters
----------
k:float
the cooling constant
t:array,float
time/independent variable
t_0:float
center of Taylor expansion
n:int
order of taylor polynomial
Returns
-------
T:array,float
'''
T=1
for i in range(1,n+1):
T=T+ ((-k*(t-t_0))**i)/math.factorial(i)
T=T*np.exp(-k*t_0)
return T
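# Worked example: with t_0 = 0 and n = 1 the series above reduces to T = 1 - k*t,
# the tangent-line approximation to exp(-k*t) at t = 0.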
if __name__=="__main__":main()
```
#### File: physics-simulation/Assignments/exo_pyrate.py
```python
import Simulation
import matplotlib.pyplot as plt
import math
import numpy as np
import Search
class App(object):
def __init__(self):
self.time = 4.
def run(self):
self.sim_1()
tlist = np.arange(0,self.max,self.max/100.)
dlist = [self.error_1(t) for t in tlist]
plt.figure()
plt.plot(tlist,dlist,'.')
plt.title('Error Function')
plt.xlabel('Time [Years]')
plt.ylabel('Distance [AU]')
sim2 = self.sim_2()
        print("Time of min distance is %f" % sim2)
def sim_1(self):
Mp = 1.0
Ms = 1.*10.**(-6)
ap = 2.0
e = 0.5
As = 1.0
Ap = 0.25
omega = 0.
i = 0.
sim = Simulation.ExoSim(Mp,Ms,ap,e,As,Ap,omega,i,apnd=True)
self.max = sim.period
sim.advance()
time, bodies = sim.get_results()
self.plorbits(bodies)
def sim_2(self):
times = self.start_times()
        print(times)
test1 = Search.GoldenSection(self.error_1,times[0],times[1],0.0001)
result1 = test1.do_it()
return result1
def start_times(self):
Mp = 1.0
Ms = 1.*10.**(-6)
ap = 2.0
e = 0.5
As = 1.0
Ap = 0.25
omega = 0.
i = 0.
sim = Simulation.ExoSim(Mp,Ms,ap,e,As,Ap,omega,i,apnd=True)
sim.advance()
time, bodies = sim.get_results()
t = np.array(time)
v = np.array([b[1].velocity.x for b in bodies])
test = v[1:]*v[:-1]
vt = v[:-1]
tt = t[:-1]
times = tt[test<0]
if len(times) == 1.:
times = np.append(times,sim.period/2.+times[0])
test2 = vt[test<0]
if test2[0] < 0:
return times
elif test2[0] > 0:
return np.array([times[1],times[0]+sim.period])
def error_1(self,t):
Mp = 1.0
Ms = 1.*10.**(-6)
ap = 2.0
e = 0.5
As = 1.0
Ap = 0.25
omega = 0.
i = math.pi/2.
sim = Simulation.ExoSim(Mp,Ms,ap,e,As,Ap,omega,i,apnd=True)
sim.advance(t)
time, bodies = sim.get_results()
r = bodies[-1][0].position - bodies[-1][1].position
d = np.sqrt(r.x**2. + r.y**2.)
return d
def plorbits(self,bodies):
x0 = [b[0].position.x for b in bodies]
y0 = [b[0].position.y for b in bodies]
z0 = [b[0].position.z for b in bodies]
x1 = [b[1].position.x for b in bodies]
y1 = [b[1].position.y for b in bodies]
z1 = [b[1].position.z for b in bodies]
plt.figure()
plt.plot(x0,y0,'.',x1,y1,'.')
plt.title('Binary Orbits')
plt.xlabel('X axis [AU]')
plt.ylabel('Y axis [AU]')
plt.legend(['Star','Planet'])
# plt.figure()
# plt.plot(x0,z0,'.',x1,z1,'.')
# plt.title('Binary Orbits')
# plt.xlabel('X axis [AU]')
# plt.ylabel('Z axis [AU]')
# plt.legend(['Star','Planet'])
# plt.figure()
# plt.plot(y0,z0,'.',y1,z1,'.')
# plt.title('Binary Orbits')
# plt.xlabel('Y axis [AU]')
# plt.ylabel('Z axis [AU]')
# plt.legend(['Star','Planet'])
if __name__=="__main__":
app = App()
app.run()
```
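The Search module used by sim_2 is not included in this dump. For orientation, a minimal golden-section minimiser with the same calling convention (function, bracket endpoints, tolerance, and a do_it() returning the location of the minimum) might look roughly like the sketch below; the interface is inferred from the call sites, not taken from the project's actual implementation:

```python
import math

class GoldenSection:
    """Sketch of a golden-section search; assumed interface: do_it() -> x at the minimum."""
    def __init__(self, f, a, b, tol):
        self.f, self.a, self.b, self.tol = f, a, b, tol

    def do_it(self):
        invphi = (math.sqrt(5) - 1) / 2  # 1/phi, about 0.618
        a, b = self.a, self.b
        c = b - (b - a) * invphi
        d = a + (b - a) * invphi
        while abs(b - a) > self.tol:
            # keep the half of the bracket that contains the smaller value
            if self.f(c) < self.f(d):
                b = d
            else:
                a = c
            c = b - (b - a) * invphi
            d = a + (b - a) * invphi
        return (a + b) / 2
```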
#### File: physics-simulation/Assignments/Mission2Mars.py
```python
import Body
import Solver
import Simulation
import Physics
import math
import vector
import Search
import matplotlib.pyplot as plt
class App(object):
def __init__(self):
self.rEarth = vector.Vector(1,0,0)
self.vEarth = vector.Vector(0,2*math.pi,0)
self.rMars = vector.Vector(1.5,0,0)
self.vMars = vector.Vector(0,2*math.pi/math.sqrt(1.5),0)
self.max_steps = 10000
        self.solver = Solver.RK4(0.01)
self.physics = Physics.CentralGravity(self.solver,G=4*math.pi**2,M=1)
def run(self):
sim1 = self.sim_1()
        print(sim1)
sim2 = self.sim_2()
        print(sim2)
sim3 = self.sim_3()
        print(sim3)
def sim_1(self):
Earth = Body.GravBody(1,self.rEarth,self.vEarth)
Mars = Body.GravBody(1,self.rMars,self.vMars)
Planets = [Earth,Mars]
sim = Simulation.OrbitSim(self.stop_1,self.physics,Planets)
sim.advance()
time, bodies = sim.get_results()
return bodies[-1][1].position.theta
def sim_2(self):
test = Search.NewtonMethod(self.error_2,1.1*2*math.pi,0.01,0.01)
result = test.do_it()
self.vL = result
return result
def sim_3(self):
test = Search.NewtonMethod(self.error_3,1.9,0.01,0.01)
result = test.do_it()
return result
def stop_1(self,time,bodies):
if len(bodies) == self.max_steps:
            print("Orbit failed to complete after %d steps" % self.max_steps)
return False
elif len(bodies) < 2:
return True
else:
L = bodies[0][0].position - bodies[-1][0].position
F = bodies[0][0].position - bodies[1][0].position
if L.r < F.r:
return False
return True
def error_2(self,v0):
vShip = vector.Vector(0,v0,0)
rShip = vector.Vector(1,0,0)
Ship = Body.GravBody(1,rShip,vShip)
sim = Simulation.OrbitSim(self.stop_1,self.physics,Ship)
sim.advance()
time, Ships = sim.get_results()
r = [[s.position.r for s in ship] for ship in Ships]
max_r = max(r)
        error = max_r[0] - 1.5  # r is a list of lists, so max(r) is a list and we take its first element
return error
def error_3(self,launch_time):
vShip = vector.Vector(0,2*math.pi,0)
rShip = vector.Vector(1,0,0)
Ship = Body.GravBody(1,rShip,vShip)
rMars = vector.Vector(1.5,0,0)
vMars = vector.Vector(0,2*math.pi/math.sqrt(1.5),0)
Mars = Body.GravBody(1,rMars,vMars)
Objects = [Ship,Mars]
sim = Simulation.OrbitSim(physics=self.physics,body=Objects)
sim.advance(time=launch_time)
sim.body[0].velocity.r = self.vL
p = ((2.5/2.)**(3./2.))/2.
sim.advance(time=p+launch_time)
t2list,blist = sim.get_results()
sep = sim.body[1].position - sim.body[0].position
error = sep.r
if error <= 0.01:
self.plorbits(blist)
return error
def plorbits(self,bodies):
x0= [b[0].position.x for b in bodies]
y0= [b[0].position.y for b in bodies]
x1= [b[1].position.x for b in bodies]
y1= [b[1].position.y for b in bodies]
plt.figure()
plt.plot(x0,y0,x1,y1)
plt.title('Mission to Mars')
plt.xlabel('X axis [AU]')
plt.ylabel('Y axis [AU]')
plt.legend(['Ship','Mars'])
if __name__=="__main__":
app = App()
app.run()
```
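Search.NewtonMethod is likewise not shown. Judging from the call sites (a function, an initial guess, and two small numbers that look like a finite-difference step and a tolerance), a plausible sketch of such a root-finder is given below; the argument interpretation is an assumption:

```python
class NewtonMethod:
    """Sketch of a Newton root-finder; (f, x0, dx, tol) mirrors how it is called above."""
    def __init__(self, f, x0, dx, tol, max_iter=100):
        self.f, self.x0, self.dx, self.tol = f, x0, dx, tol
        self.max_iter = max_iter

    def do_it(self):
        x = self.x0
        for _ in range(self.max_iter):
            fx = self.f(x)
            if abs(fx) < self.tol:
                break
            # central finite difference in place of an analytic derivative
            dfdx = (self.f(x + self.dx) - self.f(x - self.dx)) / (2 * self.dx)
            x = x - fx / dfdx
        return x
```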
#### File: physics-simulation/Assignments/skull.py
```python
import Body
import matplotlib.pyplot as plt
import Solver
import Simulation
def main():
skull = Body.GravBody(5,5,5)
solver = Solver.RK2(0.001)
def stop_condition(skull):
return skull.velocity > 0
sim = Simulation.TrajectorySim(stop_condition,solver,skull)
t, h = sim.get_results()
plt.plot(t,h)
plt.title("OOP Skull Toss")
plt.xlabel("Time [s]")
plt.ylabel("Height [m]")
if __name__ == "__main__": main()
``` |
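skull.py relies on the same Body/Solver/Simulation stack. As a stand-alone reference, a plain Euler integration of a vertical toss reproduces the height curve up to the apex; the initial height and speed of 5 are read from GravBody(5, 5, 5) on the assumption that its arguments follow the (mass, position, velocity) order used in Mission2Mars.py:

```python
import matplotlib.pyplot as plt

g, dt = 9.81, 0.001
h, v, t = 5.0, 5.0, 0.0      # assumed: initial height 5 m, upward speed 5 m/s
ts, hs = [t], [h]
while v > 0:                 # same stop condition as skull.py: integrate until the apex
    v -= g * dt
    h += v * dt
    t += dt
    ts.append(t)
    hs.append(h)

plt.plot(ts, hs)
plt.xlabel("Time [s]")
plt.ylabel("Height [m]")
plt.title("Plain Euler skull toss (reference)")
plt.show()
```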
{
"source": "Joneswn/Baloti",
"score": 2
} |
#### File: Baloti/djelectionguard/components.py
```python
import random
from datetime import datetime, date, timedelta
from django import forms
from django.conf import settings
from django.db.models import Sum
from django.core.exceptions import ObjectDoesNotExist
from django.template.defaultfilters import date as _date
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import get_language
from django.utils.safestring import mark_safe
from django.utils.html import escape
from electeez_common.components import *
from ryzom_django.forms import widget_template
from django.conf import settings
from djlang.utils import gettext as _
from electeez_sites.models import Site
from .models import Contest, Candidate
@widget_template('django/forms/widgets/splitdatetime.html')
class SplitDateTimeWidget(SplitDateTimeWidget):
date_label = _('Date')
date_style = 'margin-top: 0; margin-bottom: 32px;'
time_label = _('Time')
time_style = 'margin: 0;'
class ContestForm(forms.ModelForm):
def now():
now = datetime.now()
return now.replace(second=0, microsecond=0)
    def tomorrow():
        tomorrow = datetime.now() + timedelta(days=1)
        return tomorrow.replace(second=0, microsecond=0)
about = forms.CharField(
label=_('FORM_ABOUT_ELECTION_CREATE'),
widget=forms.Textarea,
required=False
)
votes_allowed = forms.IntegerField(
label=_('FORM_VOTES_ALLOWED_ELECTION_CREATE'),
initial=1,
        help_text=_('The maximum number of choices a voter can make for this election')
)
start = forms.SplitDateTimeField(
label='',
initial=now,
widget=forms.SplitDateTimeWidget(
date_format='%Y-%m-%d',
date_attrs={'type': 'date', 'label': 'date'},
            time_attrs={'type': 'time', 'label': 'time'},
),
)
end = forms.SplitDateTimeField(
label='',
        initial=tomorrow,
widget=forms.SplitDateTimeWidget(
date_format='%Y-%m-%d',
date_attrs={'type': 'date'},
time_attrs={'type': 'time'},
)
)
class Meta:
model = Contest
fields = [
'name',
'about',
'votes_allowed',
'start',
'end',
'timezone',
]
labels = {
'name': _('FORM_TITLE_ELECTION_CREATE'),
'about': _('FORM_ABOUT_ELECTION_CREATE'),
'votes_allowed': _('FORM_VOTES_ALLOWED_ELECTION_CREATE'),
'start': _('FORM_START_ELECTION_CREATE'),
'end': _('FORM_END_ELECTION_CREATE'),
'timezone': _('FORM_TIMEZONE_ELECTION_CREATE')
}
class ContestFormComponent(CList):
def __init__(self, view, form, edit=False):
content = []
content.append(Ul(
*[Li(e) for e in form.non_field_errors()],
cls='error-list'
))
super().__init__(
H4(_('Edit election') if edit else _('Create an election')),
Form(
form['name'],
form['about'],
H6(_('Voting settings:')),
form['votes_allowed'],
H6(_('Election starts:')),
form['start'],
H6(_('Election ends:')),
form['end'],
form['timezone'],
CSRFInput(view.request),
MDCButton(_('update election') if edit else _('create election')),
method='POST',
cls='form'),
)
@template('djelectionguard/contest_form.html', Document, Card)
class ContestCreateCard(Div):
style = dict(cls='card')
def to_html(self, *content, view, form, **context):
self.backlink = BackLink(_('back'), reverse('contest_list'))
edit = view.object is not None
return super().to_html(
ContestFormComponent(view, form, edit),
)
class ContestFiltersBtn(Button):
def __init__(self, pos, text, active=False):
active_cls_name = 'mdc-tab--active' if active else ''
active_indicator = 'mdc-tab-indicator--active' if active else ''
attrs = {
'class': f'contest-filter-btn mdc-tab {active_cls_name}',
'role': 'tab',
'aria-selected': 'true',
'tabindex': pos
}
super().__init__(
Span(
Span(text, cls='mdc-tab__text-label'),
cls='mdc-tab__content'
),
Span(
Span(cls='mdc-tab-indicator__content ' +
'mdc-tab-indicator__content--underline'
),
cls=f'mdc-tab-indicator {active_indicator}'
),
Span(cls='mdc-tab__ripple'),
**attrs
)
class ContestFilters(Div):
def __init__(self, view):
active_btn = view.request.GET.get('q', 'all')
self.all_contests_btn = ContestFiltersBtn(1, _('all'), active_btn == 'all')
self.my_contests_btn = ContestFiltersBtn(2, _('created by me'), active_btn == 'created')
self.shared_contests_btn = ContestFiltersBtn(3, _('shared with me'), active_btn == 'shared')
super().__init__(
Div(
Div(
self.all_contests_btn,
self.my_contests_btn,
self.shared_contests_btn,
cls='mdc-tab-scroller__scroll-content'
),
cls='mdc-tab-scroller__scroll-area ' +
'mdc-tab-scroller__scroll-area--scroll'
),
cls='mdc-tab-bar contest-filter'
)
class ContestItem(A):
def __init__(self, contest, user, *args, **kwargs):
active_cls = ''
status = ''
voter = contest.voter_set.filter(user=user).first()
voted = voter and voter.casted
if contest.actual_start:
status = _('voting ongoing')
active_cls = 'active'
if contest.actual_end:
status = _('voting closed')
if contest.plaintext_tally:
active_cls = ''
status = _('result available')
status_2 = _(', voted') if voted else ''
super().__init__(
Span(cls='mdc-list-item__ripple'),
Span(
Span(cls=f'contest-indicator'),
Span(
Span(status, status_2, cls='contest-status overline'),
Span(contest.name, cls='contest-name'),
cls='list-item__text-container'
),
cls='mdc-list-item__text'
),
cls=f'contest-list-item mdc-list-item mdc-ripple-upgraded {active_cls}',
href=reverse('contest_detail', args=[contest.id])
)
class Separator(Li):
def __init__(self, inset=False):
cls = 'mdc-list-divider'
if inset:
cls += ' mdc-list-divider--inset'
super().__init__(role='separator', cls=cls)
class ListItem(CList):
def __init__(self, component, separator=True):
content = [component]
if separator:
content.append(Separator())
super().__init__(*content)
class ContestListItem(ListItem):
def __init__(self, obj, user, **kwargs):
super().__init__(ContestItem(obj, user))
class ListAction(ListItem):
def __init__(self, title, txt, icon_comp, btn_comp, **kwargs):
self.action_btn = btn_comp
subitem_cls = 'mdc-list-item__primary-text list-action-row'
if not txt:
subitem_cls = 'list-action-row'
subitem = Span(cls=subitem_cls)
if icon_comp:
subitem.addchild(icon_comp)
subitem.addchild(H6(title))
if btn_comp:
subitem.addchild(btn_comp)
item = Span(subitem, cls='mdc-list-item__text list-action-column')
if txt:
item.addchild(Span(txt, cls='mdc-list-item__secondary-text ' +
'list-action-text body-2'))
super().__init__(Li(item, cls='mdc-list-item list-action-item'), **kwargs)
class ContestListCreateBtn(A):
def __init__(self):
super().__init__(
Span(
Span(cls='mdc-list-item__ripple'),
Span(
Span('+', cls='new-contest-icon'),
cls='new-contest-icon-container'
),
Span(_('Create new election')),
cls='mdc-list-item__text text-btn mdc-ripple-upgraded'
),
cls='mdc-list-item contest-list-item',
href=reverse('contest_create')
)
@template('djelectionguard/contest_list.html', Document, Card)
class ContestList(Div):
def to_html(self, *content, view, **context):
site = Site.objects.get_current()
can_create = (site.all_users_can_create
or view.request.user.is_staff
or view.request.user.is_superuser
)
return super().to_html(
H4(_('Elections'), style='text-align: center;'),
# ContestFilters(view),
Ul(
ListItem(ContestListCreateBtn())
if can_create else None,
*(
ContestListItem(contest, view.request.user)
for contest in context['contest_list']
) if len(context['contest_list'])
else (
Li(
_('There are no elections yet'),
cls='mdc-list-item body-1'
),
),
cls='mdc-list contest-list'
),
cls='card'
)
class CircleIcon(Span):
def __init__(self, icon, color='', small=False, **kw):
base_cls = f'icon {icon} {"small " if small else ""}'
super().__init__(
cls=base_cls + color,
**kw
)
class TodoIcon(CircleIcon):
def __init__(self, **kw):
super().__init__('empty-icon', 'yellow', **kw)
class DoneIcon(CircleIcon):
def __init__(self):
super().__init__('check-icon', 'green')
class TezosIcon(CircleIcon):
def __init__(self):
super().__init__('tezos-icon', 'white')
class OnGoingIcon(CircleIcon):
def __init__(self):
super().__init__('ongoing-icon', 'yellow')
class EmailIcon(CircleIcon):
def __init__(self):
super().__init__('email-icon', 'yellow')
class SimpleCheckIcon(CircleIcon):
def __init__(self):
super().__init__('simple-check-icon', 'green')
class WorldIcon(CircleIcon):
def __init__(self):
super().__init__('world-icon', 'black', small=True)
class BasicSettingsAction(ListAction):
def __init__(self, obj):
btn_comp = MDCButtonOutlined(
_('edit'),
False,
tag='a',
href=reverse('contest_update', args=[obj.id]))
super().__init__(
_('Basic settings'),
_('Name, votes allowed, time and date, etc.'),
DoneIcon(), btn_comp
)
class AddCandidateAction(ListAction):
def __init__(self, obj):
num_candidates = obj.candidate_set.count()
kwargs = dict(
tag='a',
href=reverse('contest_candidate_create', args=[obj.id]))
if num_candidates and num_candidates > obj.number_elected:
btn_comp = MDCButtonOutlined(_('edit'), False, **kwargs)
icon = DoneIcon()
else:
btn_comp = MDCButtonOutlined(_('add'), False, 'add', **kwargs)
icon = TodoIcon()
number = obj.number_elected + 1
txt = _('%(candidates)d candidates, minimum: %(elected)d',
n=num_candidates,
candidates=num_candidates,
elected=number
)
super().__init__(
_('Add candidates'), txt, icon, btn_comp,
)
class AddVoterAction(ListAction):
def __init__(self, obj):
num_voters = obj.voter_set.all().count()
num_candidates = obj.candidate_set.all().count()
kwargs = dict(
tag='a',
href=reverse('contest_voters_update', args=[obj.id]))
if num_voters:
btn_comp = MDCButtonOutlined(_('edit'), False, **kwargs)
icon = DoneIcon()
txt = _('%(num_voters)d voters', n=num_voters, num_voters=num_voters)
else:
btn_comp = MDCButtonOutlined(_('add'), False, 'add', **kwargs)
icon = TodoIcon()
txt = ''
super().__init__(
_('Add voters'),
txt, icon, btn_comp,
separator=True
)
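# Note: the mixin methods below (get_file, py2js) use browser APIs (fetch, document,
# setTimeout); under ryzom's py2js convention they are shipped to the client as
# JavaScript rather than executed server-side.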
class DownloadBtnMixin:
async def get_file(event):
event.preventDefault()
elem = event.currentTarget
file_response = await fetch(elem.href).then(
lambda res: res.blob()
)
url = URL.createObjectURL(file_response)
link = document.createElement('a')
link.download = elem.dataset.filename
link.href = url
link.click()
URL.revokeObjectURL(url)
setTimeout(
lambda: document.location.reload()
, 2000)
def py2js(self):
elem = getElementByUuid(self.id)
elem.onclick = self.get_file
class DownloadBtnOutlined(DownloadBtnMixin, MDCButtonOutlined):
pass
class DownloadBtn(DownloadBtnMixin, MDCTextButton):
pass
class SecureElectionInner(Span):
def __init__(self, obj, user):
text = _('All guardians must possess a private key so that the ballot box is secure and the election can be opened for voting.')
todo_list = Ol()
#todo_list.addchild(Li('Add guardians', cls='line'))
guardian = obj.guardian_set.filter(user=user).first()
if guardian:
cls = 'line' if guardian.downloaded else 'bold'
todo_list.addchild(Li(_('Download my private key'), cls=cls))
cls = ''
if guardian.downloaded and not guardian.verified:
cls = 'bold'
elif guardian.verified:
cls = 'line'
todo_list.addchild(Li(_('Confirm possession of an uncompromised private key'), cls=cls))
if user == obj.mediator:
n_confirmed = obj.guardian_set.exclude(verified=None).count()
n_guardians = obj.guardian_set.count()
cls = ''
if guardian and guardian.verified:
cls = 'bold'
if n_guardians == n_confirmed:
cls = 'line'
todo_list.addchild(
Li(
_('All guardians confirm possession of uncompromised private keys'),
Br(),
_('%(confirmed)d/%(gardiens)d confirmed',
n=n_confirmed,
confirmed=n_confirmed,
gardiens=n_guardians
),
cls=cls))
cls = ''
if n_confirmed == n_guardians and not obj.joint_public_key:
cls = 'bold'
elif obj.joint_public_key:
cls = 'line'
todo_list.addchild(Li(_('Lock the ballot box / erase private keys from server memory'), cls=cls))
cls = ''
if guardian.contest.joint_public_key:
cls = 'bold'
todo_list.addchild(Li(_('Open the election for voting'), cls=cls))
        subtext = _('Guardians must NOT lose their PRIVATE keys and they must keep them SECRET.')
action_btn = None
if not guardian.downloaded:
action_btn = DownloadBtnOutlined(
_('download private key'),
p=False,
icon='file_download',
data_filename=f'guardian-{guardian.id}.pkl',
tag='a',
href=reverse('guardian_download', args=[guardian.id]))
elif not guardian.verified:
action_btn = MDCButtonOutlined(
_('confirm key integrity'),
p=False,
tag='a',
href=reverse('guardian_verify', args=[guardian.id]))
elif user == obj.mediator:
if n_guardians == n_confirmed and not obj.joint_public_key:
action_btn = MDCButtonOutlined(
_('Lock the ballot box'),
p=False,
tag='a',
href=reverse('contest_pubkey', args=[guardian.contest.id]))
elif obj.joint_public_key and not obj.actual_start:
action_btn = MDCButton(
_('Open for voting'),
tag='a',
href=reverse('contest_open', args=[guardian.contest.id]))
super().__init__(
text,
P(todo_list),
subtext,
action_btn,
cls='body-2'
)
class SecureElectionAction(ListAction):
def __init__(self, obj, user):
title = _('Secure the election')
if obj.mediator == user:
if obj.joint_public_key:
title = _('Ballot box securely locked. Election can be open for voting.')
icon = DoneIcon()
else:
icon = TodoIcon()
elif guardian := obj.guardian_set.filter(user=user).first():
if guardian.verified:
icon = DoneIcon()
else:
icon = TodoIcon()
super().__init__(
title,
SecureElectionInner(obj, user),
icon,
None,
separator=False
)
class CastVoteAction(ListAction):
def __init__(self, obj, user):
voter = obj.voter_set.filter(user=user).first()
if voter.casted:
head = _('Voted')
s = voter.casted
txt = Span(
                _('You cast your vote!'
' The results will be published after the election is closed.'
),
Br(),
A(_('Track my vote'), href=reverse('tracker_detail', args=[voter.id])) if voter.casted else None,
)
icon = DoneIcon()
btn_comp = None
elif not obj.actual_end:
head = _('Cast my vote')
txt = ''
icon = TodoIcon()
url = reverse('contest_vote', args=(obj.id,))
btn_comp = MDCButtonOutlined(_('vote'), False, tag='a', href=url)
else:
head = _('You did not vote')
txt = _('The vote is closed, sorry you missed it.')
icon = TodoIcon(style=dict(filter='brightness(0.5)'))
btn_comp = None
super().__init__( head, txt, icon, btn_comp, separator=True)
class ChooseBlockchainAction(ListAction):
def __init__(self, obj, user):
num_voters = obj.voter_set.all().count()
num_candidates = obj.candidate_set.all().count()
separator = (
obj.publish_state != obj.PublishStates.ELECTION_NOT_DECENTRALIZED
and num_voters
and num_candidates > obj.votes_allowed
)
if obj.publish_state != obj.PublishStates.ELECTION_NOT_DECENTRALIZED:
txt = ''
icon = DoneIcon()
else:
txt = _('Choose the blockchain you want to deploy your election smart contract to')
icon = TodoIcon()
try:
has_contract = obj.electioncontract is not None
except Contest.electioncontract.RelatedObjectDoesNotExist:
has_contract = False
super().__init__(
_('Add the election smart contract'),
txt, icon,
MDCButtonOutlined(
_('add'),
icon='add',
tag='a',
p=False,
href=reverse('electioncontract_create', args=[obj.id])
) if not has_contract else None,
separator=separator
)
class OnGoingElectionAction(ListAction):
def __init__(self, contest, user, view):
close_url = reverse('contest_close', args=[contest.id])
close_btn = MDCButtonOutlined(_('close'), False, tag='a', href=close_url)
start_time = '<b>' + _date(contest.actual_start, 'd F, G\hi') + '</b>'
sub_txt = None
if contest.actual_end:
end_time = '<b>' + _date(contest.actual_end, 'd F, G\hi') + '</b>'
title = _('Voting closed')
txt = _('The voting started on %(start)s and was open till %(end)s. '
'Timezone: %(timezone)s.',
start=start_time,
end=end_time,
timezone=str(contest.timezone)
)
txt = mark_safe(txt)
icon = SimpleCheckIcon()
else:
vote_link = reverse('otp_send') + f'?redirect=' + reverse('contest_vote', args=[contest.id])
vote_link = view.request.build_absolute_uri(vote_link)
end_time = '<b>' + _date(contest.end, 'd F, G\hi') + '</b>'
title = _('The voting process is currently ongoing')
txt = _('The voting started on %(time_start)s and will be closed at %(time_end)s. '
'Timezone: %(timezone)s',
time_start=start_time,
time_end=end_time,
timezone=str(contest.timezone)
)
if contest.mediator == user:
sub_txt = _('Vote link: %(link)s',
link=f'<a href={vote_link}>{vote_link}</a>'
)
icon = OnGoingIcon()
inner = Span(
txt,
CList(Br(), Br(), sub_txt) if sub_txt else None,
cls='body-2 red-button-container'
)
if contest.mediator == user and not contest.actual_end:
inner.addchild(close_btn)
separator = (
contest.actual_end
or contest.mediator == user
or contest.guardian_set.filter(user=user).count()
)
super().__init__(
title,
inner,
icon,
None,
separator=separator
)
class UploadPrivateKeyAction(ListAction):
def __init__(self, contest, user):
guardian = contest.guardian_set.filter(user=user).first()
title = _('Upload my private key')
icon = TodoIcon()
content = Div(
_('All guardians need to upload their private keys so that'
' the ballot box can be opened to reveal the results.'))
if contest.actual_end and not guardian.uploaded:
action_url_ = reverse('guardian_upload', args=[guardian.id])
action_btn_ = MDCButtonOutlined(
_('upload my private key'),
False,
tag='a',
href=action_url_)
content.addchild(action_btn_)
elif guardian.uploaded:
icon = DoneIcon()
super().__init__(
title,
content,
icon,
None,
separator=user == contest.mediator
)
class UnlockBallotAction(ListAction):
def __init__(self, contest, user):
self.contest = contest
self.user = user
self.has_action = False
guardian = contest.guardian_set.filter(user=user).first()
n_guardian = contest.guardian_set.count()
n_uploaded = contest.guardian_set.exclude(uploaded=None).count()
if contest.actual_end:
task_list = Ol()
txt = _('All guardians upload their keys %(uploaded)s/%(guardian)s uploaded',
n=n_uploaded,
uploaded=n_uploaded,
guardian=n_guardian
)
cls='bold'
if n_uploaded == n_guardian:
cls = 'line'
task_list.addchild(Li(txt, cls=cls))
cls = 'bold' if cls == 'line' else ''
txt = _('Unlock the ballot box with encrypted ballots and reveal the results')
task_list.addchild(Li(txt, cls=cls))
content = Span(
P(
_('All guardians need to upload their private keys so that the ballot box can be opened to reveal the results.')
),
task_list,
cls='body-2'
)
else:
content = Span(
P(_('When the election is over the guardians use their keys to open the ballot box and count the results.')),
cls='body-2'
)
title = _('Unlocking the ballot box and revealing the results')
if (contest.actual_end
and not self.has_action
and n_guardian == n_uploaded
):
action_url_ = reverse('contest_decrypt', args=(contest.id,))
action_btn_ = MDCButton(
_('reveal results'),
True,
tag='a',
href=action_url_,
disabled=n_guardian != n_uploaded)
content.addchild(action_btn_)
icon = TodoIcon()
super().__init__(
title,
content,
icon,
None,
separator=False,
)
class WaitForEmailAction(ListAction):
def __init__(self, contest, user):
super().__init__(
_('Once the ballots are counted you will be notified by email'),
'',
EmailIcon(), None, separator=False
)
class ResultAction(ListAction):
def __init__(self, contest, user):
subtext = Div()
if contest.decrypting:
icon = OnGoingIcon()
title = _('Tallying in progress')
if contest.mediator == user:
subtext.addchild(
Div(_('An email will be sent when finished'))
)
else:
icon = DoneIcon()
title = _('Results available')
if contest.mediator == user:
subtext.addchild(
Div(_('Congratulations! You have been the mediator of a secure election.')))
url=reverse('contest_result', args=[contest.id])
result_btn = MDCButton(_('view result table'), tag='a', href=url)
subtext.addchild(result_btn)
super().__init__(
title,
subtext,
icon,
None,
separator=False
)
class ContestVotingCard(Div):
def __init__(self, view, **context):
contest = view.get_object()
user = view.request.user
list_content = []
actions = []
if contest.voter_set.filter(user=user).count():
actions.append('vote')
if contest.mediator == user:
actions.append('close')
guardian = contest.guardian_set.filter(user=user).first()
if guardian:
actions.append('upload')
if 'vote' in actions:
list_content.append(CastVoteAction(contest, user))
list_content.append(OnGoingElectionAction(contest, user, view))
if 'upload' in actions:
list_content.append(UploadPrivateKeyAction(contest, user))
if contest.mediator == user:
list_content.append(UnlockBallotAction(contest, user))
elif guardian.uploaded:
if contest.mediator != user:
list_content.append(WaitForEmailAction(contest, user))
if not len(actions):
list_content.append(WaitForEmailAction(contest, user))
about = mark_safe(escape(contest.about).replace('\n', '<br>'))
super().__init__(
H4(contest.name, style='word-break: break-all;'),
Div(
about,
style='padding: 12px; word-break: break-all;',
cls='subtitle-2'
),
Ul(
*list_content,
cls='mdc-list action-list'
),
cls='setting-section main-setting-section'
)
class ContestSettingsCard(Div):
def __init__(self, view, **context):
contest = view.get_object()
user = view.request.user
list_content = []
if contest.mediator == view.request.user:
list_content += [
BasicSettingsAction(contest),
AddCandidateAction(contest),
AddVoterAction(contest),
ChooseBlockchainAction(contest, user),
]
if (
contest.voter_set.count()
and contest.candidate_set.count()
and contest.candidate_set.count() > contest.number_elected
):
if contest.publish_state != contest.PublishStates.ELECTION_NOT_DECENTRALIZED:
list_content.append(SecureElectionAction(contest, user))
else:
list_content.append(SecureElectionAction(contest, user))
about = mark_safe(escape(contest.about).replace('\n', '<br>'))
super().__init__(
H4(contest.name, style='word-break: break-all;'),
Div(
about,
style='padding: 12px; word-break: break-all;',
cls='subtitle-2'
),
Ul(
*list_content,
cls='mdc-list action-list'
),
cls='setting-section main-setting-section'
)
class Section(Div):
pass
class TezosSecuredCard(Section):
def __init__(self, contest, user):
link = None
blockchain = None
if contest.publish_state != contest.PublishStates.ELECTION_NOT_DECENTRALIZED:
try:
contract = contest.electioncontract
blockchain = contract.blockchain
link = A(
contract.contract_address,
href=getattr(contract, 'explorer_link', ''),
style='text-overflow: ellipsis; overflow: hidden; width: 100%;'
)
except ObjectDoesNotExist:
pass # no contract
def step(s):
return Span(
Span(s, style='width: 100%'),
link,
style='display: flex; flex-flow: column wrap'
)
super().__init__(
Ul(
ListAction(
_('Secured and decentralised with Tezos'),
Span(
_('Your election data and results will be published'
' on Tezos’ %(blockchain)s blockchain.',
blockchain=blockchain
),
PublishProgressBar([
step(_('Election contract created')),
step(_('Election opened')),
step(_('Election closed')),
step(_('Election Results available')),
step(_('Election contract updated')),
], contest.publish_state - 1),
) if contest.publish_state else None,
TezosIcon(),
None,
separator=False
),
cls='mdc-list action-list',
),
cls='setting-section', style='background-color: aliceblue;'
)
class CheckedIcon(MDCIcon):
def __init__(self):
super().__init__('check_circle', cls='material-icons icon green2')
class GuardianActionButton(CList):
def __init__(self, guardian, action):
url = reverse(f'guardian_{action}', args=[guardian.id])
if action == 'download':
btn = DownloadBtn(
_('Download'),
'file_download',
tag='a',
href=url,
data_filename=f'guardian-{guardian.id}.pkl')
elif action == 'verify':
btn = MDCTextButton(_('Upload'), 'file_upload', tag='a', href=url)
super().__init__(btn)
class GuardianTable(Div):
def __init__(self, view, **context):
table_head_row = Tr(cls='mdc-data-table__header-row')
for th in (_('email'), _('key downloaded'), _('key verified')):
table_head_row.addchild(
Th(
th,
role='columnheader',
scope='col',
cls='mdc-data-table__header-cell overline',
style='width: 50%' if th == 'email' else 'text-align: center;'
)
)
table_content = Tbody(cls='mdc-data-table__content')
contest = view.get_object()
cls = 'mdc-data-table__cell'
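        # The requesting user's own row gets action buttons (download, then verify)
        # until each step is done; other guardians' rows only show read-only status.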
for guardian in contest.guardian_set.all():
if guardian.user == view.request.user:
if not guardian.downloaded:
dl_elem = GuardianActionButton(guardian, 'download')
ul_elem = '--'
else:
if not guardian.verified:
dl_elem = GuardianActionButton(guardian, 'download')
ul_elem = GuardianActionButton(guardian, 'verify')
else:
dl_elem = CheckedIcon()
ul_elem = CheckedIcon()
table_content.addchild(Tr(
Td(guardian.user.email, cls=cls),
Td(
dl_elem,
cls=cls + ' center'),
Td(
ul_elem,
cls=cls + ' center'),
cls='mdc-data-table__row'
))
else:
table_content.addchild(Tr(
Td(guardian.user.email, cls=cls),
Td(
CheckedIcon() if guardian.downloaded else 'No',
cls=cls + ' center'),
Td(
CheckedIcon() if guardian.verified else 'No',
cls=cls + ' center'),
cls='mdc-data-table__row'
))
table = Table(
Thead(table_head_row),
table_content,
**{
'class': 'mdc-data-table__table',
'aria-label': 'Guardians'
}
)
super().__init__(table, cls='table-container guardian-table')
class GuardiansSettingsCard(Div):
def __init__(self, view, **context):
contest = view.get_object()
super().__init__(
H5(_('Guardians')),
GuardianTable(view, **context),
cls='setting-section'
)
class CandidatesSettingsCard(Div):
def __init__(self, view, **context):
contest = view.get_object()
editable = (view.request.user == contest.mediator
and not contest.actual_start)
kwargs = dict(p=False, tag='a')
if contest.candidate_set.count():
if editable:
kwargs['href'] = reverse('contest_candidate_create', args=[contest.id])
btn = MDCButtonOutlined(_('view all/edit'), **kwargs)
else:
kwargs['href'] = reverse('contest_candidate_list', args=[contest.id])
btn = MDCButtonOutlined(_('view all'), **kwargs)
else:
if editable:
kwargs['href'] = reverse('contest_candidate_create', args=[contest.id])
btn = MDCButtonOutlined(_('add'), icon='add', **kwargs)
else:
btn = None
super().__init__(
H5(_('Candidates')),
CandidateListComp(contest, editable),
btn,
cls='setting-section'
)
class VotersSettingsCard(Div):
def __init__(self, view, **context):
contest = view.get_object()
num_emails = contest.voter_set.all().count()
kwargs = dict(
p=False,
tag='a',
href=reverse('contest_voters_detail', args=[contest.id]))
if contest.actual_start:
btn = MDCButtonOutlined(_('view all'), **kwargs)
elif num_emails:
btn = MDCButtonOutlined(_('view all/edit'), **kwargs)
else:
kwargs['href'] = reverse('contest_voters_update', args=[contest.id])
btn = MDCButtonOutlined(_('add'), icon='add', **kwargs)
super().__init__(
H5(_('Voters')),
Span(_('%(voters)s voters added', n=num_emails, voters=num_emails), cls='voters_count'),
btn,
cls='setting-section'
)
class ContestFinishedCard(Div):
def __init__(self, view, **context):
contest = view.get_object()
is_voter = False
if contest.voter_set.filter(user=view.request.user).count():
is_voter = True
about = mark_safe(escape(contest.about).replace('\n', '<br>'))
super().__init__(
H4(contest.name, style='word-break: break-all'),
Div(
about,
style='padding: 12px; word-break: break-all;',
cls='subtitle-2'
),
Ul(
CastVoteAction(contest, view.request.user)
if is_voter else None,
ResultAction(contest, view.request.user),
cls='mdc-list action-list'
),
cls='setting-section main-setting-section'
)
@template('djelectionguard/contest_detail.html', Document)
class ContestCard(Div):
def to_html(self, *content, view, **context):
contest = view.get_object()
if contest.plaintext_tally or contest.decrypting:
main_section = ContestFinishedCard(view, **context)
elif contest.actual_start:
main_section = ContestVotingCard(view, **context)
else:
main_section = ContestSettingsCard(view, **context)
action_section = Div(
main_section,
TezosSecuredCard(contest, view.request.user),
cls='main-container')
sub_section = Div(
CandidatesSettingsCard(view, **context),
cls='side-container')
if (
contest.mediator == view.request.user
or contest.guardian_set.filter(user=view.request.user).count()
):
action_section.addchild(GuardiansSettingsCard(view, **context))
if contest.mediator == view.request.user:
sub_section.addchild(VotersSettingsCard(view, **context))
return super().to_html(
Div(
Div(
BackLink(_('my elections'), reverse('contest_list')),
cls='main-container'),
Div(cls='side-container'),
action_section,
sub_section,
cls='flex-container'
)
)
class CandidateDetail(Div):
def __init__(self, candidate, editable=False, **kwargs):
if editable:
kwargs['tag'] = 'a'
kwargs['href'] = reverse('contest_candidate_update', args=[candidate.id])
kwargs['style'] = 'margin-left: auto; margin-top: 12px;'
extra_style = 'align-items: baseline;'
content = []
if candidate.picture:
extra_style = ''
content.append(
Div(
Image(
loading='eager',
src=candidate.picture.url,
style='width: 100%;'
'display: block;'
),
style='width: 150px; padding: 12px;'
)
)
subcontent = Div(
H5(
candidate.name,
style='margin-top: 6px; margin-bottom: 6px; word-break: break-all;'
),
I(
candidate.subtext,
style=dict(
font_size='small',
font_weight='initial',
word_break='break-all',
)
),
style='flex: 1 1 65%; padding: 12px;'
)
if candidate.description:
description = mark_safe(escape(candidate.description).replace('\n', '<br>'))
subcontent.addchild(
Div(
description,
style='margin-top: 24px; word-break: break-all;'
)
)
content.append(subcontent)
if editable and not candidate.description:
content.append(
MDCButtonOutlined(_('edit'), False, 'edit', **kwargs)
)
elif editable:
subcontent.addchild(
MDCButtonOutlined(_('edit'), False, 'edit', **kwargs)
)
if 'style' not in kwargs:
kwargs['style'] = ''
super().__init__(
*content,
style='padding: 12px;'
'display: flex;'
'flex-flow: row wrap;'
'justify-content: center;'
+ kwargs.pop('style')
+ extra_style,
cls='candidate-detail',
)
class CandidateAccordionItem(MDCAccordionSection):
tag = 'candidate-list-item'
def __init__(self, candidate, editable=False):
super().__init__(
CandidateDetail(candidate, editable),
label=candidate.name,
)
class CandidateAccordion(MDCAccordion):
tag = 'candidate-accordion'
def __init__(self, contest, editable=False):
super().__init__(
*(
CandidateAccordionItem(candidate, editable)
for candidate
in contest.candidate_set.all()
) if contest.candidate_set.count()
else [_('No candidate yet.')]
)
class CandidateListComp(MDCList):
tag = 'candidate-list'
def __init__(self, contest, editable=False):
qs = contest.candidate_set.all()[:]
def candidates(qs):
for candidate in qs:
attrs = dict()
if editable:
attrs['tag'] = 'a'
attrs['href'] = reverse(
'contest_candidate_update',
args=[candidate.id]
)
yield (candidate, attrs)
super().__init__(
*(
MDCListItem(candidate, **attrs)
for candidate, attrs in candidates(qs)
) if qs.count()
else [_('No candidate yet.')]
)
class VoterList(Ul):
def __init__(self, contest):
emails = contest.voters_emails.split('\n')
num_emails = len(emails)
if emails[0] == '':
num_emails = 0
super().__init__(
*(
MDCListItem(voter)
for voter
in emails
) if num_emails
else _('No voter yet.'),
cls='mdc-list voters-list'
)
class ClipboardCopy(MDCTextButton):
def onclick(target):
target.previousElementSibling.select()
document.execCommand('copy')
@template('djelectionguard/candidate_list.html', Document, Card)
class CandidateList(Div):
def to_html(self, *content, view, **context):
contest = view.get_object()
self.backlink = BackLink(_('back'), reverse('contest_detail', args=[contest.id]))
return super().to_html(
H4(_('Candidates'), cls='center-text'),
CandidateAccordion(
contest,
view.request.user == contest.mediator and not contest.actual_start
)
)
@template('djelectionguard/contest_voters_detail.html', Document)
class VotersDetailCard(Div):
style = dict(cls='card')
def to_html(self, *content, view, **context):
contest = view.object
self.backlink = BackLink(_('back'), reverse('contest_detail', args=[contest.id]))
voters = contest.voter_set.select_related('user')
table_head_row = Tr(cls='mdc-data-table__header-row')
for th in ('email', 'vote email sent', 'voted', 'tally email sent'):
table_head_row.addchild(
Th(
th,
role='columnheader',
scope='col',
cls='mdc-data-table__header-cell overline',
style='' if th == 'email' else 'text-align: center;'
)
)
table_head_row.addchild(Th('OTP'))
table_content = Tbody(cls='mdc-data-table__content')
cls = 'mdc-data-table__cell'
for voter in voters:
otp_link = None
if not voter.casted:
redirect = reverse('contest_vote', args=[contest.pk])
else:
redirect = reverse('contest_detail', args=[contest.pk])
token = voter.user.token_set.filter(
redirect=redirect,
used=None,
expiry__gt=timezone.now(),
).first()
if token:
otp_link = CList(
Input(
value=token.url,
style='opacity: 0; position: absolute',
),
ClipboardCopy(_('Copy link'), icon='content_copy'),
)
else:
otp_link = MDCTextButton(
'Request OTP',
href=''.join([
reverse('otp_send'),
'?email=',
voter.user.email,
'&redirect=',
redirect,
'&next=',
view.request.path_info,
]),
tag='a',
icon='shield',
)
activated = voter.user and voter.user.is_active
open_email_sent = (
voter.open_email_sent.strftime("%d/%m/%Y %H:%M")
if voter.open_email_sent else ''
)
close_email_sent = (
voter.close_email_sent.strftime("%d/%m/%Y %H:%M")
if voter.close_email_sent else ''
)
table_content.addchild(Tr(
Td(voter.user.email, cls=cls),
Td(
open_email_sent,
cls=cls + ' center',
),
Td(CheckedIcon() if voter.casted else 'No', cls=cls + ' center'),
Td(
close_email_sent,
cls=cls + ' center',
),
Td(
otp_link,
cls=cls + ' center',
),
cls='mdc-data-table__row',
style='opacity: 0.5;' if not activated else ''
))
table = Table(
Thead(table_head_row),
table_content,
**{
'class': 'mdc-data-table__table',
'aria-label': 'Voters'
}
)
edit_btn = MDCButtonOutlined(
_('edit voters'),
False,
'edit',
tag='a',
href=reverse('contest_voters_update', args=[contest.id]))
email_btn = MDCButtonOutlined(
_('invite new voters'),
False,
'email',
tag='a',
href=reverse('email_voters', args=[contest.id]))
if contest.actual_end:
edit_btn = ''
email_btn = ''
if not voters.filter(open_email_sent=None).count():
email_btn = ''
return super().to_html(
H4(
_('%(count)s Voters', n=voters.count(), count=voters.count()),
cls='center-text'
),
Div(edit_btn, email_btn, cls='center-button'),
Div(
table,
cls='table-container'),
)
class ContestCandidateForm(Div):
def __init__(self, form):
self.form = form
self.count = 0
if form.instance and form.instance.description:
self.count = len(form.instance.description)
super().__init__(form)
def init_counter(form_id, count):
form = getElementByUuid(form_id)
counter = form.querySelector('.mdc-text-field-character-counter')
counter.innerHTML = count + '/300'
def update_counter(event):
field = event.currentTarget
current_count = field.value.length
if current_count > 300:
field.value = field.value.substr(0, 300)
current_count = 300
parent = field.parentElement.parentElement.parentElement
counter = parent.querySelector('.mdc-text-field-character-counter')
counter.innerHTML = current_count + '/300'
def py2js(self):
self.init_counter(self.id, self.count)
field = document.getElementById('id_description')
field.addEventListener('keyup', self.update_counter)
@template('djelectionguard/candidate_form.html', Document, Card)
class ContestCandidateCreateCard(Div):
def to_html(self, *content, view, form, **context):
contest = view.get_object()
editable = (view.request.user == contest.mediator
and not contest.actual_start)
self.backlink = BackLink(_('back'), reverse('contest_detail', args=[contest.id]))
form_component = ''
if editable:
form_component = Form(
ContestCandidateForm(form),
CSRFInput(view.request),
MDCButton(_('Add candidate'), icon='person_add_alt_1'),
method='POST',
cls='form')
count = contest.candidate_set.count()
return super().to_html(
H4(
_('%(count)s Candidates', n=count, count=count),
cls='center-text'
),
CandidateAccordion(contest, editable),
H5(_('Add a candidate'), cls='center-text'),
form_component,
cls='card'
)
@template('djelectionguard/candidate_update.html', Document, Card)
class ContestCandidateUpdateCard(Div):
def to_html(self, *content, view, form, **context):
candidate = view.get_object()
contest = candidate.contest
self.backlink = BackLink(
_('back'),
reverse('contest_candidate_create', args=[contest.id]))
delete_btn = MDCTextButton(
_('delete'),
'delete',
tag='a',
href=reverse('contest_candidate_delete', args=[candidate.id]))
return super().to_html(
H4(
_('Edit candidate'),
style='text-align: center;'
),
Form(
CSRFInput(view.request),
ContestCandidateForm(form),
Div(
Div(delete_btn, cls='red-button-container'),
MDCButton(_('Save'), True),
style='display: flex; justify-content: space-between'),
method='POST',
cls='form'),
cls='card'
)
@template('djelectionguard/voters_update.html', Document, Card)
class ContestVotersUpdateCard(Div):
def to_html(self, *content, view, form, **context):
contest = view.get_object()
self.backlink = BackLink(
_('back'),
reverse('contest_detail', args=[contest.id]))
voters = contest.voter_set.all()
count = voters.count()
return super().to_html(
H4(_('%(count)s Voters', n=count, count=count), style='text-align: center;'),
            Div(_('The list of allowed voters with one email per line (separated by Enter/Return ⏎)'), cls='body-2', style='margin-bottom: 24px;text-align: center;'),
Form(
CSRFInput(view.request),
form,
MDCButton(_('Save')),
method='POST',
cls='form'
),
cls='card'
)
@template('djelectionguard/guardian_form.html', Document, Card)
class GuardianVerifyCard(Div):
def to_html(self, *content, view, form, **context):
guardian = view.get_object()
contest = guardian.contest
self.backlink = BackLink(
_('back'),
reverse('contest_detail', args=[contest.id]))
self.submit_btn = MDCButton(_('confirm'), True, disabled=True)
self.submit_btn_id = self.submit_btn.id
return super().to_html(
H4(_('Confirm possession of an uncompromised private key'), cls='center-text'),
            Div(_('You need to upload your private key to confirm that you possess a valid key that hasn’t been tampered with.'), cls='center-text'),
Form(
MDCFileField(
Input(id='file_input', type='file', name='pkl_file'),
label=_('Choose file')),
                Span(_("Your private key is a file with the '.pkl' extension."), cls='body-2'),
self.submit_btn,
CSRFInput(view.request),
enctype='multipart/form-data',
method='POST',
cls='form',
),
cls='card'
)
def enable_post(event):
file_input = document.querySelector('#file_input')
file_name = file_input.value
btn = getElementByUuid(file_input.submit_btn_id)
btn.disabled = file_name == ''
def py2js(self):
file_input = document.querySelector('#file_input')
file_input.submit_btn_id = self.submit_btn_id
file_input.addEventListener('change', self.enable_post)
@template('djelectionguard/contest_pubkey.html', Document, Card)
class ContestPubKeyCard(Div):
def to_html(self, *content, view, form, **context):
contest = view.get_object()
self.backlink = BackLink(
_('back'),
reverse('contest_detail', args=[contest.id]))
return super().to_html(
H4(_('Lock the ballot box'), cls='center-text'),
Div(
P(_('This will remove all guardians’ private keys from the server memory.')),
P(_('When the voting is over the ballot box can only be opened when all guardians upload their private keys.')),
P(_('This is what makes the governing of the election decentralised.'))
),
Form(
CSRFInput(view.request),
Div(
MDCButton(_('create')),
style='width: fit-content; margin: 0 auto;'
),
method='POST',
cls='form',
),
cls='card'
)
@template('email_voters', Document, Card)
class ContestEmailVoters(Div):
def to_html(self, *content, view, **context):
contest = view.get_object()
self.backlink = BackLink(
_('back'),
reverse('contest_voters_detail', args=[contest.id]))
return super().to_html(
H4(_('Send an invite to the newly added voters'), cls='center-text'),
Form(
context['form']['email_title'],
context['form']['email_message'],
CSRFInput(view.request),
MDCButton(context['form'].submit_label),
method='POST',
cls='form'
),
cls='card'
)
@template('contest_open', Document, Card)
class ContestOpenCard(Div):
def to_html(self, *content, view, **context):
contest = view.get_object()
self.backlink = BackLink(
_('back'),
reverse('contest_detail', args=[contest.id]))
return super().to_html(
H4(_('Open the election for voting'), cls='center-text'),
Div(
P(_('Once you open the election for voting you can’t make changes to it.')),
cls='center-text'
),
Form(
context['form']['email_title'],
context['form']['email_message'],
MDCMultipleChoicesCheckbox(
'send_email',
((0, B(_('Do not alert voters by email')), 'true'),),
n=1
),
CSRFInput(view.request),
MDCButton(_('open')),
method='POST',
cls='form'
),
cls='card'
)
class DialogConfirmForm(Form):
def __init__(self, *content, selections=[], max_selections=1, **attrs):
def hidden_selections():
for s in selections:
candidate = CandidateDetail(s)
candidate.style.display = 'none'
candidate.attrs['data-candidate-id'] = s.id
yield candidate
self.max_selections = max_selections
actions = MDCDialogActions(
MDCDialogCloseButtonOutlined(_('modify')),
MDCDialogAcceptButton(
_('confirm'),
addcls='mdc-button--raised black-button',
),
style={
'display': 'flex',
'justify-content': 'space-around'
}
)
self.remaining_text_start = str(_('If you want it, you have'))
self.remaining_text_end = str(_('choice left'))
self.remaining_text_end_plural = str(_('choices left'))
super().__init__(
*content,
MDCDialog(
_('Confirm your selection'),
Div(
_('Be careful, once confirmed,'
' your choice is definitive and cannot be changed'),
*hidden_selections(),
),
actions=Div(
actions,
Div(
Span(id='remaining'),
style=dict(
background='aliceblue',
text_align='center',
padding='12px',
margin='24px',
margin_top='0'
),
),
)
),
**attrs
)
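    # The handlers below follow ryzom's py2js convention: they are delivered to the
    # browser as JavaScript, so they operate on DOM nodes (the confirmation dialog)
    # rather than on server-side objects.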
def ondialogclosed(event):
candidates = event.currentTarget.querySelectorAll('[data-candidate-id]')
for candidate in candidates:
candidate.style.display = 'none'
def ondialogclosing(event):
if event.detail.action == 'accept':
form.submit()
def handle_submit(event):
event.preventDefault()
this.dialog = this.querySelector('mdc-dialog')
selections = new.FormData(this).getAll('selections')
for selection in selections:
candidate = this.dialog.querySelector(
'[data-candidate-id="' + selection + '"]'
)
candidate.style.display = 'flex'
remaining = this.max_selections - len(selections)
self.update_remaining(this, remaining)
this.dialog.onclosing = self.ondialogclosing
this.dialog.onclosed = self.ondialogclosed
this.dialog.open()
def update_remaining(form, remaining):
elem = document.querySelector('#remaining')
remaining_text = (
form.remaining_text_start + ' ' + remaining + ' '
)
if remaining > 1:
remaining_text += form.remaining_text_end_plural
else:
remaining_text += form.remaining_text_end
if remaining == 0:
elem.parentElement.style.display = 'none'
else:
elem.innerHTML = remaining_text
elem.parentElement.style.display = 'block'
def py2js(self):
form = getElementByUuid(self.id)
form.max_selections = self.max_selections
form.remaining_text_start = self.remaining_text_start
form.remaining_text_end = self.remaining_text_end
form.remaining_text_end_plural = self.remaining_text_end_plural
form.addEventListener('submit', self.handle_submit.bind(form))
@template('contest_vote', Document, Card)
class ContestVoteCard(Div):
def to_html(self, *content, view, form, **context):
contest = view.get_object()
self.backlink = BackLink(
_('back'),
reverse('contest_detail', args=[contest.id]))
max_selections = contest.votes_allowed
number_elected = contest.number_elected
candidates = list(contest.candidate_set.all())
random.shuffle(candidates)
choices = (
(i, CandidateDetail(candidate), candidate.id)
for i, candidate
in enumerate(candidates))
about = mark_safe(escape(contest.about).replace('\n', '<br>'))
return super().to_html(
H4(contest.name, cls='center-text', style='word-break: break-all'),
Div(
about,
cls='center-text body-2',
style='word-break: break-all'
),
Div(
_('You have up to a total of %(vote_allowed)s choice',
n=max_selections,
vote_allowed=max_selections
),
style='opacity: 0.6'
),
Ul(
*[Li(e) for e in form.non_field_errors()],
cls='error-list'
),
DialogConfirmForm(
CSRFInput(view.request),
MDCMultipleChoicesCheckbox(
'selections',
choices,
n=max_selections),
MDCButton(_('create ballot')),
selections=candidates,
max_selections=max_selections,
method='POST',
cls='form vote-form',
),
cls='card'
)
@template('djelectionguard/vote_success', Document, Card)
class ContestVoteSuccessCard(Div):
def to_html(self, *content, view, **context):
voter = view.get_object()
self.backlink = BackLink(
_('back'),
reverse('contest_detail', args=[voter.contest.id])
)
track_link = reverse('tracker_detail', args=[voter.id])
return super().to_html(
H4(
DoneIcon(),
_('Your vote has been validated!'),
style='text-align:center;'
),
Div(
_('Thank you for your participation.'
                ' Your secret vote has been taken into account.'
' You can, if you want, close this page.'),
style=dict(
margin_top='50px'
)
),
Div(
B(
_('How does electronic voting work?'),
style=dict(
text_align='center',
display='block'
)
),
P(
_('ELECTRONIC_VOTE_EXPLAINATION'),
' ',
A(_('here'), href=track_link)
),
style=dict(
background='aliceblue',
margin_top='50px',
padding='12px',
opacity='0.6'
)
)
)
@template('contest_close', Document, Card)
class ContestCloseCard(Div):
def to_html(self, *content, view, **context):
contest = view.get_object()
self.backlink = BackLink(
_('back'),
reverse('contest_detail', args=[contest.id]))
return super().to_html(
H4(_('Manual closing of the election'), cls='center-text'),
Div(
P(_('This will stop the voting process and it can\'t be undone.')),
cls='center-text body-2'),
Form(
CSRFInput(view.request),
Div(
MDCButtonOutlined(_('close the election now'), False),
style='margin: 0 auto; width: fit-content',
cls='red-button-container'),
method='POST',
cls='form'),
cls='card',
)
@template('guardian_upload', Document, Card)
class GuardianUploadKeyCard(Div):
def to_html(self, *content, view, form, **context):
guardian = view.get_object()
contest = guardian.contest
self.backlink = BackLink(
_('back'),
reverse('contest_detail', args=[contest.id]))
self.submit_btn = MDCButton(_('confirm'), True, disabled=True)
self.submit_btn_id = self.submit_btn.id
return super().to_html(
H4(_('Verify your private key'), cls='center-text'),
            Div(_('All guardians must upload their valid private keys to unlock the ballot box.'), cls='center-text'),
Form(
MDCFileField(
Input(id='file_input', type='file', name='pkl_file'),
label=_('Choose file')),
                Span(_("Your private key is a file with the '.pkl' extension."), cls='body-2'),
MDCErrorList(form.non_field_errors()),
self.submit_btn,
CSRFInput(view.request),
enctype='multipart/form-data',
method='POST',
cls='form',
),
cls='card'
)
def py2js(self):
file_input = document.querySelector('#file_input')
file_input.submit_btn_id = self.submit_btn_id
file_input.addEventListener('change', self.enable_post)
def enable_post(event):
file_input = document.querySelector('#file_input')
file_name = file_input.value
btn = getElementByUuid(file_input.submit_btn_id)
btn.disabled = file_name == ''
@template('contest_decrypt', Document, Card)
class ContestDecryptCard(Div):
def to_html(self, *content, view, **context):
contest = view.get_object()
self.backlink = BackLink(
_('back'),
reverse('contest_detail', args=[contest.id]))
return super().to_html(
H4(_('Open ballot box'), cls='center-text'),
Div(
P(_('This process will erase all guardian keys from server memory.')),
cls='center-text body-2'),
Form(
context['form']['email_title'],
context['form']['email_message'],
MDCMultipleChoicesCheckbox(
'send_email',
((0, B(_('Do not alert voters by email')), 'true'),),
n=1
),
CSRFInput(view.request),
MDCButton(_('open and view results')),
method='POST',
cls='form'),
cls='card',
)
@template('contest_publish', Document, Card)
class ContestPublishCard(Div):
def to_html(self, *content, view, form , **ctx):
return super().to_html(
H4(_('Publish your election results'), cls='center-text'),
Div(
P(_('This will decentralize your election results.')),
cls='center-text body-2'),
Form(
CSRFInput(view.request),
Div(
MDCButton(_('publish results')),
style='width: fit-content; margin: 0 auto;'
),
method='POST',
cls='form'),
cls='card',
)
class PublishProgressBar(Div):
def __init__(self, _steps, step=0):
self.nsteps = len(_steps)
self.step = step
steps = [
Span(
cls=f'progress-step progress-step--disabled',
**{'data-step': s})
for s in range(0, self.nsteps)
]
if 0 <= step < self.nsteps:
steps[step].attrs['class'] += ' progress-step--active'
super().__init__(
MDCLinearProgress(),
Div(
*steps,
cls='progress-bar__steps'
),
Span(_steps[step], cls='center-text overline'),
cls='progress-bar',
style='margin: 24px auto'
)
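    # set_progress and py2js run client-side (py2js): they sync the MDC linear
    # progress bar and the step markers with the current publish step.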
def set_progress(current_step, total_steps):
bar_container = document.querySelector('.progress-bar')
bar = bar_container.querySelector('.mdc-linear-progress')
mdcbar = new.mdc.linearProgress.MDCLinearProgress(bar)
bar.MDCLinearProgress = mdcbar
def step(step):
progress = step / (total_steps - 1)
steps = bar_container.querySelectorAll('.progress-step')
for n in range(total_steps):
s = steps.item(n)
if s.dataset.step > step:
s.classList.remove('progress-step--active')
s.classList.add('progress-step--disabled')
elif s.dataset.step == step:
s.classList.remove('progress-step--disabled')
s.classList.add('progress-step--active')
else:
s.classList.remove('progress-step--active')
s.classList.remove('progress-step--disabled')
bar.MDCLinearProgress.foundation.setProgress(progress)
bar.setStep = step
bar.setStep(current_step)
def py2js(self):
self.set_progress(self.step, self.nsteps)
class ArtifactsLinks(Div):
def __init__(self, contest):
links = Div(style=dict(display='flex', flex_flow='row nowrap', justify_content='space-around'))
if contest.electioncontract.blockchain.explorer:
links.addchild(
Div(
A(_('Election report'), href=contest.electioncontract.explorer_link),
Br(),
_('On Tezos\' blockchain'),
style=dict(text_align='center', color='#888', margin='12px')
)
)
if contest.plaintext_tally:
links.addchild(
Div(
                    A(_('Election data'), href=contest.artifacts_local_url),
Br(),
_('Local data'),
style=dict(text_align='center', color='#888', margin='12px')
)
)
if contest.artifacts_ipfs_url:
links.addchild(
Div(
                    A(_('Election data'), href=contest.artifacts_ipfs_url),
Br(),
_('On IPFS, decentralized'),
style=dict(text_align='center', color='#888', margin='12px')
)
)
super().__init__(links, style=dict(margin_top='32px'))
@template('contest_result', Document, Card)
class ContestResultCard(Div):
def to_html(self, *content, view, **context):
contest = view.get_object()
self.backlink = BackLink(
_('back'),
reverse('contest_detail', args=[contest.id]))
votes = contest.candidate_set.aggregate(total=Sum('score'))
table_head_row = Tr(cls='mdc-data-table__header-row')
kwargs = dict(
role='columnheader',
scope='col',
cls='mdc-data-table__header-cell overline'
)
table_head_row.addchild(Th('candidate', **kwargs))
kwargs['style'] = 'text-align: right;'
table_head_row.addchild(Th('votes', **kwargs))
table_content = Tbody(cls='mdc-data-table__content')
cls = 'mdc-data-table__cell'
for i, candidate in enumerate(contest.candidate_set.order_by('-score')):
num = f'{i + 1}. '
if votes['total']:
score_percent = 100 * candidate.score / votes['total']
score_percent = f'{round(score_percent, 2)} %'
else:
score_percent = '--'
table_content.addchild(
Tr(
Td(
num + candidate.name,
cls=cls,
style='word-break: break-all; white-space: normal'
),
Td(
Span(f'{candidate.score}', cls='body-2'),
Span(f' {score_percent}', cls='text-btn'),
style='text-align: right'),
cls='mdc-data-table__row'))
score_table = Table(
Thead(table_head_row),
table_content,
**{
'class': 'mdc-data-table__table',
'aria-label': 'Scores'
}
)
publish_btn = ''
if (
contest.publish_state == contest.PublishStates.ELECTION_DECRYPTED
and contest.mediator == view.request.user
):
publish_btn = MDCButton(
_('publish results'),
p=True,
icon=WorldIcon(),
tag='a',
href=reverse('contest_publish', args=[contest.id]),
style='margin: 0 auto;')
about = mark_safe(escape(contest.about).replace('\n', '<br>'))
return super().to_html(
H4(_('Results'), cls='center-text'),
Div(
H5(contest.name, style='word-break: break-all'),
Div(
about,
style='padding: 12px; word-break: break-all;',
cls='subtitle-2'
),
publish_btn,
score_table,
cls='table-container score-table center-text'
),
ArtifactsLinks(contest),
cls='card',
)
class GuardianDeleteBtn(A):
def __init__(self, guardian):
self.guardian = guardian
super().__init__(
MDCIcon(
'delete',
cls='delete-icon'),
tag='a',
href=reverse('contest_guardian_delete', args=[guardian.id]))
@template('guardian_create', Document, Card)
class GuardianCreateCard(Div):
def to_html(self, *content, view, form, **context):
contest = view.get_object()
self.backlink = BackLink(_('back'), reverse('contest_detail', args=[contest.id]))
table_head_row = Tr(cls='mdc-data-table__header-row')
for th in ('guardians', ''):
table_head_row.addchild(
Th(
th,
role='columnheader',
scope='col',
cls='mdc-data-table__header-cell overline',
)
)
table_content = Tbody(cls='mdc-data-table__content')
cls = 'mdc-data-table__cell'
for guardian in contest.guardian_set.all():
activated = guardian.user and guardian.user.is_active
table_content.addchild(Tr(
Td(guardian.user.email, cls=cls),
Td(
GuardianDeleteBtn(guardian),
cls=cls,
style='text-align:right'),
cls='mdc-data-table__row',
style='opacity: 0.5;' if not activated else '',
))
table = Table(
Thead(table_head_row),
table_content,
**{
'class': 'mdc-data-table__table',
'aria-label': _('Voters')
}
)
return super().to_html(
H4(_('Add guardians'), cls='center-text'),
Div(
_('Guardians are responsible for locking and unlocking of the ballot box with their private keys.'),
cls='center-text body-1'
),
Div(
B(_('No guardians for speed and simplicity (default).')),
_(' Electis App will technically be your guardian and can secure your ballot box.'),
cls='red-section'),
Div(
B(_('GUARDIAN_HELP_TEXT')),
cls='red-section'),
Form(
form['email'],
CSRFInput(view.request),
MDCButtonOutlined(_('add guardian'), p=False, icon='person_add'),
table,
Div(
form['quorum'],
Span(
MDCButton(_('Save')),
style='margin: 32px 12px'),
style='display:flex;'
'flex-flow: row nowrap;'
'justify-content: space-between;'
'align-items: baseline;'),
method='POST',
cls='form'
),
cls='card'
)
```
#### File: Baloti/djelectionguard/test_models.py
```python
import pytest
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ValidationError
User = apps.get_model(settings.AUTH_USER_MODEL)
@pytest.mark.django_db
def test_manifest(contest, manifest):
assert contest.get_manifest() == manifest
```
#### File: Baloti/djelectionguard_tezos/test_views.py
```python
import pytest
from django.utils import timezone
from djtezos.models import Blockchain
from electeez_auth.models import User
from djelectionguard.models import Contest
@pytest.mark.django_db
def test_create_contract(client):
user = User.objects.create(email='<EMAIL>')
client.force_login(user)
blockchain = Blockchain.objects.create(
name='tzlocal',
provider_class='djtezos.tezos.Provider',
confirmation_blocks=1,
is_active=True,
endpoint='http://tz:8732',
)
account = user.account_set.create(blockchain=blockchain)
election = Contest.objects.create(
mediator=user,
start=timezone.now(),
end=timezone.now(),
)
response = client.post(
f'/en/tezos/{election.pk}/create/',
data=dict(blockchain=str(blockchain.pk)),
)
assert response.status_code == 302
assert response['Location'] == f'/en/contest/{election.pk}/'
```
#### File: Baloti/djlang/views.py
```python
import json
from django.http import HttpResponse
from django.urls import path
from .models import Text
def text_view(request):
data = list(Text.objects.order_by('id', 'key').distinct('id', 'key').values())
return HttpResponse(json.dumps(data, ensure_ascii=False), content_type='application/json')
urlpatterns = [
path('', text_view, name='text')
]
```
#### File: Baloti/electeez_auth/views.py
```python
import textwrap
from django import forms
from django import http
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.backends import BaseBackend
from django.core.exceptions import ValidationError
from django.core.mail import send_mail
from django.views import generic
from django.utils import timezone
from django.urls import include, path, reverse
from django_registration.forms import RegistrationForm
from django_registration.backends.activation.views import RegistrationView
from electeez_common.components import Document, TopPanel, Footer
from .models import Token, User
from django.conf import settings
from djlang.utils import gettext as _
class OTPSend(generic.FormView):
template_name = 'electeez_auth/otp_send.html'
class form_class(forms.Form):
email = forms.EmailField()
submit_label = _('Send magic link')
def clean_email(self):
value = self.cleaned_data['email']
self.user = User.objects.filter(
email__iexact=value
).first()
if not self.user:
raise ValidationError(
_('Could not find registration with email: %(email)s', email=value)
)
return value
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['initial'] = dict(
email=self.request.GET.get('email', '')
)
return kwargs
def form_valid(self, form):
LINK = form.user.otp_new(
redirect=self.request.GET.get('redirect', None)
).url
send_mail(
_('Your magic link'),
textwrap.dedent(str(_('''
Hello,
This is the magic link you have requested: %(link)s
''', allow_insecure=True, link=LINK)
)),
settings.DEFAULT_FROM_EMAIL,
[form.cleaned_data['email']],
)
messages.success(self.request, _('Link sent by email'))
redirect = self.request.GET.get(
'next',
reverse('otp_email_success'),
)
return http.HttpResponseRedirect(redirect)
class OTPEmailSuccess(generic.TemplateView):
template_name = 'electeez_auth/otp_email_success.html'
class OTPLogin(generic.FormView):
template_name = 'electeez_auth/otp_login.html'
form_class = forms.Form
def post(self, request, *args, **kwargs):
token = Token.objects.filter(token=kwargs['token']).first()
if not token:
messages.success(request, _('Invalid magic link.'))
return http.HttpResponseRedirect(reverse('otp_send'))
if token.used or token.expired:
redirect = reverse('otp_send') + '?redirect=' + token.redirect
if token.used:
messages.success(request, _('Magic link already used.'))
return http.HttpResponseRedirect(redirect)
else:
messages.success(request, _('Expired magic link.'))
return http.HttpResponseRedirect(redirect)
token.used = timezone.now()
token.save()
login(request, token.user)
messages.success(request, _('You have been authenticated.'))
return http.HttpResponseRedirect(
request.GET.get(
'next',
token.redirect or reverse('contest_list'),
)
)
```
#### File: Baloti/electeez_common/urls.py
```python
from django import http
from django.contrib import admin, messages
from django.conf import settings
from django.conf.urls.i18n import i18n_patterns
from django.conf.urls import url
from django.conf.urls.static import static
from django.views import generic
from django.views.decorators.csrf import csrf_exempt
from django.urls import include, path, reverse
from django.utils.decorators import method_decorator
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.templatetags.static import static as static_url
from djlang.utils import gettext as _
urlpatterns = [
url(r'^favicon\.ico$', generic.RedirectView.as_view(url='/static/images/favicon.ico')),
]
@method_decorator(csrf_exempt, name='dispatch')
class HomeView(generic.TemplateView):
def dispatch(self, request, *args, **kwargs):
url = reverse('login')
if request.user.is_authenticated:
url = reverse('contest_list')
elif home_page := getattr(settings, 'STATIC_HOME_PAGE', None):
url = static_url(home_page)
elif template := getattr(settings, 'HOME_TEMPLATE', None):
self.template_name = template
return super().dispatch(request, *args, **kwargs)
return http.HttpResponseRedirect(url)
def post(self, request, *args, **kwargs):
if 'inputEmail' in request.POST:
messages.success(
request,
_('You have successfully subscribed to %(app)s mailing list',
app=_('Neuilly Vote')
)
)
return self.get(request, *args, **kwargs)
urlpatterns += i18n_patterns(
path('admin/', admin.site.urls),
path('accounts/', include('electeez_auth.urls')),
path('contest/', include('djelectionguard.urls')),
path('tezos/', include('djelectionguard_tezos.views')),
path('track/', include('djelectionguard_tracker.views')),
path('lang/', include('djlang.views')),
path('', HomeView.as_view(), name='home'),
)
if settings.DEBUG:
urlpatterns.append(
path('bundles/', include('ryzom_django.bundle')),
)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += staticfiles_urlpatterns('/static/')
urlpatterns += staticfiles_urlpatterns()
if settings.DEBUG:
import debug_toolbar
urlpatterns += [path('__debug__/', include(debug_toolbar.urls))]
```
#### File: Baloti/electeez_sites/models.py
```python
from django.conf import settings
from django.contrib.sites.models import Site, SiteManager
from django.db import models
class SiteManager(SiteManager):
def get_current(self):
if settings.DEBUG:
self.clear_cache()
return super().get_current()
class Site(Site):
contact_email = models.EmailField(default='<EMAIL>')
sender_email = models.EmailField(default='<EMAIL>')
all_users_can_create = models.BooleanField(default=True)
all_results_are_visible = models.BooleanField(default=True)
footer_url = models.CharField(max_length=255, default='https://electis.app')
objects = SiteManager()
``` |
{
"source": "joneswong/AutoGraph",
"score": 3
} |
#### File: code_submission/algorithms/gnn_tricks.py
```python
import torch
from torch_geometric.utils import degree
# todo (daoyuan): add pair_norm and batch_norm
class GraphSizeNorm(torch.nn.Module):
def __init__(self):
super(GraphSizeNorm, self).__init__()
def forward(self, x, batch=None):
if batch is None:
batch = torch.zeros(x.size(0), dtype=torch.long, device=x.device)
inv_sqrt_deg = degree(batch, dtype=x.dtype).pow(-0.5)
return x * inv_sqrt_deg[batch].view(-1, 1)
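# Usage sketch (shapes are assumptions): x is [num_nodes, num_features] and batch maps each node
# to its graph id; degree(batch) counts the nodes per graph, so every node's feature vector is
# scaled by 1 / sqrt(number of nodes in its own graph) before being returned.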
```
#### File: code_submission/early_stoppers/constant_stopper.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from early_stoppers import Stopper
class ConstantStopper(Stopper):
def __init__(self, max_step=800):
self._max_step = max_step
super(ConstantStopper, self).__init__()
def should_early_stop(self, train_info, valid_info):
self._cur_step += 1
return self._cur_step >= self._max_step
``` |
{
"source": "jonesy-git/pytorch-binance",
"score": 3
} |
#### File: jonesy-git/pytorch-binance/binance.py
```python
import datetime
import os
import pandas as pd
import requests
from functools import cache
class Binance(object):
_valid_intervals = ['S', 'm', 'H', 'd']
_kline_column_names = ['OpenTime', 'Open', 'High', 'Low', 'Close', 'Volume', 'CloseTime', 'QuoteAssetVolume',
'NumberOfTrades', 'TakerBuyBaseAssetVolume', 'TakerBuyQuoteAssetVolume', 'Ignore']
def __init__(self, cachedir='./cache'):
self.url_base = 'https://www.binance.com/api/v3/'
self.endpoints = {
'klines': 'klines',
'exchangeInfo': 'exchangeInfo',
}
self.cache_dir = cachedir
os.makedirs(self.cache_dir, exist_ok=True)
self.symbols_path = os.path.join(self.cache_dir, 'symbols.csv')
self.symbols = self.get_symbols()
@staticmethod
def make_query_string(**kwargs):
if kwargs:
return '?' + '&'.join([f'{key}={value}' for key, value in kwargs.items() if value is not None])
else:
return ''
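# Illustrative call with assumed values: make_query_string(symbol='ETHBTC', limit=10)
# returns '?symbol=ETHBTC&limit=10'; keyword arguments whose value is None are skipped.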
def get_klines(self, symbol, interval, time_start=None, time_end=None, limit=None):
url = self.url_base + self.endpoints['klines']
url += self.make_query_string(
symbol=symbol,
interval=interval,
startTime=time_start,
endTime=time_end,
limit=limit
)
data = self.get(url, f"Couldn't get klines for {symbol} (url={url!r}): ")
return pd.DataFrame(data, columns=self._kline_column_names).drop(columns='Ignore')
def get_symbols(self):
if os.path.isfile(self.symbols_path):
return pd.read_csv(self.symbols_path)
url = self.url_base + self.endpoints['exchangeInfo']
exchange_info = self.get(url, f"Couldn't get exchange info (url={url!r}): ")
symbol_dict = lambda s: {'name': s['symbol'], 'base': s['baseAsset'], 'quote': s['quoteAsset']}
symbols = pd.DataFrame.from_records([symbol_dict(s) for s in exchange_info['symbols']])
symbols.to_csv(self.symbols_path, index=False)
return symbols
def coins_to_symbol(self, A, B):
coins = (A, B)
match = (self.symbols.base == A) & (self.symbols.quote == B)
match |= (self.symbols.base == B) & (self.symbols.quote == A)
if match.sum() != 1:
raise SymbolNotFoundError(f"Symbol found {match.sum().item()} matches for coins {coins}")
return self.symbols[match]
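# Illustrative call (coin names are assumptions): coins_to_symbol('ETH', 'BTC') returns the single
# row of self.symbols whose base/quote pair matches in either order; any other match count raises
# SymbolNotFoundError.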
def symbol_to_coins(self, symbol):
return self.symbols.set_index('name').loc[symbol]
def assert_coin_exists(self, coin):
if coin not in self.symbols.base.values and coin not in self.symbols.quote.values:
raise CoinNotFoundError(f"Couldn't find coin: {coin!r}")
@classmethod
def check_times(cls, time_start, time_end):
time_start, time_end = as_timestamp(time_start), as_timestamp(time_end)
if not time_end > time_start:
time_start, time_end = as_datetime(time_start), as_datetime(time_end)
raise ValueError(f"{time_end = } is not greater than {time_start = }")
return time_start, time_end
def get(self, url, msg=''):
response = requests.get(url)
if response.status_code != 200:
raise requests.RequestException(msg + response.text)
return response.json()
@cache
def get_exchange(self, symbol, time_interval, **kwargs):
return Exchange(self.symbol_to_coins(symbol), time_interval, binance=self, **kwargs)
def get_fiat_exchanges(self, exchange, fiat='EUR', **kwargs):
exchange_kws = exchange.kwargs.copy()
exchange_kws.update(kwargs)
return [self.get_exchange((coin, fiat), **exchange_kws) for coin in exchange.coins]
class Exchange(object):
def __init__(self, coins, time_interval='10m', time_start=None, time_end=None, binance=None, fiat='EUR'):
self.binance = binance or Binance()
self.coins = coins
self.symbol = self.binance.coins_to_symbol(*coins)
self.coins = self.base, self.quote = self.binance.symbol_to_coins(self.symbol)
self.time_interval, self.time_start, self.time_end = time_interval, time_start, time_end
self.fiat = fiat
self.data = self.collect_data(time_interval, time_start, time_end)
def buy_batch(self, amounts, timestamp):
data = self.data[self.data.index >= timestamp]
return [self.buy(a, timestamp, data) for a in amounts]
def buy(self, amount, timestamp, data=None):
if data is None:
data = self.data[self.data.index >= timestamp]
payed = 0
for i, row in data.iterrows():
if amount > 0:
buy = min(amount, row.Volume)
payed += buy * row.AvgPrice
amount -= buy
else:
break
if amount:
raise OrderNotFilled(f'No more {self.base} to buy in {self.symbol}')
return payed
def sell(self, amount, timestamp):
data = self.data[self.data.index >= timestamp]
bought = 0
for i, row in data.iterrows():
if amount > 0:
sell = min(amount, row.QuoteAssetVolume)
bought += sell * row.AvgPrice
amount -= sell
else:
break
if amount:
raise OrderNotFilled(f'No more {self.base} to buy in {self.symbol}')
return bought
def collect_data(self, time_interval, time_start, time_end):
data = self.binance.get_klines(self.symbol.name.item(),
interval=time_interval,
time_start=time_start,
time_end=time_end)
fiat_base, fiat_quote = self.binance.get_fiat_exchanges(self, self.fiat)
data[self.base+self.fiat] = fiat_base.data[['Open', 'Close']].mean(-1)
data[self.quote+self.fiat] = fiat_quote.data[['Open', 'Close']].mean(-1)
data['AvgPrice'] = data[['Open', 'Close']].mean(-1)
return data.set_index('OpenTime')
@property
def kwargs(self):
return {'time_start': self.time_start,
'time_end': self.time_end,
'time_interval': self.time_interval,
'binance': self.binance,
'fiat': self.fiat}
def __repr__(self):
return f"{type(self).__name__}({self.symbol.base.item()!r}, {self.symbol.quote.item()!r}, " \
f"time_start={self.time_start}, time_end={self.time_end}, binance={self.binance})"
# Errors
class SymbolNotFoundError(RuntimeError):
pass
class CoinNotFoundError(RuntimeError):
pass
class OrderNotFilled(RuntimeError):
pass
def as_timestamp(dt):
if isinstance(dt, (int, float)):
return dt
return dt.timestamp() * 1000
def as_datetime(ts):
if isinstance(ts, datetime.datetime):
return ts
return datetime.datetime.fromtimestamp(ts / 1000)
```
#### File: jonesy-git/pytorch-binance/datasets.py
```python
from binance import Binance, as_timestamp
import datetime
import pandas as pd
import torch
binance = Binance()
DEFAULT_START = int(datetime.datetime(2016, 10, 23).timestamp() * 1000)
DEFAULT_END = int(datetime.datetime(2018, 10, 23).timestamp() * 1000)
DEFAULT_INTERVAL = '1d'
def get_klines(symbol, start_ts=DEFAULT_START, end_ts=DEFAULT_END, interval=DEFAULT_INTERVAL):
data = binance.get_klines(symbol, interval, time_start=start_ts)
if data.index[-1] < data.index[0]:
raise RuntimeError("Times go from present to past, expected past to present")
end = max([data.index[0], data.index[-1]])
if end < end_ts:
return pd.concat([data, get_klines(symbol, start_ts=end, end_ts=end_ts, interval=interval)])
else:
return data[data.CloseTime <= end_ts]
class ExchangeDataset(torch.utils.data.Dataset):
def __init__(self, symbol, klines, targets=None, n_klines=10):
self.symbol = symbol
self.klines = torch.Tensor(klines)
self.n_klines = n_klines
msg = f"Expected more total klines ({len(self.klines)}) than klines per timestep ({self.n_klines})"
assert(len(self.klines) > self.n_klines), msg
self.targets = targets if targets is None else torch.Tensor(targets)
msg = f"Have unequal numbers of kline windows ({len(self)}) and targets"
assert(self.targets is None or len(self.targets) == len(self)), msg
def __len__(self):
return len(self.klines) - self.n_klines
def __getitem__(self, i):
if self.targets is not None:
return self.klines[i:i+self.n_klines], self.targets[i]
else:
return self.klines[i:i+self.n_klines]
def compute_min_price_difference(klines, evaluation_window=30, position=0.5):
# Rolling minimum of the opening price over the evaluation window (assumed interpretation).
min_prices = klines['Open'].rolling(window=evaluation_window).min()
targets = (klines['Open'] - min_prices) / klines['Open'].std()
not_nan = targets.isnull() == False
return klines[not_nan], targets[not_nan]
if __name__ == "__main__":
klines = get_klines('ETHBTC', interval='1d')
klines, targets = compute_min_price_difference(klines)
ds = ExchangeDataset('ETHBTC', klines, targets)
``` |
{
"source": "Jonesywolf/You-Verify",
"score": 3
} |
#### File: Jonesywolf/You-Verify/env_utils.py
```python
from dotenv import dotenv_values
import re
def load_dotenv(filename=".env"):
print("Loading .env")
return dotenv_values(filename)
def is_val_true(env_contents, key):
return env_contents[key].lower() in ('true', '1', 'yes', 'y', 't')
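# Example with an assumed .env entry: if env_contents == {"HEADLESS_MODE": "Yes"},
# then is_val_true(env_contents, "HEADLESS_MODE") evaluates to True.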
def is_headless_mode(env_contents):
return is_val_true(env_contents, "HEADLESS_MODE")
def get_browser(env_contents):
return env_contents["BROWSER"].upper()
def get_creds(env_contents):
creds = {}
try:
creds = {
"user" : env_contents["USERNAME"],
"pass" : env_contents["PASSWORD"]
}
# Clear sensitive info
del env_contents["USERNAME"]
del env_contents["PASSWORD"]
except KeyError:
print("Missing UTORID credentials in .env")
return creds
def is_valid_gmail(gmail_addr):
return re.fullmatch(r"\b[A-Za-z0-9._%+-][email protected]\b", gmail_addr)
def load_gmail_addr(env_contents):
gmail_addr = None
try:
gmail_addr = env_contents["GMAIL"]
if not is_valid_gmail(gmail_addr):
print("Invalid gmail address in .env")
except KeyError:
print("Missing GMAIL address in .env")
return gmail_addr
def load_gmail_app_pass(env_contents):
try:
gmail_app_pass = env_contents["GMAIL_APP_PASS"]
except KeyError:
gmail_app_pass = None
print("No gmail app password detected, using keyring instead.")
return gmail_app_pass
``` |
{
"source": "JonETJakobsson/FlowSightpy",
"score": 3
} |
#### File: FlowSightpy/FlowSightpy/dataset.py
```python
def load(datasets, skiprows=3):
'''Loads and merges all datasets
datasets: a dictionary with names of datasets as keys, and file locations as values.
return: pandas.DataFrame'''
from functools import reduce
import pandas as pd
dflist = list()
for name, f in datasets.items():
df = pd.read_csv(f, sep="\t", skiprows=skiprows, index_col=0, decimal=",")
df["experiment"] = name
df["old_index"] = df.index
path = f.split("/")
path.pop()
path = "/".join(path)
df["path"] = path
dflist.append(df)
df = reduce(lambda x, y: pd.merge(x, y, how="outer"), dflist)
return df
def to_adata(df, used_channels):
'''Convert dataframe to an AnnData dataset with non flourescent channels as features.
All other channels are added as observations.
Used_channels: describe which channels tif files have been exported for. These files should be stored under data/experiment/
'''
import numpy as np
from sklearn.preprocessing import minmax_scale
import scanpy as sc
# Scale and normalize most features so they can be used with PCA
df1 = df.copy()
for column in df.columns:
if "Area" in column:
df1[column] = np.log1p(df[column])
if "Bright Detail Intensity" in column:
df1[column] = np.log1p(df[column])
if "Bkgd Mean" in column:
df1[column] = minmax_scale(df[column])
if "Contrast" in column:
df1[column] = np.log1p(df[column])
if "Length" in column:
df1[column] = np.log1p(df[column])
if "Width" in column:
df1[column] = np.log1p(df[column])
if "Height" in column:
df1[column] = np.log1p(df[column])
if "Mean" in column:
df1[column] = np.log1p(minmax_scale(df[column]))
if "Median" in column:
df1[column] = np.log1p(minmax_scale(df[column]))
# save only non-fluorescent features (M01 and M06) to characterize the cells (keep the fluorescent info for later)
col_keep = [col for col in df1.columns if ("M01" in col or "M06" in col or "Ch01" in col or "Ch06" in col) and "Intensity" not in col]
# Filter out all corrected features
df_f = df1.filter(col_keep, axis=1)
# Create adata with proper column and index + obs annotations
adata = sc.AnnData(X=df_f.values)
adata.var_names = df_f.columns
adata.obs_names = df_f.index
adata.obs = df1
adata.obs["experiment"] = df["experiment"]
adata.obs["old_index"] = df["old_index"]
for ch in used_channels:
adata.obs[ch] = [f"{e[1]['path']}/{e[1]['old_index']}_{ch}.ome.tif" for e in adata.obs.iterrows()]
return adata
``` |
{
"source": "JonETJakobsson/SpatialOmics",
"score": 2
} |
#### File: SpatialOmics/scSpatial/segmentation.py
```python
from cellpose import models
import pandas as pd
import numpy as np
from skimage import measure
from typing import Tuple
#Import only for type hinting
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .dataset import Dataset
MAX_OBJECTS_SIZE = 3000 # max size of objects image when downsampling
class Segmentation:
_id = 0
def __init__(self, dataset: "Dataset", type: str, settings: dict = dict(), objects: np.ndarray = None):
self.set_id()
self.dataset = dataset
self.objects = objects
self.type = type
self.settings = settings
self.gene_expression: pd.DataFrame = None
self.background: pd.Series = None
self.pct_mapped_genes: pd.Series = None
self.object_coverage: float = None
self.cell_types: pd.DataFrame = None
self.downsampled: Tuple[np.ndarray, float] = None # used for gene visualization of large images
self.run()
if isinstance(self.dataset.gene_expression, pd.DataFrame):
self.map_genes()
self.calculate_object_coverage()
self.calculate_object_features()
self.downsample()
self.dataset.add_segmentation(self)
def set_id(self):
"""Run to set next available ID of segmentation"""
# Set unique ID
self.id = Segmentation._id
Segmentation._id += 1
def __repr__(self):
return f"id:{self.id} type:{self.type}, settings:{self.settings}"
def run(self):
"""Algorithm used to find objects"""
pass
def calculate_object_coverage(self):
"""Calculate percent of image covered in objects"""
object_pixels = sum(sum(self.objects > 0))
self.object_coverage = object_pixels / self.objects.size
def calculate_object_features(self):
features: pd.DataFrame = pd.DataFrame(measure.regionprops_table(
self.objects,
properties=[
"label",
"centroid",
"area",
"equivalent_diameter_area"]
))
self.object_features = features
def map_genes(self):
"""map genes to segmented objects.
self.gene_expression: number of genes mapped to each cell
self.background: number of genes mapped to background
self.pct_mapped_genes: percent of individual genes mapped to cells"""
gene_map = list()
for i, gene in self.dataset.gene_expression.iterrows():
object_id = self.objects[int(gene.y), int(gene.x)]
gene_map.append((gene.gene, object_id, 1))
df = pd.DataFrame(gene_map, columns=["gene", "object_id", "value"])
df = df.pivot_table(
index="object_id", columns="gene", values="value", fill_value=0, aggfunc=sum
)
# Store genes mapping to objects
self.gene_expression = df.iloc[1:]
# Store genes mapping to background
self.background = df.iloc[0]
# Calculate percent of genes mapped to cells
self.pct_mapped_genes = self.gene_expression.sum() / (self.gene_expression.sum() + self.background)
# broadcast that genes are mapped
self.dataset.com.genes_mapped.emit()
def add_cell_types(self, cell_types: pd.DataFrame):
self.cell_types = cell_types
self.dataset.com.cell_types_changed.emit()
def downsample(self):
objects = self.objects.copy()
scale = 1.0
while objects.shape[0] > MAX_OBJECTS_SIZE or objects.shape[1] > MAX_OBJECTS_SIZE:
# half the resulotion each run
objects = objects[::2, ::2]
scale += scale
self.downsampled = (objects, scale)
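# How the stored tuple reads (assumed downstream use): `objects` is the label image subsampled by 2
# along both axes until each side fits MAX_OBJECTS_SIZE, and `scale` doubles (1 -> 2 -> 4 ...) in step
# with each halving, presumably so callers can rescale coordinates back to the original image.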
class segmentNuclei(Segmentation):
"""Segment an image based on the nuclei signal.
Stores segmentation under self.objects"""
def __init__(self, dataset, size=70, flow_threshold=0.4, mask_threshold=0):
# set attributes
self.settings = dict(
size=size, flow_threshold=flow_threshold, mask_threshold=mask_threshold
)
self.size = size
self.flow_threshold = flow_threshold
self.mask_threshold = mask_threshold
super().__init__(dataset=dataset, type="Cellpose - Nuclei")
def run(self):
model = models.Cellpose(model_type="nuclei")
masks, flows, styles, diams = model.eval(
self.dataset.images["Nuclei"],
diameter=self.size,
flow_threshold=self.flow_threshold,
mask_threshold=self.mask_threshold,
)
self.objects = masks
class segmentCytoplasm(Segmentation):
"""Segment an image based on the nuclei and cytoplasm signals.
Stores segmentation under self.objects"""
def __init__(self, dataset, size=120, flow_threshold=0.4, mask_threshold=0):
# set attributes
self.settings = dict(
size=size, flow_threshold=flow_threshold, mask_threshold=mask_threshold
)
self.size = size
self.flow_threshold = flow_threshold
self.mask_threshold = mask_threshold
super().__init__(dataset=dataset, type="Cellpose - Cytoplasm")
def run(self):
"""segment image using nuclei information"""
import numpy as np
n = self.dataset.images["Nuclei"]
c = self.dataset.images["Cytoplasm"]
# Stack nuclei and cytoplasm images into a channel image
arr = np.dstack((n, c))
model = models.Cellpose(model_type="cyto")
masks, flows, styles, diams = model.eval(
x=arr,
channels=[2, 1],
diameter=self.size,
flow_threshold=self.flow_threshold,
mask_threshold=self.mask_threshold,
)
self.objects = masks
```
#### File: scSpatial/widgets/analysis.py
```python
import plotly.express as px
import pandas as pd
import imageio
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont, QColor
from PyQt5.QtWidgets import (
QApplication,
QComboBox,
QFileDialog,
QFormLayout,
QGridLayout,
QHBoxLayout,
QLabel,
QListWidget,
QPushButton,
QSlider,
QVBoxLayout,
QWidget,
QTableWidget,
QTableWidgetItem)
from PyQt5.QtWebEngineWidgets import QWebEngineView
import sys
from ..dataset import Dataset
from ..viewer import Viewer
from ..analysis import Bonefight
class analysisWidget(QWidget):
"""Widget used to run different analysis methods.
BoneFight
"""
def __init__(self, dataset: Dataset, viewer: Viewer):
super().__init__()
self.dataset = dataset
self.viewer = viewer
self.initUI()
def initUI(self):
self.layout = QVBoxLayout(self)
self.reference_btn = QPushButton("Select a reference dataset")
self.reference_btn.clicked.connect(self.read_reference_dataset)
self.groupby_combo = QComboBox(self)
self.groupby_combo.currentTextChanged.connect(self.show_obs_example)
self.obs_example_list = QListWidget(self)
self.bonefight_btn = QPushButton("Run BoneFight analysis")
self.bonefight_btn.clicked.connect(self.run_bonefight_analysis)
self.bonefight_btn.setEnabled(False)
self.layout.addWidget(self.reference_btn)
self.layout.addWidget(QLabel("Select observation key:"))
self.layout.addWidget(self.groupby_combo)
self.layout.addWidget(self.obs_example_list)
self.layout.addWidget(QLabel("BoneFight analysis"))
self.layout.addWidget(self.bonefight_btn)
self.layout.addStretch()
self.setLayout(self.layout)
def read_reference_dataset(self):
import scanpy as sc
path = QFileDialog.getOpenFileName(self, caption="Select reference dataset")[0]
self.reference_adata = sc.read(path)
if len(self.reference_adata.obs_keys()) > 0:
self.groupby_combo.addItems(self.reference_adata.obs_keys())
else:
print("Error: reference dataset must contain at least one observation")
def show_obs_example(self, key: str):
from random import sample
self.obs_example_list.clear()
example_list = sample(list(set(self.reference_adata.obs[key])), k=10)
self.obs_example_list.addItems([str(example) for example in example_list])
self.bonefight_btn.setEnabled(True)
def run_bonefight_analysis(self):
# Instantiate the bonefight object
bf_model = Bonefight(
segmentation=self.dataset.active_segmentation,
reference=self.reference_adata,
groupby=self.groupby_combo.currentText(),
)
cell_types = bf_model.transfer_labels()
self.dataset.active_segmentation.add_cell_types(cell_types)
``` |
{
"source": "joneug/sms-gateway",
"score": 2
} |
#### File: sms_gateway/background_threads/sms_sender.py
```python
import logging
import traceback
import sms_gateway.config as config
def send_sms():
while True:
data = config.SMS_QUEUE.get()
try:
sms_message = {
'Number': data[0],
'Text': data[1],
'SMSC': {'Location': 1}
}
config.SM.SendSMS(sms_message)
logging.info(f'Successfully sent SMS message to {data[0]}')
except Exception:
logging.error(f"Error while sending SMS message '{data[1]}' to {data[0]}\n{traceback.format_exc()}")
```
#### File: sms-gateway/sms_gateway/__main__.py
```python
import argparse
import logging
import secrets
import threading
import time
from wsgiref.simple_server import make_server
import falcon
from falcon_auth import FalconAuthMiddleware, TokenAuthBackend
import sms_gateway.config as config
from .background_threads.sms_sender import send_sms
from .controllers.controller_utils import user_loader
from .controllers.sms import SMS
auth_backend = TokenAuthBackend(user_loader)
auth_middleware = FalconAuthMiddleware(auth_backend)
api = falcon.API(middleware=[auth_middleware])
sms = SMS()
api.add_route('/sms', sms)
parser = argparse.ArgumentParser()
parser.add_argument('--loglevel', type=str, help="Sets the log level.", default='INFO')
parser.add_argument('--pin', type=str, help="Sets the PIN used to unlock the modem.", default=None)
parser.add_argument('--port', type=int, help="Sets the port the web app is listening on.", default=8000)
parser.add_argument('--token', type=str, help="Sets the authentication token.", default=None)
parser.add_argument('--device', type=str, help="Sets the device to use with gammu.", default='/dev/modem')
parser.add_argument('--whitelist', type=str, help="Sets a whitelist for the recipient numbers.", default='.+')
def main():
parse_options(parser.parse_args())
unlock_modem()
# Start background thread that sends SMS
sms_sender = threading.Thread(name="SMSSender", target=send_sms)
sms_sender.setDaemon(True)
sms_sender.start()
# Start web app
with make_server('', config.PORT, api) as httpd:
logging.info(f'Serving on port {config.PORT}...')
httpd.serve_forever()
def parse_options(options):
# Log level
if config.LOGLEVEL is None:
config.LOGLEVEL = options.loglevel
config.LOGLEVEL = config.LOGLEVEL.upper()
logging.basicConfig(level=config.LOGLEVEL, format='%(asctime)s [%(levelname)s] %(message)s')
# PIN
if config.PIN is None:
config.PIN = options.pin
if config.PIN is None or len(config.PIN) == 0:
raise Exception('PIN is required to unlock the modem')
# Port
if config.PORT is None:
config.PORT = options.port
config.PORT = int(config.PORT)
# Token
if config.TOKEN is None:
config.TOKEN = options.token
if config.TOKEN is None or config.TOKEN == '':
config.TOKEN = secrets.token_urlsafe(20)
logging.warning(f"No token set - using auto-generated token '{config.TOKEN}' instead")
# Device
if config.DEVICE is None:
config.DEVICE = options.device
# Whitelist
if config.WHITELIST is None:
config.WHITELIST = options.whitelist
def unlock_modem():
# Initialize
config.SM.SetConfig(0, {'Device': config.DEVICE, 'Connection': 'at'})
config.SM.Init()
# Unlock
status = config.SM.GetSecurityStatus()
if status is None:
logging.info('Modem is already unlocked')
elif config.SM.GetSecurityStatus() == 'PIN':
logging.info('Unlocking modem...')
config.SM.EnterSecurityCode('PIN', config.PIN)
time.sleep(10)
else:
raise Exception(f"Unexpected security status '{status}'")
if __name__ == '__main__':
main()
``` |
{
"source": "joneugster/Sitzreservation",
"score": 3
} |
#### File: Sitzreservation/utils/helpers.py
```python
import os
import logging
import random
import traceback
if __name__ != "__main__":
from .html import str2ascii
import hashlib
import pathlib
import pyqrcode
from PIL import Image
import base64
def create_bill_number(name, day):
"""Create a unique bill number.
This is of the form SA1-EUG-012
Arguments:
name (str): Last name of the person.
day (str): Day of the event
"""
# a = seat.split('_')
# p1 = a[0]+'%02d'%(int(a[1]))
# Get the next higher 3 digit number.
try:
with open('data/numeration.txt', 'r+', encoding="utf-8") as f:
current_n = int(f.read())
f.seek(0)
f.write(str(current_n + 1))
count = '{:03}'.format(current_n)
except Exception:
logging.error('Could not retrieve bill number:\n%s',
traceback.format_exc())
count = str(random.randint(600, 1000))
# Get short name of the person
short = str2ascii(name).upper()[:3]
if len(short) < 3:
short += 'X'*(3-len(short))
return '%s-%s-%s'%(day, short, count)
def create_QR(file, data='nothing.'):
# Generate the qr code and save as png
qrobj = pyqrcode.create(data)
# Create folders if they don't exist
pathlib.Path(os.path.dirname(file)).mkdir(parents=True, exist_ok=True)
with open(file, 'wb') as f:
#qrobj.png(f, scale=16)
qrobj.png(f, scale=10)
# Now open that png image to put the logo
img = Image.open(file).convert("RGBA")
width, height = img.size
# How big the logo we want to put in the qr code png
logo_size = 210 #368 # 16*23
# Open the logo image
logo = Image.open('img/TVM_logo.png')
# Calculate xmin, ymin, xmax, ymax to put the logo
xmin = ymin = int((width / 2) - (logo_size / 2))
xmax = ymax = int((width / 2) + (logo_size / 2))
# resize the logo as calculated
logo = logo.resize((xmax - xmin, ymax - ymin))
# put the logo in the qr code
img.paste(logo, (xmin, ymin, xmax, ymax), logo)
img.save(file)
def certificate(s):
hasher = hashlib.sha256()
hasher.update(s.encode('utf-8'))
# len: 32 8-bit characters
plain = int.from_bytes(hasher.digest(), 'big')
# FIXME: private key
p = 1408199503 # the primes p and q are not really
q = 906199531 # needed to be part of the key
w = 212684954476439010 # w = lcm(p-1, q-1)
e = 120820088039939 # 1 < e < w, gcd(e, w) == 1
# FIXME: public key
n = 1276109729173033093
d = 197116842892907279 # d = e^(-1) (mod w)
#validate_key(p, q, n, w, e, d)
x = pow(plain, e, n)
crypted = base64.b64encode(x.to_bytes(x.bit_length()//8 +1, 'big')).decode('utf-8')
return crypted
def verify(s, certificate):
# FIXME: public key
n = 1276109729173033093
d = 197116842892907279
hasher = hashlib.sha256()
hasher.update(s.encode('utf-8'))
expected = int.from_bytes(hasher.digest(), 'big') % n
received = pow(int.from_bytes(base64.b64decode(certificate), 'big'), d, n)
print('Expected vs. received: {} - {}'.format(expected, received))
return expected == received
def validate_key(p, q, n, w, e, d):
import numpy as np
if p*q != n:
print('pq != n')
elif w != np.lcm(p-1, q-1):
print('w != lcm')
elif e*d % w != 1:
print('ed != 1 (mod w)')
else:
print('key seems fine.')
if __name__ == '__main__':
s = """Seats: sasöldfjöadsf"""
s2 = """Seats: sasöldfjöadsf"""
m = certificate(s)
print('The certificate is: {}'.format(m))
print('Validation: {}'.format(verify(s2, m)))
``` |
{
"source": "jonev/wago-demo-plc-python",
"score": 3
} |
#### File: wago-demo-plc-python/LeakDetection/datastore.py
```python
import queue
# Stores data regarding calculations for a flow transmitter
class FtData:
def __init__(self, _tagId):
self._tagId = _tagId
self.pointsOver10 = 0
self.pointsOver20 = 0
self.pointsOver30 = 0
self.queuePointsOver10 = queue.Queue()
self.queuePointsOver20 = queue.Queue()
self.queuePointsOver30 = queue.Queue()
for i in range(24):
self.queuePointsOver10.put(0)
self.queuePointsOver20.put(0)
self.queuePointsOver30.put(0)
```
#### File: wago-demo-plc-python/LeakDetection/divcalculations.py
```python
class DivCalculations:
def __init__(self):
pass
@staticmethod
def avgValue(samples, value_column_nr):
if len(samples) < 1:
return 0
sample_sum = 0
for sample in samples:
sample_sum = sample_sum + sample[value_column_nr]
avg_value = sample_sum / len(samples)
return avg_value
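# Worked example (sample rows are assumptions): avgValue([(0, 2.0), (1, 4.0)], 1) returns 3.0,
# the mean of column index 1 across the given samples.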
```
#### File: OPCUA_MQTT_link/PLC/__main__.py
```python
from opcua import Client, ua, Node
import paho.mqtt.client as mqtt
from threading import Thread, Lock
from dotenv import load_dotenv
from OPCUA_MQTT_link.utils import (
getTagname,
getNewHash,
setTimestamp,
)
import time
import datetime
import json
import os
import logging
import copy
def buildNodeTree(pObject, nodeStore, OpcNodes):
for node in OpcNodes:
children = node.get_children()
tagname = getTagname(node)
if len(children) == 0:
if tagname.startswith("_"):
pObject[tagname] = node.get_value()
else:
nodeStore[tagname] = node
pObject[tagname] = node.get_value()
else:
nodeStore[tagname] = {}
pObject[tagname] = {}
buildNodeTree(pObject[tagname], nodeStore[tagname], children)
def getValuesFromNodes(pObject, nodeStore):
for tagname, pObje in pObject.items():
if tagname.startswith("_"):
continue
if type(pObje) is dict:
getValuesFromNodes(pObje, nodeStore[tagname])
else:
pObject[tagname] = nodeStore[tagname].get_value()
def setValuesToNodes(pObject, nodeStore):
for tagname, pObje in pObject.items():
if tagname.startswith("_"):
continue
if type(pObje) is dict:
setValuesToNodes(pObje, nodeStore[tagname])
else:
value = pObje
node = nodeStore[tagname]
if type(value) is str:
node.set_value(value)
elif type(value) is bool:
node.set_value(value)
elif type(value) is float:
node.set_value(value, varianttype=ua.VariantType.Float)
elif type(value) is int:
node.set_value(value, varianttype=ua.VariantType.Int16)
else:
raise Exception("PLC: Type not found")
def on_mqtt_connect(client, userdata, flags, rc):
logging.info("PLC: MQTT Connected with result code " + str(rc))
client.subscribe(mqttTopicSubscribeData)
def on_mqtt_disconnect(client, userdata, rc):
logging.exception("PLC: MQTT disconnecting: " + str(userdata) + ", " + str(rc))
def on_received_mqtt_message(client, userdata, msg):
global hashsLock, hashs, nodes, tags
receivedObject = json.loads(str(msg.payload, encoding="utf-8"))
# If the plc receives an empty object, it sends an object with values in return, immediately
tagname = receivedObject["_tagId"]
if receivedObject["_type"] == "":
logging.warning("PLC: HMI is missing data and requesting: " + tagname)
pObject = tags[tagname]
getValuesFromNodes(pObject, nodes[tagname])
newHash = getNewHash(pObject)
setTimestamp(pObject)
# Publish and save hash
mqttClient.publish(mqttTopicPublishData, payload=json.dumps(pObject))
with hashsLock:
hashs[tagname] = newHash
return
# Generate new hash
logging.warning("PLC: HMI is sending CMD: " + tagname)
newHash = getNewHash(receivedObject)
# Store hash
with hashsLock:  # Sending data only on change, therefore no need to check for change
hashs[receivedObject["_tagId"]] = newHash
# Write to opc ua by setting the children recursive
setValuesToNodes(receivedObject, nodes[tagname])
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s %(levelname)-8s %(message)s",
level=logging.WARNING,
datefmt="%Y-%m-%d %H:%M:%S",
)
logging.info("PLC: Starting OPC-UA - MQTT link")
load_dotenv()
# Global variables
tags = {}
nodes = {}
hashs = {} # Storing a hash of the object to be able to compare two objects fast
hashsLock = Lock() # hashs are used in multiple threads
publishLoopWaitTime = int(os.getenv("WAIT_TIME")) # For testing
## Opc UA
opcUaServer = os.getenv("OPC_UA_SERVER") # Host of docker "192.168.0.15"
opcUaServerUsername = os.getenv("OPC_UA_SERVER_USERNAME")
opcUaServerPassword = os.getenv("OPC_UA_SERVER_PASSWORD")
opcUaNs = int(os.getenv("OPC_UA_NS")) # Address
opcUaIdPrefix = os.getenv("OPC_UA_ID_PREFIX")
opcUaIdCounter = os.getenv("OPC_UA_ID_STATUS_COUNTER")
opcUaIdLastRun = os.getenv("OPC_UA_ID_STATUS_LAST_RUN")
opcUaIdRestartCmd = os.getenv("OPC_UA_ID_STATUS_RESTART")
## MQTT
mqttBroker = os.getenv("MQTT_BROKER")
mqttPort = int(os.getenv("MQTT_PORT"))
mqttTopicPublishData = os.getenv("MQTT_TOPIC_PUBLISHDATA")
mqttTopicSubscribeData = os.getenv("MQTT_TOPIC_SUBSCRIBEDATA")
mqttPublishPvSuffix = os.getenv(
"MQTT_PUBLISH_PV_SUFFIX"
) # Published every sample, other tags are published on data change
logging.info(
"PLC: Evn: OpcUaServer: "
+ opcUaServer
+ ", OpcUaIdPrefix"
+ opcUaIdPrefix
+ ", MqttBroker: "
+ mqttBroker
)
# OPC UA
opcClient = Client("opc.tcp://" + opcUaServer + ":4840", timeout=3)
opcClient.set_user(opcUaServerUsername)
opcClient.set_password(opcUaServerPassword)
# MQTT
mqttClient = mqtt.Client()
mqttClient.on_connect = on_mqtt_connect
mqttClient.on_disconnect = on_mqtt_disconnect
mqttClient.on_message = on_received_mqtt_message
# Ensure disconnecting on program close
try:
# Tries to reconnect every 10 seconds
logging.warning("PLC: Waiting 10s for e!cockpit to start")
time.sleep(10)
while True:
try:
logging.info("PLC: Connecting to Opc.")
opcClient.connect()
if opcClient is None:
raise Exception("PLC: Opc connection failed")
logging.info("PLC: OPC Connected")
nodesUnderPrefix = opcClient.get_node("ns=" + str(opcUaNs) + ";s=" + opcUaIdPrefix)
# Building tag and opc node trees, for better performance on publishing data
topLevelOpcNodes = nodesUnderPrefix.get_children()
if len(topLevelOpcNodes) == 0:
raise Exception("PLC: No tags where found")
for topLevelOpcNode in topLevelOpcNodes:
tagname = getTagname(topLevelOpcNode)
tags[tagname] = {}
tags[tagname]["_timestamp"] = {}
nodes[tagname] = {}
buildNodeTree(tags[tagname], nodes[tagname], topLevelOpcNode.get_children())
logging.info("PLC: Connecting to MQTT broker.")
mqttClient.connect(mqttBroker, mqttPort, 60)
mqttThread = Thread(target=mqttClient.loop_forever, args=())
mqttThread.start()
logging.info("PLC: Waiting 2s for MQTT to connect...")
time.sleep(2) # MQTT need time to connect
loopCounter = 0
# Read data from OPC UA and Publish data to MQTT loop
while True:
publishLoopStarttime = time.time()
loopCounter = loopCounter + 1
# OPC UA Nodes are at start -> need for restart if there are new nodes
for tagname, pObject in tags.items():
# Building python object, then converting to json before sending
getValuesFromNodes(pObject, nodes[tagname])
# Publishing
if mqttPublishPvSuffix in tagname:
setTimestamp(pObject)
mqttClient.publish(mqttTopicPublishData, payload=json.dumps(pObject))
else:
newHash = getNewHash(pObject)
setTimestamp(pObject)
if tagname in hashs:
if hashs[tagname] != newHash:
# Publish and save hash
mqttClient.publish(
mqttTopicPublishData, payload=json.dumps(pObject)
)
with hashsLock: # Threadsafe
hashs[tagname] = newHash
else:
# Publish
mqttClient.publish(
mqttTopicPublishData, payload=json.dumps(pObject)
)
# Tagname does not exist in hashs
# Save hash
with hashsLock: # Threadsafe
hashs[tagname] = newHash
logging.warning(
"Publish loop used [s]: " + str((time.time() - publishLoopStarttime))
)
node = opcClient.get_node("ns=" + str(opcUaNs) + ";s=" + opcUaIdCounter)
node.set_value(loopCounter, varianttype=ua.VariantType.Int16)
node = opcClient.get_node("ns=" + str(opcUaNs) + ";s=" + opcUaIdLastRun)
node.set_value(time.ctime())
restartCmdNode = opcClient.get_node(
"ns=" + str(opcUaNs) + ";s=" + opcUaIdRestartCmd
)
if restartCmdNode.get_value():
restartCmdNode.set_value(False)
raise Exception("PLC: Restart demanded from e!Cockpit environment")
time.sleep(publishLoopWaitTime)
except Exception:
logging.exception("PLC: Exception in connection loop.")
finally:
logging.warning("PLC: Disconnecting, then reconnecting after 10s.")
time.sleep(10)
try:
if opcClient is not None:
opcClient.disconnect()
except:
logging.exception("PLC: Exception when trying to disconnect OPC")
try:
mqttClient.disconnect()
except:
logging.exception("PLC: Exception when trying to disconnect MQTT")
except:
logging.exception("PLC: Exception in main.")
finally:
logging.warning("PLC: OPC UA - MQTT link is shutting down.")
```
#### File: wago-demo-plc-python/SimulationProgram/battery_level.py
```python
from yr.libyr import Yr
from random import randint
import random
import json
import time
class BatteryLevel:
def __init__(self):
self.solar_effect = [100.0, 100.0, 100.0] # Watt
self.solar_voltage = [24.0, 24.0, 24.0] # Voltage
self.battery_voltage = [24.0, 24.0, 24.0] # Voltage
self.battery_amphour = [20.0, 20.0, 20.0] # Ah
self.battery_start_level = [14.44, 12.32, 17.23] # Ah
self.solar_weather_factor = [1.0, 0.8, 0.3, 0.15, 0.1, 0.1]
self.battery_output = [1.0, 1.0, 1.0]
self.solar_panel_output = [0, 0, 0]
self.seconds_one_hour = 86400  # note: 86400 s corresponds to 24 h
self.sampling_time_sec = 2.4
self.total_samples_one_hour = self.seconds_one_hour / self.sampling_time_sec
self.battery_level = self.battery_start_level
self.symbol_equal_sun = ["Clear sky", "Fair"] # 0
self.symbol_equal_partly_cloudy = ["Partly cloudy"] # 1
self.symbol_equal_cloudy = ["Fog", "Cloudy"] # 2
self.symbol_equal_rain = [
"Light rain",
"Light rain showers", # NEW
"Light sleet",
"Light sleet showers",
"Light rain showers and thunder",
"Light sleet showers and thunder",
"Light rain and thunder",
"Light sleet and thunder",
"Rain",
"Rain showers",
"Sleet",
"Sleet showers",
"Rain showers and thunder",
"Sleet showers and thunder",
"Rain and thunder",
"Sleet and thunder",
] # 3
self.symbol_equal_storm = [
"Heavy rain showers",
"Heavy rain",
"Heavy sleet",
"Heavy sleet showers",
"Heavy rain showers and thunder",
"Heavy sleet showers and thunder",
"Heavy rain and thunder",
"Heavy sleet and thunder",
] # 4
self.symbol_equal_snow = [
"Light snow",
"Snow",
"Heavy snow",
"Light snow showers",
"Snow showers",
"Heavy snow showers",
"Light snow showers and thunder",
"Snow showers and thunder",
"Heavy snow showers and thunder",
"Light snow and thunder",
"Snow and thunder",
"Heavy snow and thunder",
] # 5
self.weather_symbol_list = [
self.symbol_equal_sun, # 0
self.symbol_equal_partly_cloudy, # 1
self.symbol_equal_cloudy, # 2
self.symbol_equal_rain, # 3
self.symbol_equal_storm, # 4
self.symbol_equal_snow, # 5
]
def importForcastTodayFromYrInJson(self, place_string):
"""Import today's forecast from Yr.no as a json file.
Parameters:
place_string (string):
The place to get the forecast from.
Format: "Land/Fylke/Kommune/Stedsnavn/"
Example: "Norge/Trøndelag/Trondheim/Trondheim/"
Returns:
dict: the current forecast entry (weather_now)
"""
weather_import = Yr(location_name=place_string)
weather_now_json = weather_import.now(as_json=True)
weather_now = json.loads(weather_now_json)
print(weather_now)
return weather_now
def symbolStringToInt(self, symbol_string):
"""Change a symbol to a symbol number. Number given from list in init.
Parameters:
symbol_string (str):
Returns:
symbol_number (int):
"""
symbol_number = None
for idx, weather_type in enumerate(
self.weather_symbol_list
): # Search list in weather_symbol_list to find weather type
if symbol_string in weather_type:
symbol_number = (
idx # If found. Symbol number is index of the list in weather_symbol_list
)
if symbol_number is None:
symbol_number = "Not found" # If not found, symbol number is "Not found". TODO: change this to 3 in case the weather type does not exist.
return symbol_number
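# Example drawn from the lists above: symbolStringToInt("Cloudy") returns 2, because "Cloudy"
# sits in symbol_equal_cloudy, the third entry (index 2) of weather_symbol_list.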
def solarPanelOutput(self, symbol_number):
"""Calculate the solar panel output for a given weather type.
Parameters:
symbol_number (int)
Returns:
solar_panel_output (list):
List of solar panel output.
Each index in the list indicates Amps from the solar panel in one module.
"""
for module_number in range(0, len(self.battery_voltage)):
self.solar_panel_output[module_number] = (
self.solar_weather_factor[symbol_number] * self.solar_effect[module_number]
) # Find the effect from solarpanel given by weathertype
self.solar_panel_output[module_number] = (
self.solar_panel_output[module_number] / self.solar_voltage[module_number]
) # Find output from solarpanel in Amps
self.solar_panel_output[module_number] = round(
self.solar_panel_output[module_number] * random.uniform(0.95, 1.05), 3
) # To get a slightly varied value we multiply the result with a randomized factor
return self.solar_panel_output
def batteryOutput(self):
"""The battery output is already calculated and set as a parameter in init.
Parameters:
Returns:
battery_output (list):
List of battery output.
Each index in the list indicates Amps out from the battery in one module.
"""
for module_number in range(0, len(self.battery_voltage)):
self.battery_output[module_number] = round(
self.battery_output[module_number] * random.uniform(0.95, 1.05), 3
) # To get a slightly varied value we multiply the result with a randomized factor
return self.battery_output
def calculateBatteryLevel(self, solar_panel_output, battery_output):
"""Calculate the battery level from the given output and input.
Parameters:
solar_panel_output (list)
battery_output (list)
Returns:
battery_level (list):
Each index in the list indicates Ah in the battery in a module.
"""
for module_number in range(0, len(self.battery_voltage)):
self.battery_level[module_number] = round(
self.battery_level[module_number]
+ (solar_panel_output[module_number] - battery_output[module_number])
/ self.total_samples_one_hour,
3,
) # Have to divide by total_samples_one_hour so we get one Ah after the program has run as many times as total_samples_one_hour, since the input and output are in Amps and not Ah.
if self.battery_level[module_number] < 0:
self.battery_level[module_number] = 0
elif self.battery_level[module_number] > self.battery_amphour[module_number]:
self.battery_level[module_number] = self.battery_amphour[module_number]
# Ensure that the battery level doesn't go over the max or under the min value.
return self.battery_level
def mqttSend(self, solar_panel_output, battery_output, battery_level):
MQTT_send = {
"CI01": round(solar_panel_output[0], 2),
"CO01": round(battery_output[0], 2),
"BL01": round(battery_level[0], 2),
"CI02": round(solar_panel_output[1], 2),
"CO02": round(battery_output[1], 2),
"BL02": round(battery_level[1], 2),
"CI03": round(solar_panel_output[2], 2),
"CO03": round(battery_output[2], 2),
"BL03": round(battery_level[2], 2),
}
return MQTT_send
def getBatteryLevelValues(self):
""" Main for BatteryLevel.
Returns:
MQTT_send (dict):
"""
imported_weather = self.importForcastTodayFromYrInJson(
"Norge/Trøndelag/Trondheim/Trondheim/"
)
weather_symbol = imported_weather["symbol"]["@name"]
# print(weather_symbol)
weather_number = self.symbolStringToInt(weather_symbol)
# print(weather_number)
solar_panel_output = self.solarPanelOutput(weather_number)
# print("Solar" + str(solar_panel_output))
battery_output = self.batteryOutput()
# print("Battery" + str(battery_output))
battery_level = self.calculateBatteryLevel(solar_panel_output, battery_output)
# print("LEvel" + str(battery_level))
return self.mqttSend(solar_panel_output, battery_output, battery_level)
```
#### File: wago-demo-plc-python/SimulationProgram/DbClient.py
```python
import mysql.connector
import logging
from datetime import datetime
class DbClient:
def __init__(self):
# Constants - Database setup
self.__dbName = "processvalues"
self.__flowValueTableName = "SignalAnalogHmiPv"
self.__flowValueTableFormat = "(id INT AUTO_INCREMENT PRIMARY KEY, _tagId VARCHAR(124), metric VARCHAR(3), timestamp DATETIME(6), Output_Pv FLOAT)"
self.__flowValueTableInsert = (
"INSERT INTO "
+ self.__flowValueTableName
+ " (_tagId, metric, timestamp, Output_Pv) VALUES (%s, %s, %s, %s)"
)
# Connection to database server
logging.info("Connecting to db host")
self.__db = mysql.connector.connect(host="db", user="root", passwd="<PASSWORD>",)
self.__cursor = self.__db.cursor()
logging.info("Creating tables if not exist")
self.__cursor.execute(
"SELECT * FROM information_schema.tables WHERE table_name='"
+ self.__flowValueTableName
+ "'"
)
tables = self.__cursor.fetchall()
if len(tables) == 0:
logging.info("Creating table " + self.__flowValueTableName)
self.__cursor.execute(
"CREATE TABLE "
+ self.__dbName
+ "."
+ self.__flowValueTableName
+ " "
+ self.__flowValueTableFormat
)
else:
logging.info("Table " + self.__flowValueTableName + " already exist")
# Connect to database
logging.info("Connecting to db " + self.__dbName)
self.__db = mysql.connector.connect(
host="db", user="root", passwd="<PASSWORD>", database=self.__dbName
)
self.__cursor = self.__db.cursor()
logging.info("DbClient ready")
def insertFlowValuesBatch8DifferentTags(self, tags, values, datetimestamp):
val = [
(tags[0], "na", datetimestamp, values[0]),
(tags[1], "na", datetimestamp, values[1]),
(tags[2], "na", datetimestamp, values[2]),
(tags[3], "na", datetimestamp, values[3]),
(tags[4], "na", datetimestamp, values[4]),
(tags[5], "na", datetimestamp, values[5]),
(tags[6], "na", datetimestamp, values[6]),
]
self.__cursor.executemany(self.__flowValueTableInsert, val)
self.__db.commit()
def deleteDataOlderThan(self, tableName, olderThan):
q = "DELETE FROM " + tableName + " WHERE timestamp < %s"
logging.info("DeleteDataOlderThan: " + q)
self.__cursor.execute(q, (olderThan,))
self.__db.commit()
return self.__cursor.rowcount
```
#### File: wago-demo-plc-python/SimulationProgram/yrForecastToHmi.py
```python
import paho.mqtt.client as mqtt
from threading import Thread
from yr.libyr import Yr
import json
import time
import datetime
class YrForecastToHmi:
"""
Using json file from yr.no to find forecast for 8 days.
https://github.com/wckd/python-yr
"""
def __init__(self):
self.periode_list = []
self.from_list = []
self.to_list = []
self.symbol_list = []
self.symbol_number_list = []
self.temp_list = []
self.rain_list = []
self.weekday_list = []
self.date_list = []
self.symbol_equal_sun = ["Clear sky", "Fair"] # 0
self.symbol_equal_partly_cloudy = ["Partly cloudy"] # 1
self.symbol_equal_cloudy = ["Fog", "Cloudy"] # 2
self.symbol_equal_rain = [
"Light rain",
"Light rain showers", # NEW
"Light sleet",
"Light sleet showers",
"Light rain showers and thunder",
"Light sleet showers and thunder",
"Light rain and thunder",
"Light sleet and thunder",
"Rain",
"Rain showers",
"Sleet",
"Sleet showers",
"Rain showers and thunder",
"Sleet showers and thunder",
"Rain and thunder",
"Sleet and thunder",
] # 3
self.symbol_equal_storm = [
"Heavy rain showers",
"Heavy rain",
"Heavy sleet",
"Heavy sleet showers",
"Heavy rain showers and thunder",
"Heavy sleet showers and thunder",
"Heavy rain and thunder",
"Heavy sleet and thunder",
] # 4
self.symbol_equal_snow = [
"Light snow",
"Snow",
"Heavy snow",
"Light snow showers",
"Snow showers",
"Heavy snow showers",
"Light snow showers and thunder",
"Snow showers and thunder",
"Heavy snow showers and thunder",
"Light snow and thunder",
"Snow and thunder",
"Heavy snow and thunder",
] # 5
self.weather_symbol_list = [
self.symbol_equal_sun, # 0
self.symbol_equal_partly_cloudy, # 1
self.symbol_equal_cloudy, # 2
self.symbol_equal_rain, # 3
self.symbol_equal_storm, # 4
self.symbol_equal_snow, # 5
]
self.flag = False
def importForcastLongFromYrInJson(self, place_string):
"""Import the 8-day forecast from Yr.no as a json file.
Parameters:
place_string (string):
The place to get the forecast from.
Format: "Land/Fylke/Kommune/Stedsnavn/"
Example: "Norge/Trøndelag/Trondheim/Trondheim/"
Returns:
generator: forecast
"""
self.json_yr_weather = Yr(location_name=place_string)
forecast = self.json_yr_weather.forecast(as_json=True)
return forecast
def formatDataFromYrToJson(self, forecast_from_yr):
"""Find the usefull information in the json file and add the information to the right list.
Parameters:
forecast_from_yr (generator):
Use for loop to loop through the generator.
Returns:
formated_data_list (list):
List of lists with formated data
"""
for forecast in forecast_from_yr:
forecast = json.loads(forecast)
periode = int(forecast["@period"])
from_time = forecast["@from"]
to_time = forecast["@to"]
symbol = forecast["symbol"]["@name"]
temp = forecast["temperature"]["@value"]
rain = forecast["precipitation"]["@value"]
dateStr, weekday = self.jsonDateToWeekday(from_time)
symbol_number = self.symbolStringToInt(symbol)
if periode == 0 and self.flag == False: # Find the first new period to set as new day.
self.flag = True
if self.flag == True and periode == 2: # Using only period 2 for each day. 12:00-18:00.
self.from_list.append(from_time)
self.to_list.append(to_time)
self.symbol_list.append(symbol)
self.symbol_number_list.append(symbol_number)
self.temp_list.append(temp)
self.rain_list.append(rain)
self.weekday_list.append(weekday)
self.date_list.append(dateStr)
formated_data_list = [
self.from_list,
self.to_list,
self.symbol_list,
self.symbol_number_list,
self.temp_list,
self.rain_list,
self.weekday_list,
self.date_list,
]
return formated_data_list
def jsonDateToWeekday(self, datetimestamp):
"""Change a date to a weekday in number. Mon:0, Tue:1, Wed:2, Thu:3, Fri:4, Sat:5, Sun:6
Parameters:
datetimestamp (str):
Format_YYYY-MM-DDTHH:MM:SS
Returns:
weekday (int):
"""
dateStr = datetimestamp.split("T")[0] # Split date from "dateTime"
year, month, day = (int(x) for x in dateStr.split("-")) # Convert date to right format
weekday = datetime.date(year, month, day).weekday() # Find weekday
return dateStr, weekday
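    # Illustrative call (not in the original source): for a yr.no timestamp of
    # "2020-08-13T12:00:00" this returns ("2020-08-13", 3), since 2020-08-13 was
    # a Thursday and Python's weekday() numbers Monday as 0.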
def symbolStringToInt(self, symbol_string):
"""Change a symbol to a symbol number. Number given from list in init.
Parameters:
symbol_string (str):
Returns:
symbol_number (int):
"""
symbol_number = None
for idx, weather_type in enumerate(
self.weather_symbol_list
): # Search list in weather_symbol_list to find weather type
if symbol_string in weather_type:
symbol_number = (
idx # If found. Symbol number is index of the list in weather_symbol_list
)
        if symbol_number is None:
            symbol_number = "Not found"  # If not found, the symbol number is "Not found". TODO: change this to 3 in case the weather type is not found.
return symbol_number
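    # Illustrative mapping based on the lists defined in __init__ (not in the
    # original source): symbolStringToInt("Fair") -> 0, symbolStringToInt("Fog") -> 2,
    # symbolStringToInt("Heavy rain") -> 4; unknown names return "Not found".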
def mqttSend(self, formated_data):
"""Find the usefull information from list to send with MQTT
Parameters:
formated_data (list):
Returns:
MQTT_send (dict):
"""
symbol_number_list = formated_data[3]
temp_list = formated_data[4]
rain_list = formated_data[5]
weekday_list = formated_data[6]
MQTT_send = {
"day1weekday": weekday_list[0],
"day1symbol": symbol_number_list[0],
"day1temp": temp_list[0],
"day1rain": rain_list[0],
"day2weekday": weekday_list[1],
"day2symbol": symbol_number_list[1],
"day2temp": temp_list[1],
"day2rain": rain_list[1],
"day3weekday": weekday_list[2],
"day3symbol": symbol_number_list[2],
"day3temp": temp_list[2],
"day3rain": rain_list[2],
}
print(MQTT_send)
return MQTT_send
def getForecast(self):
""" Main for forcast.
Returns:
MQTT_send (dict):
"""
imported_data = self.importForcastLongFromYrInJson("Norge/Trøndelag/Trondheim/Trondheim/")
formated_data = self.formatDataFromYrToJson(imported_data)
send_data = self.mqttSend(formated_data)
return send_data
```
#### File: wago-demo-plc-python/test/simulatedObjects_test.py
```python
import unittest
from ddt import ddt, data, unpack
from SimulationProgram.simulatedObjects import Water, RainForcast, WaterDistributionPipes
@ddt
class SimulatedObjectsTests(unittest.TestCase):
@data((5, 60, 1000.0 * 1000.0, 100.0, (100.0 / 3) * 2))
@unpack
def test_water_levels(
self, sampletime_s, oneDayIsSimulatedTo_s, area_m2, hightMax_m, expectedHight_m
):
w = Water(sampletime_s, oneDayIsSimulatedTo_s, area_m2, hightMax_m)
self.assertAlmostEqual(expectedHight_m, w.getWaterLevel_m(), places=3)
expectedHight_percent = (w.getWaterLevel_m() / hightMax_m) * 100.0
self.assertAlmostEqual(expectedHight_percent, w.getWaterLevel_percent(), places=3)
@data((0, 0.0), (25.0, 41666.6666), (50.0, 83333.3333), (100.0, 166666.6666))
@unpack
def test_water_emissionValve(self, opening, expectedFlow):
w = Water(5.0, 60.0, 1000.0 * 1000.0, 100.0)
flow = w.emissionValve_percent_ToFlow_m3_per_s(opening)
self.assertAlmostEqual(expectedFlow, flow, places=3)
``` |
{
"source": "joneygupta/Data-Structure-with-Python",
"score": 4
} |
#### File: Data-Structure-with-Python/Matrix/SpiralPrint.py
```python
def print_spiral(mat):
row, col = len(mat), len(mat[0])
k = 0
l = 0
    while k < row and l < col:
        # print the top row of the remaining matrix, left to right
        for i in range(l, col):
            print(mat[k][i], end=' ')
        k += 1
        # print the right column of the remaining matrix, top to bottom
        for i in range(k, row):
            print(mat[i][col-1], end=' ')
        col -= 1
        # print the bottom row of the remaining matrix, right to left
        if k < row:
            for i in reversed(range(l, col)):
                print(mat[row-1][i], end=' ')
            row -= 1
        # print the left column of the remaining matrix, bottom to top
        if l < col:
            for i in reversed(range(k, row)):
                print(mat[i][l], end=' ')
            l += 1
a = [[1, 2, 3, 4, 5, 6],
[7, 8, 9, 10, 11, 12],
[13, 14, 15, 16, 17, 18]
]
print_spiral(a)
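# Expected output: 1 2 3 4 5 6 12 18 17 16 15 14 13 7 8 9 10 11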
``` |
{
"source": "JoneYu45/Global_investigation",
"score": 3
} |
#### File: Global_investigation/Function/local_functions_20200813.py
```python
import numpy as np
from sklearn.linear_model import ElasticNetCV
import pandas as pd
import time
from joblib import Parallel, delayed
import multiprocessing
#Define functions
def make_weights(E_dist, theta):
w = [np.exp(-1 * theta * E_dist[i] / np.mean(E_dist)) for i in range(E_dist.shape[0])]
return w
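# Note on the weighting scheme (added explanation, not in the original source):
# make_weights is the exponential locality kernel used in S-map style regression,
# w_i = exp(-theta * d_i / mean(d)). With theta = 0 every point gets weight 1
# (an ordinary global fit); larger theta down-weights distant points, e.g. a
# point at twice the mean distance receives weight exp(-2 * theta).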
def weight_data(sub_block, w):
wp = np.empty(shape=sub_block.shape)
for i in range(sub_block.shape[0]):
for j in range(sub_block.shape[1]):
wp[i, j] = sub_block[i, j] * w[i]
return wp
def select_frequent_dominate_genera(input, dominate_threshold, zero_frequency_threshold, select):
# Process data
rawdata = pd.read_csv(input).iloc[:, 1:]
abundance = rawdata
# Calculate the relative abundance profile
read_num = np.sum(rawdata, axis=1)
for i in range(rawdata.shape[0]):
abundance.iloc[i, :] = rawdata.iloc[i, :] / read_num[i] * 100
# Process or not
if select == True:
        # Select the most frequent and dominant genera
dominate = np.where(np.mean(abundance, axis=0) > dominate_threshold)
wanted_abundance = abundance.iloc[:, dominate[0]]
frequency = np.where(
(wanted_abundance == 0).astype(int).sum(axis=0) / abundance.shape[0] * 100 < zero_frequency_threshold)
wanted_abundance = wanted_abundance.iloc[:, frequency[0]]
else:
wanted_abundance = abundance
#Output selection
return wanted_abundance
def Elastic_net_fitting(block, target_otu, interest_otu, theta, train_len, cv, iteration, l_grid, output_dir):
##Select data and fitting
print('Start fitting.')
lib = range(block.shape[0])
coefs = np.empty(shape=(block.shape[0], block.shape[1] - 1))
fit_results = np.empty(shape=(block.shape[0], 13))
for ipred in lib:
print('\r', 'Complete percentage: %.2f%%' % (ipred / len(lib) * 100), end="", flush=True)
sub_block = np.delete(block, ipred, axis=0)
q = block[lib[ipred], :]
###Calculate weights
E_dist = np.sqrt(np.sum(np.array(sub_block[:, 1:] - q[:, 1:]) ** 2, axis=1))
w = make_weights(E_dist, theta)
###Weighted predictors and responses
X_wp = weight_data(sub_block[:, 1:], w)
Y_wp = np.ravel(weight_data(sub_block[:, 0], w))
X_target = block[ipred, 1:]
Y_target = block[ipred, 0]
##Split training and test data
pick_test = np.random.choice(range(X_wp.shape[0]), size=train_len, replace=False)
X_train = np.append(np.delete(X_wp, pick_test, axis=0), X_target, axis=0)
X_test = X_wp[pick_test, :]
Y_train = np.append(np.delete(Y_wp, pick_test, axis=0), Y_target)
Y_test = Y_wp[pick_test]
###Fit function
regr = ElasticNetCV(cv=cv, random_state=0, max_iter=iteration,
l1_ratio=[(i + 1) * l_grid for i in range(int(1 / l_grid))])
regr.fit(X_train, Y_train)
rmse = np.sqrt(np.mean((regr.predict(X_train) - Y_train) ** 2))
rmse_o = np.sqrt(np.mean((regr.predict(X_test) - Y_test) ** 2))
coefs[ipred, :] = regr.coef_
fit_results[ipred, :] = regr.intercept_, regr.alpha_, regr.l1_ratio_, rmse, np.std(Y_train), rmse_o, np.std(
Y_test), regr.score(X_test, Y_test), regr.score(X_train, Y_train), max(Y_train), min(Y_train), max(
Y_test), min(Y_test)
print('\r', 'Complete percentage: %.2f%%' % ((ipred + 1) / len(lib) * 100), end="", flush=True)
# Output results
coefs = pd.DataFrame(data=coefs)
coefs.to_csv('/'.join([output_dir,'coefs/%s_%s_%s_fit_results.csv' % (interest_otu, target_otu, theta)]))
fit_results = pd.DataFrame(
columns=['Intercept', 'Best alpha', 'Best l1_ratio', 'RMSE', 'Std', 'RMSE_o', 'Std_o', 'Test set score',
'Test set score_train', 'ymax_train', 'ymin_train', 'ymax_test', 'ymin_test'],
data=fit_results)
fit_results.to_csv('/'.join([output_dir,'fit_result/%s_%s_%s_fit_results.csv' % (interest_otu, target_otu, theta)]))
``` |
{
"source": "Joneyviana/todolist-django-angular",
"score": 3
} |
#### File: dashboard/tests/test_models.py
```python
from test_plus.test import TestCase
from .factories import *
from ..models import Task
from ..models import Plan
class TestTask(TestCase):
def setUp(self):
#self.plan = Plan.objects.create(name="my Plan")
#self.task = Task.objects.create(description="my Task",plan=self.plan)
self.task = TaskFactory.create()
def test__str__(self):
self.assertEqual(
self.task.description,
"my Task" # This is the default username for self.make_user()
)
def test_with_plan(self):
self.assertTrue(isinstance(self.task.plan,Plan))
```
#### File: dashboard/tests/test_views.py
```python
from django.test import RequestFactory
from test_plus.test import TestCase
from django.core.urlresolvers import reverse
from django.test import Client
from ..views import TemplatePlanView
from .factories import *
from rest_framework import status
from rest_framework.test import APITestCase, APIClient
from ..models import Plan, Task
from todolist.users.models import User
class TemplatePlanViewTest(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.request = self.factory.get(reverse('dashboard:home'))
def test_get_with_user(self):
self.user = self.make_user()
self.request.user = self.user
self.plan = Plan.objects.create(user=self.user, name="My Plan")
response = TemplatePlanView.as_view()(self.request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.template_name[0], "tasks/plan_list.html")
def test_get_without_user(self):
with self.assertRaisesMessage(AttributeError, "'WSGIRequest' object has no attribute 'user'"):
response = TemplatePlanView.as_view()(self.request)
class CreatePlanTest(APITestCase):
def testCreatePlan(self):
self.user = User.objects.create(username="Jão")
self.user.save()
self.client = APIClient()
self.client.force_authenticate(user=self.user)
url = reverse('dashboard:api-list')
data = {"name":"nomeQualquer","tasks":[]}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Plan.objects.count(), 1)
self.assertEqual(Plan.objects.get().name, 'nomeQualquer')
class PlanTests(APITestCase):
def setUp(self):
self.user = User.objects.create(username="Jão")
self.user.save()
self.client = APIClient()
self.client.force_authenticate(user=self.user)
self.plan = Plan.objects.create(name="nomeQualquer",user=self.user)
self.plan.save()
def testRetrievePlan(self):
url = reverse('dashboard:api-list')
response = self.client.get(url,format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
content = '[{"id":%s,"user":%s,"name":"nomeQualquer","tasks":[]}]'%(self.plan.id,self.plan.user.id)
self.assertEqual(response.content.decode("utf-8"), content)
def testDeletePlan(self):
url = reverse('dashboard:api-delete',kwargs={'pk':self.plan.id})
self.client.delete(url, format='json')
self.assertEqual(Plan.objects.count(), 0)
def testPostTask(self):
url = reverse('dashboard:api-taskpost')
data = {"name":"nomeQualquer",'plan':self.plan.id}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Task.objects.count(), 1)
url = reverse('dashboard:api-taskpost')
data = {"name":"nomeQualquer",'plan':self.plan.id+1}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def testDeleteTask(self):
task = self.plan.tasks.create(description="Qualquer")
self.assertEqual(Task.objects.count(), 1)
url = reverse('dashboard:api-taskdelete',kwargs={'pk':task.pk})
response = self.client.delete(url,format='json')
self.assertEqual(Task.objects.count(), 0)
```
#### File: todolist/dashboard/views.py
```python
from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
from .models import Plan, Task
from .serializers import PlanListSerializer, PlanSerializer, TaskListSerializer,TaskSerializer
from rest_framework import mixins
from rest_framework import generics
from rest_framework import status
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
def getUserPlans(request):
return request.user.plans.all()
class PlanListAPIView(generics.ListCreateAPIView):
serializer_class = PlanListSerializer
def get_queryset(self):
return getUserPlans(self.request)
def post(self, request, *args, **kwargs):
request.data["user"] = request.user.pk
return self.create(request, *args, **kwargs)
class PlanUpdateDeleteAPIView(generics.DestroyAPIView):
serializer_class = PlanListSerializer
def get_queryset(self):
return getUserPlans(self.request)
class TaskPostAPIView(mixins.CreateModelMixin,
generics.GenericAPIView):
serializer_class = TaskListSerializer
def post(self, request, *args, **kwargs):
plan_id = request.data['plan']
plan = Plan.objects.filter(pk=plan_id, user=request.user.pk)
if plan:
return self.create(request, *args, **kwargs)
return Response(status=status.HTTP_403_FORBIDDEN)
class TaskDeleteUpdateAPIView(generics.DestroyAPIView,
mixins.UpdateModelMixin):
serializer_class = TaskSerializer
def get_queryset(self):
plans = getUserPlans(self.request)
return Task.objects.filter(plan__in=plans)
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
class PlanRetrieveView(generics.RetrieveAPIView):
serializer_class = PlanSerializer
def get_object(self):
plan = get_object_or_404(Plan.objects.all(),name=self.kwargs["name"])
return plan
class TemplatePlanView(LoginRequiredMixin,TemplateView):
template_name = "dashboard/plan_list.html"
```
#### File: utils/templatetags/get_links_assets.py
```python
from django.conf import settings
from django.template import Library
import os
from django.utils.safestring import mark_safe
register = Library()
base_directory = settings.STATICFILES_DIRS[0]
@register.simple_tag
def css_href(directory=""):
links_css = ""
completed_directory = base_directory +"/css/" + directory
for elem in os.listdir(completed_directory):
links_css +="<link href={0} rel='stylesheet'>".format("/static/css/" + elem)
links_css += "\n"
return mark_safe(links_css)
@register.simple_tag
def js_src(directory=""):
links_js = ""
completed_directory = base_directory +"/js/" + directory
for elem in os.listdir(completed_directory):
links_js +="<script type='text/javascript' src='{0}'></script>".format("/static/js/" + elem)
links_js += "\n"
return mark_safe(links_js)
``` |
{
"source": "JonezySins/algorithms",
"score": 4
} |
#### File: unit/recursion/test_invert_binary_tree.py
```python
from algorithms.recursion.invert_binary_tree import main
def test_run_invert_tree():
tree = main()
assert tree.val == 1
assert tree.left.val == 3
assert tree.left.right.val == 6
assert tree.left.left.val == 7
assert tree.right.val == 2
assert tree.right.right.val == 4
assert tree.right.left.val == 5
```
#### File: unit/sequences/test_fibonnaci_sequence.py
```python
from unittest.mock import patch
from algorithms.sequences.fibonacci_sequence import generate_fibonacci_sequence
def test_fibonacci_sequence():
fibonacci_sequence = generate_fibonacci_sequence()
assert fibonacci_sequence == [
1,
1,
2,
3,
5,
8,
13,
21,
34,
55,
89,
144,
233,
377,
610,
987,
1597,
2584,
4181,
]
@patch('algorithms.sequences.fibonacci_sequence.ITERATIONS', 94)
@patch('sys.exit')
def test_fibonacci_sequence_iteration_too_large(mock_sys_exit):
generate_fibonacci_sequence()
mock_sys_exit.assert_called_once()
@patch('algorithms.sequences.fibonacci_sequence.ITERATIONS', -1)
@patch('sys.exit')
def test_fibonacci_sequence_iteration_less_than_1(mock_sys_exit):
generate_fibonacci_sequence()
mock_sys_exit.assert_called_once()
``` |
{
"source": "jonfairbanks/rtsp-nvr",
"score": 2
} |
#### File: jonfairbanks/rtsp-nvr/app.py
```python
import argparse
from flask import Response
from flask import Flask
from flask import render_template
from flask import jsonify
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from werkzeug.exceptions import HTTPException
from werkzeug.exceptions import default_exceptions
import settings
# initialize a flask object
app = Flask(__name__,
static_url_path='',
static_folder='client/static',
template_folder='client/templates')
@app.errorhandler(Exception)
def handle_error(e):
code = 500
if isinstance(e, HTTPException):
code = e.code
return jsonify(error=str(e)), code
for ex in default_exceptions:
app.register_error_handler(ex, handle_error)
app.config['SQLALCHEMY_DATABASE_URI'] = settings.SQLALCHEMY_DATABASE_URI
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = settings.SQLALCHEMY_TRACK_MODIFICATIONS
app.config['BUNDLE_ERRORS'] = settings.BUNDLE_ERRORS
db = SQLAlchemy(app)
api = Api(app)
api.prefix = '/api'
from endpoints.cams.model import Cam
from endpoints.cams.resource import CamsResource
api.add_resource(CamsResource, '/cams', '/cams/<int:cam_id>')
from lib import capture
# Define Routes
@app.route("/")
def index():
cams = Cam.query.all()
return render_template("index.html", cams = cams)
@app.route("/admin")
def admin():
cams = Cam.query.all()
return render_template("admin.html", cams = cams)
@app.route('/video_feed/<int:id>/', methods=["GET"])
def video_feed(id):
return Response(capture.generateFrames(id), mimetype = "multipart/x-mixed-replace; boundary=frame")
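# The Response above streams Motion-JPEG: each chunk yielded by
# capture.generateFrames() is one JPEG frame wrapped in a multipart boundary,
# which browsers render as a continuously refreshing image.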
# Main function
def webstreaming():
# construct the argument parser and parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--ip", type=str, required=True,
help="ip address of the device")
ap.add_argument("-o", "--port", type=int, required=True,
help="ephemeral port number of the server (1024 to 65535)")
ap.add_argument("-f", "--frame-count", type=int, default=32,
help="# of frames used to construct the background model")
args = vars(ap.parse_args())
# read cams from db
with app.app_context():
cams = Cam.query.all()
for cam in cams:
# start a thread that will perform motion detection for each cam
capture.startCaptureDevice(cam)
# start the flask app
app.run(host=args["ip"], port=args["port"], debug=True,
threaded=True, use_reloader=False)
```
#### File: endpoints/cams/model.py
```python
from app import db
class Cam(db.Model):
__tablename__ = 'cam'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20))
url = db.Column(db.String(200))
running = db.Column(db.Boolean, unique=False, default=True)
timestamp = db.Column(db.Boolean, unique=False, default=True)
def __repr__(self):
return 'Id: {}, name: {}, url: {}'.format(self.id, self.name, self.url)
```
#### File: rtsp-nvr/lib/capture.py
```python
import threading
import cv2
import imutils
import datetime
import time
import numpy as np
from lib.imageprocessors.motion_detection import SingleMotionDetector
from threading import Thread
from endpoints.cams.model import Cam
lock = threading.Lock()
Devices = {}
class CaptureDevice(Thread):
daemon = True
def __init__(self, cam):
super().__init__()
self.running = cam.running
self.name = cam.name
self.url = cam.url
self.outputFrame = None
self.connected = True
self.timestamp = cam.timestamp
self.fps = 1/60
def run(self):
vs = cv2.VideoCapture(self.url, cv2.CAP_FFMPEG)
time.sleep(1)
while self.running:
# loop over frames from the video stream
if self.connected: # read the frame from video stream
success, frame = vs.read()
#frame = imutils.resize(frame, width=1280)
                # acquire the lock, set the output frame, and release the lock
                if success:
with lock:
# grab the current timestamp and draw it on the frame
if self.timestamp:
timestamp = datetime.datetime.now()
cv2.putText(frame, timestamp.strftime(
"%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
self.outputFrame = frame.copy()
else:
vs.release()
self.connected = False
with lock:
red = (255, 0, 0)
frame = create_frame(1280,720,"device disconnected",rgb_color=red)
self.outputFrame = frame
else:
print('trying to connect to cam', self.name)
vs = cv2.VideoCapture(self.url, cv2.CAP_FFMPEG)
time.sleep(1)
self.connected = True
time.sleep(self.fps)
vs.release()
with lock:
blue = (0, 0, 255)
frame = create_frame(1280,720,"device stopped",rgb_color=blue)
self.outputFrame = frame
def stop(self):
self.running = False
def startCaptureDevice(cam):
# Create new capture device from cam info
capDevice = CaptureDevice(cam)
# call start method, to initialize thread/run function of the CaptureDevice class
capDevice.start()
# add Capture device by id to global dictionary of all active devices
Devices[cam.id] = capDevice
def setCaptureDevice(cam):
capDevice = Devices[cam.id]
capDevice.stop()
startCaptureDevice(cam)
def deleteCaptureDevice(cam):
capDevice = Devices[cam.id]
capDevice.stop()
Devices.pop(cam.id, None)
def generateFrames(id):
# loop over frames from the output stream
while True:
# wait until the lock is acquired
with lock:
# check if the output frame is available, otherwise skip
# the iteration of the loop
if id in Devices:
if Devices[id].outputFrame is None:
continue
# encode the frame in JPEG format
(flag, encodedImage) = cv2.imencode(".jpg", Devices[id].outputFrame)
# ensure the frame was successfully encoded
if not flag:
continue
else:
black = (0, 0, 0)
frame = create_frame(1280,720,"no device", rgb_color=black)
(flag, encodedImage) = cv2.imencode(".jpg", frame)
# yield the output frame in the byte format
yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
bytearray(encodedImage) + b'\r\n')
        time.sleep(Devices[id].fps if id in Devices else 1/60)
def create_frame(width, height, text, rgb_color=(50, 50, 50),):
# Create black blank image
frame = np.zeros((height, width, 3), np.uint8)
# Since OpenCV uses BGR, convert the color first
color = tuple(reversed(rgb_color))
# Fill image with color
frame[:] = color
# Add text to frame
cv2.putText(frame, text, (10, frame.shape[0] - 10),cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 255, 255), 1)
return frame
```
#### File: backend/admin/form.py
```python
import sqlalchemy
from flask_admin.form import BaseForm
from flask_admin.form.fields import Select2Field
from flask_admin.model.form import converts
from flask_admin.contrib.sqla.form import AdminModelConverter
class ReorderableForm(BaseForm):
def __init__(self, formdata=None, obj=None, prefix=u'', **kwargs):
super().__init__(formdata=formdata, obj=obj, prefix=prefix, **kwargs)
if hasattr(self, 'field_order'):
for field_name in self.field_order:
self._fields.move_to_end(field_name)
class EnumField(Select2Field):
def __init__(self, column, **kwargs):
assert isinstance(column.type, sqlalchemy.sql.sqltypes.Enum)
def coercer(value):
# coerce incoming value to enum value
if isinstance(value, column.type.enum_class):
return value
elif isinstance(value, str):
return column.type.enum_class[value]
else:
raise ValueError('Invalid choice {enumclass} {value}'.format(
enumclass=column.type.enum_class,
value=value
))
super(EnumField, self).__init__(
choices=[(v, v) for v in column.type.enums],
coerce=coercer,
**kwargs
)
def pre_validate(self, form):
for v, _ in self.choices:
if self.data == self.coerce(v):
break
else:
raise ValueError(self.gettext('Not a valid choice'))
class CustomAdminConverter(AdminModelConverter):
@converts('sqlalchemy.sql.sqltypes.Enum')
def convert_enum(self, field_args, **extra):
return EnumField(column=extra['column'], **field_args)
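# A plausible way to wire these classes into a Flask-Admin view (the view class
# below is hypothetical and shown for illustration only):
#
#   class ExampleModelAdmin(ModelView):
#       form_base_class = ReorderableForm
#       model_form_converter = CustomAdminConverter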
```
#### File: backend/admin/security.py
```python
from flask import abort, redirect, request, url_for
from http import HTTPStatus
from flask_login import current_user as user
class AdminSecurityMixin(object):
def is_accessible(self):
if user.is_active and user.is_authenticated and user.has_role('ROLE_ADMIN'):
return True
return False
def _handle_view(self, name, **kwargs):
if not self.is_accessible():
if not user.is_authenticated:
return redirect(url_for('security.login', next=request.url))
abort(HTTPStatus.FORBIDDEN)
```
#### File: backend/api/decorators.py
```python
import inspect
from enum import Enum
from functools import wraps
from http import HTTPStatus
from flask import abort, request
from flask_sqlalchemy.model import Model, camel_to_snake_case
from backend.utils import was_decorated_without_parenthesis
def param_converter(*decorator_args, **decorator_kwargs):
"""
Call with the url parameter names as keyword argument keys, their values
being the model to convert to.
Models will be looked up by the url param names. If a url param name
is prefixed with the snake-cased model name, the prefix will be stripped.
If a model isn't found, abort with a 404.
The action's argument names must match the snake-cased model names.
For example::
@bp.route('/users/<int:user_id>/posts/<int:id>')
@param_converter(user_id=User, id=Post)
def show_post(user, post):
# the param converter does the database lookups:
# user = User.query.filter_by(id=user_id).first()
# post = Post.query.filter_by(id=id).first()
# and calls the decorated action: show_post(user, post)
# or to customize the argument names passed to the action:
@bp.route('/users/<int:user_id>/posts/<int:post_id>')
@param_converter(user_id={'user_arg_name': User},
post_id={'post_arg_name': Post})
def show_post(user_arg_name, post_arg_name):
# do stuff ...
Also supports parsing arguments from the query string. For query string
keyword arguments, use a lookup (dict, Enum) or callable::
@bp.route('/users/<int:id>')
@param_converter(id=User, foo=str, optional=int)
def show_user(user, foo, optional=10):
# GET /users/1?foo=bar
# calls show_user(user=User.get(1), foo='bar')
"""
def wrapped(fn):
@wraps(fn)
def decorated(*view_args, **view_kwargs):
view_kwargs = _convert_models(view_kwargs, decorator_kwargs)
view_kwargs = _convert_query_params(view_kwargs, decorator_kwargs)
return fn(*view_args, **view_kwargs)
return decorated
if was_decorated_without_parenthesis(decorator_args):
return wrapped(decorator_args[0])
return wrapped
def _convert_models(view_kwargs: dict,
url_param_names_to_models: dict,
) -> dict:
for url_param_name, model_mapping in url_param_names_to_models.items():
arg_name = None
model = model_mapping
if isinstance(model_mapping, dict):
arg_name, model = list(model_mapping.items())[0]
if not (inspect.isclass(model) and issubclass(model, Model)):
continue
if not arg_name:
arg_name = camel_to_snake_case(model.__name__)
filter_by = url_param_name.replace(
camel_to_snake_case(model.__name__) + '_', '')
instance = model.query.filter_by(**{
filter_by: view_kwargs.pop(url_param_name),
}).first()
if not instance:
abort(HTTPStatus.NOT_FOUND)
view_kwargs[arg_name] = instance
return view_kwargs
def _convert_query_params(view_kwargs: dict,
param_name_to_converters: dict,
) -> dict:
for name, converter in param_name_to_converters.items():
if name not in request.args:
continue
value = request.args.getlist(name)
if len(value) == 1:
value = value[0]
if isinstance(converter, (dict, Enum)):
value = converter[value]
elif callable(converter):
value = converter(value)
view_kwargs[name] = value
return view_kwargs
def list_loader(*args, model):
"""
Decorator to automatically query the database for all records of a model.
:param model: The model class to query
"""
def wrapped(fn):
@wraps(fn)
def decorated(*args, **kwargs):
return fn(model.query.all())
return decorated
if was_decorated_without_parenthesis(args):
return wrapped(args[0])
return wrapped
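# Hypothetical usage sketch (the blueprint, route and model names are
# illustrative, not from the original project):
#
#   @bp.route('/posts')
#   @list_loader(model=Post)
#   def list_posts(posts):
#       # `posts` is Post.query.all(), injected by the decorator
#       ...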
def patch_loader(*args, serializer):
"""
Decorator to automatically load and (partially) update a model from json
request data
:param serializer: The ModelSerializer to use to load data from the request
"""
def wrapped(fn):
@wraps(fn)
def decorated(*args, **kwargs):
result = serializer.load(request.get_json(),
instance=kwargs.pop('instance'),
partial=True)
if not result.errors and not result.data.id:
abort(HTTPStatus.NOT_FOUND)
return fn(*result)
return decorated
if was_decorated_without_parenthesis(args):
return wrapped(args[0])
return wrapped
def put_loader(*args, serializer):
"""
Decorator to automatically load and update a model from json request data
:param serializer: The ModelSerializer to use to load data from the request
"""
def wrapped(fn):
@wraps(fn)
def decorated(*args, **kwargs):
result = serializer.load(request.get_json(),
instance=kwargs.pop('instance'))
if not result.errors and not result.data.id:
abort(HTTPStatus.NOT_FOUND)
return fn(*result)
return decorated
if was_decorated_without_parenthesis(args):
return wrapped(args[0])
return wrapped
def post_loader(*args, serializer):
"""
Decorator to automatically instantiate a model from json request data
:param serializer: The ModelSerializer to use to load data from the request
"""
def wrapped(fn):
@wraps(fn)
def decorated(*args, **kwargs):
return fn(*serializer.load(request.get_json()))
return decorated
if was_decorated_without_parenthesis(args):
return wrapped(args[0])
return wrapped
```
#### File: v2/backend/app.py
```python
import os
import sys
from flask import Flask as BaseFlask, session
from flask.helpers import get_debug_flag
from flask_wtf.csrf import generate_csrf
from .config import (
BaseConfig,
DevConfig,
ProdConfig,
PROJECT_ROOT,
TEMPLATE_FOLDER,
STATIC_FOLDER,
STATIC_URL_PATH,
EXTENSIONS,
DEFERRED_EXTENSIONS,
)
from .logger import logger
from .magic import (
get_bundles,
get_commands,
get_extensions,
)
class Flask(BaseFlask):
bundles = []
models = {}
serializers = {}
def create_app():
"""Creates a pre-configured Flask application.
Defaults to using :class:`backend.config.ProdConfig`, unless the
:envvar:`FLASK_DEBUG` environment variable is explicitly set to "true",
in which case it uses :class:`backend.config.DevConfig`. Also configures
paths for the templates folder and static files.
"""
return _create_app(
DevConfig if get_debug_flag() else ProdConfig,
template_folder=TEMPLATE_FOLDER,
static_folder=STATIC_FOLDER,
static_url_path=STATIC_URL_PATH
)
def _create_app(config_object: BaseConfig, **kwargs):
"""Creates a Flask application.
:param object config_object: The config class to use.
:param dict kwargs: Extra kwargs to pass to the Flask constructor.
"""
# WARNING: HERE BE DRAGONS!!!
# DO NOT FUCK WITH THE ORDER OF THESE CALLS or nightmares will ensue
app = Flask(__name__, **kwargs)
app.bundles = list(get_bundles())
configure_app(app, config_object)
extensions = dict(get_extensions(EXTENSIONS))
register_extensions(app, extensions)
register_blueprints(app)
register_models(app)
register_serializers(app)
register_admins(app)
deferred_extensions = dict(get_extensions(DEFERRED_EXTENSIONS))
extensions.update(deferred_extensions)
register_extensions(app, deferred_extensions)
register_cli_commands(app)
register_shell_context(app, extensions)
return app
def configure_app(app, config_object):
"""General application configuration:
- register the app's config
- register Jinja extensions
- register functions to run on before/after request
"""
# automatically configure a migrations folder for each bundle
config_object.ALEMBIC['version_locations'] = [
(bundle._name, os.path.join(PROJECT_ROOT,
bundle.module_name.replace('.', os.sep),
'migrations'))
for bundle in app.bundles if bundle.has_models
]
app.config.from_object(config_object)
app.jinja_env.add_extension('jinja2_time.TimeExtension')
@app.before_request
def enable_session_timeout():
session.permanent = True # set session to use PERMANENT_SESSION_LIFETIME
session.modified = True # reset the session timer on every request
@app.after_request
def set_csrf_cookie(response):
if response:
response.set_cookie('csrf_token', generate_csrf())
return response
def register_extensions(app, extensions):
"""Register and initialize extensions."""
for extension in extensions.values():
extension.init_app(app)
def register_blueprints(app):
"""Register bundle views."""
# disable strict_slashes on all routes by default
if not app.config.get('STRICT_SLASHES', False):
app.url_map.strict_slashes = False
# register blueprints
for bundle in app.bundles:
for blueprint in bundle.blueprints:
# rstrip '/' off url_prefix because views should be declaring their
# routes beginning with '/', and if url_prefix ends with '/', routes
# will end up looking like '/prefix//endpoint', which is no good
url_prefix = (blueprint.url_prefix or '').rstrip('/')
app.register_blueprint(blueprint, url_prefix=url_prefix)
def register_models(app):
"""Register bundle models."""
models = {}
for bundle in app.bundles:
for model_name, model_class in bundle.models:
models[model_name] = model_class
app.models = models
def register_admins(app):
"""Register bundle admins."""
from backend.extensions import db
from backend.extensions.admin import admin
for bundle in app.bundles:
if bundle.admin_icon_class:
admin.category_icon_classes[bundle.admin_category_name] = bundle.admin_icon_class
for ModelAdmin in bundle.model_admins:
model_admin = ModelAdmin(ModelAdmin.model,
db.session,
category=bundle.admin_category_name,
name=ModelAdmin.model.__plural_label__)
# workaround upstream bug where certain values set as
# class attributes get overridden by the constructor
model_admin.menu_icon_value = getattr(ModelAdmin, 'menu_icon_value', None)
if model_admin.menu_icon_value:
model_admin.menu_icon_type = getattr(ModelAdmin, 'menu_icon_type', None)
admin.add_view(model_admin)
def register_serializers(app):
"""Register bundle serializers."""
serializers = {}
for bundle in app.bundles:
for name, serializer_class in bundle.serializers:
serializers[name] = serializer_class
app.serializers = serializers
def register_cli_commands(app):
"""Register all the Click commands declared in :file:`backend/commands` and
each bundle's commands"""
commands = list(get_commands())
for bundle in app.bundles:
commands += list(bundle.command_groups)
for name, command in commands:
if name in app.cli.commands:
logger.error(f'Command name conflict: "{name}" is taken.')
sys.exit(1)
app.cli.add_command(command)
def register_shell_context(app, extensions):
"""Register variables to automatically import when running `python manage.py shell`."""
def shell_context():
ctx = {}
ctx.update(extensions)
ctx.update(app.models)
ctx.update(app.serializers)
return ctx
app.shell_context_processor(shell_context)
```
#### File: commands/import_articles/__init__.py
```python
import click
import json
import os
import sys
from datetime import datetime
from flask.cli import with_appcontext
from backend.config import (
APP_CACHE_FOLDER,
DEFAULT_ARTICLE_AUTHOR_EMAIL,
ARTICLES_FOLDER,
)
from backend.extensions import db
from backend.security.models import User
from ...models import SeriesArticle
from ..group import blog
from .article_data import ArticleData, load_article_datas
from .series_data import load_series_datas
ARTICLES_METADATA_PATH = os.path.join(APP_CACHE_FOLDER, '.articles-metadata.json')
@blog.command()
@click.option('--reset', is_flag=True, default=False, expose_value=True,
help='Ignore previously updated at timestamps.')
@with_appcontext
def import_articles(reset):
click.echo('Importing new/updated blog articles.')
if _import_articles(reset):
click.echo('Done.')
else:
click.echo('No new articles found. Exiting.')
def _import_articles(reset):
last_updated, default_author = load_metadata(reset)
new_articles = load_article_datas(ARTICLES_FOLDER,
default_author,
last_updated)
count = 0
count += process_article_datas(new_articles, None)
for series_data in load_series_datas(ARTICLES_FOLDER,
default_author,
last_updated):
series, is_create = series_data.create_or_update_series()
should_save = is_create or series_data.last_updated.timestamp() > last_updated
if should_save:
count += 1
series.save()
msg_prefix = ('' if not should_save
                  else ('Created ' if is_create else 'Updated '))
click.echo(f'{msg_prefix}Series: {series.title}')
count += process_article_datas(series_data.articles, series)
if count:
db.session.commit()
save_metadata()
return count
def process_article_datas(article_datas, series):
count = -1
for count, article_data in enumerate(article_datas):
article, is_create = article_data.create_or_update_article()
article.save()
if series and not article.article_series:
if article_data.part:
series.series_articles.append(SeriesArticle(series=series,
article=article,
part=article_data.part))
else:
series.articles.append(article)
msg_prefix = ' - ' if series else ''
msg_prefix += 'Created' if is_create else 'Updated'
click.echo(f'{msg_prefix} Article: {article.title}')
return count + 1
def load_metadata(reset=False):
if not os.path.exists(ARTICLES_FOLDER):
click.secho('Could not find directory ARTICLES_FOLDER'
f'={ARTICLES_FOLDER}', fg='red')
sys.exit(1)
default_author = User.get_by(email=DEFAULT_ARTICLE_AUTHOR_EMAIL)
if not default_author:
click.secho('Could not find a User with DEFAULT_ARTICLE_AUTHOR_EMAIL'
f'={DEFAULT_ARTICLE_AUTHOR_EMAIL}', fg='red')
sys.exit(1)
if reset or not os.path.exists(ARTICLES_METADATA_PATH):
return 0, default_author
with open(ARTICLES_METADATA_PATH) as f:
metadata = json.load(f)
return metadata['last_updated'], default_author
def save_metadata():
os.makedirs(os.path.dirname(ARTICLES_METADATA_PATH), exist_ok=True)
data = json.dumps({'last_updated': datetime.now().timestamp()}, indent=4)
with open(ARTICLES_METADATA_PATH, 'w') as f:
f.write(data + '\n')
```
#### File: blog/models/series.py
```python
from backend.database import (
Column,
Model,
String,
Text,
association_proxy,
attach_events,
foreign_key,
on,
relationship,
slugify,
)
from .series_article import SeriesArticle
from .series_tag import SeriesTag
@slugify('title')
@attach_events
class Series(Model):
title = Column(String(100))
slug = Column(String(100))
file_path = Column(String(255), nullable=True)
header_image = Column(String(255), nullable=True)
summary = Column(Text)
series_articles = relationship('SeriesArticle', back_populates='series',
lazy='joined', innerjoin=True,
order_by='SeriesArticle.part',
cascade='all, delete-orphan')
articles = association_proxy('series_articles', 'article',
creator=lambda article: SeriesArticle(article=article))
category_id = foreign_key('Category', nullable=True)
category = relationship('Category', back_populates='series')
series_tags = relationship('SeriesTag', back_populates='series',
cascade='all, delete-orphan')
tags = association_proxy('series_tags', 'tag',
creator=lambda tag: SeriesTag(tag=tag))
__repr_props__ = ('id', 'title', 'articles')
@on('series_articles', 'append')
def on_append_series_article(self, series_article, *_):
# auto increment series article part number if necessary
if series_article.part is None:
series_article.part = len(self.series_articles) + 1
# set the article's category to be the same as the series' category
article = series_article.article
article.category = self.category
# set the article's tags to include the series' tags
for tag in self.tags:
if tag not in article.tags:
article.tags.append(tag)
```
#### File: backend/commands/db.py
```python
import click
from flask import current_app
from flask.cli import with_appcontext
from flask_alembic.cli.click import cli as db_cli
from backend.utils.date import parse_datetime
# FIXME: document json file format, relationships
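# Shape of the fixtures file expected by the loader below (model and field names
# are illustrative only; each "items" entry is a dict of model constructor kwargs):
#
#   [
#       {"model": "User", "items": [{"id": 1, "username": "admin"}]},
#       {"model": "Post", "items": [{"id": 1, "title": "Hello", "author_id": 1}]}
#   ]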
@db_cli.command()
@click.option('--reset/--no-reset', expose_value=True,
prompt='Reset DB and run migrations before loading fixtures?')
@click.argument('file', type=click.File())
@with_appcontext
def fixtures(file, reset):
"""Load database fixtures from JSON."""
import json
from backend.extensions import db
if reset:
_reset_db()
# sqlalchemy and postgres sequences don't play so nice together when ids are
# explicitly set. so we need to modify the sequence start-point ourselves
is_postgres = current_app.config.get('SQLALCHEMY_DATABASE_URI', '').startswith('postgres')
sequences = []
if is_postgres:
sequences = [row[0] for row in db.session.execute("""
SELECT relname FROM pg_class WHERE relkind = 'S'
""")]
click.echo('Loading fixtures.')
for fixture in json.load(file):
model = current_app.models[fixture['model']]
for model_kwargs in fixture['items']:
d = {}
for k, v in model_kwargs.items():
# FIXME is this too heavy-handed of an approach? (will it ever
# create a date when it wasn't supposed to?) maybe better to
# somehow declare explicit date fields in the fixtures file
try:
d[k] = parse_datetime(v)
                except (TypeError, ValueError, OverflowError):
d[k] = v
model.create(**d)
count = len(fixture['items'])
suffix = 's' if count > 1 else ''
click.echo(f"Adding {count} {fixture['model']} record{suffix}.")
if is_postgres:
seq_name = f'{model.__tablename__}_id_seq'
if seq_name in sequences:
db.session.execute(
f'ALTER SEQUENCE {seq_name} RESTART WITH :count',
{'count': count + 1}
)
db.session.commit()
click.echo('Done.')
@db_cli.command()
@click.option('--drop/--no-drop', expose_value=True,
prompt='Drop DB tables?')
@with_appcontext
def drop(drop):
"""Drop database tables."""
if not drop:
exit('Cancelled.')
_drop_db()
click.echo('Done.')
def _drop_db():
from backend.extensions import db
click.echo('Dropping DB tables.')
db.drop_all()
db.engine.execute('DROP TABLE IF EXISTS alembic_version;')
@db_cli.command()
@click.option('--reset', is_flag=True, expose_value=True,
prompt='Drop DB tables and run migrations?')
@with_appcontext
def reset(reset):
"""Drops database tables and runs migrations."""
if not reset:
exit('Cancelled.')
_reset_db()
click.echo('Done.')
def _reset_db():
from backend.extensions import alembic
_drop_db()
click.echo('Running DB migrations.')
alembic.upgrade()
```
#### File: backend/database/column.py
```python
from backend.extensions import db
class Column(db.Column):
"""
Overridden to make nullable False by default
"""
def __init__(self, *args, nullable=False, **kwargs):
super().__init__(*args, nullable=nullable, **kwargs)
```
#### File: backend/extensions/mail.py
```python
from flask_mail import Mail as BaseMail
class Mail(BaseMail):
def init_app(self, app):
self.state = super().init_app(app)
mail = Mail()
```
#### File: backend/security/forms.py
```python
from flask_login import current_user
from flask_security.forms import (
Form,
EqualTo,
Length,
PasswordField,
PasswordFormMixin,
password_required,
)
from flask_security.utils import get_message, verify_and_update_password
password_length = Length(min=8, max=128,
message='Password must be at least 8 characters long.')
class ChangePasswordFormMixin(object):
newPassword = PasswordField(
'New Password',
validators=[password_required, password_length]
)
confirmNewPassword = PasswordField(
'Confirm New Password',
validators=[password_required,
EqualTo('newPassword', message='RETYPE_PASSWORD_MISMATCH')]
)
class ChangePasswordForm(Form, PasswordFormMixin, ChangePasswordFormMixin):
def validate(self):
if not super().validate():
return False
if not verify_and_update_password(self.password.data, current_user):
self.password.errors.append(get_message('INVALID_PASSWORD')[0])
return False
if self.password.data == self.newPassword.data:
self.newPassword.errors.append(get_message('PASSWORD_IS_THE_SAME')[0])
return False
return True
class ResetPasswordForm(Form, ChangePasswordFormMixin):
pass
```
#### File: security/views/confirm_email.py
```python
from flask import after_this_request, redirect
from flask_login import current_user
from flask_security.confirmable import (
confirm_email_token_status,
confirm_user,
send_confirmation_instructions,
)
from flask_security.utils import (
get_url,
login_user,
logout_user,
)
from flask_security.views import _security, _commit
from .blueprint import security
@security.route('/confirm/<token>', methods=['GET'])
def confirm_email(token):
"""View function which handles a email confirmation request."""
expired, invalid, user = confirm_email_token_status(token)
if not user or invalid:
invalid = True
already_confirmed = user is not None and user.confirmed_at is not None
expired_and_not_confirmed = expired and not already_confirmed
if expired_and_not_confirmed:
send_confirmation_instructions(user)
if invalid or expired_and_not_confirmed:
return redirect(get_url(_security.confirm_error_view))
if confirm_user(user):
after_this_request(_commit)
if user != current_user:
logout_user()
login_user(user)
return redirect(get_url(_security.post_confirm_view))
```
#### File: security/views/forgot_password.py
```python
from flask import jsonify, request
from flask_security.recoverable import send_reset_password_instructions
from flask_security.views import _security
from http import HTTPStatus
from werkzeug.datastructures import MultiDict
from .blueprint import frontend, security
from ..decorators import anonymous_user_required
@frontend.route('/login/forgot-password')
@security.route('/reset', methods=['POST'])
@anonymous_user_required
def forgot_password():
"""View function that handles a forgotten password request."""
form = _security.forgot_password_form(MultiDict(request.get_json()))
if form.validate_on_submit():
send_reset_password_instructions(form.user)
else:
return jsonify({'errors': form.errors}), HTTPStatus.BAD_REQUEST
return '', HTTPStatus.NO_CONTENT
```
#### File: security/views/logout.py
```python
from flask import redirect, request, url_for
from flask_login import current_user
from flask_security.utils import logout_user
from http import HTTPStatus
from backend.extensions.api import api
from .blueprint import security
@api.route(security, '/logout')
@security.route('/logout')
def logout():
if current_user.is_authenticated:
logout_user()
if not request.is_json:
return redirect(url_for('admin.index'))
return '', HTTPStatus.NO_CONTENT
```
#### File: security/views/user_resource.py
```python
from flask import after_this_request, current_app
from flask_security.confirmable import generate_confirmation_link
from flask_security.signals import user_registered
from flask_security.utils import config_value, login_user, send_mail
from flask_security.views import _commit, _security
from backend.api import ModelResource, CREATE, GET, PATCH
from backend.extensions.api import api
from .blueprint import security
from ..decorators import anonymous_user_required, auth_required_same_user
from ..models import User
@api.model_resource(security, User, '/users', '/users/<int:id>')
class UserResource(ModelResource):
include_methods = [CREATE, GET, PATCH]
method_decorators = {
CREATE: [anonymous_user_required],
GET: [auth_required_same_user],
PATCH: [auth_required_same_user],
}
def create(self, user, errors):
if errors:
return self.errors(errors)
# complete registration, save user to db, and maybe log them in
user_logged_in = register_user(user)
if user_logged_in:
return self.created({
'token': user.get_auth_token(),
'user': user,
}, save=False)
return self.created({'user': user}, save=False)
def register_user(user):
"""Performs the user registration process.
Returns True if the user has been logged in, false otherwise.
"""
if not _security.confirmable or _security.login_without_confirmation:
user.active = True
# confirmation token depends on having user.id set, which requires
# the user be committed to the database
user.save(commit=True)
confirmation_link, token = None, None
if _security.confirmable:
confirmation_link, token = generate_confirmation_link(user)
user_registered.send(current_app._get_current_object(),
user=user, confirm_token=token)
if config_value('SEND_REGISTER_EMAIL'):
send_mail(config_value('EMAIL_SUBJECT_REGISTER'), user.email,
'welcome', user=user, confirmation_link=confirmation_link)
if not _security.confirmable or _security.login_without_confirmation:
login_user(user)
# login_user will modify the user object if _security.trackable is set,
# but it will not request a session commit itself when it needs it :/
after_this_request(_commit)
return True
return False
```
#### File: site/views/contact_submission_resource.py
```python
from flask import current_app
from backend.api import ModelResource, CREATE
from backend.extensions.api import api
from backend.utils import send_mail
from .blueprint import site
from ..models import ContactSubmission
@api.model_resource(site, ContactSubmission, '/contact-submissions')
class ContactSubmissionResource(ModelResource):
include_methods = (CREATE,)
def create(self, contact_submission, errors):
if errors:
return self.errors(errors)
send_mail(subject='New Contact Submission',
recipients=list(current_app.config.get('MAIL_ADMINS')),
template='email/contact_submission.html',
contact_submission=contact_submission)
return self.created(contact_submission)
```
#### File: backend/utils/date.py
```python
import datetime
import pytz
# export a common alias for dateutil.parser.parse
from dateutil.parser import parse as parse_datetime
def timestamp_to_datetime(seconds, tz=None):
"""Returns a datetime.datetime of seconds in UTC
:param seconds: timestamp relative to the epoch
:param tz: timezone of the timestamp
"""
if tz is None:
tz = pytz.UTC
dt = datetime.datetime.fromtimestamp(seconds, tz)
return dt.astimezone(pytz.UTC)
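# Illustrative usage (not part of the original module):
#   timestamp_to_datetime(0) == datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)
#   timestamp_to_datetime(0, tz=pytz.timezone('US/Pacific')) still normalizes
#   the result back to UTC via astimezone(pytz.UTC).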
def utcnow():
"""Returns a current timezone-aware datetime.datetime in UTC
"""
return datetime.datetime.now(datetime.timezone.utc)
```
#### File: backend/utils/decorators.py
```python
def was_decorated_without_parenthesis(args):
return args and callable(args[0])
```
#### File: security/views/test_resend_confirmation_email.py
```python
import pytest
from flask import url_for
@pytest.mark.usefixtures('user')
class TestResendConfirmation:
def test_email_required(self, api_client):
r = api_client.post(url_for('api.resend_confirmation_email'))
assert r.status_code == 400
assert 'email' in r.errors
def test_cannot_reconfirm(self, user, api_client):
r = api_client.post(url_for('api.resend_confirmation_email'),
data=dict(email=user.email))
assert r.status_code == 400
assert 'Your email has already been confirmed.' in r.errors['email']
@pytest.mark.options(SECURITY_CONFIRMABLE=True)
def test_instructions_resent(self, api_client, outbox, templates):
from backend.security.models import User
from backend.security.views.user_resource import register_user
user = User(username='test',
email='<EMAIL>',
password='password',
first_name='the',
last_name='user')
register_user(user)
r = api_client.post(url_for('api.resend_confirmation_email'),
data=dict(email=user.email))
assert r.status_code == 204
assert len(outbox) == len(templates) == 2
assert templates[0].template.name == 'security/email/welcome.html'
assert templates[1].template.name == 'security/email/confirmation_instructions.html'
assert templates[0].context.get('confirmation_link') != templates[1].context.get('confirmation_link')
``` |
{
"source": "jonfairman/initialize-all-the-things",
"score": 3
} |
#### File: archive/tmux/wireless.py
```python
import subprocess
def is_wifi_enabled(info):
if "AirPort" in info:
return(False)
else:
return(True)
# broken
def is_wifi_connected(info):
if "state" in info:
if info["state"] == "running":
return(True)
else:
return(False)
else:
return(False)
def signal_percent(rate, maxRate):
return((float( int(rate) ) / float( int(maxRate)) * 100.0))
def get_signal_percent(info):
lastRate = info["lastTxRate"]
maxRate = info["maxRate"]
return(signal_percent(lastRate,maxRate))
def tmux_output(info):
output = "#[fg=green][" + info["SSID"].strip() + ":" + str(int(get_signal_percent(info))) + "%:" + info["maxRate"] + "]#[default]"
print(output)
# Return a dict of the output from airport --getinfo
def get_info():
airport = "/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport"
output = subprocess.check_output([airport, "--getinfo"],universal_newlines=True)
lines = output.split("\n")
airinfo = {}
for l in lines:
fields = l.strip().split(':')
if fields[0] != '':
airinfo[fields[0]] = str(fields[-1]).strip()
return(airinfo)
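# The dict returned above is keyed by the labels printed by `airport --getinfo`;
# the keys this script relies on are SSID, lastTxRate, maxRate and state, plus
# the presence of an "AirPort" entry, which the code treats as Wi-Fi being off.
# Other keys (e.g. agrCtlRSSI) may also be present depending on the macOS build.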
def main():
info = get_info()
if is_wifi_enabled(info):
tmux_output(info)
if __name__ == "__main__":
    main()
``` |
{
"source": "jonfang/CMPE295_DataAnalyzer",
"score": 2
} |
#### File: CMPE295_DataAnalyzer/analyzer/views.py
```python
from django.http import HttpResponse
from pyspark.sql import SparkSession
from django.shortcuts import render
from datetime import datetime
from core.chartfactory import createBarChart, createPieChart
from core.dataprocessor import DataProcessor
def sample(request):
"""
sample python report
"""
keys = ('Python', 'C++', 'Java', 'Perl', 'Scala', 'Lisp')
values = [10,8,6,4,2,1]
image_base64 = createBarChart(keys, values, 'Usage', 'Programming language usages')
return render(
request,
'analyzer/main.html',
{
'name': "Jon",
'date': datetime.now(),
'image_base64':image_base64,
}
)
def home(request):
return render(
request,
'analyzer/home.html',
)
def submit(request):
data = {}
if request.method == 'POST':
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=7)
        image_base64 = createBarChart(keys, values, 'Company', 'Average Employee Rating')
data = {
"title": request.POST.get("title", "defaultTitle"),
"description": request.POST.get("description", "defaultDescription"),
"news": request.POST.get("news", "defaultNews"),
"dataSet": request.POST.get("dataSet", "defaultDataset"),
"bar": request.POST.get("bar", "defaultBar"),
"pie": request.POST.get("pie", "defaultPie"),
"report1":image_base64
}
return render(
request,
'analyzer/new.html',
data
)
def case1(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=1)
image_base64 = createBarChart(keys, values, 'App Count', 'Google Play App Store Count By Category > 400 ')
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=2)
config = {'rotation':90}
image_base64_1 = createBarChart(keys, values, 'App Count', 'Google Play App Store Count By Category < 400', configs=config)
return render(
request,
'analyzer/case1.html',
{
'report1':image_base64,
'report2':image_base64_1
}
)
def case2(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=5)
image_base64 = createPieChart(keys, values, 'India trade import 2010-2018')
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=5)
config = {'rotation':90}
image_base64_1 = createBarChart(keys, values, 'Total(millions $USD)', 'India trade import 2010-2018', configs=config)
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=6)
image_base64_2 = createPieChart(keys, values, 'India trade export 2010-2018')
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=6)
config = {'rotation':90}
image_base64_3 = createBarChart(keys, values, 'Total(millions $USD)', 'India trade export 2010-2018', configs=config)
return render(
request,
'analyzer/case2.html',
{
'report5a':image_base64,
'report5b':image_base64_1,
'report6a':image_base64_2,
'report6b':image_base64_3,
}
)
def case3(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=4)
image_base64 = createPieChart(keys, values, 'Oakland Crime Rate 2011-2016')
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=4)
config = {'rotation':90}
image_base64_1 = createBarChart(keys, values, 'Count', 'Oakland Crime Rate 2011-2016', configs=config)
return render(
request,
'analyzer/case3.html',
{
'report4a':image_base64,
'report4b':image_base64_1,
}
)
#google play app report 1
def report1(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=1)
image_base64 = createBarChart(keys, values, 'App Count', 'Google Play App Store Count By Category > 400')
return render(
request,
'analyzer/main.html',
{
'name': "Jon",
'date': datetime.now(),
'image_base64':image_base64,
}
)
#google play app report 2
def report2(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=2)
config = {'rotation':90}
image_base64 = createBarChart(keys, values, 'App Count', 'Google Play App Store Count By Category < 400', configs=config)
return render(
request,
'analyzer/main.html',
{
'name': "Jon",
'date': datetime.now(),
'image_base64':image_base64,
}
)
#google play app report 3
def report3(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=1)
image_base64 = createBarChart(keys, values, 'App Count', 'Google Play App Store Count By Category > 400 ')
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=2)
config = {'rotation':90}
image_base64_1 = createBarChart(keys, values, 'App Count', 'Google Play App Store Count By Category < 400', configs=config)
return render(
request,
'analyzer/main1.html',
{
'name': "Jon",
'date': datetime.now(),
'image_base64':image_base64,
'image_base64_1':image_base64_1,
}
)
def report4(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=4)
image_base64 = createPieChart(keys, values, 'Oakland Crime Rate 2011-2016')
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=4)
config = {'rotation':90}
image_base64_1 = createBarChart(keys, values, 'Count', 'Oakland Crime Rate 2011-2016', configs=config)
return render(
request,
'analyzer/main1.html',
{
'name': "Jon",
'date': datetime.now(),
'image_base64':image_base64,
'image_base64_1':image_base64_1,
}
)
def report5(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=5)
image_base64 = createPieChart(keys, values, 'India trade import 2010-2018')
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=5)
config = {'rotation':90}
image_base64_1 = createBarChart(keys, values, 'Total(millions $USD)', 'India trade import 2010-2018', configs=config)
return render(
request,
'analyzer/main1.html',
{
'name': "Jon",
'date': datetime.now(),
'image_base64':image_base64,
'image_base64_1':image_base64_1,
}
)
def report6(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=6)
image_base64 = createPieChart(keys, values, 'India trade export 2010-2018')
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=6)
config = {'rotation':90}
image_base64_1 = createBarChart(keys, values, 'Total(millions $USD)', 'India trade export 2010-2018', configs=config)
return render(
request,
'analyzer/main1.html',
{
'name': "Jon",
'date': datetime.now(),
'image_base64':image_base64,
'image_base64_1':image_base64_1,
}
)
```
#### File: CMPE295_DataAnalyzer/core/dataprocessor.py
```python
from pyspark.sql import SparkSession, DataFrame
from pyspark.sql.functions import sum as _sum
from pyspark.sql.functions import avg
from pyspark.sql.types import DecimalType
from functools import reduce
import logging
class DataProcessor:
__instance = None
@staticmethod
def getInstance():
if(DataProcessor.__instance is None):
DataProcessor()
return DataProcessor.__instance
def __init__(self):
if(DataProcessor.__instance is not None):
raise Exception("This class is a singleton!")
else:
self.spark = SparkSession \
.builder \
.appName("Python Spark SQL basic example") \
.config("spark.some.config.option", "some-value") \
.getOrCreate()
DataProcessor.__instance = self
logging.info("Spark session for Data Processor has started.")
def loadAndProcess(self, keys, values, report_type=1):
if(report_type==1):
df = self.spark.read.csv(path="DataSources/google-play-store-apps/googleplaystore.csv", header="true")
rows = df.groupBy("Category").count().collect()
for r in rows:
if(r[1]>400):
keys.append(r[0])
values.append(r[1])
elif(report_type==2):
df = self.spark.read.csv(path="DataSources/google-play-store-apps/googleplaystore.csv", header="true")
rows = df.groupBy("Category").count().collect()
for r in rows:
if(r[1]>100 and r[1]<300):
keys.append(r[0])
values.append(r[1])
elif(report_type==4):
df2011 = self.spark.read.csv(path="DataSources/oakland-crime-statistics-2011-to-2016/records-for-2011.csv", header="true").select('Incident Type Description')
df2012 = self.spark.read.csv(path="DataSources/oakland-crime-statistics-2011-to-2016/records-for-2012.csv", header="true").select('Incident Type Description')
df2013 = self.spark.read.csv(path="DataSources/oakland-crime-statistics-2011-to-2016/records-for-2013.csv", header="true").select('Incident Type Description')
df2014 = self.spark.read.csv(path="DataSources/oakland-crime-statistics-2011-to-2016/records-for-2014.csv", header="true").select('Incident Type Description')
df2015 = self.spark.read.csv(path="DataSources/oakland-crime-statistics-2011-to-2016/records-for-2015.csv", header="true").select('Incident Type Description')
df2016 = self.spark.read.csv(path="DataSources/oakland-crime-statistics-2011-to-2016/records-for-2016.csv", header="true").select('Incident Type Description')
df = reduce(DataFrame.unionAll, [df2011, df2012, df2013, df2014, df2015, df2016])
rows = df.groupBy("Incident Type Description").count().orderBy('count', ascending=False).limit(10).collect()
for r in rows:
keys.append(r[0])
values.append(r[1])
elif(report_type==5):
df = self.spark.read.csv(path="DataSources/india-trade-data/2018-2010_import.csv", header="true")
df = df.select(df.country, df.value.cast('float').alias('value')).where(df.value.isNotNull())
df = df.groupBy("country").agg(_sum("value").alias("sum_val"))
df = df.select(df.country, df.sum_val.cast('int').alias('total')).orderBy('total', ascending=False)
rows = df.limit(10).collect()
for r in rows:
keys.append(r[0])
values.append(r[1])
elif(report_type==6):
df = self.spark.read.csv(path="DataSources/india-trade-data/2018-2010_export.csv", header="true")
df = df.select(df.country, df.value.cast('float').alias('value')).where(df.value.isNotNull())
df = df.groupBy("country").agg(_sum("value").alias("sum_val"))
df = df.select(df.country, df.sum_val.cast('int').alias('total')).orderBy('total', ascending=False)
rows = df.limit(10).collect()
for r in rows:
keys.append(r[0])
values.append(r[1])
elif(report_type==7):
df = self.spark.read.csv(path="DataSources/employee_reviews/employee_reviews.csv", header="true")
df = df.groupBy("company").agg(avg('overall-ratings').alias("rating"))
df = df.select(df.company, df.rating.cast(DecimalType(precision=10, scale=3)).alias('average rating'))
rows = df.limit(10).collect()
for r in rows:
keys.append(r[0])
values.append(r[1])
df.show()
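# Editor's note -- hedged usage sketch: callers obtain the shared Spark-backed
# processor through the singleton accessor and pass in lists to be filled, e.g.
#
#   keys, values = [], []
#   DataProcessor.getInstance().loadAndProcess(keys, values, report_type=1)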
``` |
{
"source": "jonfanlab/GLOnet",
"score": 3
} |
#### File: jonfanlab/GLOnet/utils.py
```python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import gridspec
import os
import json
import logging
import csv
import scipy.io as io
import torch
import numpy as np
class Params():
"""Class that loads hyperparameters from a json file.
Example:
```
params = Params(json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, json_path):
self.update(json_path)
def save(self, json_path):
"""Saves parameters to json file"""
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, json_path):
"""Loads parameters from json file"""
with open(json_path) as f:
params = json.load(f)
self.__dict__.update(params)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']`"""
return self.__dict__
def set_logger(log_path):
"""Sets the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `model_dir/train.log`.
Example:
```
logging.info("Starting training...")
```
Args:
log_path: (string) where to log
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if not logger.handlers:
# Logging to a file
file_handler = logging.FileHandler(log_path)
file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logger.addHandler(file_handler)
# Logging to console
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(stream_handler)
def save_dict_to_json(d, json_path):
"""Saves dict of floats in json file
Args:
d: (dict) of float-castable values (np.float, int, float, etc.)
json_path: (string) path to json file
"""
with open(json_path, 'w') as f:
# We need to convert the values to float for json (it doesn't accept np.array, np.float, etc.)
d = {k: float(v) for k, v in d.items()}
json.dump(d, f, indent=4)
def row_csv2dict(csv_file):
dict_club={}
with open(csv_file)as f:
reader=csv.reader(f,delimiter=',')
for row in reader:
dict_club[(row[0],row[1])]=row[2]
return dict_club
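# Editor's note -- hedged usage sketch (the column meanings below are an
# illustrative assumption, not taken from the repo): given CSV rows such as
# "wavelength,angle,efficiency", row_csv2dict() returns a dict keyed by the
# first two columns, e.g. {('900', '60'): '0.95', ...}; all values stay strings.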
def save_checkpoint(state, checkpoint):
"""Saves model and training parameters to checkpoint + '/model.pth.tar'.
Args:
state: (dict) contains model's state_dict, may contain other keys such as epoch, optimizer state_dict
checkpoint: (string) folder where parameters are to be saved
"""
filepath = os.path.join(checkpoint, 'model.pth.tar')
if not os.path.exists(checkpoint):
print("Checkpoint Directory does not exist! Making directory {}".format(checkpoint))
os.mkdir(checkpoint)
else:
print("Checkpoint Directory exists! ")
torch.save(state, filepath)
def load_checkpoint(checkpoint, model, optimizer=None, scheduler=None):
"""Loads model parameters (state_dict) from the checkpoint file. If optimizer is provided, loads state_dict of
optimizer assuming it is present in checkpoint.
Args:
checkpoint: (string) filename which needs to be loaded
model: (torch.nn.Module) model for which the parameters are loaded
optimizer: (torch.optim) optional: resume optimizer from checkpoint
"""
if not os.path.exists(checkpoint):
raise FileNotFoundError("File doesn't exist {}".format(checkpoint))
checkpoint = torch.load(checkpoint)
model.load_state_dict(checkpoint['gen_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optim_state_dict'])
if scheduler:
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
return checkpoint
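# Editor's note -- hedged usage sketch (not part of the original repo; assumes
# existing torch objects named `generator`, `optimizer` and `scheduler`):
#
#   state = {'gen_state_dict': generator.state_dict(),
#            'optim_state_dict': optimizer.state_dict(),
#            'scheduler_state_dict': scheduler.state_dict()}
#   save_checkpoint(state, 'results/run0')   # writes results/run0/model.pth.tar
#   load_checkpoint('results/run0/model.pth.tar', generator, optimizer, scheduler)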
def plot_loss_history(loss_history, params):
effs_mean_history, diversity_history, binarization_history = loss_history
iterations = [i*params.plot_iter for i in range(len(effs_mean_history))]
plt.figure()
plt.plot(iterations, effs_mean_history)
plt.plot(iterations, diversity_history)
plt.plot(iterations, binarization_history)
plt.xlabel('iteration')
plt.legend(('Average Efficiency', 'Pattern diversity', 'Binarization'))
plt.axis([0, len(effs_mean_history)*params.plot_iter, 0, 1.05])
plt.savefig(params.output_dir + '/figures/Train_history.png')
history_path = os.path.join(params.output_dir,'history.mat')
io.savemat(history_path, mdict={'effs_mean_history' :np.asarray(effs_mean_history),
'diversity_history' :np.asarray(diversity_history),
'binarization_history':np.asarray(binarization_history)})
def plot_histogram(Effs, Iter, fig_path):
ax = plt.figure()
bins = [i*5 for i in range(21)]
plt.hist(Effs*100, bins, facecolor='blue', alpha=0.5)
plt.xlim(0, 100)
plt.ylim(0, 50)
plt.yticks([])
plt.xticks(fontsize=12)
#plt.yticks(fontsize=20)
plt.xlabel('Deflection efficiency (%)', fontsize=12)
plt.title('Iteration {}'.format(Iter), fontsize=16)
plt.savefig(fig_path, dpi=300)
plt.close()
``` |
{
"source": "JonFarz/torqatadashboard",
"score": 2
} |
#### File: api/services/github.py
```python
from typing import List
from api.clients import github
from api.models.github import UserRepoSize
async def get_size_of_repos(user_name: str) -> List[UserRepoSize]:
data = await github.get_user_repos(user_name)
return list(map(lambda x: UserRepoSize(user_name=user_name, repo_name=x['name'], size=x['size']), data))
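# Editor's note -- hedged usage sketch ("octocat" is a placeholder user name):
# the coroutine above is typically awaited from an async endpoint, or driven
# directly with asyncio when experimenting:
#
#   import asyncio
#   sizes = asyncio.run(get_size_of_repos("octocat"))
#   for s in sizes:
#       print(s.repo_name, s.size)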
``` |
{
"source": "jonfen/flask-webpack-loader",
"score": 2
} |
#### File: flask-webpack-loader/flask_webpack_loader/loader.py
```python
import json
import time
from io import open
from .exceptions import (
WebpackError,
WebpackLoaderBadStatsError,
WebpackLoaderTimeoutError,
WebpackBundleLookupError
)
from .config import DEFAULT_CONFIG
class WebpackLoader(object):
def __init__(self, app=None):
self.app = app
self.config = DEFAULT_CONFIG
self.assets = {}
if app is not None:
self.init_app(app)
def init_app(self, app):
"""
:param app: Flask application
:return: None
"""
self.config = app.config.get('WEBPACK_LOADER') or self.config
self.config['CACHE'] = not app.config.get('DEBUG', True)
app.add_template_global(self.render_bundle)
app.add_template_global(self.render_static)
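# Editor's note -- hedged integration sketch (illustrative only): a Flask app
# picks up the loader like this, using DEFAULT_CONFIG unless the app sets
# app.config['WEBPACK_LOADER'] (which, per init_app above, replaces the whole
# default dict):
#
#   from flask import Flask
#   app = Flask(__name__)
#   webpack = WebpackLoader(app)
#
# Because render_bundle and render_static are registered as template globals, a
# Jinja template can then emit the tags with
# {{ render_bundle('main', 'js') | safe }} ('main' is a placeholder bundle name).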
@staticmethod
def _filter_by_extension(bundle, extension):
"""Return only files with the given extension"""
for chunk in bundle:
if chunk['name'].endswith('.{0}'.format(extension)):
yield chunk
def _get_bundle(self, bundle_name, extension):
bundle = self.get_bundle(bundle_name)
if extension:
bundle = self._filter_by_extension(bundle, extension)
return bundle
def get_files(self, bundle_name, extension=None):
"""Returns list of chunks from named bundle"""
return list(self._get_bundle(bundle_name, extension))
def render_bundle(self, bundle_name, extension=None, attrs=''):
"""
Get a list of formatted <script> & <link> tags for the assets in the
named bundle.
:param bundle_name: The name of the bundle
:param extension: (optional) filter by extension, eg. 'js' or 'css'
:param attrs: attrs
:return: a list of formatted tags as strings
"""
bundle = self._get_bundle(bundle_name, extension)
tags = []
for chunk in bundle:
if chunk['name'].endswith(('.js', '.js.gz')):
tags.append(
(
'<script type="text/javascript" src="{0}" {1}></script>'
).format(chunk['url'], attrs)
)
elif chunk['name'].endswith(('.css', '.css.gz')):
tags.append(
(
'<link type="text/css" href="{0}" rel="stylesheet" {1}/>'
).format(chunk['url'], attrs)
)
return '\n'.join(tags)
def render_static(self, asset_name):
"""
:param asset_name: the name of the asset
:return: path to webpack asset as a string
"""
return "{0}{1}".format(
self.get_assets().get('publicPath', self.config['STATIC_URL']),
asset_name
)
def _load_assets(self):
try:
with open(self.config['STATS_FILE'], encoding="utf-8") as f:
return json.load(f)
except IOError:
raise IOError(
'Error reading {0}. Are you sure webpack has generated '
'the file and the path is correct?'.format(
self.config['STATS_FILE']))
def get_assets(self):
if self.config['CACHE']:
if not self.assets:
self.assets = self._load_assets()
return self.assets
return self._load_assets()
def filter_chunks(self, chunks):
for chunk in chunks:
ignore = any(regex.match(chunk['name']) for regex in self.config.get('IGNORES', []))
if not ignore:
chunk['url'] = self.get_chunk_url(chunk)
yield chunk
def get_chunk_url(self, chunk):
public_path = chunk.get('publicPath')
if public_path:
return public_path
return '{0}{1}'.format(self.config['BUNDLE_DIR_NAME'], chunk['name'])
def get_bundle(self, bundle_name):
assets = self.get_assets()
# poll when debugging and block request until bundle is compiled
# or the build times out
if not self.config['CACHE']:
timeout = self.config['TIMEOUT'] or 0
timed_out = False
start = time.time()
while assets['status'] == 'compiling' and not timed_out:
time.sleep(self.config['POLL_INTERVAL'])
if timeout and (time.time() - timeout > start):
timed_out = True
assets = self.get_assets()
if timed_out:
raise WebpackLoaderTimeoutError(
"Timed Out. Bundle `{0}` took more than {1} seconds "
"to compile.".format(bundle_name, timeout)
)
if assets.get('status') == 'done':
chunks = assets['chunks'].get(bundle_name, None)
if chunks is None:
raise WebpackBundleLookupError('Cannot resolve bundle {0}.'.format(bundle_name))
return self.filter_chunks(chunks)
elif assets.get('status') == 'error':
if 'file' not in assets:
assets['file'] = ''
if 'error' not in assets:
assets['error'] = 'Unknown Error'
if 'message' not in assets:
assets['message'] = ''
error = u"""{error} in {file} {message}""".format(**assets)
raise WebpackError(error)
raise WebpackLoaderBadStatsError(
"The stats file does not contain valid data. Make sure "
"webpack-bundle-tracker plugin is enabled and try to run "
"webpack again.")
``` |
{
"source": "jonfernandes/DXC-Industrialized-AI-Starter",
"score": 3
} |
#### File: ai/clean_data/clean_data.py
```python
import pandas as pd
import janitor #data cleaning
from ftfy import fix_text #data cleaning
import nltk #data cleaning
nltk.download('punkt') #data cleaning
import scrubadub #data cleaning
import arrow #normalizing dates
import numpy as np
from sklearn.base import TransformerMixin
from dxc.ai.global_variables import globals_file
class DataFrameImputer(TransformerMixin):
def __init__(self):
"""Impute missing values.
Columns of dtype object are imputed with the most frequent value
in column.
Columns of other types are imputed with mean of column.
"""
def fit(self, X, y=None):
self.fill = pd.Series([X[c].value_counts().index[0]
if X[c].dtype == np.dtype('O') else X[c].mean() for c in X],
index=X.columns)
return self
def transform(self, X, y=None):
return X.fillna(self.fill)
#CLEANING FILE
def clean_dataframe(df, impute = False, text_fields = [], date_fields = [], numeric_fields = [], categorical_fields = []):
clean_df = (
df
#make the column names lower case and remove spaces
.clean_names()
#remove empty columns
.remove_empty()
#remove empty rows and columns
.dropna(how='all')
)
#remove harmful characters. remove personal identifiers. make lowercase
for field in text_fields:
field = '_'.join(field.split()).lower()
clean_df[field] = clean_df[field].apply(fix_text)
clean_df[field] = clean_df[field].apply(scrubadub.clean, replace_with='identifier')
clean_df[field] = clean_df[field].str.lower()
#impute missing values
if impute:
clean_df = DataFrameImputer().fit_transform(clean_df)
#standardize the format of all date fields
for field in date_fields:
field = '_'.join(field.split()).lower()
clean_df[field] = clean_df[field].apply(arrow.get)
#make sure all numeric fields have the proper data type
for field in numeric_fields:
field = '_'.join(field.split()).lower()
clean_df[field] = pd.to_numeric(clean_df[field])
#make sure all categorical variables have the proper data type
for field in categorical_fields:
field = '_'.join(field.split()).lower()
clean_df[field] = clean_df[field].astype('category')
clean_df=clean_df.clean_names()
globals_file.clean_data_used = True
return(clean_df)
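# Editor's note -- hedged usage sketch (column names are illustrative, not from
# the library docs). Field names are lower-cased and space-joined with
# underscores before lookup, so callers can pass the original header spelling:
#
#   cleaned = clean_dataframe(raw_df,
#                             impute=True,
#                             text_fields=['Customer Comment'],
#                             date_fields=['Order Date'],
#                             numeric_fields=['Amount'],
#                             categorical_fields=['Region'])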
```
#### File: ai/read_data/read_csv.py
```python
import json
import pandas as pd
import urllib.parse #input data
from tkinter import Tk
from tkinter import filedialog
from enum import Enum
import io
def get_file_path_csv():
root = Tk()
root.update()
def open_file():
file = filedialog.askopenfilename(filetypes=[("csv files", "*.csv")])
return file
file_path = open_file()
root.destroy()
return file_path
def read_data_frame_from_local_csv(col_names = [], delim_whitespace=False, header = 'infer'):
try:
from google.colab import files
IN_COLAB = True
except:
IN_COLAB = False
if IN_COLAB:
uploaded = files.upload()
csv_file_name = list(uploaded.keys())[0]
df = pd.read_csv(io.BytesIO(uploaded[csv_file_name]), delim_whitespace=delim_whitespace, header = header)
if col_names != []:
df.columns = col_names
return(df)
else:
csv_path = get_file_path_csv()
df = pd.read_csv(csv_path, delim_whitespace=delim_whitespace, header = header)
if col_names != []:
df.columns = col_names
return(df)
def read_data_frame_from_remote_csv(csv_url, col_names = [], delim_whitespace=False, header = 'infer'):
df = pd.read_csv(csv_url, delim_whitespace=delim_whitespace, header = header)
if col_names != []:
df.columns = col_names
return(df)
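# Editor's note -- hedged usage sketch (the URL and column names are placeholders):
#
#   df = read_data_frame_from_remote_csv('https://example.com/data.csv',
#                                        col_names=['a', 'b', 'c'])
#
# read_data_frame_from_local_csv() instead opens an upload prompt in Colab or a
# Tkinter file picker when run locally.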
``` |
{
"source": "jonfernq/sanskritselfstudy",
"score": 3
} |
#### File: sanskritselfstudy/makedatabase/pickle_dictionary.py
```python
import re, random, pickle
def random_from_dict(d, n):
random_dict = {}
for i in range(n):
k, v = random.choice(list(d.items()))
random_dict[k] = v
return random_dict
def sktdict_to_dict(in_lines):
logfile = open('logfile2.txt','w', encoding='utf8')
words = {}
for line1 in in_lines:
# cleaning steps
line2 = re.sub(r"\t"," ", line1)
line3 = line2.rstrip()
line4 = re.sub(r",+$","", line3)
# print('line4')
# print(line4)
line = re.sub(r",,",",", line4)
# split and parse info steps
tmp = line.split(',')
if len(tmp) < 4:
# if not tmp[0] or not tmp[1] or not tmp[2] or not tmp[3]:
logfile.write('ERROR: line has fewer than 4 fields: ')
logfile.write(', '.join(tmp))
logfile.write('\n')
# print('ERROR: INDEX OUT OF RANGE', file=open('logfile.txt', 'a', encoding='utf8'))
#print(tmp, file=open('logfile.txt', 'a', encoding='utf8'))
continue
#print(tmp, file=open('logfile.txt', 'a', encoding='utf8'))
word_no = tmp[0]
word = tmp[1] + ' ' + tmp[2] + '.'
definitions = '; '.join(tmp[3:len(tmp)])
flashcard = word + ', ' + definitions
words[word_no] = flashcard
logfile.close()
return words
# some additional functions to create flashcards from random selections from the dictionary
# randomly select n words from dictionary with between l and k definitions
def definition_count(v):
count = len(v.split(';'))
print('count:' + v + '*' + str(count))
return count
def random_from_dict_limited(d, n, l, m):
random_dict = {}
for i in range(n):
k, v = random.choice(list(d.items()))
count = definition_count(v)
# print(l, count, m)
if l <= count and count <= m:
random_dict[k] = v
return random_dict
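# Editor's note -- hedged usage sketch (the numbers are illustrative): draw up to
# 20 random flashcards whose entries have between 1 and 3 definitions; fewer may
# be returned because picks outside that range are simply skipped:
#
#   sample = random_from_dict_limited(words, 20, 1, 3)
#   for word_no, flashcard in sample.items():
#       print(word_no, flashcard)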
# MAIN
# read into a list all the entries from a small extract of the DCS Sanskrit dictionary
infile = open ("C:\\PYTHON\\FLASHCARDS_FROM_DICTIONARY_2\\dictionary.csv", "r", encoding='utf8')
in_lines = infile.readlines()
infile.close()
in_lines.pop(0)
# create a python dict from a list
words = sktdict_to_dict(in_lines)
# PICKLED DICTIONARY:
# what is inside:
# flashcard = word + ', ' + definitions
# words[word_no] = flashcard
print('PICKLING DICTIONARY')
filename = 'sktdictionary.bin'
outfile = open(filename,'wb')
pickle.dump(words,outfile)
outfile.close()
quit()
# unpickling dictionary
binary_file = open('sktdictionary.bin', mode='rb')
pickled_dictionary = pickle.load(binary_file)
binary_file.close()
print(pickled_dictionary)
``` |
{
"source": "jonfisik/JupNotebook",
"score": 3
} |
#### File: JupNotebook/TreinoBasico2/16classepath.py
```python
from os import path
import time
def DadosArquivos():
ArquivoExiste = path.exists("NovoArquivo.txt")
ehDiretorio = path.isdir("NovoArquivo.txt")
pathArquivo = path.realpath("NovoArquivo.txt")
pathRelativo = path.relpath("NovoArquivo.txt")
dataCriacao = time.ctime(path.getctime("NovoArquivo.txt"))
dataModificacao = time.ctime(path.getmtime("NovoArquivo.txt"))
print(ArquivoExiste)
print(ehDiretorio)
print(pathArquivo)
print(pathRelativo)
print(dataCriacao)
print(dataModificacao)
DadosArquivos()
```
#### File: JupNotebook/TreinoBasico2/19manipulaJASON.py
```python
import urllib.request
import json
def ManipulaJSON():
endereco = "https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/2.5_day.geojson"
webURL = urllib.request.urlopen(endereco)
if(webURL.getcode() == 200):
dados = webURL.read()
oJSON = json.loads(dados)
contagem = oJSON["metadata"]["count"]
print("Contagem: " + str(contagem))
for local in oJSON["features"]:
if local["properties"]["place"] == "268 km NE of Saipan, Northern Mariana Islands":
print('***Encontrado registro especial***')
else:
print(local["properties"]["place"])
ManipulaJSON()
```
#### File: JupNotebook/TreinoBasico2/1variaveis.py
```python
print('-'*5)
f = 0
print(f'{f:^5}')
# declarando a mesma variável mais de uma vez
print('-'*5)
f = 'abc'
print(f'{f:^5}')
print('-'*33)
# fazendo concatenação
print('String ligada a string ' + str('0123456789'))
print('-'*33)
# variável global x variável local
def NomeFuncao(): #definição de função
f = 'ABC' # 'f' está declarada como variável local
print(f'{f:^5}')
NomeFuncao()
print(f'{f:^5}')
print('-'*5)
'''def NomeFuncao():
global f
f = 'ABCD'
print(f'{f:^5}')
NomeFuncao()'''
print('-'*5)
```
#### File: JupNotebook/TreinoBasico2/8databib.py
```python
from datetime import date
from datetime import time
from datetime import datetime
def ManipulaDataHora():
hoje = date.today()
print(f'Hoje é {hoje}.')
print(f'Partes da data -> dia {hoje.day} - mês {hoje.month} - ano {hoje.year}.')
print(f'Número do dia da semana: {(hoje.weekday() + 1) % 7 + 1}')
dias = ['dom','seg','ter','qua','qui','sex','sab']
print('Nome abreviado do dia da semana: ', dias[(hoje.weekday() + 1) % 7])
data = datetime.now()
print('Data e hora: ', data)
tempo = datetime.time(data)
print('Hora atual: ', tempo)
ManipulaDataHora()
``` |
{
"source": "jonfisik/Projects",
"score": 4
} |
#### File: python/AlgoritmosPython/comprimentoSegmentoIguais.py
```python
__author__ = 'JPaschoal'
__version__ = '1.0.1'
__email__ = '<EMAIL>'
__date__ = '08/05/2021'
'''
Dados n e uma sequência de números inteiros, determinar quantos segmentos de
números iguais consecutivos compõem essa sequência.
Exemplos:
Sequência: 5, 2, 2, 2, 4, 4, 4, 4, 1, 1
Número de segmentos de números iguais >>> 4
'''
# 5, 2, 2, 2, 4, 4, 4, 4, 1, 1
# ant = 5
# seg = 1
# n = 10
# cont = 0
#-------------------------------------------
# prox = 2
# ant == prox -->
# ant != prox --> seg += 1
# ant = prox = 2
#-------------------------------------------
# ant = 2
# prox = 2
# ant == prox
# ant = prox
#--------------------------------------------
def traco():
return print('-----'*10)
print('')
print("SEQUÊNCIA IGUAL")
traco()
# input
n = int(input('Digite o tamanho da sequência: '))
ant =int(input('Digite o 1º número da sequência: '))
# variáveis
cont = seg = 1
while cont < n:
prox = int(input(f'Digite o {cont+1}º número da sequência: '))
if prox != ant:
seg += 1
ant = prox
cont += 1
print(f'A sequência tem {seg} segmento(s) iguais.')
traco()
print('')
# END
```
#### File: python/AlgoritmosPython/cosX.py
```python
__author__ = '<NAME>'
__version__ = '1.0.0'
__email__ = '<EMAIL>'
__status__ = 'Professor - Física & Matemática'
__date__ = '09/01/2022'
'''
"""
Dados x real e n natural, calcular uma aproximação para cos x através dos
n primeiros termos da seguinte série:
cos x = 1 - (x**2)/2! + (x**4)/4! - (x**6)/6! + ... + ((-1)**k)*(x**(2*k))/((2*k)!)
#2 termo = 1 termo * -x**2/2*1 --> i = 1 / 2*i = 2 / 2*i - 1 = 1
#3 termo = 2 termo * -x**2/4*3 --> i = 2 / 2*i = 4 / 2*1 - 1 = 3
#4 termo = 3 termo * -x**2/6*5
Compare com os resultados de sua calculadora!
"""
'''
#
## Funções
#
def cosX():
n = int(input('Digite um valor n para nº de termos: '))
alfa = float(input('Digite o valor do ângulo alfa: '))
cos = termo = 1
for i in range(1, n+1):
termo *= (-(alfa**2)/(2*i*(2*i-1)))
cos += termo
return print(f'cos({alfa}) = {cos}')
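# Editor's note -- sanity check for the series above (alfa is treated as radians,
# not degrees): with alfa = 0 the loop multiplies termo by 0, so cos stays 1.0 for
# any n; with alfa close to pi (about 3.14159) and n = 10 the sum is about -1.0.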
def traco():
return print('----'*20)
#
## Título
#
traco()
print('Cosseno de X - ')
traco()
#
# Rotina principal
#
resp = 'S'
while resp in 'Ss':
cosX()
traco()
resp = str(input('Quer continuar? [S/N]: ')).upper().strip()[0]
traco()
print('FIM')
traco()
#END
```
#### File: python/AlgoritmosPython/divisoresNM.py
```python
__author__ = 'JPaschoal'
__version__ = '1.0.0'
__email__ = '<EMAIL>'
__date__ = '04/01/2022'
"""
#ignorância zero
Dados dois números inteiros positivos i e j diferentes de 0,
imprimir todos os divisores comuns de i e j.
Exemplo: i = 2 e j = 3 a saída deverá ser : 1
i = 9, j = 21 a saída deverá ser: 1, 3
"""
def traco():
return print('-----'*7)
print('')
print("Divisores de i e j - ")
traco()
#
## input e variáveis
#
i = int(input('Digite o número i: '))
j = int(input('Digite o número j: '))
#
## teste lógico
#
print(f'Os divisores comuns de {i} e {j} são: ')
print(1)
divisor = 2
while divisor <= i and divisor <= j:
if i % divisor == 0 and j % divisor == 0:
print(f'{divisor}')
divisor = divisor + 1
traco()
```
#### File: python/AlgoritmosPython/idade.py
```python
__author__ = 'JPaschoal'
__version__ = '1.0.1'
__email__ = '<EMAIL>'
__date__ = '04/05/2021'
def traco():
return print('-----'*10)
print('')
print("IDADE, QUANDO?")
traco()
numero = int(input('Digite o número de pessoas: '))
cont = 0
print('')
print('Informe')
while cont < numero:
nome = str(input('Nome: '))
dia = int(input('Dia de nascimento: '))
mes = int(input('Mês de nascimento: '))
ano = int(input('Ano de nascimento: '))
idade = int(input('Idade a ser completada: '))
print('')
print(f'{nome}, você fará {idade} anos nos dia {dia}/{mes}/{ano + idade}.')
cont += 1
print('')
traco()
print('')
# END
```
#### File: python/AlgoritmosPython/numTriangular.py
```python
__author__ = 'JPaschoal'
__version__ = '1.0.2'
__email__ = '<EMAIL>'
__date__ = '12/05/2021'
'''
Um número triangular é calculado pela fórmula triangular = n*(n+1)//2.
Sendo n o índice desse número triangular
Escreva um programa que imprima os números triangulares com índices múltiplos de 5 entre 5 e 50.
'''
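# Editor's note -- quick check of the formula: n = 5 gives 5*6//2 = 15 and
# n = 10 gives 55; with m = 5, fim = 50 and múltiplo 5, as in the exercise
# statement, the loop below prints 15, 55, ..., up to 1275 for n = 50.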
#-----------------------------------------------------------
def traco():
return print('-----'*10)
print('')
print("TRIANGULAR - FOR")
traco()
#input
m = int(input('Digite o valor inicial: '))
n = int(input('Digite o valor final: '))
mult = int(input('Múltiplo de quanto? '))
#
#Calcula número triangular
#Váriaveis
#
for n in range(m, n+1, mult):
triangular = n*(n+1)//2 # divisão inteira
print(f'Para n = {n} número triangular = {triangular}.')
traco()
print('')
# END
```
#### File: python/AlgoritmosPython/raizAlgarismos.py
```python
__author__ = 'JPaschoal'
__version__ = '1.0.1'
__email__ = '<EMAIL>'
__date__ = '08/05/2021'
'''
Qualquer número natural de quatro algarismos pode ser dividido
em duas dezenas formadas pelos seus dois primeiros e dois
últimos dígitos.
Exemplos:
1297: 12 e 97
5314: 53 e 14
Escreva um programa que imprime todos os milhares (4 algarismos), 1000 <= n < 10000, cuja raiz quadrada seja a soma das dezenas formadas pela divisão acima.
Exemplo: raiz de 9801 = 99 = 98 + 01
Portanto 9801 é um dos números a ser impresso.
'''
# 1000 --> 10 00 --> 10 + 00 = 10 --> 10**2 = 100 == 1000 [V/F] imprimir 1000
# 1001 --> 10 01 --> 10 + 01 = 11 --> 11**2 = 121 == 1001 [V/F] imprimir 1001
# .
# .
# .
# 9801 --> 98 01 --> 98 + 01 = 99 --> 99**2 = 9801 == 9801 [V/F] imprimir 9801
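# Editor's note -- besides 9801 (98 + 01 = 99 e 99**2 = 9801), o laço abaixo
# também encontra 2025 (20 + 25 = 45, 45**2 = 2025) e 3025 (30 + 25 = 55, 55**2 = 3025).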
#--------------------------------------------
def traco():
return print('-----'*10)
print('')
print("TESTE MILHARES 1000 - 10 000")
traco()
# váriaveis
num = 1000
while num < 10000:
aux = num
dois_ultm = aux%100
aux //= 100
dois_prim = aux%100
if (dois_ultm + dois_prim)**2 == num:
print(f'>>> {num}')
num += 1
traco()
print('')
# END
```
#### File: python/AlgoritmosPython/seqForWhile.py
```python
__author__ = 'JPaschoal'
__version__ = '1.0.1'
__email__ = '<EMAIL>'
__date__ = '17/05/2021'
'''
Dados n e n sequências de números inteiros não nulos, cada qual seguida por um 0, calcular a soma dos números
pares de cada sequência.
'''
#-----------------------------------------------------------
import time
def traco():
return print('-----'*5)
print('')
print("SOMA DOS PARES v1.0.1 - ")
traco()
print("Iniciando ...")
print('')
time.sleep(1.0)
#input
seq = int(input('Digite o número de sequência: '))
print('')
#-------------------------------------------------------------
for i in range(seq):
print(f'Sequência {i+1}.')
traco()
num = int(input("Digite o 1º número da sequência: "))
soma = cont = 0
while num != 0:
if num % 2 == 0:
soma += num
cont += 1
num = int(input(f'Digite o {cont+1}º número da sequência: '))
print(f'Soma dos pares da sequência {i+1} é {soma}.')
print('')
traco()
print('')
# END
```
#### File: python/AlgoritmosPython/tabuadaBloco.py
```python
__author__ = 'JPaschoal'
__version__ = '1.0.1'
__email__ = '<EMAIL>'
__date__ = '17/05/2021'
'''
Peça para o usuário entrar com início e o fim da tabuada
e imprima a tabuada correspondente dentro dos intervalos
considerados.
Começo = 1
Fim = 3
Tabuada do 1
1x1 = 1
1x2 = 2
1x3 = 3
Tabuada do 2
2x1 = 2
2x2 = 4
2x3 = 6
Tabuada do 3
3x1 = 3
3x2 = 6
3x3 = 9
'''
#-----------------------------------------------------------
import time
def traco():
return print('-----'*5)
print('')
print("TABUADA BLOCO v1.0.1 - ")
traco()
print("Iniciando tabuada...")
print('')
time.sleep(1.0)
#input
comeco = int(input('Digite o começo da tabuada: '))
fim = int(input('Digite o fim da tabuada: '))
#-------------------------------------------------------------
for i in range(comeco, fim+1):
traco()
print(f'Tabuada do {i}.')
for j in range(comeco, fim+1):
print(f'{i} X {j} = {i*j}')
traco()
print('')
# END
```
#### File: python/AlgoritmosPython/votacao.py
```python
__author__ = 'JPaschoal'
__version__ = '1.0.0'
__email__ = '<EMAIL>'
__date__ = '12/05/2021'
'''
Numa eleição existem três candidatos.
Faça um programa que peça o número total de eleitores.
Peça para cada eleitor votar e ao final mostrar o número de votos de cada candidato.
'''
#-----------------------------------------------------------
def traco():
return print('-----'*10)
print('')
print("VOTAÇÃO - ")
traco()
#input
print("Iniciando votação...")
print('')
m = int(input('Digite o total de eleitores: '))
#-------------------------------------------------------------
voto = 0
nulo = 0
candA = candB = candC = 0
for i in range(1, m+1):
print('''
[1] Votar
[2] Nulo
''')
voto = int(input(f'Eleitor {i}. Escolha a opção >>> '))
if voto == 1:
print('''
[A] Candidato A
[B] Candidato B
[C] Candidato C
''')
votando = str(input('Escolha seu voto >>> '))
if votando in 'aA':
candA = candA + 1
elif votando in 'bB':
candB += 1
elif votando in 'cC':
candC += 1
elif voto == 2:
nulo += 1
# falta colocar porcentagem
print('')
print(f'Candidato A: {candA} - Candidato B: {candB} - Candidato C: {candC}')
print(f'Voto nulo: {nulo}')
traco()
print('')
# END
```
#### File: projetos_dev/class_python/defClasse.py
```python
__author__ = '<NAME>'
__version__ = '1.0.0'
__email__ = '<EMAIL>'
__status__ = 'Professor - Física'
__date__ = '28/06/2021'
'''
Definição classe
Especificação (conj. de regras) de um objeto. Objeto é a instância (personificação) de uma classe.
'''
# Funções
def traco(n=5):
print('---'*n)
return
class Carro:
valMax = 0
ligado = False
cor = ""
carro1 = Carro()
carro2 = Carro()
carro1.valMax = 200
carro1.cor = "Preto"
carro1.ligado = False
print('Classe V1.0.0')
traco()
print(f'Vel. Máx: {carro1.valMax}')
print(f'Cor: {carro1.cor}')
print(f'Ligado: {carro1.ligado}')
estado = 'Sim' if carro1.ligado else 'Não'
print(f'Ligado: {estado}')
traco()
#END
```
#### File: projetos_dev/class_python/jogoVelha.py
```python
__author__ = '<NAME>'
__version__ = '1.0.0'
__email__ = '<EMAIL>'
__status__ = 'Professor - Física'
__date__ = '21/12/2021'
'''
Jogo da velha
Fore, cor da frente / Back, cor da frente / Style, estilo.
'''
import os
import random
from colorama import Fore, Back, Style
#
#Variáveis globais
#
jogarNovamente = 's'
jogadas = 0
quemJoga = 2 # 1 = CPU - 2 = Jogador
maxJogadas = 9
vit = 'n' #Vitória
velha = [
[' ',' ',' '], # L0C0 L0C1 L0C2
[' ',' ',' '], # L1C0 L1C1 L1C2
[' ',' ',' '] # L2C0 L2C1 L2C2
]
def tela():
global velha # variável global
global jogadas
os.system('cls')
print(' 0 1 2')
print('0: ' + velha[0][0] + ' | ' + velha[0][1] + ' | ' + velha[0][2])
print(' -----------')
print('1: ' + velha[1][0] + ' | ' + velha[1][1] + ' | ' + velha[1][2])
print(' -----------')
print('2: ' + velha[2][0] + ' | ' + velha[2][1] + ' | ' + velha[2][2])
print('Jogadas: ' + Fore.GREEN + str(jogadas) + Fore.RESET)
def jogadorJoga():
global jogadas
global quemJoga
global maxJogadas
if quemJoga == 2 and jogadas < maxJogadas:
try:
l = int(input('Linha..: '))
c = int(input('Coluna.: '))
while velha[l][c] != ' ':
l = int(input('Linha..: '))
c = int(input('Coluna.: '))
velha[l][c] = 'X'
quemJoga = 1
jogadas += 1
except:
print('')
print('JOGADA INVÁLIDA!')
os.system('pause')
def cpuJoga():
global jogadas
global quemJoga
global maxJogadas
if quemJoga == 1 and jogadas < maxJogadas:
l = random.randrange(0,3)
c = random.randrange(0,3)
while velha[l][c] != ' ':
l = random.randrange(0,3)
c = random.randrange(0,3)
velha[l][c] = 'O'
jogadas += 1
quemJoga = 2
def verificarVitoria():
global velha
vitoria = 'n'
simbolos = ['X','O']
for s in simbolos:
vitoria = 'n'
#Verificar Linhas
indice_linha = indice_coluna = 0
while indice_linha < 3:
soma = 0
indice_coluna = 0
while indice_coluna < 3:
if (velha[indice_linha][indice_coluna] == s):
soma += 1
indice_coluna += 1
if(soma == 3):
vitoria = s
break
indice_linha += 1
if(vitoria != 'n'):
break
#Verificar Colunas
indice_linha = indice_coluna = 0
while indice_coluna < 3:
soma = 0
indice_linha = 0
while indice_linha < 3:
if (velha[indice_linha][indice_coluna] == s):
soma += 1
indice_linha += 1
if(soma == 3):
vitoria = s
break
indice_coluna += 1
if(vitoria != 'n'):
break
#Verificar Diagonal 1
soma = 0
idiag = 0 # idiag: indice diagonal
while idiag < 3:
if(velha[idiag][idiag] == s):
soma += 1
idiag += 1
if(soma == 3):
vitoria = s
break
#Verificar Diagonal 2
soma = 0
idiagl = 0 # idiagl: indice diagonal linha
idiagc = 2 # idiagc: indice diagonal coluna
while idiagc >= 0:
if(velha[idiagl][idiagc] == s):
soma += 1
idiagl += 1
idiagc -= 1
if(soma == 3):
vitoria = s
break
return vitoria
def redefinir():
global velha
global jogadas
global quemJoga
global maxJogadas
global vit
jogadas = 0
quemJoga = 2 # 1 = CPU - 2 = Jogador
maxJogadas = 9
vit = 'n' #Vitória
velha = [
[' ',' ',' '], # L0C0 L0C1 L0C2
[' ',' ',' '], # L1C0 L1C1 L1C2
[' ',' ',' '] # L2C0 L2C1 L2C2
]
while(jogarNovamente == 's'):
while True:
tela()
jogadorJoga()
cpuJoga()
tela()
vit = verificarVitoria()
if(vit != 'n') or (jogadas >= maxJogadas):
break
print(Fore.RED + 'FIM DE JOGO' + Fore.YELLOW)
if(vit == 'X' or vit == 'O'):
print('Resultado: Jogador ' + vit + ' venceu!!!')
else:
print('Resultado: Empate!!!')
jogarNovamente = input(Fore.BLUE + 'Jogar novamente? [s/n]: ' + Fore.RESET)
redefinir()
```
#### File: projetos_dev/class_python/metodosClasse.py
```python
__author__ = '<NAME>'
__version__ = '1.0.0'
__email__ = '<EMAIL>'
__status__ = 'Professor - Física'
__date__ = '28/06/2021'
'''
Métodos em classe
Método --> Função
Construtor é um método chamado quando a classe é instanciada.
'''
# Funções
def traco(n=8):
print('---'*n)
return
class Carro:
velMax = 0
ligado = False
cor = ""
# def __init__(self referência para própria classe, parâmetros de entrada - o que o método irá fazer):
def __init__(self, vel, lig, c):
self.velMax = vel
self.ligado = lig
self.cor = c
def ligar(self):
self.ligado = True
def desligar(self):
self.ligado = False
def andar(self):
if(self.ligado):
print('Andando.')
else:
print('Carro desligado.')
def mostrar(self):
print(f'Velocidade Máx: {self.velMax}')
print(f'Cor...........: {self.cor}')
estado = 'Sim' if self.ligado else 'Não'
print(f'Ligado........: {estado}')
traco()
#
#Instanciado a classe carro
#
carro1 = Carro(200,False,"Preto")
carro2 = Carro(350,False,"Branco")
# main()
print('')
traco()
print('Método construtor V1.0.0')
traco()
carro1.ligar()
carro1.mostrar()
carro2.mostrar()
carro1.andar()
carro2.andar()
traco()
print('')
#END
```
#### File: projetos_dev/pytela/tela.py
```python
__author__ = '<NAME>'
__version__ = '1.0.0'
__email__ = '<EMAIL>'
__status__ = 'Professor - Física'
__date__ = '20/06/2021'
import PySimpleGUI as sg
# Classe
class TelaPython:
def __init__(self):
#Escolher modelo de Layout
#sg.change_look_and_feel('DarkBrown4')
# Layout
layout =[
# sg.Text - cria a label, sg.Imput - recebe os dados
[sg.Text('Nome',size=(7,0)),sg.Input(size=(25,0),key='nome')],
[sg.Text('Endereço',size=(7,0)),sg.Input(size=(25,0),key='endereco')],
[sg.Text('Número',size=(7,0)),sg.Input(size=(6,0), key='num'),sg.Text('CEP',size=(4,0)),sg.Input(size=(10,0),key='cep')],
[sg.Text('Quais provedores de e-mail são aceitos?')],
[sg.Checkbox('Gmail',key='gmail'),sg.Checkbox('Outlook',key='outlook'),sg.Checkbox('Yahoo',key='yahoo')],
[sg.Text('Aceita cartão')],
[sg.Radio('Sim','cartoes',key='aceitaCartao'),sg.Radio('Não','cartoes',key='naoAceitaCartao')],
[sg.Slider(range=(0,225), default_value=0, orientation='h',size=(15,20),key='sliderVelocidade')],
# sg.Button - cria o botão
[sg.Button('Enviar Dados')],
[sg.Output(size=(30,20))]
]
# Janela
self.janela = sg.Window('Dados do usuário').layout(layout)
# def Iniciar(self):
# print(self.values)
def Iniciar(self):
while True:
# Extrair dados da tela
self.button, self.values = self.janela.Read()
nome = self.values['nome']
endereco = self.values['endereco']
num = self.values['num']
cep = self.values['cep']
aceita_gmail = self.values['gmail']
aceita_outlook =self.values['outlook']
aceita_yahoo = self.values['yahoo']
aceita_cartao = self.values['aceitaCartao']
nao_aceita_cartao = self.values['naoAceitaCartao']
velocidade_script = self.values['sliderVelocidade']
print(f'nome: {nome}')
print(f'endereco: {endereco}')
print(f'numero: {num}')
print(f'cep: {cep}')
print(f'aceita gmail: {aceita_gmail}')
print(f'aceita outlook: {aceita_outlook}')
print(f'aceita yahoo: {aceita_yahoo}')
print(f'aceita cartao: {aceita_cartao}')
print(f'nao aceita cartao: {nao_aceita_cartao}')
print(f'velocidade script {velocidade_script}')
# Instanciando classe
tela = TelaPython()
tela.Iniciar()
```
#### File: projetos_dev/python_basico/exer_numero_par.py
```python
def traco():
print('-'*45)
traco()
print('PAR OU ÍMPAR'.center(45))
traco()
try:
num = int(input('Digite um número inteiro: '))
if num % 2 == 0:
print(f'{num} é um número PAR.')
else:
print(f'{num} é ímpar!')
except:
print('ERRO! Não foi digitado um número inteiro.')
traco()
```
#### File: projetos_dev/python_basico/exer_tamanho_nome.py
```python
def traco():
print('-'*50)
traco()
print('TAMANHO NOME v1.0'.center(50))
traco()
nome = str(input('Digite seu primeiro nome: ')).capitalize().strip()
# if nome.isnumeric():
# print('Teu nome é número?')
# traco()
# elif not nome.isnumeric():
# print('Escreve esse nome direito.')
#exit()
if nome == 'Jonatan':
print('Esse nome é lindo!!!')
elif 5 <= len(nome) <= 6:
print(f'{nome}, seu nome é normalzinho. Dá pro gasto.')
elif len(nome) <= 4:
print(f'{nome}, que nome curto. Teu pai é economista?')
elif len(nome) > 6:
print(f'Nossa {nome}, que exagero de nome!')
traco()
```
#### File: projetos_dev/python_programacao_procedural/aula21_try_except_condicional.py
```python
def converte_numero(valor, fator):
try:
valor = int(valor)
fator = int(fator)
return valor * fator
except ValueError:
try:
valor = float(valor)
fator = float(fator)
return valor * fator
except ValueError:
return 'Isso não é um número.'
while True:
numero1 = input('Digite um número: ')
numero2 = input('Por quanto quer multiplicar? ')
#converte_numero(numero1, numero2)
if numero1 and numero2:
print(f'{numero1} x {numero2} = {converte_numero(numero1,numero2)}')
while True:
resp = input('Quer continuar? [S/N] - ')
if resp not in 'NnSs':
print('Opção inválida.')
elif resp in 'Ss':
break
else:
print('Finalizando programa.')
quit()
```
#### File: projetos_dev/python_programacao_procedural/aula5_funcao_lambda.py
```python
def linha(n=10):
print('*-*' * n)
def produto(num1, num2):
return num1 * num2
linha(12)
a = 2
b = 5
resp = produto(a, b)
print(f'Resultado da função produto {a} x {b} = {resp}')
linha(12)
resp2 = lambda a, b: a * b
print(f'Resultado da função Lambda {a} x {b} = {resp2(a, b)}')
linha(12)
lista = [
['prod1', 36],
['prod2', 6],
['prod3', 50],
['prod4', 25],
['prod5', 69],
['prod6', 100]
]
def funcao(item):
'''
Ordenando pelo índice da lista
:param item: item da lista no formato [nome, valor]
:return: item[1], o valor usado como chave de ordenação
'''
return item[1]
print(lista)
lista.sort(key=funcao)
#lista.sort(key=funcao, reverse=True)
print(lista)
lista.sort(key=lambda item: item[1])
print(lista)
print(sorted(lista, key=lambda i: i[1], reverse=True))
``` |
{
"source": "jonfisik/ScriptsPython",
"score": 4
} |
#### File: ScriptsPython/Scripts13/Script104.py
```python
def leiaInt(msg):
ok = False
valor = 0
while True:
n = str(input(msg))
if n.isnumeric():
valor = int(n)
ok = True
else:
print(f'''\033[0;31m
ERRO! Você digitou "{n}".
Digite um número inteiro válido.\033[m''')
print()
if ok:
break
return valor
#programa
print('---'*10)
n = leiaInt('Digite um número: ')
print(f'Você digitou o número {n}.')
print('---'*10)
``` |
{
"source": "jon-flatley/opentitan",
"score": 2
} |
#### File: util/dvsim/Deploy.py
```python
r"""
Classes
"""
import logging as log
import pprint
import random
import re
import secrets
import shlex
import sys
import time
import hjson
from .utils import *
class Deploy():
"""
Abstraction for deploying builds and runs.
"""
# Maintain a list of dispatched items.
dispatch_counter = 0
# Misc common deploy settings.
print_interval = 5
max_parallel = 16
max_odirs = 5
def __self_str__(self):
if log.getLogger().isEnabledFor(VERBOSE):
return pprint.pformat(self.__dict__)
else:
ret = self.cmd
if self.sub != []: ret += "\nSub:\n" + str(self.sub)
return ret
def __str__(self):
return self.__self_str__()
def __repr__(self):
return self.__self_str__()
def __init__(self, sim_cfg):
# Cross ref the whole cfg object for ease.
self.sim_cfg = sim_cfg
# Common vars
self.cmd = ""
self.odir = ""
self.log = ""
self.fail_msg = ""
# Flag to indicate whether to 'overwrite' if odir already exists,
# or to backup the existing one and create a new one.
# For builds, we want to overwrite existing to leverage the tools'
# incremental / partition compile features. For runs, we may want to
# create a new one.
self.renew_odir = False
# List of vars required to be exported to sub-shell
self.exports = {}
# Deploy sub commands
self.sub = []
# Process
self.process = None
self.log_fd = None
self.status = None
# Miscellaneous attributes required by every deploy target (in addition to the command, output directory and log file set above)
self.mandatory_misc_attrs.update({
"name": False,
"build_mode": False,
"flow_makefile": False,
"exports": False,
"dry_run": False
})
# Function to parse a dict and extract the mandatory cmd and misc attrs.
def parse_dict(self, ddict):
if not hasattr(self, "target"):
log.error(
"Class %s does not have the mandatory attribute \"target\" defined",
self.__class__.__name__)
sys.exit(1)
ddict_keys = ddict.keys()
for key in self.mandatory_cmd_attrs.keys():
if self.mandatory_cmd_attrs[key] == False:
if key in ddict_keys:
setattr(self, key, ddict[key])
self.mandatory_cmd_attrs[key] = True
for key in self.mandatory_misc_attrs.keys():
if self.mandatory_misc_attrs[key] == False:
if key in ddict_keys:
setattr(self, key, ddict[key])
self.mandatory_misc_attrs[key] = True
def __post_init__(self):
# Ensure all mandatory attrs are set
for attr in self.mandatory_cmd_attrs.keys():
if self.mandatory_cmd_attrs[attr] is False:
log.error("Attribute \"%s\" not found for \"%s\".", attr,
self.name)
sys.exit(1)
for attr in self.mandatory_misc_attrs.keys():
if self.mandatory_misc_attrs[attr] is False:
log.error("Attribute \"%s\" not found for \"%s\".", attr,
self.name)
sys.exit(1)
# Recursively search and replace wildcards
self.__dict__ = find_and_substitute_wildcards(self.__dict__,
self.__dict__)
# Set the command, output dir and log
self.odir = getattr(self, self.target + "_dir")
# Set the output dir link name to the basename of odir (by default)
self.odir_ln = os.path.basename(os.path.normpath(self.odir))
self.log = self.odir + "/" + self.target + ".log"
# If using LSF, redirect stdout and err to the log file
self.cmd = self.construct_cmd()
def construct_cmd(self):
cmd = "make -f " + self.flow_makefile + " " + self.target
if self.dry_run is True:
cmd += " -n"
for attr in self.mandatory_cmd_attrs.keys():
value = getattr(self, attr)
if type(value) is list:
pretty_value = []
for item in value:
pretty_value.append(item.strip())
value = " ".join(pretty_value)
if type(value) is bool:
value = int(value)
if type(value) is str:
value = value.strip()
cmd += " " + attr + "=\"" + str(value) + "\""
# TODO: If not running locally, redirect stdout and err to the log file
# self.cmd += " > " + self.log + " 2>&1 &"
return cmd
def dispatch_cmd(self):
self.exports.update(os.environ)
args = shlex.split(self.cmd)
try:
self.odir_limiter()
os.system("mkdir -p " + self.odir)
os.system("ln -s " + self.odir + " " + self.sim_cfg.links['D'] +
'/' + self.odir_ln)
f = open(self.log, "w")
self.process = subprocess.Popen(args,
bufsize=4096,
universal_newlines=True,
stdout=f,
stderr=f,
env=self.exports)
self.log_fd = f
self.status = "."
Deploy.dispatch_counter += 1
except IOError:
log.error('IO Error: See %s', self.log)
if self.log_fd: self.log_fd.close()
self.status = "K"
# Function to backup previously run output directory to maintain a history of
# limited number of output directories. It deletes the output directory with the
# oldest timestamp, if the limit is reached.
def odir_limiter(self):
# Return if renew_odir flag is False - we'd be reusing the existing odir.
if not self.renew_odir: return
try:
# If output directory exists, back it up.
if os.path.exists(self.odir):
ts = run_cmd("date '+" + self.sim_cfg.ts_format + "' -d \"" +
"$(stat -c '%y' " + self.odir + ")\"")
os.system('mv ' + self.odir + " " + self.odir + "_" + ts)
except IOError:
log.error('Failed to back up existing output directory %s',
self.odir)
# Delete older directories.
try:
pdir = os.path.realpath(self.odir + "/..")
if os.path.exists(pdir):
find_cmd = "find " + pdir + " -mindepth 1 -maxdepth 1 -type d "
num_dirs = int(run_cmd(find_cmd + " | wc -l"))
num_rm_dirs = num_dirs - Deploy.max_odirs
if num_rm_dirs > -1:
dirs = run_cmd(find_cmd +
"-printf '%T+ %p\n' | sort | head -n " +
str(num_rm_dirs + 1) +
" | awk '{print $2}'")
dirs = dirs.replace('\n', ' ')
os.system("/usr/bin/rm -rf " + dirs)
except IOError:
log.error("Failed to delete old run directories!")
def set_status(self):
self.status = 'P'
if self.dry_run is False:
for fail_pattern in self.fail_patterns:
grep_cmd = "grep -m 1 -E \'" + fail_pattern + "\' " + self.log
(status, rslt) = subprocess.getstatusoutput(grep_cmd + " -c")
if rslt != "0":
(status, rslt) = subprocess.getstatusoutput(grep_cmd)
msg = "```\n{}\n```\n".format(rslt)
self.fail_msg += msg
log.log(VERBOSE, msg)
self.status = 'F'
break
# Return if status is fail - no need to look for pass patterns.
if self.status == 'F': return
# If fail patterns were not found, ensure pass patterns indeed were.
for pass_pattern in self.pass_patterns:
grep_cmd = "grep -c -m 1 -E \'" + pass_pattern + "\' " + self.log
(status, rslt) = subprocess.getstatusoutput(grep_cmd)
if rslt == "0":
msg = "Pass pattern \"{}\" not found.<br>\n".format(
pass_pattern)
self.fail_msg += msg
log.log(VERBOSE, msg)
self.status = 'F'
break
# Recursively set sub-item's status if parent item fails
def set_sub_status(self, status):
if self.sub == []: return
for sub_item in self.sub:
sub_item.status = status
sub_item.set_sub_status(status)
def link_odir(self):
if self.status == '.':
log.error("Method unexpectedly called!")
else:
old_link = self.sim_cfg.links['D'] + "/" + self.odir_ln
new_link = self.sim_cfg.links[self.status] + "/" + self.odir_ln
cmd = "ln -s " + self.odir + " " + new_link + "; "
cmd += "rm " + old_link
try:
os.system(cmd)
except Exception as e:
log.error("Cmd \"%s\" could not be run", cmd)
def get_status(self):
if self.status != ".": return
if self.process.poll() is not None:
self.log_fd.close()
if self.process.returncode != 0:
msg = "Last 5 lines of the log:<br>\n"
self.fail_msg += msg
log.log(VERBOSE, msg)
get_fail_msg_cmd = "tail -n 5 " + self.log
msg = run_cmd(get_fail_msg_cmd)
msg = "```\n{}\n```\n".format(msg)
self.fail_msg += msg
log.log(VERBOSE, msg)
self.status = "F"
else:
self.set_status()
log.log(VERBOSE, "Item %s has completed execution: %s", self.name,
self.status)
Deploy.dispatch_counter -= 1
self.link_odir()
del self.process
@staticmethod
def deploy(items):
dispatched_items = []
def dispatch_items(items):
item_names = {}
for item in items:
if item.target not in item_names.keys():
item_names[item.target] = "["
if item.status is None:
item_names[item.target] += " "
if log.getLogger().isEnabledFor(VERBOSE):
item_names[
item.target] += item.name + ":" + item.log + ",\n"
else:
item_names[item.target] += item.odir_ln + ", "
item.dispatch_cmd()
dispatched_items.append(item)
for target in item_names.keys():
if item_names[target] != "[":
item_names[target] = " [" + item_names[target][3:]
item_names[target] = item_names[target][:-2] + "]"
log.info("[dvsim]: %s:\n%s", target, item_names[target])
# Dispatch the given items
dispatch_items_queue = []
if len(items) > Deploy.max_parallel:
dispatch_items(items[0:Deploy.max_parallel])
dispatch_items_queue = items[Deploy.max_parallel:]
else:
dispatch_items(items)
all_done = False
num_secs = 0
status = {}
status_str = {}
status_str_prev = {}
while not all_done:
time.sleep(1)
num_secs += 1
trig_print = ((num_secs % Deploy.print_interval) == 0)
for item in dispatched_items:
if item.target not in status.keys():
status[item.target] = {}
if item not in status[item.target].keys():
status[item.target][item] = ""
item.get_status()
if item.status != status[
item.target][item] and item.status != ".":
trig_print = True
if item.status != "P":
# Kill sub items
item.set_sub_status("K")
dispatch_items_queue.extend(item.sub)
status[item.target][item] = item.status
# Dispatch more from the queue
if len(dispatch_items_queue) == 0:
all_done = True
else:
num_slots = Deploy.max_parallel - Deploy.dispatch_counter
if num_slots > 0:
if len(dispatch_items_queue) > num_slots:
dispatch_items(dispatch_items_queue[0:num_slots])
dispatch_items_queue = dispatch_items_queue[num_slots:]
else:
dispatch_items(dispatch_items_queue)
dispatch_items_queue = []
status_str_prev = status_str.copy()
status_str = {}
for target in status.keys():
if target not in status_str.keys(): status_str[target] = "["
for item in status[target].keys():
if status[target][item] is not None:
status_str[target] += status[target][item]
if status[target][item] == ".":
all_done = False
status_str[target] += "]"
# Print the status string periodically
if trig_print:
for target in status_str.keys():
if (target in status_str_prev.keys()) and \
(status_str[target] == status_str_prev[target]) and \
(status_str[target].find(".") == -1):
continue
log.info("[dvsim]: [%06ds] [%s]: %s", num_secs, target,
status_str[target])
class CompileSim(Deploy):
"""
Abstraction for building the simulation executable.
"""
# Register all builds with the class
items = []
def __init__(self, build_mode, sim_cfg):
self.target = "build"
self.pass_patterns = []
self.fail_patterns = []
self.mandatory_cmd_attrs = { # RAL gen
"skip_ral": False,
"gen_ral_pkg_cmd": False,
"gen_ral_pkg_dir": False,
"gen_ral_pkg_opts": False,
# Flist gen
"sv_flist_gen_cmd": False,
"sv_flist_gen_dir": False,
"sv_flist_gen_opts": False,
# Build
"build_dir": False,
"build_cmd": False,
"build_opts": False
}
self.mandatory_misc_attrs = {}
# Initialize
super().__init__(sim_cfg)
super().parse_dict(build_mode.__dict__)
# Call this method again with the sim_cfg dict passed as the object,
# since it may contain additional mandatory attrs.
super().parse_dict(sim_cfg.__dict__)
self.build_mode = self.name
self.__post_init__()
# Start fail message construction
self.fail_msg = "\n**BUILD:** {}<br>\n".format(self.name)
log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '')
self.fail_msg += "**LOG:** $scratch_path/{}<br>\n".format(log_sub_path)
CompileSim.items.append(self)
class RunTest(Deploy):
"""
Abstraction for running tests. This is one per seed for each test.
"""
# Initial seed values when running tests (if available).
seeds = []
# Register all runs with the class
items = []
def __init__(self, index, test, sim_cfg):
self.target = "run"
self.pass_patterns = []
self.fail_patterns = []
self.mandatory_cmd_attrs = {
"uvm_test": False,
"uvm_test_seq": False,
"run_opts": False,
"sw_dir": False,
"sw_name": False,
"run_dir": False,
"run_cmd": False,
"run_opts": False
}
self.mandatory_misc_attrs = {
"run_dir_name": False,
"pass_patterns": False,
"fail_patterns": False
}
self.index = index
self.seed = RunTest.get_seed()
# Initialize
super().__init__(sim_cfg)
super().parse_dict(test.__dict__)
# Call this method again with the sim_cfg dict passed as the object,
# since it may contain additional mandatory attrs.
super().parse_dict(sim_cfg.__dict__)
self.test = self.name
self.renew_odir = True
self.build_mode = test.build_mode.name
self.__post_init__()
# For output dir link, use run_dir_name instead.
self.odir_ln = self.run_dir_name
# Start fail message construction
self.fail_msg = "\n**TEST:** {}, ".format(self.name)
self.fail_msg += "**SEED:** {}<br>\n".format(self.seed)
log_sub_path = self.log.replace(self.sim_cfg.scratch_root + '/', '')
self.fail_msg += "**LOG:** {}<br>\n".format(log_sub_path)
RunTest.items.append(self)
@staticmethod
def get_seed():
if RunTest.seeds == []:
# Py lib 'secrets' provides crypto quality strong random numbers.
for i in range(1000):
seed = secrets.token_bytes(4)
seed = int.from_bytes(seed, byteorder='little')
RunTest.seeds.append(seed)
return RunTest.seeds.pop(0)
```
#### File: util/dvsim/SimCfg.py
```python
r"""
Class describing simulation configuration object
"""
import logging as log
import sys
from testplanner import class_defs, testplan_utils
from .Deploy import *
from .FlowCfg import FlowCfg
from .Modes import *
from .utils import *
class SimCfg(FlowCfg):
"""Simulation configuration object
A simulation configuration class holds key information required for building a DV
regression framework.
"""
def __init__(self, flow_cfg_file, proj_root, args):
super().__init__(flow_cfg_file, proj_root, args)
# Options set from command line
self.simulator = args.simulator
self.build_opts = []
self.build_opts.extend(args.build_opts)
self.en_build_modes = []
self.en_build_modes.extend(args.build_modes)
self.run_opts = []
self.run_opts.extend(args.run_opts)
self.en_run_modes = []
self.en_run_modes.extend(args.run_modes)
self.build_unique = args.build_unique
self.build_only = args.build_only
self.run_only = args.run_only
self.reseed_ovrd = args.reseed
self.reseed_multiplier = args.reseed_multiplier
self.waves = args.waves
self.dump = args.dump
self.max_waves = args.max_waves
self.cov = args.cov
self.profile = args.profile
self.xprop_off = args.xprop_off
self.no_rerun = args.no_rerun
self.verbosity = "{" + args.verbosity + "}"
self.email = args.email
self.verbose = args.verbose
self.dry_run = args.dry_run
self.skip_ral = args.skip_ral
self.map_full_testplan = args.map_full_testplan
# Set default sim modes for unpacking
if self.waves is True: self.en_build_modes.append("waves")
if self.cov is True: self.en_build_modes.append("cov")
if self.profile != 'none': self.en_build_modes.append("profile")
if self.xprop_off is not True: self.en_build_modes.append("xprop")
# Options built from cfg_file files
self.project = ""
self.flow = ""
self.flow_makefile = ""
self.scratch_path = ""
self.build_dir = ""
self.run_dir = ""
self.sw_build_dir = ""
self.pass_patterns = []
self.fail_patterns = []
self.name = ""
self.dut = ""
self.tb = ""
self.testplan = ""
self.fusesoc_core = ""
self.ral_spec = ""
self.build_modes = []
self.run_modes = []
self.regressions = []
# Options from simulators - for building and running tests
self.build_cmd = ""
self.flist_gen_cmd = ""
self.flist_gen_opts = []
self.flist_file = ""
self.run_cmd = ""
self.dump_file = ""
# Generated data structures
self.links = {}
self.build_list = []
self.run_list = []
self.deploy = []
# Parse the cfg_file file tree
self.parse_flow_cfg(flow_cfg_file)
# Stop here if this is a master cfg list
if self.is_master_cfg: return
# If build_unique is set, then add current timestamp to uniquify it
if self.build_unique:
self.build_dir += "_" + self.timestamp
# Process overrides before substituting the wildcards.
self._process_overrides()
# Make substitutions, while ignoring the following wildcards
# TODO: Find a way to set these in sim cfg instead
ignored_wildcards = [
"build_mode", "index", "test", "seed", "uvm_test", "uvm_test_seq"
]
self.__dict__ = find_and_substitute_wildcards(self.__dict__,
self.__dict__,
ignored_wildcards)
# Print info
log.info("Scratch path for %s: %s", self.name, self.scratch_path)
# Set directories with links for ease of debug / triage.
self.links = {
"D": self.scratch_path + "/" + "dispatched",
"P": self.scratch_path + "/" + "passed",
"F": self.scratch_path + "/" + "failed",
"K": self.scratch_path + "/" + "killed"
}
# Use the default build mode for tests that do not specify it
if not hasattr(self, "build_mode"):
setattr(self, "build_mode", "default")
self._process_exports()
# Create objects from raw dicts - build_modes, sim_modes, run_modes,
# tests and regressions
self._create_objects()
# Post init checks
self.__post_init__()
def __post_init__(self):
# Run some post init checks
super().__post_init__()
self.results_title = self.name.upper() + " Simulation Results"
@staticmethod
def create_instance(flow_cfg_file, proj_root, args):
'''Create a new instance of this class with the given parameters.
'''
return SimCfg(flow_cfg_file, proj_root, args)
# Purge the output directories. This operates on self.
def _purge(self):
if self.scratch_path != "":
try:
log.info("Purging scratch path %s", self.scratch_path)
os.system("/bin/rm -rf " + self.scratch_path)
except IOError:
log.error('Failed to purge scratch directory %s',
self.scratch_path)
def _create_objects(self):
# Create build and run modes objects
build_modes = Modes.create_modes(BuildModes,
getattr(self, "build_modes"))
setattr(self, "build_modes", build_modes)
run_modes = Modes.create_modes(RunModes, getattr(self, "run_modes"))
setattr(self, "run_modes", run_modes)
# Walk through build modes enabled on the CLI and append the opts
for en_build_mode in self.en_build_modes:
build_mode_obj = Modes.find_mode(en_build_mode, build_modes)
if build_mode_obj is not None:
self.build_opts.extend(build_mode_obj.build_opts)
self.run_opts.extend(build_mode_obj.run_opts)
else:
log.error(
"Mode \"%s\" enabled on the the command line is not defined",
en_build_mode)
sys.exit(1)
# Walk through run modes enabled on the CLI and append the opts
for en_run_mode in self.en_run_modes:
run_mode_obj = Modes.find_mode(en_run_mode, run_modes)
if run_mode_obj is not None:
self.run_opts.extend(run_mode_obj.run_opts)
else:
log.error(
"Mode \"%s\" enabled on the the command line is not defined",
en_run_mode)
sys.exit(1)
# Create tests from given list of items
tests = Tests.create_tests(getattr(self, "tests"), self)
setattr(self, "tests", tests)
# Regressions
# Parse testplan if provided.
if self.testplan != "":
self.testplan = testplan_utils.parse_testplan(self.testplan)
# Extract tests in each milestone and add them as regression target.
self.regressions.extend(self.testplan.get_milestone_regressions())
# Create regressions
regressions = Regressions.create_regressions(
getattr(self, "regressions"), self, tests)
setattr(self, "regressions", regressions)
def _print_list(self):
for list_item in self.list_items:
log.info("---- List of %s in %s ----", list_item, self.name)
if hasattr(self, list_item):
items = getattr(self, list_item)
for item in items:
log.info(item)
else:
log.error("Item %s does not exist!", list_item)
def _create_build_and_run_list(self):
# Walk through the list of items to run and create the build and run
# objects.
# Allow multiple regressions to run as long as they do not enable
# sim_modes or run_modes
def get_overlapping_tests(tests, run_list_names):
overlapping_tests = []
for test in tests:
if test.name in run_list_names:
overlapping_tests.append(test)
return overlapping_tests
def prune_items(items, marked_items):
pruned_items = []
for item in items:
if item not in marked_items: pruned_items.append(item)
return pruned_items
# Check if there are items to run
if self.items == []:
log.error(
"No items provided for running this simulation / regression")
sys.exit(1)
items_list = self.items
run_list_names = []
marked_items = []
# Process regressions first
for regression in self.regressions:
if regression.name in items_list:
overlapping_tests = get_overlapping_tests(
regression.tests, run_list_names)
if overlapping_tests != []:
log.error("Regression \"%s\" added for run contains tests that overlap with " + \
"other regressions added. This can result in conflicting " + \
"build / run_opts to be set causing unexpected results.",
regression.name)
sys.exit(1)
self.run_list.extend(regression.tests)
# Merge regression's build and run opts with its tests and their
# build_modes
regression.merge_regression_opts()
run_list_names.extend(regression.test_names)
marked_items.append(regression.name)
items_list = prune_items(items_list, marked_items)
# Process individual tests
for test in self.tests:
if test.name in items_list:
overlapping_tests = get_overlapping_tests([test],
run_list_names)
if overlapping_tests == []:
self.run_list.append(test)
run_list_names.append(test.name)
marked_items.append(test.name)
items_list = prune_items(items_list, marked_items)
# Merge the global build and run opts
Tests.merge_global_opts(self.run_list, self.build_opts, self.run_opts)
# Check if all items have been processed
if items_list != []:
log.error("The items %s added for run were not found in \n%s!" + \
"\nUse the --list switch to see a list of available tests / regressions.", \
items_list, self.flow_cfg_file)
sys.exit(1)
# Process reseed override and create the build_list
build_list_names = []
for test in self.run_list:
# Override reseed if available.
if self.reseed_ovrd != -1:
test.reseed = self.reseed_ovrd
# Apply reseed multiplier if set on the command line.
test.reseed *= self.reseed_multiplier
# Create the unique set of builds needed.
if test.build_mode.name not in build_list_names:
self.build_list.append(test.build_mode)
build_list_names.append(test.build_mode.name)
def _create_dirs(self):
'''Create initial set of directories
'''
# Invoking system calls has a performance penalty.
# Construct a single command line chained with '&&' to invoke
# the system call only once, rather than multiple times.
create_link_dirs_cmd = ""
for link in self.links.keys():
create_link_dirs_cmd += "/bin/rm -rf " + self.links[link] + " && "
create_link_dirs_cmd += "mkdir -p " + self.links[link] + " && "
create_link_dirs_cmd += " true"
try:
os.system(create_link_dirs_cmd)
except IOError:
log.error("Error running when running the cmd \"%s\"",
create_link_dirs_cmd)
sys.exit(1)
def _create_deploy_objects(self):
'''Create deploy objects from the build and run lists.
'''
# Create the build and run list first
self._create_build_and_run_list()
builds = []
build_map = {}
for build in self.build_list:
item = CompileSim(build, self)
builds.append(item)
build_map[build] = item
runs = []
for test in self.run_list:
for num in range(test.reseed):
item = RunTest(num, test, self)
if self.build_only is False:
build_map[test.build_mode].sub.append(item)
runs.append(item)
if self.run_only is True:
self.deploy = runs
else:
self.deploy = builds
# Create initial set of directories before kicking off the regression.
self._create_dirs()
def _gen_results(self):
'''
The function is called after the regression has completed. It collates the status of
all run targets and generates a dict. It parses the testplan and maps the generated
result to the testplan entries to generate a final table (list). It uses the fmt arg
to dump the final result as a markdown or html.
'''
# TODO: add support for html
def retrieve_result(name, results):
for item in results:
if name == item["name"]: return item
return None
def gen_results_sub(items, results, fail_msgs):
'''
Generate the results table from the test runs (builds are ignored).
The table has 3 columns - name, passing and total as a list of dicts.
This is populated for all tests. The number of passing and total is
in reference to the number of iterations or reseeds for that test.
This list of dicts is directly consumed by the Testplan::results_table
method for testplan mapping / annotation.
'''
if items == []: return (results, fail_msgs)
for item in items:
if item.status == "F":
fail_msgs += item.fail_msg
# Generate results table for runs.
if item.target == "run":
result = retrieve_result(item.name, results)
if result is None:
result = {"name": item.name, "passing": 0, "total": 0}
results.append(result)
if item.status == "P": result["passing"] += 1
result["total"] += 1
(results, fail_msgs) = gen_results_sub(item.sub, results,
fail_msgs)
return (results, fail_msgs)
regr_results = []
fail_msgs = ""
(regr_results, fail_msgs) = gen_results_sub(self.deploy, regr_results,
fail_msgs)
# Add title if there are indeed failures
if fail_msgs != "":
fail_msgs = "\n## List of Failures\n" + fail_msgs
# Generate results table for runs.
results_str = "## " + self.results_title + "\n"
results_str += "### " + self.timestamp_long + "\n"
# Add path to testplan.
testplan = "https://" + self.doc_server + '/' + self.rel_path
testplan = testplan.replace("/dv", "/doc/dv_plan/#testplan")
results_str += "### [Testplan](" + testplan + ")\n\n"
# TODO: check if testplan is not null?
results_str += self.testplan.results_table(
regr_results=regr_results,
map_full_testplan=self.map_full_testplan)
results_str += "\n"
# Append failures for triage
self.results_md = results_str + fail_msgs
# Write results to the scratch area
self.results_file = self.scratch_path + "/results_" + self.timestamp + ".md"
log.info("Detailed results are available at %s", self.results_file)
f = open(self.results_file, 'w')
f.write(self.results_md)
f.close()
# Return only the tables
return results_str
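# Note (illustrative): each results entry built by gen_results_sub above has the shape
# {"name": "<test_name>", "passing": <passing_seed_count>, "total": <total_seed_count>},
# which is what Testplan.results_table() consumes for testplan mapping / annotation.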
``` |
{
"source": "JonForce/bitcoin-clock",
"score": 3
} |
#### File: JonForce/bitcoin-clock/clockwindow.py
```python
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtGui import QMovie, QFont
class ClockWindow(object):
def __init__(self, MainWindow, width, height):
MainWindow.setObjectName("MainWindow")
dim = MainWindow.size()
self.WIDTH = width
self.HEIGHT = height
self.RATIO = self.WIDTH / self.HEIGHT
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(0, 0, self.WIDTH, self.HEIGHT))
self.label.setObjectName("label")
self.display_text = QtWidgets.QLabel(self.centralwidget)
self.display_text.setObjectName("display_text")
self.display_text.setFont(QFont('Helvetica', 30))
self.display_text.setStyleSheet("background-color: black; color: green")
self.set_display_text("Loading...", "green")
MainWindow.setCentralWidget(self.centralwidget)
def update_gif(self, gif_path):
self.movie = QMovie(gif_path)
self.label.setMovie(self.movie)
self.movie.start()
self.movie.setScaledSize(self.label.size())
def set_display_text(self, text, color):
self.display_text.setStyleSheet(f"background-color: black; color: {color}")
self.display_text.setText(text)
self.display_text.move(int(self.WIDTH / 2 - self._label_width(self.display_text) / 2), self.HEIGHT - self._label_height(self.display_text))
self.display_text.setFixedWidth(self._label_width(self.display_text))
def _label_width(self, label):
return label.fontMetrics().boundingRect(label.text()).width()
def _label_height(self, label):
return label.fontMetrics().boundingRect(label.text()).height()
```
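A minimal usage sketch for `ClockWindow` (assuming the usual PyQt5 entry point; the window size, gif path and text below are placeholders, not values from this project):
```python
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow

app = QApplication(sys.argv)
main_window = QMainWindow()
ui = ClockWindow(main_window, 800, 480)         # width / height chosen arbitrarily
ui.update_gif("placeholder_animation.gif")      # any local gif path
ui.set_display_text("$27,000 (1.2%)", "green")
main_window.show()
sys.exit(app.exec_())
```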
#### File: JonForce/bitcoin-clock/controller.py
```python
import requests
import configparser
from datetime import datetime
from datetime import timedelta
from PyQt5.QtCore import QTimer,QDateTime
from PyQt5.QtCore import QThread
import random
class Controller:
UPDATE_FREQUENCY = 2000 # millis
STORE_BTC_PRICE_FREQUENCY = 60 # seconds
OLDEST_ALLOWED_PRICE = 60*60*24 # seconds
GIF_DURATION = 60 # seconds
prices = []
gif_search_terms_positive = {
"to the moon": 5,
"money": 5,
"rich": 5,
"bitcoin": 5,
"bullish": 2,
"printing money": 4,
"success": 3,
"unstoppable": 2,
"happy": 3
}
terms_positive_sum = None
gif_search_terms_negative = {
"cry": 5,
"diamond hands": 5,
"car crash": 2,
"crash": 3,
"burning money": 5,
"hold!": 8,
"<NAME>": 1
}
terms_negative_sum = None
def __init__(self, gif_manager, clock_window, config):
self.config = config
self.gif_manager = gif_manager
self.clock_window = clock_window
self.gif_start_time = datetime.now()
self.timer = QTimer()
self.timer.timeout.connect(self._refresh_timer)
self.timer.start(self.UPDATE_FREQUENCY)
self._refresh_timer()
def _refresh_timer(self):
print("Refresh")
self.timer.start(self.UPDATE_FREQUENCY)
self._update_btc_price()
if (self.config['DEFAULT']['HAPPY_MODE'] != "True" or self._percent_change() >= 0) and self.btc_price is not None:
text = f"${self.btc_price['str']} ({round(self._percent_change()*100, ndigits=2)}%)"
self.clock_window.set_display_text(text, "green" if self._percent_change() >= 0 else "red")
else:
self.clock_window.set_display_text("Have a great day!", "green")
if self._gif_duration() > self.GIF_DURATION:
term = None
if self._percent_change() >= 0:
term = self._random_positive_term()
else:
term = self._random_negative_term()
if self._percent_change() < 0 and self.config['DEFAULT']['HAPPY_MODE'] == "True":
self.clock_window.update_gif(self.gif_manager.random_idling_gif())
else:
self.clock_window.update_gif(self.gif_manager.grab_gif(term))
self.gif_start_time = datetime.now()
print("Updating gif")
def _update_btc_price(self):
try:
response = requests.get('https://api.coindesk.com/v1/bpi/currentprice.json')
except:
print("No internet")
self.btc_price = None
return
price = {
"time": datetime.now(),
"float": float(response.json()["bpi"]["USD"]["rate"].replace(",", "")),
"str": response.json()["bpi"]["USD"]["rate"].split(".")[0]
}
self.btc_price = price
if len(self.prices) == 0 or self._elapsed(self.prices[-1]['time']) > self.STORE_BTC_PRICE_FREQUENCY:
self.prices.append(price)
print(f"Added new btc price, length = {len(self.prices)}")
i = 0
while True:
oldest_price = self.prices[0]
if oldest_price is None or i >= len(self.prices):
break
elif self._elapsed(oldest_price['time']) > self.OLDEST_ALLOWED_PRICE:
self.prices.remove(oldest_price)
print(f"Removed btc price, age : {self._elapsed(oldest_price['time']).seconds}")
else:
i += 1
# Default to the percent change in the last hour
def _percent_change(self, duration=60*60):
if self.btc_price is None:
return 0
closest_price = None
target_time = datetime.now() - timedelta(seconds=duration)
for price in self.prices:
if closest_price is None or abs((price["time"] - target_time).seconds) < abs((closest_price["time"] - target_time).seconds):
closest_price = price
# % increase = Increase ÷ Original Number × 100.
return (self.btc_price["float"] - closest_price["float"]) / closest_price["float"]
def _elapsed(self, old_time):
return (datetime.now() - old_time).seconds
def _gif_duration(self):
return self._elapsed(self.gif_start_time)
def _random_positive_term(self):
if self.terms_positive_sum is None:
self.terms_positive_sum = 0
for term in self.gif_search_terms_positive.keys():
self.terms_positive_sum += self.gif_search_terms_positive[term]
select_i = random.randint(0, self.terms_positive_sum - 1)
for term in self.gif_search_terms_positive.keys():
if select_i <= self.gif_search_terms_positive[term]:
return term
else:
select_i -= self.gif_search_terms_positive[term]
def _random_negative_term(self):
if self.terms_negative_sum is None:
self.terms_negative_sum = 0
for term in self.gif_search_terms_negative.keys():
self.terms_negative_sum += self.gif_search_terms_negative[term]
select_i = random.randint(0, self.terms_negative_sum - 1)
for term in self.gif_search_terms_negative.keys():
if select_i <= self.gif_search_terms_negative[term]:
return term
else:
select_i -= self.gif_search_terms_negative[term]
```
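A small worked example of the percent-change formula used by `_percent_change` above (prices are made up):
```python
old_price = 50_000.0   # closest stored price to ~1 hour ago
new_price = 51_250.0   # current price
change = (new_price - old_price) / old_price
print(round(change * 100, 2))  # 2.5 (%) -> displayed in green since change >= 0
```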
#### File: JonForce/bitcoin-clock/gifmanager.py
```python
import os
import giphy_client
from giphy_client.rest import ApiException
import requests
import random
class GifManager:
def __init__(self, base_dir, tmp_dir, config):
self.config = config
self.idling_dir = os.path.join(base_dir, "idling")
self.tmp_dir = tmp_dir
self.giphy = giphy_client.DefaultApi()
def random_idling_gif(self):
dir = os.path.join(self.idling_dir, f"{random.randint(0, 5)}.gif")
return dir
def grab_gif(self, query: str):
try:
api_response = self.giphy.gifs_search_get(self.config['DEFAULT']['SECRET_KEY_GIPHY'], query, limit=1, offset=random.randint(0, 9),
lang='en',
fmt='json')
url = api_response.data[0].images.original.url
path = os.path.join(self.tmp_dir, f"giphy.gif")
with open(path, 'wb') as f:
f.write(requests.get(url).content)
return path
except ApiException as e:
print("Exception when calling DefaultApi->gifs_search_get: %s\n" % e)
# def _create_top_gif_id(self):
# _top_gif_id = 0
# while os.path.exists(os.path.join(self.tmp_dir, f"{_top_gif_id}.gif")):
# _top_gif_id += 1
# return _top_gif_id
``` |
{
"source": "JonFreer/neural_rendering_gau_gan",
"score": 2
} |
#### File: JonFreer/neural_rendering_gau_gan/options.py
```python
from absl import flags
import numpy as np
FLAGS = flags.FLAGS
# ------------------------------------------------------------------------------
# Train flags
# ------------------------------------------------------------------------------
# Dataset, model directory and run mode
flags.DEFINE_string('train_dir', '/tmp/nerual_rendering',
'Directory for model training.')
flags.DEFINE_string('dataset_name', 'sanmarco9k', 'name ID for a dataset.')
flags.DEFINE_string(
'dataset_parent_dir', '',
'Directory containing generated tfrecord dataset.')
flags.DEFINE_string('run_mode', 'train', "{'train', 'eval', 'infer'}")
flags.DEFINE_string('imageset_dir', None, 'Directory containing trainset '
'images for appearance pretraining.')
flags.DEFINE_string('metadata_output_dir', None, 'Directory to save pickled '
'pairwise distance matrix for appearance pretraining.')
flags.DEFINE_integer('save_samples_kimg', 50, 'kimg cycle to save sample'
'validation ouptut during training.')
# Network inputs/outputs
flags.DEFINE_boolean('use_depth', True, 'Add depth image to the deep buffer.')
flags.DEFINE_boolean('use_alpha', False,
'Add alpha channel to the deep buffer.')
flags.DEFINE_boolean('use_semantic', True,
'Add semantic map to the deep buffer.')
flags.DEFINE_boolean('use_appearance', True,
'Capture appearance from an input real image.')
flags.DEFINE_integer('deep_buffer_nc', 7,
'Number of input channels in the deep buffer.')
flags.DEFINE_integer('appearance_nc', 10,
'Number of input channels to the appearance encoder.')
flags.DEFINE_integer('output_nc', 3,
'Number of channels for the generated image.')
# Staged training flags
flags.DEFINE_string(
'vgg16_path', './vgg16_weights/vgg16.npy',
'path to a *.npy file with vgg16 pretrained weights')
flags.DEFINE_boolean('load_pretrained_app_encoder', False,
'Warmstart appearance encoder with pretrained weights.')
flags.DEFINE_string('appearance_pretrain_dir', '',
'Model dir for the pretrained appearance encoder.')
flags.DEFINE_boolean('train_app_encoder', False, 'Whether to make the weights '
'for the appearance encoder trainable or not.')
flags.DEFINE_boolean(
'load_from_another_ckpt', False, 'Load weights from another trained model, '
'e.g load model trained with a fixed appearance encoder.')
flags.DEFINE_string('fixed_appearance_train_dir', '',
'Model dir for training G with a fixed appearance net.')
# -----------------------------------------------------------------------------
# More hparams
flags.DEFINE_integer('train_resolution', 256,
'Crop train images to this resolution.')
flags.DEFINE_float('d_lr', 0.001, 'Learning rate for the discriminator.')
flags.DEFINE_float('g_lr', 0.001, 'Learning rate for the generator.')
flags.DEFINE_float('ez_lr', 0.0001, 'Learning rate for appearance encoder.')
flags.DEFINE_integer('batch_size', 8, 'Batch size for training.')
flags.DEFINE_boolean('use_scaling', True, "use He's scaling.")
flags.DEFINE_integer('num_crops', 30, 'num crops from train images'
'(use -1 for random crops).')
flags.DEFINE_integer('app_vector_size', 8, 'Size of latent appearance vector.')
flags.DEFINE_integer('total_kimg', 20000,
'Max number (in kilo) of training images for training.')
flags.DEFINE_float('adam_beta1', 0.0, 'beta1 for adam optimizer.')
flags.DEFINE_float('adam_beta2', 0.99, 'beta2 for adam optimizer.')
# Loss weights
flags.DEFINE_float('w_loss_vgg', 0.3, 'VGG loss weight.')
flags.DEFINE_float('w_loss_feat', 10., 'Feature loss weight (from pix2pixHD).')
flags.DEFINE_float('w_loss_l1', 50., 'L1 loss weight.')
flags.DEFINE_float('w_loss_z_recon', 10., 'Z reconstruction loss weight.')
flags.DEFINE_float('w_loss_gan', 1., 'Adversarial loss weight.')
flags.DEFINE_float('w_loss_z_gan', 1., 'Z adversarial loss weight.')
flags.DEFINE_float('w_loss_kl', 0.01, 'KL divergence weight.')
flags.DEFINE_float('w_loss_l2_reg', 0.01, 'Weight for L2 regression on Z.')
# -----------------------------------------------------------------------------
# Architecture and training setup
flags.DEFINE_string('arch_type', 'spade',
'Architecture type: {pggan, pix2pixhd}.')
flags.DEFINE_string('training_pipeline', 'staged',
'Training type type: {staged, bicycle_gan, drit}.')
flags.DEFINE_integer('g_nf', 64,
'num filters in the first/last layers of U-net.')
flags.DEFINE_boolean('concatenate_skip_layers', True,
'Use concatenation for skip connections.')
## if arch_type == 'spade':
flags.DEFINE_boolean('random_style', False,
'Use random style (no mean,var)')
## if arch_type == 'pggan':
flags.DEFINE_integer('pggan_n_blocks', 5,
'Num blocks for the pggan architecture.')
## if arch_type == 'pix2pixhd':
flags.DEFINE_integer('p2p_n_downsamples', 3,
'Num downsamples for the pix2pixHD architecture.')
flags.DEFINE_integer('p2p_n_resblocks', 4, 'Num residual blocks at the '
'end/start of the pix2pixHD encoder/decoder.')
## if use_drit_pipeline:
flags.DEFINE_boolean('use_concat', True, '"concat" mode from DRIT.')
flags.DEFINE_boolean('normalize_drit_Ez', True, 'Add pixelnorm layers to the '
'appearance encoder.')
flags.DEFINE_boolean('concat_z_in_all_layers', True, 'Inject z at each '
'upsampling layer in the decoder (only for DRIT baseline)')
flags.DEFINE_string('inject_z', 'to_bottleneck', 'Method for injecting z; '
'one of {to_encoder, to_bottleneck}.')
flags.DEFINE_boolean('use_vgg_loss', True, 'vgg v L1 reconstruction loss.')
# ------------------------------------------------------------------------------
# Inference flags
# ------------------------------------------------------------------------------
flags.DEFINE_string('inference_input_path', '',
'Parent directory for input images at inference time.')
flags.DEFINE_string('inference_output_dir', '', 'Output path for inference')
flags.DEFINE_string('target_img_basename', '',
'basename of target image to render for interpolation')
flags.DEFINE_string('virtual_seq_name', 'full_camera_path',
'name for the virtual camera path suffix for the TFRecord.')
flags.DEFINE_string('inp_app_img_base_path', '',
'base path for the input appearance image for camera paths')
flags.DEFINE_string('appearance_img1_basename', '',
'basename of the first appearance image for interpolation')
flags.DEFINE_string('appearance_img2_basename', '',
'basename of the first appearance image for interpolation')
flags.DEFINE_list('input_basenames', [], 'input basenames for inference')
flags.DEFINE_list('input_app_basenames', [], 'input appearance basenames for '
'inference')
flags.DEFINE_string('frames_dir', '',
'Folder with input frames to a camera path')
flags.DEFINE_string('output_validation_dir', '',
'dataset_name for storing results in a structured folder')
flags.DEFINE_string('input_rendered', '',
'input rendered image name for inference')
flags.DEFINE_string('input_depth', '', 'input depth image name for inference')
flags.DEFINE_string('input_seg', '',
'input segmentation mask image name for inference')
flags.DEFINE_string('input_app_rgb', '',
'input appearance rgb image name for inference')
flags.DEFINE_string('input_app_rendered', '',
'input appearance rendered image name for inference')
flags.DEFINE_string('input_app_depth', '',
'input appearance depth image name for inference')
flags.DEFINE_string('input_app_seg', '',
'input appearance segmentation mask image name for'
'inference')
flags.DEFINE_string('output_img_name', '',
'[OPTIONAL] output image name for inference')
# -----------------------------------------------------------------------------
# Some validation and assertions
# -----------------------------------------------------------------------------
def validate_options():
if FLAGS.training_pipeline == 'drit':
assert FLAGS.use_appearance, 'DRIT pipeline requires --use_appearance'
assert not (
FLAGS.load_pretrained_app_encoder and FLAGS.load_from_another_ckpt), (
'You cannot load weights for the appearance encoder from two different '
'checkpoints!')
if not FLAGS.use_appearance:
print('**Warning: setting --app_vector_size to 0 since '
'--use_appearance=False!')
FLAGS.set_default('app_vector_size', 0)
# -----------------------------------------------------------------------------
# Print all options
# -----------------------------------------------------------------------------
def list_options():
configs = ('# Run flags/options from options.py:\n'
'# ----------------------------------\n')
configs += ('## Train flags:\n'
'## ------------\n')
configs += 'train_dir = %s\n' % FLAGS.train_dir
configs += 'dataset_name = %s\n' % FLAGS.dataset_name
configs += 'dataset_parent_dir = %s\n' % FLAGS.dataset_parent_dir
configs += 'run_mode = %s\n' % FLAGS.run_mode
configs += 'save_samples_kimg = %d\n' % FLAGS.save_samples_kimg
configs += '\n# --------------------------------------------------------\n\n'
configs += ('## Network inputs and outputs:\n'
'## ---------------------------\n')
configs += 'use_depth = %s\n' % str(FLAGS.use_depth)
configs += 'use_alpha = %s\n' % str(FLAGS.use_alpha)
configs += 'use_semantic = %s\n' % str(FLAGS.use_semantic)
configs += 'use_appearance = %s\n' % str(FLAGS.use_appearance)
configs += 'deep_buffer_nc = %d\n' % FLAGS.deep_buffer_nc
configs += 'appearance_nc = %d\n' % FLAGS.appearance_nc
configs += 'output_nc = %d\n' % FLAGS.output_nc
configs += 'train_resolution = %d\n' % FLAGS.train_resolution
configs += '\n# --------------------------------------------------------\n\n'
configs += ('## Staged training flags:\n'
'## ----------------------\n')
configs += 'load_pretrained_app_encoder = %s\n' % str(
FLAGS.load_pretrained_app_encoder)
configs += 'appearance_pretrain_dir = %s\n' % FLAGS.appearance_pretrain_dir
configs += 'train_app_encoder = %s\n' % str(FLAGS.train_app_encoder)
configs += 'load_from_another_ckpt = %s\n' % str(FLAGS.load_from_another_ckpt)
configs += 'fixed_appearance_train_dir = %s\n' % str(
FLAGS.fixed_appearance_train_dir)
configs += '\n# --------------------------------------------------------\n\n'
configs += ('## More hyper-parameters:\n'
'## ----------------------\n')
configs += 'd_lr = %f\n' % FLAGS.d_lr
configs += 'g_lr = %f\n' % FLAGS.g_lr
configs += 'ez_lr = %f\n' % FLAGS.ez_lr
configs += 'batch_size = %d\n' % FLAGS.batch_size
configs += 'use_scaling = %s\n' % str(FLAGS.use_scaling)
configs += 'num_crops = %d\n' % FLAGS.num_crops
configs += 'app_vector_size = %d\n' % FLAGS.app_vector_size
configs += 'total_kimg = %d\n' % FLAGS.total_kimg
configs += 'adam_beta1 = %f\n' % FLAGS.adam_beta1
configs += 'adam_beta2 = %f\n' % FLAGS.adam_beta2
configs += '\n# --------------------------------------------------------\n\n'
configs += ('## Loss weights:\n'
'## -------------\n')
configs += 'w_loss_vgg = %f\n' % FLAGS.w_loss_vgg
configs += 'w_loss_feat = %f\n' % FLAGS.w_loss_feat
configs += 'w_loss_l1 = %f\n' % FLAGS.w_loss_l1
configs += 'w_loss_z_recon = %f\n' % FLAGS.w_loss_z_recon
configs += 'w_loss_gan = %f\n' % FLAGS.w_loss_gan
configs += 'w_loss_z_gan = %f\n' % FLAGS.w_loss_z_gan
configs += 'w_loss_kl = %f\n' % FLAGS.w_loss_kl
configs += 'w_loss_l2_reg = %f\n' % FLAGS.w_loss_l2_reg
configs += '\n# --------------------------------------------------------\n\n'
configs += ('## Architecture and training setup:\n'
'## --------------------------------\n')
configs += 'arch_type = %s\n' % FLAGS.arch_type
configs += 'training_pipeline = %s\n' % FLAGS.training_pipeline
configs += 'g_nf = %d\n' % FLAGS.g_nf
configs += 'concatenate_skip_layers = %s\n' % str(
FLAGS.concatenate_skip_layers)
configs += 'p2p_n_downsamples = %d\n' % FLAGS.p2p_n_downsamples
configs += 'p2p_n_resblocks = %d\n' % FLAGS.p2p_n_resblocks
configs += 'use_concat = %s\n' % str(FLAGS.use_concat)
configs += 'normalize_drit_Ez = %s\n' % str(FLAGS.normalize_drit_Ez)
configs += 'inject_z = %s\n' % FLAGS.inject_z
configs += 'concat_z_in_all_layers = %s\n' % str(FLAGS.concat_z_in_all_layers)
configs += 'use_vgg_loss = %s\n' % str(FLAGS.use_vgg_loss)
configs += '\n# --------------------------------------------------------\n\n'
return configs
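# Illustrative usage sketch (assumed entry point; `main` and the module import below are
# not part of this repo):
#
#   from absl import app
#   import options
#
#   def main(argv):
#       options.validate_options()
#       print(options.list_options())
#
#   if __name__ == '__main__':
#       app.run(main)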
``` |
{
"source": "jonfroehlich/makeabilitylabwebsite",
"score": 2
} |
#### File: makeabilitylabwebsite/website/admin.py
```python
from django.contrib import admin
from django.contrib.admin import widgets
from .models import Person, Publication, Position, Talk, Project, Poster, Keyword, News, Banner, Video, Project_header, Photo, Project_umbrella, Project_Role, Sponsor
from website.admin_list_filters import PositionRoleListFilter, PositionTitleListFilter, PubVenueTypeListFilter, PubVenueListFilter
from sortedm2m_filter_horizontal_widget.forms import SortedFilteredSelectMultiple
import django
from django import forms
from django.http import HttpResponse
from datetime import datetime
from django.template import loader
from django.template import RequestContext
from django.shortcuts import redirect
import urllib
import bibtexparser
from image_cropping import ImageCroppingMixin
class BannerAdmin(ImageCroppingMixin, admin.ModelAdmin):
fieldsets = [
(None, {'fields': ["page", "title", "caption", "alt_text", "link", "favorite", "project"]}),
# ('Image', {'fields': ["image", "image_preview"]})
('Image', {'fields': ["image", "cropping"]})
]
# The list display lets us control what is shown in the default persons table at Home > Website > Banners
# info on displaying multiple entries comes from http://stackoverflow.com/questions/9164610/custom-columns-using-django-admin
list_display = ('title', 'project', 'page', 'favorite', 'image')
# readonly_fields = ["image_preview"]
class PositionInline(admin.StackedInline):
model = Position
# This specifies that the Inline is linked to the main owner of the position rather than any of the advisor roles.
fk_name = "person"
# This specifies that the field appears only once (by default)
extra = 0
def formfield_for_foreignkey(self, db_field, request, **kwargs):
print("PositionInline.formfield_for_foreignkey: db_field: {} db_field.name {} request: {}".format(db_field, db_field.name, request))
if db_field.name == "advisor" or db_field.name == "co_advisor":
# Filters advisors to professors and sorts by first name
# Based on: http://stackoverflow.com/a/30627555
professor_ids = [person.id for person in Person.objects.all() if person.is_professor()]
filtered_persons = Person.objects.filter(id__in=professor_ids).order_by('first_name')
print(filtered_persons)
kwargs["queryset"] = filtered_persons
elif db_field.name == "grad_mentor":
# Filters grad mentor list to current grad students (either member or collaborator)
grad_ids = [person.id for person in Person.objects.all() if person.is_grad_student() and (person.is_current_member() or person.is_current_collaborator())]
filtered_persons = Person.objects.filter(id__in=grad_ids).order_by('first_name')
print(filtered_persons)
kwargs["queryset"] = filtered_persons
return super(PositionInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class ProjectRoleInline(admin.StackedInline):
model = Project_Role
extra = 0
class ProjectHeaderInline(ImageCroppingMixin, admin.StackedInline):
model = Project_header
extra = 0
# Uses format as per https://github.com/jonasundderwolf/django-image-cropping to add cropping to the admin page
class NewsAdmin(ImageCroppingMixin, admin.ModelAdmin):
# Filters authors only to current members and sorts by firstname
# Based on: http://stackoverflow.com/a/30627555
def formfield_for_foreignkey(self, db_field, request, **kwargs):
# print("NewsAdmin.formfield_for_foreignkey: db_field: {} db_field.name {} request: {}".format(db_field, db_field.name, request))
if db_field.name == "author":
current_member_ids = [person.id for person in Person.objects.all() if person.is_current_member()]
filtered_persons = Person.objects.filter(id__in=current_member_ids).order_by('first_name')
print(filtered_persons)
kwargs["queryset"] = filtered_persons
return super(NewsAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
if db_field.name == "project":
kwargs["widget"] = widgets.FilteredSelectMultiple("project", is_stacked=False)
return super(NewsAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
class PhotoAdmin(ImageCroppingMixin, admin.ModelAdmin):
list_display = ('__str__', 'admin_thumbnail')
class ProjectAdmin(ImageCroppingMixin, admin.ModelAdmin):
inlines = [ProjectHeaderInline]
# The list display lets us control what is shown in the Project table at Home > Website > Project
# info on displaying multiple entries comes from http://stackoverflow.com/questions/9164610/custom-columns-using-django-admin
list_display = ('name', 'start_date', 'end_date', 'has_ended', 'get_people_count',
'get_current_member_count', 'get_past_member_count',
'get_most_recent_artifact_date', 'get_most_recent_artifact_type',
'get_publication_count', 'get_video_count', 'get_talk_count')
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
if db_field.name == "sponsors":
kwargs["widget"] = widgets.FilteredSelectMultiple("sponsors", is_stacked=False)
if db_field.name == "keywords":
kwargs["widget"] = widgets.FilteredSelectMultiple("keywords", is_stacked=False)
if db_field.name == "project_umbrellas":
kwargs["widget"] = widgets.FilteredSelectMultiple("project umbrellas", is_stacked=False)
return super(ProjectAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
class PersonAdmin(ImageCroppingMixin, admin.ModelAdmin):
# inlines allow us to edit models on the same page as a parent model
# see: https://docs.djangoproject.com/en/1.11/ref/contrib/admin/#inlinemodeladmin-objects
inlines = [PositionInline, ProjectRoleInline]
# The list display lets us control what is shown in the default persons table at Home > Website > People
# info on displaying multiple entries comes from http://stackoverflow.com/questions/9164610/custom-columns-using-django-admin
list_display = ('get_full_name', 'get_current_title', 'get_current_role', 'is_active', 'get_start_date', 'get_end_date', 'get_time_in_current_position', 'get_total_time_as_member')
#TODO setup filter here that has diff categories (like active members, past, etc.):
#https://www.elements.nl/2015/03/16/getting-the-most-out-of-django-admin-filters/
#related to: https://github.com/jonfroehlich/makeabilitylabwebsite/issues/238
list_filter = (PositionRoleListFilter, PositionTitleListFilter)
class VideoAdmin(admin.ModelAdmin):
# The list display lets us control what is shown in the default persons table at Home > Website > Videos
# info on displaying multiple entries comes from http://stackoverflow.com/questions/9164610/custom-columns-using-django-admin
list_display = ('title', 'date', 'caption', 'project')
# search_fields are used for auto-complete, see:
# https://docs.djangoproject.com/en/3.0/ref/contrib/admin/#django.contrib.admin.ModelAdmin.autocomplete_fields
search_fields = ['title', 'get_video_host_str', 'date']
# default the sort order in table to descending order by date
ordering = ('-date',)
class TalkAdmin(admin.ModelAdmin):
# The list display lets us control what is shown in the default talk table at Home > Website > Talk
# See: https://docs.djangoproject.com/en/dev/ref/contrib/admin/#django.contrib.admin.ModelAdmin.list_display
list_display = ('title', 'date', 'get_speakers_as_csv', 'forum_name', 'location', 'talk_type')
# search_fields are used for auto-complete, see:
# https://docs.djangoproject.com/en/3.0/ref/contrib/admin/#django.contrib.admin.ModelAdmin.autocomplete_fields
# for example, the PublicationAdmin uses auto-complete select2 for talks
search_fields = ['title', 'forum_name']
# Filters speakers only to current members and collaborators and sorts by first name
# Based on: https://stackoverflow.com/a/17457828
# Update: we no longer do this because sometimes we want to add a talk by a former member or collaborator
def formfield_for_manytomany(self, db_field, request, **kwargs):
print("TalkAdmin.formfield_for_manytomany: db_field: {} db_field.name {} request: {}".format(db_field, db_field.name, request))
if db_field.name == "projects":
kwargs["widget"] = widgets.FilteredSelectMultiple("projects", is_stacked=False)
if db_field.name == "project_umbrellas":
kwargs["widget"] = widgets.FilteredSelectMultiple("project umbrellas", is_stacked=False, )
if db_field.name == "speakers":
# Uncomment the following block of code to limit the speakers field in the admin UI only to current lab members
# Note: we don't actually want to do this (see https://github.com/jonfroehlich/makeabilitylabwebsite/issues/534)
# but keeping it here because code may be useful in the future for other areas of admin interface
# current_member_and_collab_ids = [person.id for person in Person.objects.all() if person.is_current_member()]
# filtered_speakers = Person.objects.filter(id__in=current_member_and_collab_ids).order_by('first_name')
# kwargs["queryset"] = filtered_speakers
kwargs["widget"] = widgets.FilteredSelectMultiple("speakers", is_stacked=False)
if db_field.name == "keywords":
kwargs["widget"] = widgets.FilteredSelectMultiple("keywords", is_stacked=False)
return super(TalkAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
class PosterAdmin(admin.ModelAdmin):
# search_fields are used for auto-complete, see:
# https://docs.djangoproject.com/en/3.0/ref/contrib/admin/#django.contrib.admin.ModelAdmin.autocomplete_fields
search_fields = ['title', 'date']
def formfield_for_manytomany(self, db_field, request, **kwargs):
print("PosterAdmin.formfield_for_manytomany: db_field: {} db_field.name {} request: {}".format(db_field, db_field.name, request))
if db_field.name == "projects":
kwargs["widget"] = widgets.FilteredSelectMultiple("projects", is_stacked=False)
if db_field.name == "authors":
kwargs["widget"] = widgets.FilteredSelectMultiple("authors", is_stacked=False)
if db_field.name == "keywords":
kwargs["widget"] = widgets.FilteredSelectMultiple("keywords", is_stacked=False)
return super(PosterAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
class ProjectUmbrellaAdmin(admin.ModelAdmin):
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
if db_field.name == "keywords":
kwargs["widget"] = widgets.FilteredSelectMultiple("keywords", is_stacked=False)
return super(ProjectUmbrellaAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
#from https://stackoverflow.com/questions/9602217/define-an-order-for-manytomanyfield-with-django
#display items inline
class PublicationAuthorInline(admin.TabularInline):
model = Publication.authors.through
verbose_name = "Author"
verbose_name_plural = "Author Order"
class PublicationAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['title', 'authors', 'date']}),
('Files', {'fields': ['pdf_file']}),
('Pub Venue information', {'fields': ['pub_venue_url','pub_venue_type', 'book_title', 'book_title_short', 'geo_location', 'total_papers_submitted', 'total_papers_accepted']}),
('Archival Info', {'fields': ['official_url', 'extended_abstract', 'peer_reviewed', 'award' ]}),
('Page Info', {'fields': ['num_pages', 'page_num_start', 'page_num_end']}),
('Supplementary Artifacts', {'fields': ['poster', 'video', 'talk', 'code_repo_url']}),
('Project Info', {'fields': ['projects', 'project_umbrellas']}),
('Keyword Info', {'fields': ['keywords']}),
]
list_display = ('title', 'book_title_short', 'date')
# default the sort order in table to descending order by date
ordering = ('-date',)
list_filter = (PubVenueTypeListFilter, PubVenueListFilter)
# add in auto-complete fields for talks, see:
# https://docs.djangoproject.com/en/3.0/ref/contrib/admin/#django.contrib.admin.ModelAdmin.autocomplete_fields
# this addresses: https://github.com/jonfroehlich/makeabilitylabwebsite/issues/553
# You must also update the search_fields in the respective admins like PosterAdmin, VideoAdmin, and TalkAdmin
# these search fields become what the auto-complete function searches for filtering
autocomplete_fields = ['poster', 'video', 'talk']
def get_form(self, request, obj=None, **kwargs):
"""We custom style some of the admin UI, including expanding the width of the talk select interface"""
form = super(PublicationAdmin, self).get_form(request, obj, **kwargs)
# we style the talks select2 widget so that it's wider, see:
# https://docs.djangoproject.com/en/2.2/ref/forms/widgets/#customizing-widget-instances
# see also:
# https://stackoverflow.com/questions/10588275/django-change-field-size-of-modelmultiplechoicefield
# https://stackoverflow.com/questions/110378/change-the-width-of-form-elements-created-with-modelform-in-django
# and finally, this is what worked for me:
# https://stackoverflow.com/q/35211809
# to address: https://github.com/jonfroehlich/makeabilitylabwebsite/issues/851
text_min_width = 750
form.base_fields['title'].widget.attrs['style'] = 'min-width: {}px;'.format(text_min_width)
form.base_fields['book_title'].widget.attrs['style'] = 'min-width: {}px;'.format(text_min_width)
form.base_fields['book_title_short'].widget.attrs['style'] = 'min-width: {}px;'.format(500)
select_min_width = 600
form.base_fields['poster'].widget.attrs['style'] = 'min-width: {}px;'.format(select_min_width)
form.base_fields['video'].widget.attrs['style'] = 'min-width: {}px;'.format(select_min_width)
form.base_fields['talk'].widget.attrs['style'] = 'min-width: {}px;'.format(select_min_width)
return form
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
if db_field.name == "authors":
kwargs['widget'] = SortedFilteredSelectMultiple()
elif db_field.name == "projects":
kwargs["widget"] = widgets.FilteredSelectMultiple("projects", is_stacked=False)
elif db_field.name == "project_umbrellas":
kwargs["widget"] = widgets.FilteredSelectMultiple("project umbrellas", is_stacked=False)
elif db_field.name == "keywords":
kwargs["widget"] = widgets.FilteredSelectMultiple("keywords", is_stacked=False)
return super(PublicationAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
admin.site.register(Person, PersonAdmin)
admin.site.register(Publication, PublicationAdmin)
admin.site.register(Talk, TalkAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(Poster, PosterAdmin)
admin.site.register(Keyword)
admin.site.register(News, NewsAdmin)
admin.site.register(Banner, BannerAdmin)
admin.site.register(Video, VideoAdmin)
admin.site.register(Photo, PhotoAdmin)
admin.site.register(Project_umbrella, ProjectUmbrellaAdmin)
admin.site.register(Sponsor)
# For modifying more on the front admin landing page, see https://medium.com/django-musings/customizing-the-django-admin-site-b82c7d325510
admin.site.index_title = "Makeability Lab Admin. Django version: " + django.get_version() + " ML Version: 0.5.7a"
```
#### File: website/utils/ml_utils.py
```python
import datetime
import random
from django.conf import settings # for access to settings variables, see https://docs.djangoproject.com/en/4.0/topics/settings/#using-settings-in-python-code
from operator import itemgetter
def get_video_embed(video_url):
"""Returns proper embed code for a video url"""
if 'youtu.be' in video_url or 'youtube.com' in video_url:
# https://youtu.be/i0IDbHGir-8 or https://www.youtube.com/watch?v=i0IDbHGir-8
base_url = "https://youtube.com/embed"
unique_url = video_url[video_url.find("/", 9):]
# See https://developers.google.com/youtube/youtube_player_demo for details on parameterizing YouTube video
return base_url + unique_url + "?showinfo=0&iv_load_policy=3"
elif 'vimeo' in video_url:
# https://player.vimeo.com/video/164630179
vimeo_video_id = video_url.rsplit('/', 1)[-1]
return "https://player.vimeo.com/video/" + vimeo_video_id
else:
return "unknown video service for '{}'".format(video_url)
def filter_incomplete_projects(projects):
'''
Filters out projects that don't have thumbnails, publications, or about information
:param projects:
:return:
'''
filtered = list()
for project in projects:
# I tested this and if project.about or project.gallery_image are not set,
# they will be interpreted as False by Python
if project.has_publication() and project.about and project.gallery_image:
filtered.append(project)
return filtered
def sort_projects_by_most_recent_pub(projects, include_projects_with_no_artifacts=False):
return sort_projects_by_most_recent_artifact(projects, include_projects_with_no_artifacts,
only_look_at_pubs=True)
def sort_projects_by_most_recent_artifact(projects, include_projects_with_no_artifacts=False,
only_look_at_pubs=True):
"""Sorts projects by most recent artifact
:return: a sorted list of projects by most recent artifact date"""
# print(projects)
sorted_projects = list()
for project in projects:
# most_recent_artifact is a tuple of (date, artifact)
most_recent_artifact = project.get_most_recent_artifact()
# get most recent pub. use this instead
if only_look_at_pubs:
most_recent_pub = project.get_most_recent_publication()
if most_recent_pub is not None:
most_recent_artifact = (most_recent_pub.date, most_recent_pub)
else:
most_recent_artifact = None
# _logger.debug("The most recent artifact: ", str(most_recent_artifact))
if most_recent_artifact is not None:
project_date_tuple = (project, most_recent_artifact[0])
sorted_projects.append(project_date_tuple)
elif include_projects_with_no_artifacts and project.start_date is not None:
sorted_projects.append((project, project.start_date))
# sort the artifacts by date
sorted_projects = sorted(sorted_projects, key=itemgetter(1), reverse=True)
ordered_projects = []
if len(sorted_projects) > 0:
ordered_projects, temp = zip(*sorted_projects)
return ordered_projects
##### BANNER HELPER FUNCTIONS ######
# All of these functions were written by <NAME>
def weighted_choice(choices):
total = sum(w for c, w in choices)
r = random.uniform(0, total)
upto = 0
for c, w in choices:
if upto + w >= r:
return c
upto += w
# assert False, "Shouldn't get here"
return choices[0][0]
def choose_banners_helper(banners, count):
banner_weights = []
total_weight = 0
for banner in banners:
elapsed = (datetime.datetime.now().date() - banner.date_added).days / 31.0
if elapsed <= 0:
elapsed = 1.0 / 31.0
weight = 1.0 + 1.0 / elapsed
banner_weights.append((banner, weight))
total_weight += weight
for i in range(0, len(banner_weights)):
banner_weights[i] = (banner_weights[i][0], banner_weights[i][1] / total_weight)
print(banner_weights[i][1])
selected_banners = []
for i in range(0, count):
if len(selected_banners) == len(banners):
break
banner = weighted_choice(banner_weights)
selected_banners.append(banner)
index = [y[0] for y in banner_weights].index(banner)
total_weight -= banner_weights[index][1]
del banner_weights[index]
if len(banner_weights) == 0:
break
for i in range(0, len(banner_weights)):
banner_weights[i] = (banner_weights[i][0], banner_weights[i][1] / total_weight)
return selected_banners
def choose_banners(banners):
favorite_banners = []
other_banners = []
for banner in banners:
if banner.favorite == True:
favorite_banners.append(banner)
else:
other_banners.append(banner)
selected_banners = choose_banners_helper(favorite_banners, settings.MAX_BANNERS)
if len(selected_banners) < settings.MAX_BANNERS:
temp = choose_banners_helper(other_banners, settings.MAX_BANNERS - len(selected_banners))
for banner in temp:
selected_banners.append(banner)
return selected_banners
```
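For reference, what `get_video_embed` returns for the two supported hosts (URLs below are just examples):
```python
print(get_video_embed("https://youtu.be/i0IDbHGir-8"))
# https://youtube.com/embed/i0IDbHGir-8?showinfo=0&iv_load_policy=3

print(get_video_embed("https://vimeo.com/164630179"))
# https://player.vimeo.com/video/164630179
```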
#### File: website/views/faq.py
```python
from django.conf import settings
from website.models import Banner
import website.utils.ml_utils as ml_utils
# for render https://docs.djangoproject.com/en/4.0/topics/http/shortcuts/#render
from django.shortcuts import render
def faq(request):
all_banners = Banner.objects.filter(page=Banner.PEOPLE)
displayed_banners = ml_utils.choose_banners(all_banners)
context = {'banners': displayed_banners,
'debug': settings.DEBUG}
# Render is a Django helper function. It combines a given template—in this case faq.html—with
# a context dictionary and returns an HttpResponse object with that rendered text.
# See: https://docs.djangoproject.com/en/4.0/topics/http/shortcuts/#render
return render(request, "website/faq.html", context)
```
#### File: website/views/news.py
```python
from django.conf import settings # for access to settings variables, see https://docs.djangoproject.com/en/4.0/topics/settings/#using-settings-in-python-code
from website.models import Banner, News
import website.utils.ml_utils as ml_utils
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# This method and the news functionality in general was written by <NAME>
def news(request, news_id):
all_banners = Banner.objects.filter(page=Banner.NEWSLISTING)
displayed_banners = ml_utils.choose_banners(all_banners)
news = get_object_or_404(News, pk=news_id)
max_extra_items = 4 # Maximum number of related news items to show per author / project
all_author_news = news.author.news_set.order_by('-date')
author_news = []
for item in all_author_news:
if item != news:
author_news.append(item)
project_news = {}
if news.project is not None:
for project in news.project.all():
ind_proj_news = []
all_proj_news = project.news_set.order_by('-date')
for item in all_proj_news:
if item != news:
ind_proj_news.append(item)
project_news[project] = ind_proj_news[:max_extra_items]
context = {'banners': displayed_banners,
'news': news,
'author_news': author_news[:max_extra_items],
'project_news': project_news,
'debug': settings.DEBUG}
# Render is a Django helper function. It combines a given template—in this case news.html—with
# a context dictionary and returns an HttpResponse object with that rendered text.
# See: https://docs.djangoproject.com/en/4.0/topics/http/shortcuts/#render
return render(request, 'website/news.html', context)
``` |
{
"source": "jonfrs/IC-USP",
"score": 3
} |
#### File: Help/build/gui.py
```python
from pathlib import Path
# from tkinter import *
# Explicit imports to satisfy Flake8
from tkinter import Tk, Canvas, Entry, Text, Button, PhotoImage
OUTPUT_PATH = Path(__file__).parent
ASSETS_PATH = OUTPUT_PATH / Path("./assets")
def relative_to_assets(path: str) -> Path:
return ASSETS_PATH / Path(path)
window = Tk()
window.geometry("600x400")
window.configure(bg = "#FFFFFF")
help = Canvas(
window,
bg = "#FFFFFF",
height = 400,
width = 600,
bd = 0,
highlightthickness = 0,
relief = "ridge"
)
help.place(x = 0, y = 0)
help.create_text(
50.0,
5.0,
anchor="nw",
text="Help - Argumentos para Validação dos dados",
fill="#000000",
font=("Tahoma", 16 * -1,'bold')
)
logos_small2_image = PhotoImage(
file=relative_to_assets("logos_small2.png"))
logos_samll2 = help.create_image(
557.0,
15.0,
image=logos_small2_image
)
help.create_rectangle(
50.0,
34.0,
600.0,
34.0,
fill="#000000",
outline="")
bg_help_image = PhotoImage(
file=relative_to_assets("bg_help.png"))
bg_help = help.create_image(
299.0,
217.0,
image=bg_help_image
)
window.resizable(False, False)
window.mainloop()
```
#### File: IC-USP/mindspore_integration/linear_reg.py
```python
import numpy as np
import pandas as pd
from mindspore import dataset as ds
from mindspore.common.initializer import Normal
from mindspore import nn, Model, context
from mindspore.train.callback import LossMonitor
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
import os
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
def get_data(df, x_column, y_column):
x_data = df[x_column].to_numpy()
y_data = df[y_column].to_numpy()
for i, j in zip(x_data, y_data):
yield np.array([i]).astype(np.float32), np.array([j]).astype(np.float32)
def create_dataset(df, batch_size, x_column, y_column):
input_data = ds.GeneratorDataset(list(get_data(df, x_column, y_column)), column_names=['data', 'label'])
input_data = input_data.batch(batch_size)
#input_data = input_data.repeat(repeat_size)
return input_data
class LinearNet(nn.Cell):
def __init__(self):
super(LinearNet, self).__init__()
self.fc = nn.Dense(1, 1, Normal(0.02), Normal(0.02))
def construct(self, x):
x = self.fc(x)
return x
def train_reg_linear(df, x_column, y_column, n_steps):
batch_number = 15
epochs = n_steps
#learning rate
lr = 0.005
momentum = 0.9
#Defines the Network
net = LinearNet()
#Loss Function
net_loss = nn.loss.MSELoss()
#Optimizer Function
opt = nn.Momentum(net.trainable_params(), lr, momentum)
model = Model(net, net_loss, opt)
model_dir = 'linreg_models'
os.makedirs(model_dir,exist_ok=True)
#Callbacks definition
config_ck = CheckpointConfig(save_checkpoint_steps=50, keep_checkpoint_max=1)
ckpoint_cb = ModelCheckpoint(prefix='linreg', directory=model_dir, config=config_ck)
#Dataset definition
ds_train = create_dataset(df, batch_number, x_column, y_column)
#Training
model.train(epochs, ds_train, callbacks=[LossMonitor(), ckpoint_cb], dataset_sink_mode=False)
parameters = []
for param in net.trainable_params():
parameters.append(param.asnumpy())
return parameters
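# Illustrative usage sketch: fit y = 3x + 2 on synthetic data (column names and values
# below are assumptions, not taken from this repo).
if __name__ == '__main__':
    x = np.linspace(0, 10, 150)
    demo_df = pd.DataFrame({'x': x, 'y': 3.0 * x + 2.0})
    params = train_reg_linear(demo_df, x_column='x', y_column='y', n_steps=5)
    # params is [weight, bias] from net.trainable_params(); values should approach 3.0 and 2.0.
    print(params)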
``` |
{
"source": "JonFStr/SongBeamer_SimilarSongFinder",
"score": 2
} |
#### File: SongBeamer_SimilarSongFinder/gui/LoadedSongsWindow.py
```python
import sys
from math import floor
from threading import Thread
from time import sleep
from typing import List
from PySide6.QtCore import Signal
from PySide6.QtGui import QAction
from PySide6.QtWidgets import (QWidget, QPushButton, QMainWindow, QScrollArea, QVBoxLayout, QApplication)
from LoadedSongs import LoadedSongs
from Song import Song
from gui.LoadSongsDialog import LoadSongsDialog
from gui.OrderableListItem import LoadedSongListItem
from gui.OrderableListWidget import OrderableListWidget
from gui.ProgressBar import ProgressBar
from gui.SongDetailsDialog import SongDetailsDialog
class LoadedSongsWindow(QMainWindow):
def __init__(self, loaded_songs_list):
"""Show and modify the list of all loaded songs
:type loaded_songs_list: LoadedSongs
:param loaded_songs_list: All currently loaded songs"""
super().__init__()
# Main layout
self.resize(900, 600)
self.setWindowTitle("Loaded Songs")
self._list_widget = OrderableListWidget()
self.setCentralWidget(self._list_widget)
# Setup parameters
self._song_list: LoadedSongs = loaded_songs_list
self._song_list.subscribe(LoadedSongs.ADDED, self._song_added)
self._song_list.subscribe(LoadedSongs.DELETED, self._song_deleted)
self._song_gui_list: dict[Song, QWidget] = {}
self._progress_bar = ProgressBar()
self._load_songs_dialog = LoadSongsDialog(self, self._progress_bar)
self._signal_song_added.connect(self._song_added_function)
# Setup gui
self._create_menu_bar()
self._status_bar.addPermanentWidget(self._progress_bar)
def _create_menu_bar(self):
"""Build the windows menu bar"""
menu_bar = self.menuBar()
# Load songs action
self._load_songs_action: QAction = QAction("&Load Files", self)
self._load_songs_action.triggered.connect(self._do_load_songs_gui_action)
self._load_song_dir_action: QAction = QAction("Load &Directory", self)
self._load_song_dir_action.triggered.connect(self._do_load_song_dir_gui_action)
# Song menu
songs_menu = menu_bar.addMenu("&Songs")
songs_menu.addActions([
self._load_songs_action,
self._load_song_dir_action,
])
# Status bar
self._status_bar = self.statusBar()
def _do_load_songs_gui_action(self):
"""Show a popup dialog to select songs to load"""
song_list = self._load_songs_dialog.get_songs_by_file()
thread: Thread = Thread(target=self._add_song_list, args=(song_list,))
thread.start()
def _do_load_song_dir_gui_action(self):
"""Show a popup dialog to select songs to load"""
song_list = self._load_songs_dialog.get_songs_by_dir()
thread: Thread = Thread(target=self._add_song_list, args=(song_list,))
thread.start()
def _add_song_list(self, song_list):
"""Add a list of new songs to the list
:type song_list: list[Song]
:param song_list: The list of songs to add"""
# Setup parameters
song: Song
total_song_count: int = len(song_list)
song_num: int = 0
prev_percentage_done: float = 0
self._progress_bar.startTimer()
self._progress_bar.set_progress.emit(prev_percentage_done)
# Load songs
for song in song_list:
self._song_list.add(song)
song_num += 1
percentage_done = floor(song_num / total_song_count * 100)
if prev_percentage_done != percentage_done:
prev_percentage_done = percentage_done
self._progress_bar.set_progress.emit(percentage_done)
# Take a break here and there to let the gui catch up
if 0 == song_num % 100:
sleep(1)
self._progress_bar.set_progress.emit(100)
def _song_added(self, song):
self._signal_song_added.emit(song)
_signal_song_added: Signal = Signal(Song)
def _song_added_function(self, song):
"""Add a new song was added to the list
:type song: Song
:param song: The song that was added"""
# Add to gui
list_item: LoadedSongListItem = LoadedSongListItem(song)
self._list_widget.add(list_item)
self._song_gui_list[song] = list_item
def _song_deleted(self, song):
"""A song was deleted from the list
:type song: Song
:param song: The song that was deleted"""
# Remove from gui
list_item: LoadedSongListItem = self._song_gui_list[song]
self._song_gui_list.pop(song)
self._list_widget.delete_item(list_item)
if __name__ == '__main__':
app = QApplication()
loaded_songs = LoadedSongs()
window = LoadedSongsWindow(loaded_songs)
window.show()
sys.exit(app.exec_())
```
#### File: SongBeamer_SimilarSongFinder/gui/OrderableListWidget.py
```python
from PySide6.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QLayout, QScrollArea, QWidgetItem
from PySide6.QtCore import Qt
from gui.OrderableListItem import OrderableListItem
class OrderableListWidget(QScrollArea):
"""All available items in this list"""
_item_list: list[OrderableListItem]
"""This lists actual widget"""
_widget: QWidget
"""The widgets layout"""
_layout: QLayout
"""Decides which way this list is ordered; 1 for ascending, -1 for descending"""
_order_factor: int
def __init__(self, order_asc=True, orientation_horizontal=False):
"""Init gui
:type order_asc: bool
:param order_asc: Whether to order the list ascending
:type orientation_horizontal: bool
:param orientation_horizontal: Should the list orientation be horizontal?
"""
super().__init__()
if order_asc:
self._order_factor = 1
else:
self._order_factor = -1
self._widget = QWidget()
self.setWidget(self._widget)
self.setWidgetResizable(True)
# Set layout
if orientation_horizontal:
self._layout = QHBoxLayout()
else:
self._layout = QVBoxLayout()
self._widget.setLayout(self._layout)
self._layout.setAlignment(Qt.AlignTop)
self._item_list = []
def _get_order(self, list_item_a, list_item_b):
"""Defines this lists widget order
:type list_item_a: OrderableListItem
:param list_item_a: The first item to compare
:type list_item_b: OrderableListItem
:param list_item_b: The second item to compare
:returns -1|0|1: list_item_a is: before, same, after list_item_b"""
str_a: str = list_item_a.get_order_string()
str_b: str = list_item_b.get_order_string()
if str_a == str_b:
return 0
elif str_a < str_b:
return -1 * self._order_factor
else:
return 1 * self._order_factor
def add(self, list_item):
"""Add a new item to the list
:type list_item: OrderableListItem
:param list_item: The item to add
"""
# Subscribe to changes
list_item.subscribe(OrderableListItem.DELETED, self._item_deleted)
list_item.subscribe(OrderableListItem.UPDATED, self._item_updated)
# Make sure to add the item only once
if list_item not in self._item_list:
list_item_inserted = False
self._item_list.append(list_item)
# Walk all existing items
for i in range(self._layout.count()):
existing_item: OrderableListItem = self._layout.itemAt(i).widget()
if 1 == self._get_order(existing_item, list_item):
self._layout.insertWidget(i, list_item)
list_item_inserted = True
break
if not list_item_inserted:
self._layout.addWidget(list_item)
def _item_deleted(self, item):
"""Delete an item from the list
:type item: OrderableListItem
:param item: The item to delete
"""
# See if the item exists in this list
try:
i: int = self._item_list.index(item)
except ValueError:
return
# Delete the item
self._item_list.pop(i)
def _item_updated(self, item):
"""Update the list with the items new information
:type item: OrderableListItem
:param item: The item that was updated
"""
pass
```
#### File: JonFStr/SongBeamer_SimilarSongFinder/SimilarityFinder.py
```python
import math
from threading import Thread
import networkx as nx
import numpy as np
import pandas as pd
from PySide6.QtCore import Signal
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from LoadedSongs import LoadedSongs
from Song import Song
from gui.ProgressBar import ProgressBar
class SimilarityFinder:
def __init__(self, song_list, progress_bar=None, calculations_done_signal=None, similarity_threshold=0.6):
"""Find similarities between songs in a directory
:type song_list: LoadedSongs
:param song_list: All songs to compare
:type progress_bar: ProgressBar
:param progress_bar: The progress bar object tracking the calculation progress
:type calculations_done_signal: Signal
:param calculations_done_signal: The signal to emit to when calculations are done
:type similarity_threshold: float
:param similarity_threshold: The threshold of what counts as "similar"
"""
# Init parameters
self._similarities = {}
self._song_lookup = {}
self._songs = pd.DataFrame(columns=['name', 'text'])
# Get passed parameters
self._song_list: LoadedSongs = song_list
self._progress_bar: ProgressBar = progress_bar
self._calculations_done_signal: Signal = calculations_done_signal
self._cosine_threshold: float = similarity_threshold
# Run calculations
finder_thread = Thread(target=self.run, name="Similarity Finder")
finder_thread.start()
def run(self):
"""Start the calculations"""
if None is not self._progress_bar:
self._progress_bar.startTimer()
# Prepare songs
self.__prepare_songs()
# Do the actual calculations
self._collect_similarities()
# Notify the user that all calculations have been done
if self._calculations_done_signal is not None:
self._calculations_done_signal.emit()
def __prepare_songs(self):
"""Prepare all songs for calculation"""
# Init parameters
song_dict: dict = {'name': [], 'text': []}
# Get the texts from all songs
song: Song
for song in self._song_list:
song_dict['name'].append(str(song))
song_dict['text'].append(song.get_text_as_line())
self._song_lookup[str(song)] = song
# Prepare songs with pandas
self._songs.name = pd.Series(song_dict['name'])
self._songs.text = pd.Series(song_dict['text'])
def __replace_indices(self, idx):
"""Replace the indices in all songs
:type idx: int
:param idx: Indices to replace"""
return self._songs['name'].values[idx]
def _collect_similarities(self):
"""Calculate the similarities between all loaded songs"""
# Prepare for calculations
self._similarities = {}
# Transform song vectors
tfidf = TfidfVectorizer()
tfidf.fit(self._songs.text)
tfidf_transform = tfidf.transform(self._songs.text)
# Subdivide data into batches and process each batch
batch_size = 2048
total_batches = math.floor(tfidf_transform.shape[0] / batch_size)
batch_num = 0
processing_not_finished = True
collected_similarities = {}
# Run each batch
while processing_not_finished:
# Check if this is the last batch
start = batch_num * batch_size
end = start + batch_size
if end + 1 >= tfidf_transform.shape[0]:
end = tfidf_transform.shape[0] - 1
processing_not_finished = False
# Get cosine similarity between songs
song_vec = tfidf_transform[start:end]
similarities = cosine_similarity(tfidf_transform, song_vec)
# Only look at lower triangle of matrix
similarities = np.tril(similarities, -1)
if 0 < np.sum(similarities):
# Get song indices of matching songs
indices = np.argwhere(similarities > self._cosine_threshold)
# Add start value of batch to column ids for correct ids in dataframe
names = indices.copy()
names[:, 1] += batch_num * batch_size
names = self.__replace_indices(names)
# Create dict with matching songs
for i in range(len(names)):
song_tuple = names[i]
score_index = indices[i]
similarity_score = similarities[score_index[0]][score_index[1]]
#if not 0.999 > similarity_score:
# continue
song_orig = self._song_lookup[song_tuple[0]]
song_copy = self._song_lookup[song_tuple[1]]
collected_similarities[(song_orig, song_copy)] = similarity_score
# Update progress if not finished
if processing_not_finished:
# Calculate progress
percentage_done = (batch_num + 1) / total_batches
percentage_done_nice = round(percentage_done * 100, 2)
# Command line output
if self._progress_bar is None:
print(percentage_done_nice, '%')
# Gui progress bar
else:
self._progress_bar.set_progress.emit(percentage_done_nice)
batch_num += 1
# Store similarities
groups = self._get_similarity_groups(collected_similarities)
self._similarities = groups
self._similarity_scores = collected_similarities
# Command line output
if self._progress_bar is None:
print("100 %")
# Gui progress bar
else:
self._progress_bar.set_progress.emit(100)
@staticmethod
def _get_similarity_groups(similarity_pairs_list):
"""Get all grouped similarities
:type similarity_pairs_list: dict[tuple[Song, Song], int]
:param similarity_pairs_list: All similarity pairs with songs as indexes
:return list[list[Song]]: All similarity groups with songs as indexes"""
graph = nx.Graph()
graph.add_edges_from(similarity_pairs_list.keys())
cliques = nx.find_cliques(graph)
groups = list(cliques)
return groups
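# Illustrative example (assumed, not from the original file): the pairs
# {(A, B), (B, C), (A, C), (C, D)} yield the maximal cliques [A, B, C] and [C, D],
# so one song can appear in more than one similarity group.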
def get_similarities(self):
"""Get a list of all calculated similarities
:return list[list[Song]], dict[tuple[Song, Song], int]: All calculated similarities"""
return self._similarities, self._similarity_scores
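# Hypothetical usage sketch (not part of the original file) -- assumes `songs` is a
# populated LoadedSongs instance and that the background thread has finished
# (e.g. after calculations_done_signal fired):
# finder = SimilarityFinder(songs, similarity_threshold=0.8)
# groups, scores = finder.get_similarities()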
```
#### File: JonFStr/SongBeamer_SimilarSongFinder/SongLine.py
```python
class SongLine:
# Uniquely ids all song lines
next_id = 0
def __init__(self, song_text: str, song):
"""A song line of a song
:type song_text: str
:param song_text: The lines text
:type song: Song.Song
:param song: The song this line is a part of
"""
# Set unique id
self.id = self.next_id
SongLine.next_id += 1
# Setup the song line
self._text = song_text
self.song = song
def get_text(self):
"""Get the lines song text
:return str: The lines song text"""
return self._text
def __repr__(self):
return self._text
def __eq__(self, other_line):
return self.id == other_line.id
def __hash__(self):
return self.id
```
#### File: JonFStr/SongBeamer_SimilarSongFinder/Song.py
```python
import os
import re
from pathlib import Path
from SongLine import SongLine
from Subscribable import Subscribable
class Song(Subscribable):
supported_verse_heading_list = ["Unbekannt", "Unbenannt", "Unknown",
"Intro", "Vers", "Verse", "Strophe",
"Pre-Bridge", "Bridge", "Misc",
"Pre-Refrain", "Refrain", "Pre-Chorus",
"Chorus", "Pre-Coda", "Zwischenspiel",
"Instrumental", "Interlude", "Coda",
"Ending", "Outro", "Teil", "Part", "Chor",
"Solo"]
"""Available subscription types"""
DELETED = 1
UPDATED = 2
"""This songs file"""
_song_file: Path
"""Id to uniquely identify each song"""
next_id = 0
"""Should this song be deleted?"""
_marked_for_deleting: bool = False
"""Should this song be kept?"""
_marked_for_keeping: bool = False
def __init__(self, song_file):
"""Extract a song from the given file
:type song_file: Path
:param song_file: Path to the file to extract from"""
# Set unique id
self.id = self.next_id
Song.next_id += 1
# Register subscriptions
super().__init__((self.DELETED, self.UPDATED))
# Setup song
self.valid = False
self._song_file = song_file
self._song_line_list = []
# Read file line by line and convert them into song lines
try:
with open(song_file, encoding='UTF-8') as file:
content = file.readlines()
self._read_lines(content)
# After a successful read, mark the song as valid
self.valid = True
except (UnicodeDecodeError, FileNotFoundError):
print("Error reading file", song_file)
def unload(self):
"""Unload this song from the program"""
# Trigger subscriptions
self._trigger_subscriptions(self.DELETED, song=self)
# Unload this song
self.valid = False
self._song_line_list = []
self._song_file = Path()
self.id = -1
def mark_for_deleting(self):
"""Mark this song to be deleted"""
self._marked_for_deleting = True
self._marked_for_keeping = False
self._trigger_subscriptions(self.UPDATED, song=self)
def is_marked_for_deleting(self):
"""Check if this song is marked for deleting"""
return self._marked_for_deleting
def mark_for_keeping(self):
"""Mark this song to be deleted"""
self._marked_for_keeping = True
self._marked_for_deleting = False
self._trigger_subscriptions(self.UPDATED, song=self)
def is_marked_for_keeping(self):
"""Check if this song is marked for keeping"""
return self._marked_for_keeping
def do_keep_or_delete(self):
"""Keep or delete this song file"""
if self._marked_for_keeping:
return
elif self._marked_for_deleting:
# Delete the actual file
self._song_file.unlink(True)
# Unload self from program
self.unload()
def _read_lines(self, content):
"""Parse the given song file content into valid song lines
:type content: list[str]
:param content: All lines of a song file"""
header_has_ended = False
verse_ended_in_last_line = False
# Go through the file line by line
line: str
for line in content:
line_is_song_text = True
line = line.strip()
# Skip empty lines
if "" == line:
continue
# Check if we are still in the heading
if not header_has_ended and "#" == line[0]:
continue
else:
header_has_ended = True
# Check if a verse has just ended
if "--" == line or "---" == line:
verse_ended_in_last_line = True
continue
# Check if this is a verse heading
if verse_ended_in_last_line:
# Filter out custom marker
if "$$M=" == line[0:3]:
line_is_song_text = False
else:
split_line = line.split(' ', 1)
# Check for a verse heading
verse_heading = split_line[0]
if 2 == len(split_line):
heading_number = split_line[1]
else:
heading_number = ""
if ((verse_heading in self.supported_verse_heading_list and
("" == heading_number or re.search("^[0-9][0-9]?[a-z]?$", heading_number))) or
(("Part" == verse_heading or "Teil" == verse_heading) and
re.search("^[A-Z]$", heading_number))):
line_is_song_text = False
# Remove markers
if line_is_song_text:
if "#C " == line[0:3] or "#H " == line[0:3]:
line = line[3:]
elif re.search("^##[0-9] ", line):
line = line[4:]
# Check if the line has passed all tests and can be converted to a song line
if line_is_song_text:
self._song_line_list.append(SongLine(line, self))
# Reset variables
verse_ended_in_last_line = False
def get_line_list(self):
"""Get the list of all song lines
:return list[SongLine]: The song line list
:raises ReferenceError"""
if not self.valid:
raise ReferenceError('Song is not valid')
return self._song_line_list
def get_text(self):
"""Get the songs text as a multiline text
:return str: The songs text as multiline
:raises ReferenceError"""
if not self.valid:
raise ReferenceError('Song is not valid')
return '\n'.join(str(line) for line in self._song_line_list)
def get_text_as_line(self):
"""Get the songs text as one line
:return str: The songs text as one line
:raises ReferenceError"""
if not self.valid:
raise ReferenceError('Song is not valid')
return ' '.join(str(line) for line in self._song_line_list)
def get_name(self):
"""Get the songs name
:return str: The songs name
:raises ReferenceError"""
if not self.valid:
raise ReferenceError('Song is not valid')
return self._song_file.name
def __repr__(self):
if not self.valid:
raise ReferenceError('Song is not valid')
return str(self._song_file)
def __hash__(self):
if not self.valid:
raise ReferenceError('Song is not valid')
return self.id
def __eq__(self, other_song):
# Only valid songs can be compared
if not self.valid:
return False
# Compare to other song object
if type(other_song) == Song:
other_song: Song
return self._song_file == other_song._song_file
# Compare to string
elif type(other_song) == str:
other_song: str
return self._song_file == other_song
# Default
else:
return False
``` |
{
"source": "jong82/documentation",
"score": 3
} |
#### File: py/build/process_agent_config.py
```python
import json
import re
import sys
from os import getenv
from collections import defaultdict
def format_agent_config_string(string):
"""
Takes a string from the agent config template and formats it for output in
agent_config shortcode.
- If string contains exactly one '#' and no alphabetic characters,
remove the '#' and any new line characters.
- If the string contains exactly one '#' and has alphabetic characters,
remove the '#' (this represents a config key/value)
"""
# Match any lines containing strictly one '#' character that can have any
# number of leading or trailing whitespaces, and no words.
# Matching lines in the config template file are for spacing only.
regex_string = r"[^a-zA-Z0-9#]*#[^a-zA-Z0-9#]*$"
if re.match(regex_string, string):
return string.replace('#', '').replace('\n', '').strip()
elif '#' in string and '##' not in string:
return string.replace('# ', '') + '\n'
else:
return string + '\n'
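# Illustrative examples of the intended behaviour (assumed, not from the original file):
#   format_agent_config_string('#   ')          -> ''              (spacer line)
#   format_agent_config_string('# api_key:')    -> 'api_key:\n'    (single-# config line)
#   format_agent_config_string('## Basic conf') -> '## Basic conf\n' (left as-is)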
def create_agent_config_dict(dd_agent_config_string):
"""
Returns a dictionary where the keys represent each config type
(i.e. "Basic Configuration", "Log Collection Configuration", etc),
and the values are a string containing the config options comments.
"""
config_type_header_delimiter = '#######'
agent_config_array = dd_agent_config_string.splitlines()
agent_config_dict = {}
current_config_type = ''
for index, line in enumerate(agent_config_array):
if config_type_header_delimiter in line:
config_type = agent_config_array[index + 1].replace('#', '') \
.strip().lower()
if config_type:
agent_config_dict.setdefault(config_type, '')
current_config_type = config_type
else:
# Skip any Go template strings or comment boxes used to delineate
# config types.
if '{{' not in line and not re.match(r"^##.+##$", line):
formatted_string = format_agent_config_string(line)
agent_config_dict[current_config_type] += formatted_string
return agent_config_dict
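# Sketch of the expected return shape (assumed values, not from the original file):
# {
#     'basic configuration': 'api_key: <YOUR_API_KEY>\n...',
#     'log collection configuration': 'logs_enabled: false\n...',
# }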
def document_config_types(agent_config_dict):
"""
Returns a single string containing all available config types,
for Docs team to reference.
"""
return 'Available config types: \n\n' + '\n'.join(agent_config_dict.keys())
def process_agent_config(dd_agent_config_string):
"""
Takes the Datadog Agent Config template as a string, separates it by type,
formats the strings, and outputs the results as json to be consumed by the
agent_config.html shortcode for display.
"""
try:
agent_config_dict = create_agent_config_dict(dd_agent_config_string)
formatted_agent_config_json = json.dumps(agent_config_dict)
config_types_string = document_config_types(agent_config_dict)
with open('data/agent_config.json', 'w+') as agent_json_config_outfile:
agent_json_config_outfile.write(formatted_agent_config_json)
# Documenting what config types are available for Docs team to use in
# agent config shortcode.
with open('agent_config_types_list.txt', 'w+') as config_types_outfile:
config_types_outfile.write(config_types_string)
except Exception as err:
print('An error occurred building agent config data:')
print(err)
if getenv("LOCAL") != 'True':
sys.exit(1)
``` |
{
"source": "jongablop/cctbx_project",
"score": 2
} |
#### File: mmtbx/probe/AtomTypes.py
```python
from __future__ import print_function, nested_scopes, generators, division
from __future__ import absolute_import, unicode_literals
import sys
import re
import iotbx
from iotbx.map_model_manager import map_model_manager
from iotbx.data_manager import DataManager
import mmtbx
import boost_adaptbx.boost.python as bp
bp.import_ext("mmtbx_probe_ext")
import mmtbx_probe_ext as probe
##################################################################################
# Helper functions.
# Pad the name with spaces on the right to ensure that it is at least as long as
# requested.
def Pad(s, n=4):
ret = s
# Append spaces to the returned copy (strings are immutable, so grow ret itself).
while len(ret) < n:
ret += ' '
return ret
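# e.g. Pad('CA') returns 'CA' followed by two spaces (padded out to four characters).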
# Gobble up all spaces from the end of the name after non-space characters
def Unpad(n):
# Gobble up all spaces from the end of the name.
while n[-1] == ' ':
n = n[:-1]
return n
# Is a carbon atom a Carbonyl from a standard amino acid?
def IsSpecialAminoAcidCarbonyl(resName, atomName):
"""Given a residue and atom name, determine whether that atom is a C=O.
This does not mark the ' C ' atom that is always a Carbonyl; that is checked separately.
:param resName: String containing the 1-3-character residue name in all caps, including leading space.
:param atomName: String containing the 1-4-character atom name in all caps, including leading space.
:returns True if the atom is a C=O in a standard residue, False if not. Does not handle HET atoms.
"""
if Unpad(atomName) == ' CG':
return resName in ['ASP','ASN','ASX']
if Unpad(atomName) == ' CD':
return resName in ['GLU','GLN','GLX']
return False
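# For example (illustrative only): IsSpecialAminoAcidCarbonyl('ASP', ' CG ') is True,
# while IsSpecialAminoAcidCarbonyl('ALA', ' CB ') is False.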
# Table of aromatic-acceptor atoms by residue and atom name. The first entry in each list element is
# a list of residue names with trailing spaces trimmed. The second is a list of atoms that qualify
# for all of the entries in the residue names. In both cases, the strings are stripped of all
# spaces to the left and right.
_AromaticTable = [
# Note: Some atoms from these residues are listed in other sections. The combination of
# reside and atom name is not duplicated, but there are multiple entries for some residues --
# this is not a set.
[ ['HIS'], ['ND1','NE2'] ],
[ ['ADE','A'], ['N1','N3','N7','C2','C4','C5','C6','C8','N9'] ],
[ ['CYT','C'], ['N3','N1','C2','C4','C5','C6'] ],
[ ['GUA','G'], ['N3','N7','N1','C2','C4','C5','C6','C8','N9'] ],
[ ['THY','T'], ['N1','C2','N3','C4','C5','C6'] ],
[ ['URA','U'], ['N1','C2','N3','C4','C5','C6'] ],
[ ['DA'], ['N1','N3','N7','C2','C4','C5','C6','C8','N9'] ],
[ ['DC'], ['N3','N1','C2','C4','C5','C6'] ],
[ ['DG'], ['N3','N7','N1','C2','C4','C5','C6','C8','N9'] ],
[ ['DT'], ['N1','C2','N3','C4','C5','C6'] ],
[ ['HEM'], ['N A','N B','N C','N D'] ],
# Here we treat the aromatic Pi-bonds as hydrogen bond acceptors.
# Note: Some atoms from these residues are listed in other sections. The combination of
# residue and atom name is not duplicated, but there are multiple entries for some residues --
# this is not a set.
[ ['HEM'], ['C1A','C2A','C3A','C4A',
'C1B','C2B','C3B','C4B',
'C1C','C2C','C3C','C4C',
'C1D','C2D','C3D','C4D'] ],
[ ['PHE'], ['CZ','CE2','CE1','CD2','CD1','CG'] ],
[ ['TYR'], ['CZ','CE2','CE1','CD2','CD1','CG'] ],
# [ ['HIS'], ['CD2','CE1','CG'] ],
[ ['TRP'], ['CH2','CZ3','CZ2','CE3','CE2','NE1','CD2','CD1','CG'] ],
# Here we add the hydrogens and deuteriums that can be part of a ring from probe:select.c
[ ['PHE'], ['HD1','HD2','HE1','HE2','HZ','DD1','DD2','DE1','DE2','DZ'] ],
[ ['HIS'], ['HD1','HD2','HE1','HE2','DD1','DD2','DE1','DE2'] ],
[ ['TYR'], ['HD1','HD2','HE1','HE2','DD1','DD2','DE1','DE2'] ],
[ ['TRP'], ['HD1','HE1','HE3','HZ2','HZ3','HH2','DD1','DE1','DE3','DZ2','DZ3','DH2'] ],
[ ['U','URA','UTP','UDP','UMP','UR'], ['H3','HN3','H5','H6','D3','DN3','D5','D6'] ],
[ ['T','THY','TTP','TDP','TMP','5MU','DT','TR'], ['H3','HN3','H6','D3','DN3','D6'] ],
[ ['A','ADE','ATP','ADP','AMP','1MA','RIA','T6A','DA','AR'], ['H8','H2','D8','D2'] ],
[ ['C','CYT','CTP','CDP','CMP','5MC','OMC','DC','CR'], ['H5','H6','D5','D6'] ],
[ ['G','GUA','GTP','GDP','GMP','GSP','2MG','M2G','7MG','OMG','DG','GR'], ['H8','H1','HN1','D8','D1','DN1'] ],
[ ['YG','1MG'], ['H8','D8'] ],
[ ['PSU'], ['H6','D6','H1','HN1','D1','DN1','H3','HN3','D3','DN3'] ],
[ ['I','DI'], ['H8','H2','H1','HN1','D8','D2','D1','DN1'] ]
]
# Is a carbon or nitrogen or hydrogen atom part of an aromatic ring?
def IsAromatic(resName, atomName):
"""Given a residue and atom name, determine whether that atom is part of an aromatic ring.
:param resName: String containing the 1-3-character residue name in all caps, including leading space.
:param atomName: String containing the 1-4-character atom name in all caps, including leading space.
:returns True if the atom is aromatic in a standard residue, False if not. Does not handle HET atoms.
"""
for e in _AromaticTable:
if resName.strip() in e[0] and atomName.strip() in e[1]:
return True
return False
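# For example (illustrative only): IsAromatic('PHE', ' CZ ') is True because CZ is in
# the PHE ring entry above, while IsAromatic('ALA', ' CB ') is False.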
##################################################################################
class AtomFlags(object):
"""Flags describing attributes that atoms can have.
"""
EMPTY_FLAGS = 0 # No flags set
IGNORE_ATOM = 1 << 0 # This atom should be ignored during processing, as if it did not exist
DONOR_ATOM = 1 << 1 # Can be an electron donor
ACCEPTOR_ATOM = 1 << 2 # Can be an electron acceptor
HB_ONLY_DUMMY_ATOM = 1 << 3 # This is a dummy hydrogen added temporarily to a water when a donor is needed; it can Hbond but not clash.
METALLIC_ATOM = 1 << 4 # The atom is metallic
##################################################################################
class AtomInfo(object):
"""Class that stores extra information about an atom that is looked up by the AtomTypes
class methods. The information is stored in properties.
"""
def __init__(self, myValList = None):
try:
self._atomicNumber = myValList[0]
except Exception:
self._atomicNumber = 0
try:
self._name = myValList[1]
except Exception:
self._name = "?"
try:
self._fullName = myValList[2]
except Exception:
self._fullName = "unknown"
try:
self._vdwElectronCloudExplicit = myValList[3]
except Exception:
self._vdwElectronCloudExplicit = 0
try:
self._vdwNeutronExplicit = myValList[4]
except Exception:
self._vdwNeutronExplicit = 0
try:
self._vdwElectronCloudImplicit = myValList[5]
except Exception:
self._vdwElectronCloudImplicit = 0
try:
self._covalent = myValList[6]
except Exception:
self._covalent = 0
try:
self._kinemageColor = myValList[7]
except Exception:
self._kinemageColor = "grey"
try:
self._flags = myValList[8]
except Exception:
self._flags = AtomFlags.EMPTY_FLAGS
# Getter and setter methods
def get_atomicNumber(self): return self._atomicNumber
def set_atomicNumber(self, val): self._atomicNumber = val
def get_name(self): return self._name
def set_name(self, val): self._name = val
def get_fullName(self): return self._fullName
def set_fullName(self, val): self._fullName = val
def get_vdwElectronCloudExplicit(self): return self._vdwElectronCloudExplicit
def set_vdwElectronCloudExplicit(self, val): self._vdwElectronCloudExplicit = val
def get_vdwElectronCloudImplicit(self): return self._vdwElectronCloudImplicit
def set_vdwElectronCloudImplicit(self, val): self._vdwElectronCloudImplicit = val
def get_vdwNeutronExplicit(self): return self._vdwNeutronExplicit
def set_vdwNeutronExplicit(self, val): self._vdwNeutronExplicit = val
def get_covalent(self): return self._covalent
def set_covalent(self, val): self._covalent = val
def get_kinemageColor(self): return self._kinemageColor
def set_kinemageColor(self, val): self._kinemageColor = val
def get_flags(self): return self._flags
def set_flags(self, val): self._flags = val
# Properties
atomicNumber = property(get_atomicNumber, set_atomicNumber)
name = property(get_name, set_name)
fullName = property(get_fullName, set_fullName)
vdwElectronCloudExplicit = property(get_vdwElectronCloudExplicit, set_vdwElectronCloudExplicit)
vdwElectronCloudImplicit = property(get_vdwElectronCloudImplicit, set_vdwElectronCloudImplicit)
vdwNeutronExplicit = property(get_vdwNeutronExplicit, set_vdwNeutronExplicit)
covalent = property(get_covalent, set_covalent)
kinemageColor = property(get_kinemageColor, set_kinemageColor)
flags = property(get_flags, set_flags)
class AtomTypes(object):
"""Class that looks up extra information for atoms that is required by the MolProbity Probe and
Reduce modules.
"""
def __init__(self, useNeutronDistances = False, useImplicitHydrogenDistances = False):
"""Constructor.
:param useNeutronDistances: Use neutron distances and radii for scoring.
The default is to use electron-cloud distances. This is used both for the
separation between a Hydrogen and its bound partner and for the radius of the
Hydrogen, and it must be set consistently across the entire code base. When this is
True, it supersedes useImplicitHydrogenDistances.
:param useImplicitHydrogenDistances: Default is to use distances consistent with
explicitly-listed Hydrogens, but setting this to True uses implicit-Hydrogen distances instead.
This must be set consistently with the hydrogens in the model.
"""
##################################################################################
# Store state based on options.
self._useNeutronDistances = useNeutronDistances
self._useImplicitHydrogenDistances = useImplicitHydrogenDistances
##################################################################################
# Table of information about each type of atom. The elements in each list are as
# follows:
# Atomic number
# Name of the type, used to look up the atom type
# Full name of the type, useful when printing
# VDW radius for explicit hydrogen bonds at the electron cloud distance
# VDW radius for explicit hydrogen bonds at the neutron distance
# VDW radius for implicit hydrogen bonds at the electron cloud distance
# Covalent bond radius
# Name of the color to use in Mage/Kinemage to display the atom
# Flags describing the behavior of the atom, as follows:
# IGNORE_ATOM : This atom should be ignored
# DONOR_ATOM : This atom can be a hydrogen bond donor
# ACCEPTOR_ATOM : This atom can be a hydrogen bond acceptor
# HB_ONLY_DUMMY_ATOM : This is a dummy hydrogen added temporarily
# to a water when a donor is needed; it can Hbond but not clash.
# METALLIC_ATOM : This atom is metallic
#
# This table is based on the following:
# For non-metals, explicit VDW radii from
# Gavezzotti, J. Am. Chem. Soc. (1983) 105, 5220-5225.
# or, if unavailable,
# Bondi, J. Phys. Chem. (1964), V68, N3, 441-451.
# Covalent and ionic radii from
# Advanced Inorganic Chemistry, Cotton & Wilkinson, 1962, p93.
self._AtomTable = [
[ 0, "?", "unknown", 1.05, 1.05, 0.00, 0.00, "magenta", AtomFlags.EMPTY_FLAGS],
[ 0, "ignore", "ignore", 0.00, 0.00, 0.00, 0.00, "magenta", AtomFlags.IGNORE_ATOM],
[ 1, "H", "hydrogen", 1.22, 1.17, 0.00, 0.30, "grey", AtomFlags.EMPTY_FLAGS],
[ 1, "Har","hydrogen(aromatic)", 1.05, 1.00, 0.00, 0.30, "grey", AtomFlags.EMPTY_FLAGS],
[ 1, "Hpol","hydrogen(polar)", 1.05, 1.00, 0.00, 0.30, "grey", AtomFlags.DONOR_ATOM],
[ 1, "Ha+p","hydrogen(aromatic&polar)", 1.05, 1.00, 0.00, 0.30, "grey", AtomFlags.DONOR_ATOM],
[ 1, "HOd","hydrogen(only dummy)", 1.05, 1.00, 0.00, 0.30, "grey", AtomFlags.DONOR_ATOM|AtomFlags.HB_ONLY_DUMMY_ATOM],
[ 6, "C", "carbon", 1.70, 1.70, 1.90, 0.77, "white", AtomFlags.EMPTY_FLAGS],
[ 6, "Car","carbon(aromatic)", 1.75, 1.75, 1.90, 0.77, "white", AtomFlags.ACCEPTOR_ATOM],
[ 6, "C=O","carbon(carbonyl)", 1.65, 1.65, 1.80, 0.77, "white", AtomFlags.EMPTY_FLAGS],
[ 7, "N", "nitrogen", 1.55, 1.55, 1.70, 0.70, "sky", AtomFlags.EMPTY_FLAGS],
[ 7, "Nacc","nitrogen(acceptor)",1.55, 1.55, 1.70, 0.70, "sky", AtomFlags.ACCEPTOR_ATOM],
[ 8, "O", "oxygen", 1.40, 1.40, 1.50, 0.66, "red", AtomFlags.ACCEPTOR_ATOM],
[15, "P", "phosphorus", 1.80, 1.80, 1.80, 1.10, "pink", AtomFlags.EMPTY_FLAGS],
[16, "S", "sulfur", 1.80, 1.80, 1.90, 1.04, "yellow", AtomFlags.ACCEPTOR_ATOM],
[33, "As", "arsenic", 2.00, 2.00, 2.10, 1.21, "grey", AtomFlags.EMPTY_FLAGS],
[34, "Se", "selenium", 1.90, 1.90, 2.00, 1.17, "green", AtomFlags.EMPTY_FLAGS],
[ 9, "F", "fluorine", 1.30, 1.30, 1.30, 0.58, "green", AtomFlags.ACCEPTOR_ATOM],
[17, "Cl", "chlorine", 1.77, 1.77, 1.77, 0.99, "green", AtomFlags.ACCEPTOR_ATOM],
[35, "Br", "bromine", 1.95, 1.95, 1.95, 1.14, "brown", AtomFlags.ACCEPTOR_ATOM],
[53, "I", "iodine", 2.10, 2.10, 2.10, 1.33, "brown", AtomFlags.ACCEPTOR_ATOM],
# for most common metals we use Pauling's ionic radii
# "covalent radii" does not really mean anything, but the interaction distance for
# all interactions uses the same, ionic, radius. This code differs from the C++
# and C tables from Probe and Reduce. It was modified because of further study by the
# Richardson group in 2021.
[ 3, "Li", "lithium", 0.60, 0.60, 0.60, 0.60, "grey", AtomFlags.METALLIC_ATOM],
[11, "Na", "sodium", 0.95, 0.95, 0.95, 0.95, "grey", AtomFlags.METALLIC_ATOM],
[13, "Al", "aluminum", 0.50, 0.50, 0.50, 0.50, "grey", AtomFlags.METALLIC_ATOM],
[19, "K", "potassium", 1.33, 1.33, 1.33, 1.33, "grey", AtomFlags.METALLIC_ATOM],
[12, "Mg", "magnesium", 0.65, 0.65, 0.65, 0.65, "grey", AtomFlags.METALLIC_ATOM],
[20, "Ca", "calcium", 0.99, 0.99, 0.99, 0.99, "grey", AtomFlags.METALLIC_ATOM],
[25, "Mn", "manganese", 0.80, 0.80, 0.80, 0.80, "grey", AtomFlags.METALLIC_ATOM],
[26, "Fe", "iron", 0.74, 0.74, 0.74, 0.74, "grey", AtomFlags.METALLIC_ATOM],
[27, "Co", "cobalt", 0.70, 0.70, 0.70, 0.70, "blue", AtomFlags.METALLIC_ATOM],
[28, "Ni", "nickel", 0.66, 0.66, 0.66, 0.66, "grey", AtomFlags.METALLIC_ATOM],
[29, "Cu", "copper", 0.72, 0.72, 0.72, 0.72,"orange",AtomFlags.METALLIC_ATOM],
[30, "Zn", "zinc", 0.71, 0.71, 0.71, 0.71, "grey", AtomFlags.METALLIC_ATOM],
[37, "Rb", "rubidium", 1.48, 1.48, 1.48, 1.48, "grey", AtomFlags.METALLIC_ATOM],
[38, "Sr", "strontium", 1.10, 1.10, 1.10, 1.10, "grey", AtomFlags.METALLIC_ATOM],
[42, "Mo", "molybdenum", 0.93, 0.93, 0.93, 0.93, "grey", AtomFlags.METALLIC_ATOM],
[47, "Ag", "silver", 1.26, 1.26, 1.26, 1.26, "white",AtomFlags.METALLIC_ATOM],
[48, "Cd", "cadmium", 0.91, 0.91, 0.91, 0.91, "grey", AtomFlags.METALLIC_ATOM],
[49, "In", "indium", 0.81, 0.81, 0.81, 0.81, "grey", AtomFlags.METALLIC_ATOM],
[55, "Cs", "cesium", 1.69, 1.69, 1.69, 1.69, "grey", AtomFlags.METALLIC_ATOM],
[56, "Ba", "barium", 1.29, 1.29, 1.29, 1.29, "grey", AtomFlags.METALLIC_ATOM],
[79, "Au", "gold", 1.10, 1.10, 1.10, 1.10, "gold", AtomFlags.METALLIC_ATOM],
[80, "Hg", "mercury", 1.00, 1.00, 1.00, 1.00, "grey", AtomFlags.METALLIC_ATOM],
[81, "Tl", "thallium", 1.44, 1.44, 1.44, 1.44, "grey", AtomFlags.METALLIC_ATOM],
[82, "Pb", "lead", 0.84, 0.84, 0.84, 0.84, "grey", AtomFlags.METALLIC_ATOM],
# for other metals we use Shannon's ionic radii
# Acta Crystallogr. (1975) A32, pg751.
[23, "V", "vanadium", 0.79, 0.79, 0.79, 0.79, "grey", AtomFlags.METALLIC_ATOM],
[24, "Cr", "chromium", 0.73, 0.73, 0.73, 0.73, "grey", AtomFlags.METALLIC_ATOM],
[52, "Te", "tellurium", 0.97, 0.97, 0.97, 0.97, "grey", AtomFlags.METALLIC_ATOM],
[62, "Sm", "samarium", 1.08, 1.08, 1.08, 1.08, "grey", AtomFlags.METALLIC_ATOM],
[64, "Gd", "gadolinium", 1.05, 1.05, 1.05, 1.05, "grey", AtomFlags.METALLIC_ATOM],
[70, "Yb", "ytterbium", 1.14, 1.14, 1.14, 1.14, "grey", AtomFlags.METALLIC_ATOM],
[74, "W", "tungsten", 0.66, 0.66, 0.66, 0.66, "grey", AtomFlags.METALLIC_ATOM],
[78, "Pt", "platinum", 0.63, 0.63, 0.63, 0.63, "grey", AtomFlags.METALLIC_ATOM],
[92, "U", "uranium", 1.03, 1.03, 1.03, 1.03, "grey", AtomFlags.METALLIC_ATOM],
# Cotton & Wilkinson and also-
# <NAME> (ed.) in Table of interatomic distances and configuration in molecules
# and ions, Supplement 1956-1959, Special publication No. 18, Chemical Society,
# London, UK, 1965 (as listed in web-elements by <NAME>)
# http://www.shef.ac.uk/chemistry/web-elements
[ 2, "He", "helium", 1.60, 1.60, 1.60, 0.00, "sky", AtomFlags.EMPTY_FLAGS],
[ 4, "Be", "beryllium", 0.31, 0.31, 0.31, 0.90, "grey", AtomFlags.METALLIC_ATOM],
[ 5, "B", "boron", 0.20, 0.20, 0.20, 0.86, "grey", AtomFlags.EMPTY_FLAGS],
[10, "Ne", "neon", 1.60, 1.60, 1.60, 0.00, "pink", AtomFlags.EMPTY_FLAGS],
[14, "Si", "silicon", 2.10, 2.10, 2.10, 1.17, "grey", AtomFlags.METALLIC_ATOM],
[18, "Ar", "argon", 1.89, 1.89, 1.89, 0.00, "orange", AtomFlags.EMPTY_FLAGS],
[21, "Sc", "scandium", 0.68, 0.68, 0.68, 0.44, "grey", AtomFlags.METALLIC_ATOM],
[22, "Ti", "titanium", 0.75, 0.75, 0.75, 1.49, "grey", AtomFlags.METALLIC_ATOM],
[31, "Ga", "gallium", 0.53, 0.53, 0.53, 1.27, "grey", AtomFlags.METALLIC_ATOM],
[32, "Ge", "germanium", 0.60, 0.60, 0.60, 1.34, "grey", AtomFlags.METALLIC_ATOM],
[36, "Kr", "krypton", 2.01, 2.01, 2.01, 0.00, "greentint", AtomFlags.EMPTY_FLAGS],
[39, "Y", "yttrium", 0.90, 0.90, 0.90, 1.64, "grey", AtomFlags.METALLIC_ATOM],
[40, "Zr", "zirconium", 0.77, 0.77, 0.77, 1.51, "grey", AtomFlags.METALLIC_ATOM],
[50, "Sn", "tin", 0.71, 0.71, 0.71, 1.45, "grey", AtomFlags.METALLIC_ATOM],
[51, "Sb", "antimony", 2.20, 2.20, 2.20, 1.41, "grey", AtomFlags.METALLIC_ATOM],
[54, "Xe", "xenon", 2.18, 2.18, 2.18, 0.00, "magenta", AtomFlags.EMPTY_FLAGS],
[57, "La", "lanthanum", 1.03, 1.03, 1.03, 1.77, "grey", AtomFlags.METALLIC_ATOM],
[58, "Ce", "cerium", 0.87, 0.87, 0.87, 1.61, "grey", AtomFlags.METALLIC_ATOM],
[87, "Fr", "francium", 1.94, 1.94, 1.94, 2.68, "grey", AtomFlags.METALLIC_ATOM],
[88, "Ra", "radium", 1.62, 1.62, 1.62, 2.36, "grey", AtomFlags.METALLIC_ATOM],
[90, "Th", "thorium", 1.08, 1.08, 1.08, 1.82, "grey", AtomFlags.METALLIC_ATOM],
# finally, we have a set of elements where the radii are unknown
# so we use estimates and extrapolations based on web-elements data
[41, "Nb", "niobium", 0.86, 0.86, 0.86, 1.40, "grey", AtomFlags.METALLIC_ATOM],
[43, "Tc", "technetium", 0.71, 0.71, 0.71, 1.25, "grey", AtomFlags.METALLIC_ATOM],
[44, "Ru", "ruthenium", 0.82, 0.82, 0.82, 1.36, "grey", AtomFlags.METALLIC_ATOM],
[45, "Rh", "rhodium", 0.76, 0.76, 1.76, 1.30, "grey", AtomFlags.METALLIC_ATOM],
[46, "Pd", "palladium", 1.05, 1.05, 1.05, 1.59, "grey", AtomFlags.METALLIC_ATOM],
[59, "Pr", "praseodymium", 1.11, 1.11, 1.11, 1.65, "grey", AtomFlags.METALLIC_ATOM],
[60, "Nd", "neodymium", 1.10, 1.10, 1.10, 1.64, "grey", AtomFlags.METALLIC_ATOM],
[61, "Pm", "promethium", 1.15, 1.15, 1.15, 1.89, "grey", AtomFlags.METALLIC_ATOM],
[63, "Eu", "europium", 1.31, 1.31, 1.31, 1.85, "grey", AtomFlags.METALLIC_ATOM],
[65, "Tb", "terbium", 1.05, 1.05, 1.05, 1.59, "grey", AtomFlags.METALLIC_ATOM],
[66, "Dy", "dysprosium", 1.05, 1.05, 1.05, 1.59, "grey", AtomFlags.METALLIC_ATOM],
[67, "Ho", "holmium", 1.04, 1.04, 1.04, 1.58, "grey", AtomFlags.METALLIC_ATOM],
[68, "Er", "erbium", 1.03, 1.03, 1.03, 1.57, "grey", AtomFlags.METALLIC_ATOM],
[69, "Tm", "thulium", 1.02, 1.02, 1.02, 1.56, "grey", AtomFlags.METALLIC_ATOM],
[71, "Lu", "lutetium", 1.02, 1.02, 1.02, 1.56, "grey", AtomFlags.METALLIC_ATOM],
[72, "Hf", "hafnium", 0.85, 0.85, 0.85, 1.46, "grey", AtomFlags.METALLIC_ATOM],
[73, "Ta", "tantalum", 0.86, 0.86, 0.86, 1.40, "grey", AtomFlags.METALLIC_ATOM],
[75, "Re", "rhenium", 0.77, 0.77, 0.77, 1.31, "grey", AtomFlags.METALLIC_ATOM],
[76, "Os", "osmium", 0.78, 0.78, 0.78, 1.32, "grey", AtomFlags.METALLIC_ATOM],
[77, "Ir", "iridium", 0.80, 0.80, 0.80, 1.34, "grey", AtomFlags.METALLIC_ATOM],
[83, "Bi", "bismuth", 1.17, 1.17, 1.17, 1.71, "grey", AtomFlags.METALLIC_ATOM],
[84, "Po", "polonium", 0.99, 0.99, 0.99, 1.53, "grey", AtomFlags.METALLIC_ATOM],
[85, "At", "astatine", 0.91, 0.91, 0.91, 1.45, "grey", AtomFlags.METALLIC_ATOM],
[86, "Rn", "radon", 2.50, 2.50, 2.50, 0.00, "pinktint", AtomFlags.EMPTY_FLAGS],
[89, "Ac", "actinium", 1.30, 1.30, 1.30, 2.00, "grey", AtomFlags.METALLIC_ATOM],
[91, "Pa", "protoactinium", 1.10, 1.10, 1.10, 1.85, "grey", AtomFlags.METALLIC_ATOM],
[93, "Np", "neptunium", 1.00, 1.00, 1.00, 1.72, "grey", AtomFlags.METALLIC_ATOM],
[94, "Pu", "plutonium", 1.00, 1.00, 1.00, 1.67, "grey", AtomFlags.METALLIC_ATOM],
[95, "Am", "americium", 1.00, 1.00, 1.00, 1.63, "grey", AtomFlags.METALLIC_ATOM],
[96, "Cm", "curium", 1.00, 1.00, 1.00, 1.60, "grey", AtomFlags.METALLIC_ATOM],
[97, "Bk", "berkelium", 1.00, 1.00, 1.00, 1.58, "grey", AtomFlags.METALLIC_ATOM],
[98, "Cf", "californium", 1.00, 1.00, 1.00, 1.57, "grey", AtomFlags.METALLIC_ATOM],
[99, "Es", "einsteinium", 1.00, 1.00, 1.00, 1.56, "grey", AtomFlags.METALLIC_ATOM],
[100,"Fm", "fermium", 1.00, 1.00, 1.00, 1.55, "grey", AtomFlags.METALLIC_ATOM],
[101,"Md", "mendelevium", 1.00, 1.00, 1.00, 1.55, "grey", AtomFlags.METALLIC_ATOM],
[102,"No", "nobelium", 1.00, 1.00, 1.00, 1.55, "grey", AtomFlags.METALLIC_ATOM]
]
##################################################################################
# Construct a dictionary that maps from the name of the type in the _AtomTable
# to its full entry in the table to make it fast to look up an atom by its type
# name.
self._Index = {}
for e in self._AtomTable:
self._Index[e[1]] = e
##################################################################################
# Make a string that has all of the special first characters from an atom name that
# would cause it to parse the remainder of the name as if it were a full name (this
# uses a table that is a subset of the full name-parsing table).
self._specialAtomFirstChars = '*"\'`_+- 0123456789'
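# For example (illustrative): a name such as '1HB2' or ' CA ' begins with one of these
# characters, so the leading character is stripped and the remainder is parsed with the
# smaller special-name table below.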
##################################################################################
# Make a dictionary for upper-case second letters in multi-letter atom names where
# the first letter is H but the atom is not a Hydrogen.
# It stores the list of residues for which this name is valid.
# For example, "Hg" would have an entry named 'G' which lists the residues that can
# have an atom named "Hg" in it.
# Making an empty list here will have the same effect as not having an entry for
# a given letter. All names in the list must be fully upper case. Spaces are
# significant in the residue names.
# Atoms whose names start with H but are not on one of the lists will be converted
# to Hydrogen.
self._legalResiduesForHElements = {
'E' : [],
'F' : ['PHF', 'HF3', 'HF5'],
'G' : [' HG', 'HG2', 'HGB', 'HGC', 'HGI', 'MAC', 'MBO', 'MMC',
'PHG', 'PMB', 'AAS', 'AMS', 'BE7', 'CMH', 'EMC', 'EMT'],
'O' : [' HO', 'HO3'],
'S' : []
}
##################################################################################
# Upper-cased legal names for atoms whose name begins with H. Used to generate a
# warning if we have a different name than these and it is not a special atom name.
# HH and HD are there to get rid of warnings for PDB v3 names and H5'' is
# to handle RNA/DNA backbone atoms.
self._legalStandardHAtomNames = ['H', 'HE', 'HF', 'HG', 'HO', 'HS',
"H5''", 'HH', 'HD']
##################################################################################
# Table of allowed names for atoms that begin with one of the special characters.
# This table operates on the name after the special character has been removed.
# Each entry has a regular expression to match, the resulting name, and a Boolean
# telling whether to warn about this translation.
self._specialNameTable = [
[ r'A.1', 'O', True ],
[ r'A.2', 'N', True ],
[ r'B.*', 'B', False ],
[ r'C.*', 'C', False ],
[ r'D.*', 'H', True ], # These are counted as H internally, but output as D
[ r'F.*', 'F', False ],
# H atoms are handled separately
[ r'I.*', 'I', False ],
[ r'K.*', 'K', False ],
[ r'N.*', 'N', False ],
[ r'O.*', 'O', False ],
[ r'P.*', 'P', False ],
[ r'S.*', 'S', False ],
[ r'U.*', 'U', False ],
[ r'V.*', 'V', False ],
[ r'W.*', 'W', False ],
[ r'Y.*', 'Y', False ]
]
##################################################################################
# Table of allowed names for atoms that do not begin with one of the special characters.
# Each entry has a regular expression to match, the resulting name, and a Boolean
# telling whether to warn about this translation.
# The dot character '.' matches any single character and the * character means 0 or more
# instances of the previous character, so '.*' at the end matches an arbitrary ending.
# The expressions should be upper-case. The resulting names should match the case of
# the _Index dictionary.
# Earlier entries are checked before later entries, so for example the C.* entry at the end
# of the C block matches any atom starting with C that did not match one of the more
# specific C entries before it.
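# For example (illustrative): a padded name 'CL1 ' matches r'CL.*' and maps to 'Cl'
# without a warning, while a name like 'CB1 ' falls through to the catch-all r'C.*'
# entry and maps to 'C' with a warning.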
self._nameTable = [
[ r'AC.*', 'C', True ],
[ r'AG.*', 'Ag', False ],
[ r'AH.*', 'H', True ],
[ r'AL.*', 'Al', False ],
[ r'AM.*', 'Am', False ],
[ r'AN.*', 'N', True ],
[ r'AO.*', 'O', True ],
[ r'AP.*', 'P', True ],
[ r'AR.*', 'Ar', False ],
[ r'AS.*', 'As', False ],
[ r'AT.*', 'At', False ],
[ r'AU.*', 'Au', False ],
[ r'BA.*', 'Ba', False ],
[ r'BE.*', 'Be', False ],
[ r'BI.*', 'Bi', False ],
[ r'BK.*', 'Bk', False ],
[ r'BR.*', 'Br', False ],
[ r'CA.*', 'Ca', False ],
[ r'CC.*', 'C', True ],
[ r'CD.*', 'Cd', False ],
[ r'CE.*', 'Ce', False ],
[ r'CF.*', 'Cf', False ],
[ r'CH.*', 'H', True ],
[ r'CL.*', 'Cl', False ],
[ r'CM.*', 'Cm', False ],
[ r'CN.*', 'N', True ],
[ r'CO.*', 'Co', False ],
[ r'CP.*', 'C', True ],
[ r'CR.*', 'Cr', False ],
[ r'CS.*', 'Cs', False ],
[ r'CU.*', 'Cu', False ],
[ r'C.*', 'C', True ], # All other atoms starting with C are called Carbon with a warning
[ r'DY.*', 'Dy', False ],
[ r'DC.*', 'C', True ],
[ r'DH.*', 'H', True ],
[ r'DN.*', 'N', True ],
[ r'DO.*', 'O', True ],
[ r'DP.*', 'P', True ],
[ r'D.*', 'H', True ], # All other atoms starting with D are called Hydrogen
[ r'ER.*', 'Er', False ],
[ r'ES.*', 'Es', False ],
[ r'EU.*', 'Eu', False ],
[ r'EC.*', 'C', True ],
[ r'EH.*', 'H', True ],
[ r'EN.*', 'N', True ],
[ r'EO.*', 'O', True ],
[ r'EP.*', 'P', True ],
[ r'FE.*', 'Fe', False ],
[ r'FM.*', 'Fm', False ],
[ r'FR.*', 'Fr', False ],
[ r'FC.*', 'C', True ],
[ r'FH.*', 'H', True ],
[ r'FN.*', 'N', True ],
[ r'FO.*', 'O', True ],
[ r'FP.*', 'P', True ],
[ r'GA.*', 'Ga', False ],
[ r'GD.*', 'Gd', False ],
[ r'GE.*', 'Ge', False ],
[ r'GC.*', 'C', True ],
[ r'GH.*', 'H', True ],
[ r'GN.*', 'N', True ],
[ r'GO.*', 'O', True ],
[ r'GP.*', 'P', True ],
# H atoms are handled separately
[ r'IN.*', 'In', False ],
[ r'IR.*', 'Ir', False ],
[ r'KR.*', 'Kr', False ],
[ r'LA.*', 'La', False ],
[ r'LI.*', 'Li', False ],
[ r'LU.*', 'Lu', False ],
[ r'MD.*', 'Md', False ],
[ r'MG.*', 'Mg', False ],
[ r'MN.*', 'Mn', False ],
[ r'MO.*', 'Mo', False ],
[ r'NA.*', 'Na', True ],
[ r'NB.*', 'Nb', True ],
[ r'NC.*', 'C', True ],
[ r'ND.*', 'Nd', True ],
[ r'NE.*', 'Ne', True ],
[ r'NH.*', 'H', True ],
[ r'NI.*', 'Ni', False ],
[ r'NN.*', 'N', True ],
[ r'NO.*', 'O', True ], # Non standard
[ r'NP.*', 'P', True ], # Non standard
[ r'NS.*', 'S', True ],
[ r'N.*', 'N', True ], # All other atoms starting with N are called Nitrogen
[ r'OS.*', 'Os', False ],
[ r'O.*', 'O', True ], # All other atoms starting with O are called Oxygen
[ r'PA.*', 'Pa', True ],
[ r'PB.*', 'Pb', True ],
[ r'PD.*', 'Pd', True ],
[ r'PM.*', 'Pm', False ],
[ r'PO.*', 'Po', False ],
[ r'PR.*', 'Pr', False ],
[ r'PT.*', 'Pt', False ],
[ r'PU.*', 'Pu', False ],
[ r'P.*', 'P', True ], # All other atoms starting with P
[ r'RA.*', 'Ra', False ],
[ r'RB.*', 'Rb', False ],
[ r'RE.*', 'Re', False ],
[ r'RH.*', 'Rh', False ],
[ r'RN.*', 'Rn', False ],
[ r'RU.*', 'Ru', False ],
[ r'SB.*', 'Sb', True ],
[ r'SC.*', 'Sc', False ],
[ r'SE.*', 'Se', True ],
[ r'SI.*', 'Si', False ],
[ r'SM.*', 'Sm', False ],
[ r'SN.*', 'Sn', False ],
[ r'SR.*', 'Sr', False ],
[ r'S.*', 'S', True ], # All other atoms starting with S
[ r'TA.*', 'Ta', False ],
[ r'TB.*', 'Tb', False ],
[ r'TC.*', 'Tc', False ],
[ r'TE.*', 'Te', False ],
[ r'TH.*', 'Th', False ],
[ r'TI.*', 'Ti', False ],
[ r'TL.*', 'Tl', False ],
[ r'TM.*', 'Tm', False ],
[ r'XE.*', 'Xe', False ],
[ r'YB.*', 'Yb', False ],
[ r'ZN.*', 'Zn', False ],
[ r'ZR.*', 'Zr', False ]
]
##################################################################################
# Table of last-chance names for atoms that were not found in one of the above
# tables.
# This table operates on the name after the first character has been removed.
# Each entry has a regular expression to match, the resulting name, and a Boolean
# telling whether to warn about this translation -- these always warn.
self._lastChanceNameTable = [
[ r'H.*', 'H', True ],
[ r'D.*', 'H', True ],
[ r'C.*', 'C', True ],
[ r'N.*', 'N', True ],
[ r'O.*', 'O', True ],
[ r'P.*', 'P', True ],
[ r'S.*', 'S', True ],
[ r'I.*', 'I', True ],
[ r'K.*', 'K', True ],
[ r'V.*', 'V', True ],
[ r'W.*', 'W', True ],
[ r'U.*', 'U', True ],
[ r'AG.*', 'Ag', True ],
[ r'AL.*', 'Al', True ],
[ r'AS.*', 'As', True ],
[ r'AU.*', 'Au', True ],
[ r'FE.*', 'Fe', True ],
[ r'GD.*', 'Gd', True ],
[ r'LI.*', 'Li', True ],
[ r'MG.*', 'Mg', True ],
[ r'MN.*', 'Mn', True ],
[ r'MO.*', 'Mo', True ],
[ r'ZN.*', 'Zn', True ]
]
##################################################################################
# Given an iotbx.pdb.atom, look up its mmtbx_probe_ext.ExtraAtomInfo in the atom table.
# This includes checking for special cases using the helper functions above.
def FindAtomInfo(self, atom):
"""Given an iotbx.pdb.atom, look up its information in the atom table.
:param atom: iotbx.pdb.atom entry to look up.
:returns a pair (filled-in AtomInfo, warning string) on success, or (None, warning string) if the atom could not be recognized.
The warning will be empty if there was no problem with the lookup and will be a
printable string explaining the problem if there was one.
"""
# The element name we're going to use to look up in the table.
elementName = None
# Should we emit a warning about this name translation?
emitWarning = False
# Find the name of the atom and the residue it is in. The atom's parent is an
# atom group, which holds its residue name. The atom name is padded on the right
# with spaces so that it is at least four characters long and made upper-case.
atomPadded = Pad(atom.name.upper())
atomName = atomPadded # name that will be adjusted as we go
resName = atom.parent().resname.upper()
# Special-case check for a mis-placed Selenium atom
if atomName[0:3] == ' SE':
elementName = 'Se'
emitWarning = True
# Based on fixupAmbigAtomName() from Reduce ElementInfo.cpp
# Fix up ambiguous atom names before trying to figure out what they are. Emit a
# warning for each of them if we do rename them.
if resName in ['ASN','GLN'] and atomName in [' AD1',' AD2',' AE1',' AE2']:
emitWarning = True
# Strings are immutable, so rebuild the name instead of assigning to an index.
if atomName[3] == '1':
atomName = atomName[:1] + 'O' + atomName[2:]
else:
# All of the other entries in the list have '2', so we don't need a separate check
atomName = atomName[:1] + 'N' + atomName[2:]
###
# If we didn't hit a special case above, check using the appropriate table
# based on the first character of the element name.
if elementName is None:
# See if the first character of the atom name is special. If so, shift it off
# the name and replace the standard parsing table with the subset table for special
# atom names.
nameTable = self._nameTable
specialName = False
if atomName[0] in self._specialAtomFirstChars:
atomName = atomName[1:]
nameTable = self._specialNameTable
specialName = True
# For Hydrogens, see if we need to truncate the name by removing its second character
# because it is an He, Hf, Hg, Ho, or Hs in a residue that does not allow such names.
# If it is disallowed, replace it with a simple 'H'.
if atomName[0] == 'H':
# If we are not a truncated special name, then we do more scrutiny on the name and
# emit a warning if it is not any of the recognized names.
if (not specialName) and (not atomName in self._legalStandardHAtomNames):
emitWarning = True
# Use the default value of 'H' unless the second letter is in a list of residues
# that allow that second letter.
elementName = 'H'
try:
if resName in self._legalResiduesForHElements[atomName[1]]:
# This residue is in the list of those that are valid for this name,
# so we make the element name H followed by the same lower-case letter.
elementName = 'H' + atomName[1].lower()
except KeyError:
# We did not find an entry for this character, so we leave things alone
pass
# Look up the atom in all entries of the table to see if any of their regular
# expressions match. If so, set the elementName and warning emission based on
# the table entry.
for n in nameTable:
e = re.compile(n[0])
if e.match(atomName) is not None:
elementName = n[1]
emitWarning |= n[2]
break
###
# If we did not find an elementName yet, we always emit a warning,
# then default to Carbon, and then try another pass on names
# always skipping the first character of the name.
if elementName is None:
elementName = 'C'
emitWarning = True
atomName = atomPadded[1:]
for n in self._lastChanceNameTable:
e = re.compile(n[0])
if e.match(atomName) is not None:
elementName = n[1]
emitWarning |= n[2]
break
###
# Some Carbon atoms are too general when coming from the tables, so we need to adjust
# them to match the correct types.
# Based on reduce.cpp fixupHeavyAtomElementType().
# This maps from amino acid names to single letter codes, but we just care if it is in the list.
aa_resnames = iotbx.pdb.amino_acid_codes.one_letter_given_three_letter
if elementName == 'C':
if IsAromatic(resName, atomPadded):
elementName = 'Car'
elif atomPadded == ' C  ' and atom.parent().resname in aa_resnames:
elementName = 'C=O'
elif IsSpecialAminoAcidCarbonyl(resName, atomPadded):
elementName = 'C=O'
# @todo Aromatic and carbonyl carbons in non-standard HET residues need to be identified somehow
# (C++ code had this comment in it without code to handle the situation).
###
# Add Acceptor flag for some Nitrogen atoms.
# Based on reduce.cpp fixupHeavyAtomElementType().
if elementName == 'N':
# All acceptors in the C++ table were also marked as aromatic and vice-versa,
# so we use the test for both to locate them.
if IsAromatic(resName, atomPadded):
elementName = 'Nacc'
#elif @todo Het N atoms with 1 or 2 bonded (connected) neighbors:
# elementName = 'Nacc'
#elif @todo This is a fragment and may be an N-terminal (reduce.cpp line 849)
# elementName = 'Nacc'
###
# Some Hydrogens are aromatic.
# Based on probe:select.c:setAromaticProp()
if elementName == 'H':
if IsAromatic(resName, atomPadded):
elementName = 'Har'
###
# Some Hydrogens are polar
# Based on probe:probe.c:updateHydrogenInfo()
# If the Hydrogen is bonded to an N, O, or S, then mark it as polar and mark the
# parent as not a donor.
# @todo
###
# Look up the element name, which fails if it is not in the table.
try:
ai = AtomInfo(self._Index[elementName])
except Exception:
return (None, "WARNING: atom "+atom.name+" from "+atom.parent().resname+
' not recognized or unknown')
# Return the value, warning if we've been asked to.
warning = ""
if emitWarning:
warning = ("WARNING: atom "+atom.name+" from "+atom.parent().resname+
' will be treated as '+elementName)
return ( ai, warning )
def _FindProperRadius(self, ai):
"""Given an AtomInfo in the atom table, find the appropriate radius.
:param ai: AtomInfo to look up the radius in.
:returns Proper radius, depending on constructor parameters.
"""
if self._useNeutronDistances:
return ai.vdwNeutronExplicit
elif self._useImplicitHydrogenDistances:
return ai.vdwElectronCloudImplicit
else:
return ai.vdwElectronCloudExplicit
def FindProbeExtraAtomInfo(self, atom):
"""Given an iotbx.pdb.atom, look up its mmtbx_probe_ext.ExtraAtomInfo in the atom table.
Note: Makes use of the mmtbx.probe.useNeutronDistances option to determine whether to
return electron-cloud distance (default, when False) or neutron distances (when True).
:param atom: iotbx.pdb.atom entry to look up.
:returns a pair (mmtbx_probe_ext.ExtraAtomInfo structure filled with the info from the table,
warning string) on success, raises ValueError on failure. The warning string is empty
if there is no warning, contains a printable warning if there was.
"""
ai, warn = self.FindAtomInfo(atom)
if ai is None:
raise ValueError("FindProbeExtraAtomInfo(): Could not look up atom",atom.name)
ret = probe.ExtraAtomInfo()
ret.isAcceptor = bool(ai.flags & AtomFlags.ACCEPTOR_ATOM)
ret.isDonor = bool(ai.flags & AtomFlags.DONOR_ATOM)
ret.isDummyHydrogen = bool(ai.flags & AtomFlags.HB_ONLY_DUMMY_ATOM)
ret.vdwRadius = self._FindProperRadius(ai)
return ( ret, warn )
def MaximumVDWRadius(self):
"""Return the maximum VdW radius of any atom type in our table.
Cache the result after the first computation so that is faster when called
more than once.
"""
try:
return self._maxVDW
except Exception:
self._maxVDW = self._FindProperRadius(AtomInfo(self._AtomTable[0]))
for a in self._AtomTable[1:]:
v = self._FindProperRadius(AtomInfo(a))
if v > self._maxVDW:
self._maxVDW = v
return self._maxVDW
def Test(inFileName = None):
#========================================================================
# Make sure we can fill in mmtbx.probe.ExtraAtomInfoList info.
# Generate an example data model with a small molecule in it unless we
# were given a file name on the command line.
if inFileName is not None and len(inFileName) > 0:
# Read a model from a file using the DataManager
dm = DataManager()
dm.process_model_file(inFileName)
model = dm.get_model(inFileName)
else:
# Generate a small-molecule model using the map model manager
mmm=map_model_manager() # get an initialized instance of the map_model_manager
mmm.generate_map() # get a model from a generated small library model and calculate a map for it
model = mmm.model() # get the model
# Fill in an ExtraAtomInfoList with an entry for each atom in the hierarchy.
# We first find the largest i_seq sequence number in the model and reserve that
# many entries so we will always be able to fill in the entry for an atom.
atoms = model.get_atoms()
maxI = atoms[0].i_seq
for a in atoms:
if a.i_seq > maxI:
maxI = a.i_seq
extra = []
for i in range(maxI+1):
extra.append(probe.ExtraAtomInfo())
# Traverse the hierarchy and look up the extra data to be filled in.
# Get a list of all the atoms in the chain while we're at it
at = AtomTypes()
ph = model.get_hierarchy()
for m in ph.models():
for chain in m.chains():
for rg in chain.residue_groups():
for ag in rg.atom_groups():
for a in ag.atoms():
ei, warn = at.FindProbeExtraAtomInfo(a)
extra[a.i_seq] = ei
# User code should test for and print warnings
#if len(warn) > 0:
# print(warn)
#========================================================================
# Find an Oxygen atom and ask for its radii with explicit Hydrogen, implicit Hydrogen,
# and Nuclear radii.
o = None
ph = model.get_hierarchy()
for a in ph.models()[0].atoms():
if a.element.strip() == 'O':
o = a
assert o is not None, "AtomTypes.Test(): Could not find Oxygen (internal test failure)"
explicitH = AtomTypes(useNeutronDistances = False,
useImplicitHydrogenDistances = False).FindProbeExtraAtomInfo(o)[0].vdwRadius
implicitH = AtomTypes(useNeutronDistances = False,
useImplicitHydrogenDistances = True).FindProbeExtraAtomInfo(o)[0].vdwRadius
neutronH = AtomTypes(useNeutronDistances = True,
useImplicitHydrogenDistances = False).FindProbeExtraAtomInfo(o)[0].vdwRadius
assert explicitH != implicitH, "AtomTypes.Test(): Implicit and explicit Oxygen radii did not differ as expected"
#========================================================================
# Check MaximumVDWRadius, calling it twice to make sure both the cached and non-cached
# results work.
for i in range(2):
    assert at.MaximumVDWRadius() == 2.5, "AtomTypes.Test(): Unexpected MaximumVDWRadius(): got "+str(at.MaximumVDWRadius())+", expected 2.5"
#========================================================================
# Check IsAromatic() to ensure it gives results when expected and not when not.
aromaticChecks = [
['PHE', 'CE2', True],
[' U', 'HN3', True],
['ASN', 'O', False]
]
for a in aromaticChecks:
    assert IsAromatic(a[0],a[1]) == a[2], "AtomTypes.Test(): IsAromatic({},{}) did not return expected value {}".format(a[0],a[1],a[2])
if __name__ == '__main__':
#==============================================================
# Parse command-line arguments. The 0th argument is the name
# of the script. There can be the name of a PDB/CIF file to read.
realParams = 0
fileName = ""
for i in range(1,len(sys.argv)):
fileName = sys.argv[i]
# This will throw an assertion failure if there is a problem.
Test(fileName)
print('OK')
``` |
{
"source": "jongablop/jupytemplate",
"score": 3
} |
#### File: jupytemplate/tests/test_template_path.py
```python
import os
import jupytemplate
def test_template_path_is_correct():
actual_path = jupytemplate.get_template_path()
expected_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', 'jupytemplate', 'jupytemplate', 'template.ipynb')
)
assert actual_path == expected_path
``` |
{
"source": "jongalloway/azure-functions-templates",
"score": 2
} |
#### File: Templates/CosmosDBTrigger-Python/__init__.py
```python
import logging
import azure.functions as func
def main(documents: func.DocumentList) -> str:
if documents:
logging.info('Document id: %s', documents[0]['id'])
```
#### File: Templates/DurableFunctionsEntity-Python-2.x/__init__.py
```python
import logging
import json
import azure.functions as func
import azure.durable_functions as df
def entity_function(context: df.DurableOrchestrationContext):
current_value = context.get_state(lambda: 0)
operation = context.operation_name
if operation == "add":
amount = context.get_input()
current_value += amount
context.set_result(current_value)
elif operation == "reset":
current_value = 0
elif operation == "get":
context.set_result(current_value)
context.set_state(current_value)
main = df.Entity.create(entity_function)
``` |
{
"source": "JongaMatos/prova_regex",
"score": 2
} |
#### File: JongaMatos/prova_regex/exemplo.py
```python
import builtins
import math
import sys
from typing import Any
from lark import Lark, InlineTransformer, LarkError, Token, Tree
# Constants (some tasks ask you to include specific variables in this
# part of the file)
NAME = "<NAME>"
MATRICULA = "01/2345678"
...
# Ruspy grammar. (do not change this variable's name, tests depend on it!)
GRAMMAR = r"""
// Copy the full contents of ruspy.lark into this string, or only the
// parts relevant to each activity.
//
// You can work on the file separately, but copy and paste its contents
// in here before submitting the assignment.
mod: "TODO :)"
seq: "TODO :)"
"""
grammar_expr = Lark(GRAMMAR, parser="lalr", start="seq")
grammar_mod = Lark(GRAMMAR, parser="lalr", start="mod")
# (do not change this class's name; feel free to change the implementations!)
class RuspyTransformer(InlineTransformer):
from operator import add, sub, mul, truediv as div, pow, neg, pos
from operator import rshift, lshift, or_, and_, xor
from operator import eq, ne, gt, lt, ge, le
global_names = {
        **vars(math),  # include all functions from the math module
        **vars(builtins),  # include all standard Python builtins
"answer": 42,
"println": print,
"true": True,
"false": False,
"null": None,
}
    # These type declarations exist only to keep VSCode happy.
_transform_children: Any
_call_userfunc: Any
transform: Any
    # Constructor
def __init__(self):
super().__init__()
self.env = self.global_names.copy()
    # Handle terminal symbols -------------------------------------------------
def INT(self, tk):
        # Sometimes we need to work a little harder to get the result than
        # simply calling int(x)
data = tk.replace('_', '')
if set(data) == {'0'}:
            raise ValueError('FIXME!')  # (the solution here is trivial :)
return int(data)
def FLOAT(self, tk):
        return float(tk)
    # Handle non-terminal symbols ---------------------------------------------
def lit(self, tk):
if not isinstance(tk, Token):
return tk
try:
return getattr(self, tk.type)(tk)
except AttributeError:
raise NotImplementedError(f"Implemente a regra def {tk.type}(self, tk): ... no transformer")
def name(self, name):
raise NotImplementedError("name")
def assign(self, name, value):
raise NotImplementedError("assign")
...
    # Special forms ------------------------------------------------------------
    # Ordinary non-terminals receive already-transformed arguments. Special
    # forms require manual evaluation, which can be useful to control more
    # precisely how many times each argument should be evaluated. This is
    # useful for loops, conditional execution, etc.
    #
    # The list of special forms must be declared explicitly
special = {"if_", "for_", "while_", "fn", "lambd", "and_e", "or_e"}
    # We override this method to enable special forms in the transformer.
def _transform_tree(self, tree):
if tree.data in self.special:
children = tree.children
else:
children = list(self._transform_children(tree.children))
return self._call_userfunc(tree, children)
    # Evaluation is performed by the eval method.
def eval(self, obj):
"""
Força a avaliação de um nó da árvore sintática em uma forma especial.
"""
if isinstance(obj, Tree):
return self.transform(obj)
elif isinstance(obj, Token):
try:
return getattr(self, obj.type)(obj)
except AttributeError:
return obj
else:
return obj
    # List of special forms
def and_e(self, x, y):
        # This is the simplest form. We explicitly evaluate each argument.
        # Note that "x and y" in Python evaluates x and only evaluates y when the
        # first argument is truthy. This is exactly the desired behavior.
return self.eval(x) and self.eval(y)
def or_e(self, x, y):
raise NotImplementedError("or_e")
def if_(self, cond, then, else_=None):
raise NotImplementedError("if")
def while_(self, cond, block):
raise NotImplementedError("while")
def for_(self, id, expr, block):
raise NotImplementedError("for")
def fn(self, name, args, block):
        # Hint: reuse the lambd implementation
raise NotImplementedError("fn")
def lambd(self, args, block):
raise NotImplementedError("fn")
def eval(src):
"""
    Evaluates a ruspy expression.
>>> eval("1 + 1")
2
"""
return _eval_or_exec(src, is_exec=False)
def module(src) -> dict:
"""
    Evaluates a ruspy module and returns a dictionary with the functions
    defined in the module.
    You can use these functions from Python code.
>>> dic = module("fn incr(n: int) { n + 1 }")
>>> f = dic["incr"]
>>> f(1)
2
"""
return _eval_or_exec(src, is_exec=True)
def run(src):
"""
    Evaluates a ruspy module and automatically runs its main function.
    >>> src = '''
    ... fn main() {
    ...     print("hello world!")
    ... }
    ... '''
    >>> run(src)
    hello world!
"""
mod = module(src)
main = mod.get("main")
if not main:
raise RuntimeError('módulo não define uma função "main()"')
main()
def _eval_or_exec(src: str, is_exec=False) -> Any:
    # Helper used internally by eval/module/run.
if is_exec:
grammar = grammar_mod
else:
grammar = grammar_expr
try:
tree = grammar.parse(src)
except LarkError:
print(f"Erro avaliando a expressão: \n{src}")
print("\nImprimindo tokens")
for i, tk in enumerate(grammar.lex(src), start=1):
print(f" - {i}) {tk} ({tk.type})")
raise
transformer = RuspyTransformer()
result = transformer.transform(tree)
if isinstance(result, Tree):
print(tree.pretty())
bads = [*tree.find_pred(lambda x: not hasattr(transformer, x.data))]
bad = bads[0] if bads else tree
raise NotImplementedError(
f"""
não implementou regra para lidar com: {tree.data!r}.
Crie um método como abaixo na classe do transformer.
def {bad.data}(self, ...):
return ...
"""
)
return result
# Command-line interface. Reads a ruspy file and passes it to the eval function
# or equivalent. You can modify the contents inside the "if" block to run other
# test code when running this file. The example below provides a minimally
# decent command-line interface for interacting with ruspy.
if __name__ == "__main__":
if "--help" in sys.argv:
print("Digite python ruspy.py [ARQUIVO] [--script]")
print("")
print("Opções:")
print(" --help:")
print(" mostra mensagem de ajuda")
print(" --script:")
print(" avalia como expressão no modo script, como se")
print(" estivéssemos executando o código dentro da função main()")
exit()
elif "--script" in sys.argv:
do_eval = True
del sys.argv[sys.argv.index("--script")]
else:
do_eval = False
with open(sys.argv[-1]) as fd:
src = fd.read()
if do_eval:
print(f"\n> {eval(src)}")
else:
run(src)
``` |
{
"source": "jongbatax/ttkbootstrap",
"score": 3
} |
#### File: ttkbootstrap/gallery/media_player.py
```python
import tkinter
from tkinter import ttk
from ttkbootstrap import Style
class Application(tkinter.Tk):
def __init__(self):
super().__init__()
self.title('Media Player')
self.style = Style()
self.style.theme_use('minty')
self.player = Player(self)
self.player.pack(fill='both', expand='yes')
self.style.configure('TButton', font='Helvetica 20')
self.style.configure('header.TLabel', background=self.style.colors.border, padding=10)
class Player(ttk.Frame):
"""
An interface for a media player
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.configure(padding=1)
self.background = tkinter.PhotoImage(file='assets/mp_background.png')
self.controls = {
'skip-previous': '\u23EE',
'play': '\u23F5',
'pause': '\u23F8',
'stop': '\u23F9',
'skip-next': '\u23ED',
'open-file': '\U0001f4c2'}
# track information header
self.track_info = tkinter.StringVar(value='Open a file to begin playback')
header = ttk.Label(self, textvariable=self.track_info, font='Helvetica 12', style='header.TLabel')
header.pack(fill='x', padx=2)
# media container
self.container = ttk.Label(self, image=self.background)
self.container.pack(fill='both', expand='yes')
# progress bar
progress_frame = ttk.Frame(self, padding=10)
progress_frame.pack(fill='x', expand='yes')
self.time_elapsed = ttk.Label(progress_frame, text='00:00', font='Helvetica 12')
self.time_elapsed.pack(side='left')
self.time_scale = ttk.Scale(progress_frame, orient='horizontal', style='info.Horizontal.TScale')
self.time_scale.pack(side='left', fill='x', expand='yes', padx=10)
self.time_remaining = ttk.Label(progress_frame, text='00:00', font='Helvetica 12')
self.time_remaining.pack(side='right')
# button controls
control_frame = ttk.Frame(self)
control_frame.pack(fill='x', expand='yes')
self.buttons = {
'play': ttk.Button(control_frame, text=self.controls['play']),
'skip-previous': ttk.Button(control_frame, text=self.controls['skip-previous']),
'skip-next': ttk.Button(control_frame, text=self.controls['skip-next']),
'pause': ttk.Button(control_frame, text=self.controls['pause']),
'stop': ttk.Button(control_frame, text=self.controls['stop']),
'open-file': ttk.Button(control_frame, text=self.controls['open-file'], style='secondary.TButton')}
for button in ['skip-previous', 'play', 'skip-next', 'pause', 'stop', 'open-file']:
self.buttons[button].pack(side='left', fill='x', expand='yes', ipadx=5, ipady=5, padx=2, pady=2)
if __name__ == '__main__':
Application().mainloop()
```
#### File: ttkbootstrap/gallery/pc_cleaner.py
```python
import tkinter
from tkinter import ttk
from ttkbootstrap import Style
class Application(tkinter.Tk):
def __init__(self):
super().__init__()
self.title('PC Cleaner')
self.style = Style('pulse')
self.cleaner = Cleaner(self)
self.cleaner.pack(fill='both', expand='yes')
# custom styles
self.style.configure('header.TLabel', background=self.style.colors.secondary, foreground=self.style.colors.info)
# do not allow window resizing
self.resizable(False, False)
class Cleaner(ttk.Frame):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# application images
self.logo_img = tkinter.PhotoImage(name='logo', file='assets/icons8_broom_64px_1.png')
self.brush_img = tkinter.PhotoImage(name='cleaner', file='assets/icons8_broom_64px.png')
self.registry_img = tkinter.PhotoImage(name='registry', file='assets/icons8_registry_editor_64px.png')
self.tools_img = tkinter.PhotoImage(name='tools', file='assets/icons8_wrench_64px.png')
self.options_img = tkinter.PhotoImage(name='options', file='assets/icons8_settings_64px.png')
self.privacy_img = tkinter.PhotoImage(name='privacy', file='assets/icons8_spy_80px.png')
self.junk_img = tkinter.PhotoImage(name='junk', file='assets/icons8_trash_can_80px.png')
self.protect_img = tkinter.PhotoImage(name='protect', file='assets/icons8_protect_40px.png')
# header
header_frame = ttk.Frame(self, padding=20, style='secondary.TFrame')
header_frame.grid(row=0, column=0, columnspan=3, sticky='ew')
ttk.Label(header_frame, image='logo', style='header.TLabel').pack(side='left')
logo_text = ttk.Label(header_frame, text='pc cleaner', font=('TkDefaultFixed', 30), style='header.TLabel')
logo_text.pack(side='left', padx=10)
# action buttons
action_frame = ttk.Frame(self)
action_frame.grid(row=1, column=0, sticky='nsew')
cleaner_btn = ttk.Button(action_frame, image='cleaner', text='cleaner', compound='top', style='info.TButton')
cleaner_btn.pack(side='top', fill='both', ipadx=10, ipady=10)
registry_btn = ttk.Button(action_frame, image='registry', text='registry', compound='top', style='info.TButton')
registry_btn.pack(side='top', fill='both', ipadx=10, ipady=10)
tools_btn = ttk.Button(action_frame, image='tools', text='tools', compound='top', style='info.TButton')
tools_btn.pack(side='top', fill='both', ipadx=10, ipady=10)
options_btn = ttk.Button(action_frame, image='options', text='options', compound='top', style='info.TButton')
options_btn.pack(side='top', fill='both', ipadx=10, ipady=10)
# option notebook
notebook = ttk.Notebook(self)
notebook.grid(row=1, column=1, sticky='nsew', pady=(25, 0))
## windows tab
windows_tab = ttk.Frame(notebook, padding=10)
wt_scrollbar = tkinter.Scrollbar(windows_tab)
wt_scrollbar.pack(side='right', fill='y')
wt_canvas = tkinter.Canvas(windows_tab, border=0, highlightthickness=0, yscrollcommand=wt_scrollbar.set)
wt_canvas.pack(side='left', fill='both')
### adjust the scrollregion when the size of the canvas changes
wt_canvas.bind('<Configure>', lambda e: wt_canvas.configure(scrollregion=wt_canvas.bbox('all')))
wt_scrollbar.configure(command=wt_canvas.yview)
scroll_frame = ttk.Frame(wt_canvas)
wt_canvas.create_window((0, 0), window=scroll_frame, anchor='nw')
radio_options = [
'Internet Cache', 'Internet History', 'Cookies', 'Download History', 'Last Download Location',
'Session', 'Set Aside Tabs', 'Recently Typed URLs', 'Saved Form Information', 'Saved Password']
edge = ttk.Labelframe(scroll_frame, text='Microsoft Edge', padding=(20, 5))
edge.pack(fill='both')
explorer = ttk.Labelframe(scroll_frame, text='Internet Explorer', padding=(20, 5))
explorer.pack(fill='both', pady=10)
### add radio buttons to each label frame section
for section in [edge, explorer]:
for opt in radio_options:
cb = ttk.Checkbutton(section, text=opt, state='normal')
cb.invoke()
cb.pack(side='top', pady=2, fill='x')
notebook.add(windows_tab, text='windows')
## empty tab for looks
notebook.add(ttk.Frame(notebook), text='applications')
# results frame
results_frame = ttk.Frame(self)
results_frame.grid(row=1, column=2, sticky='nsew')
## progressbar with text indicator
pb_frame = ttk.Frame(results_frame, padding=(0, 10, 10, 10))
pb_frame.pack(side='top', fill='x', expand='yes')
pb = ttk.Progressbar(pb_frame, style='success.Striped.Horizontal.TProgressbar', variable='progress')
pb.pack(side='left', fill='x', expand='yes', padx=(15, 10))
ttk.Label(pb_frame, text='%').pack(side='right')
ttk.Label(pb_frame, textvariable='progress').pack(side='right')
self.setvar('progress', 78)
## result cards
cards_frame = ttk.Frame(results_frame, name='cards-frame', style='secondary.TFrame')
cards_frame.pack(fill='both', expand='yes')
### privacy card
priv_card = ttk.Frame(cards_frame, padding=1, style='secondary.TButton')
priv_card.pack(side='left', fill='both', padx=(10, 5), pady=10)
priv_container = ttk.Frame(priv_card, padding=40)
priv_container.pack(fill='both', expand='yes')
priv_lbl = ttk.Label(priv_container, image='privacy', text='PRIVACY', compound='top', anchor='center')
priv_lbl.pack(fill='both', padx=20, pady=(40, 0))
ttk.Label(priv_container, textvariable='priv_lbl', style='primary.TLabel').pack(pady=(0, 20))
self.setvar('priv_lbl', '6025 tracking file(s) removed')
### junk card
junk_card = ttk.Frame(cards_frame, padding=1, style='secondary.TButton')
junk_card.pack(side='left', fill='both', padx=(5, 10), pady=10)
junk_container = ttk.Frame(junk_card, padding=40)
junk_container.pack(fill='both', expand='yes')
        junk_lbl = ttk.Label(junk_container, image='junk', text='JUNK', compound='top', anchor='center')
junk_lbl.pack(fill='both', padx=20, pady=(40, 0))
ttk.Label(junk_container, textvariable='junk_lbl', style='primary.TLabel', justify='center').pack(pady=(0, 20))
        self.setvar('junk_lbl', '1,150 MB of unnecessary file(s)\nremoved')
## user notification
note_frame = ttk.Frame(results_frame, style='secondary.TFrame', padding=40)
note_frame.pack(fill='both')
note_msg = ttk.Label(note_frame, text='We recommend that you better protect your data', anchor='center',
style='header.TLabel', font=('Helvetica', 12, 'italic'))
note_msg.pack(fill='both')
if __name__ == '__main__':
Application().mainloop()
```
#### File: ttkbootstrap/gallery/simple_data_entry.py
```python
import tkinter
from tkinter import ttk
from ttkbootstrap import Style
class Application(tkinter.Tk):
def __init__(self):
super().__init__()
self.title('Simple data entry form')
self.style = Style('darkly')
self.form = EntryForm(self)
self.form.pack(fill='both', expand='yes')
class EntryForm(ttk.Frame):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.configure(padding=(20, 10))
self.columnconfigure(2, weight=1)
# form variables
self.name = tkinter.StringVar(value='', name='name')
self.address = tkinter.StringVar(value='', name='address')
self.phone = tkinter.StringVar(value='', name='phone')
# form headers
ttk.Label(self, text='Please enter your contact information', width=60).grid(columnspan=3, pady=10)
# create label/entry rows
for i, label in enumerate(['name', 'address', 'phone']):
ttk.Label(self, text=label.title()).grid(row=i + 1, column=0, sticky='ew', pady=10, padx=(0, 10))
ttk.Entry(self, textvariable=label).grid(row=i + 1, column=1, columnspan=2, sticky='ew')
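        # Note on the loop above: passing the plain strings 'name', 'address' and
        # 'phone' as textvariable works because each tkinter.StringVar above was
        # created with name=..., which registers a Tcl variable of that name that
        # the Entry widgets then resolve.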
# submit button
self.submit = ttk.Button(self, text='Submit', style='success.TButton', command=self.print_form_data)
self.submit.grid(row=4, column=0, sticky='ew', pady=10, padx=(0, 10))
# cancel button
self.cancel = ttk.Button(self, text='Cancel', style='danger.TButton', command=self.quit)
self.cancel.grid(row=4, column=1, sticky='ew')
def print_form_data(self):
print(self.name.get(), self.address.get(), self.phone.get())
if __name__ == '__main__':
Application().mainloop()
```
#### File: ttkbootstrap/gallery/timer_widget.py
```python
import tkinter
from tkinter import ttk
from ttkbootstrap import Style
class Application(tkinter.Tk):
def __init__(self):
super().__init__()
self.title('Timer')
self.style = Style()
self.timer = TimerWidget(self)
self.timer.pack(fill='both', expand='yes')
class TimerWidget(ttk.Frame):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# variables
self.running = tkinter.BooleanVar(value=False)
self.after_id = tkinter.StringVar()
self.time_elapsed = tkinter.IntVar()
self.time_text = tkinter.StringVar(value='00:00:00')
# timer label
self.timer_lbl = ttk.Label(self, font='-size 32', anchor='center', textvariable=self.time_text)
self.timer_lbl.pack(side='top', fill='x', padx=60, pady=20)
# control buttons
self.toggle_btn = ttk.Button(self, text='Start', width=10, style='info.TButton', command=self.toggle)
self.toggle_btn.pack(side='left', fill='x', expand='yes', padx=10, pady=10)
self.reset_btn = ttk.Button(self, text='Reset', width=10, style='success.TButton', command=self.reset)
self.reset_btn.pack(side='left', fill='x', expand='yes', pady=10)
self.quit_btn = ttk.Button(self, text='Quit', width=10, style='danger.TButton', command=self.quit)
self.quit_btn.pack(side='left', fill='x', expand='yes', padx=10, pady=10)
def toggle(self):
if self.running.get():
self.pause()
self.running.set(False)
self.toggle_btn.configure(text='Start', style='info.TButton')
else:
self.start()
self.running.set(True)
self.toggle_btn.configure(text='Pause', style='info.Outline.TButton')
def pause(self):
self.after_cancel(self.after_id.get())
def start(self):
self.after_id.set(self.after(1, self.increment))
def increment(self):
current = self.time_elapsed.get() + 1
self.time_elapsed.set(current)
time_str = '{:02d}:{:02d}:{:02d}'.format((current // 100) // 60, (current // 100) % 60, current % 100)
self.time_text.set(time_str)
self.after_id.set(self.after(100, self.increment))
def reset(self):
self.time_elapsed.set(0)
self.time_text.set('00:00:00')
if __name__ == '__main__':
Application().mainloop()
``` |
{
"source": "JongbinRyu/Ajou_Challenge",
"score": 3
} |
#### File: Ajou_Challenge/ReaderBoard/Score.py
```python
import os
import json
import datetime
from pytz import timezone, utc
def update_total_score(name_list_dict, score_rules, now_kst_aware, penalty_const=.1):
"""
Update Total Score when scheduled day written in "ScoreRule.json"
:param name_list_dict: This contains contestants score info loaded from "namelist.json"
:param score_rules: Score rules loaded from "ScoreRule.json"
    :param now_kst_aware: Current Aware Time(UTC difference info stored) for Korea/Seoul(+9:00)
    :param penalty_const: Weight applied to each contestant's penalty when computing the total score
    :return: None
"""
current_time = str(now_kst_aware)
name_list = name_list_dict['namelist']
# Read Score Rules and Calculate total score
for rule in score_rules:
date_rule = datetime.datetime.strptime(rule['date'], '%Y-%m-%d %H:%M:%S')
if now_kst_aware.month == date_rule.month and now_kst_aware.day == date_rule.day:
name_list_dict['total_score_update_time'] = current_time
print("Today is {} Update scheduled as {}".format(rule["var_name"], rule['date']))
# Todo: change 'avg_accuracy' to 'last_accuracy'
for info in name_list:
info[rule["var_name"]] = info['avg_accuracy']
for info in name_list:
total_score = 0
for rule in score_rules:
total_score += info[rule['var_name']] * rule['weight']
total_score -= info["penalty"] * penalty_const
info['total_score'] = round(total_score, 5)
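# Minimal usage sketch (hypothetical call; the real project loads the dicts from
# "namelist.json" and "ScoreRule.json" as described in the docstring):
#
#   with open('namelist.json') as f:
#       name_list_dict = json.load(f)
#   with open('ScoreRule.json') as f:
#       score_rules = json.load(f)
#   now_kst = datetime.datetime.now(timezone('Asia/Seoul'))
#   update_total_score(name_list_dict, score_rules, now_kst)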
``` |
{
"source": "jongcye/GeometricGAN",
"score": 3
} |
#### File: GeometricGAN/datasets/toy.py
```python
import torch
import torch.nn as nn
import math
from scipy.stats import multivariate_normal
import numpy as np
from torch.autograd import Variable
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
#import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# data generating function
# exp1: mixture of 4 gaussians
def exp1(num_data=1000):
if num_data % 4 != 0:
raise ValueError('num_data should be multiple of 4. num_data = {}'.format(num_data))
center = 8
sigma = 1 #math.sqrt(3)
# init data
d1x = torch.FloatTensor(num_data/4, 1)
d1y = torch.FloatTensor(num_data/4, 1)
d1x.normal_(center, sigma * 3)
d1y.normal_(center, sigma * 1)
d2x = torch.FloatTensor(num_data/4, 1)
d2y = torch.FloatTensor(num_data/4, 1)
d2x.normal_(-center, sigma * 1)
d2y.normal_(center, sigma * 3)
d3x = torch.FloatTensor(num_data/4, 1)
d3y = torch.FloatTensor(num_data/4, 1)
d3x.normal_(center, sigma * 3)
d3y.normal_(-center, sigma * 2)
d4x = torch.FloatTensor(num_data/4, 1)
d4y = torch.FloatTensor(num_data/4, 1)
d4x.normal_(-center, sigma * 2)
d4y.normal_(-center, sigma * 2)
d1 = torch.cat((d1x, d1y), 1)
d2 = torch.cat((d2x, d2y), 1)
d3 = torch.cat((d3x, d3y), 1)
d4 = torch.cat((d4x, d4y), 1)
d = torch.cat((d1, d2, d3, d4), 0)
# label
label = torch.IntTensor(num_data).zero_()
for i in range(4):
label[i*(num_data/4):(i+1)*(num_data/4)] = i
# shuffle
#shuffle = torch.randperm(d.size()[0])
#d = torch.index_select(d, 0, shuffle)
#label = torch.index_select(label, 0, shuffle)
# pdf
rv1 = multivariate_normal([ center, center], [[math.pow(sigma * 3, 2), 0.0], [0.0, math.pow(sigma * 1, 2)]])
rv2 = multivariate_normal([-center, center], [[math.pow(sigma * 1, 2), 0.0], [0.0, math.pow(sigma * 3, 2)]])
rv3 = multivariate_normal([ center, -center], [[math.pow(sigma * 3, 2), 0.0], [0.0, math.pow(sigma * 2, 2)]])
rv4 = multivariate_normal([-center, -center], [[math.pow(sigma * 2, 2), 0.0], [0.0, math.pow(sigma * 2, 2)]])
def pdf(x):
prob = 0.25 * rv1.pdf(x) + 0.25 * rv2.pdf(x) + 0.25 * rv3.pdf(x) + 0.25 * rv4.pdf(x)
return prob
def sumloglikelihood(x):
return np.sum(np.log((pdf(x) + 1e-10)))
return d, label, sumloglikelihood
# exp2: two spirals
def exp2(num_data=1000):
'''
This function is borrowed from http://stackoverflow.com/questions/16146599/create-artificial-data-in-matlab
'''
degrees = 450 #570
start = 90
#noise = 0 #0.2
deg2rad = (2*math.pi)/360
radius = 1.8
start = start * deg2rad;
N_mixtures = 100
N = 2 * N_mixtures
N1 = N_mixtures #math.floor(N/2)
N2 = N_mixtures #N-N1
if num_data % N_mixtures != 0:
raise ValueError('num_data should be multiple of {} (num_data = {})'.format(2*N_mixtures, num_data))
n = (start +
torch.sqrt(torch.linspace(0.075,1,N2).view(N2,1)).mul_(degrees)
).mul_(deg2rad)
mu1 = torch.cat((torch.mul(-torch.cos(n), n).mul_(radius),
torch.mul(torch.sin(n), n).mul_(radius)), 1)
n = (start +
torch.sqrt(torch.linspace(0.075,1,N1).view(N1,1)).mul_(degrees)
).mul_(deg2rad)
mu2 = torch.cat((torch.mul(torch.cos(n), n).mul_(radius),
torch.mul(-torch.sin(n), n).mul_(radius)), 1)
mu = torch.cat((mu1, mu2), 0)
num_data_per_mixture = num_data / (2*N_mixtures)
sigma = math.sqrt(0.6)
x = torch.zeros(num_data, 2)
for i in range(2*N_mixtures):
xx = x[i*num_data_per_mixture:(i+1)*num_data_per_mixture, :]
xx.copy_(torch.cat(
(torch.FloatTensor(num_data_per_mixture).normal_(mu[i,0], sigma).view(num_data_per_mixture, 1),
torch.FloatTensor(num_data_per_mixture).normal_(mu[i,1], sigma).view(num_data_per_mixture, 1)), 1))
# label
label = torch.IntTensor(num_data).zero_()
label[0:num_data/2] = 0
label[num_data/2:] = 1
# shuffle
#shuffle = torch.randperm(x.size()[0])
#x = torch.index_select(x, 0, shuffle)
#label = torch.index_select(label, 0, shuffle)
# pdf
rv_list = []
for i in range(2 * N_mixtures):
rv = multivariate_normal([mu[i,0], mu[i,1]], [[math.pow(sigma, 2), 0.0], [0.0, math.pow(sigma, 2)]])
rv_list.append(rv)
def pdf(x):
        prob = 1.0 / float(2*N_mixtures) * rv_list[0].pdf(x)
for i in range(1, 2 * N_mixtures):
prob += (1.0 / float(2*N_mixtures)) * rv_list[i].pdf(x)
return prob
def sumloglikelihood(x):
return np.sum(np.log((pdf(x) + 1e-10)))
return x, label, sumloglikelihood
# exp3: mixture of 2 gaussians with high bias
def exp3(num_data=1000):
if num_data < 2:
raise ValueError('num_data should be larger than 2. (num_data = {})'.format(num_data))
center = 6.2
sigma = 1 #math.sqrt(3)
n1 = int(round(num_data * 0.9))
n2 = num_data - n1
# init data
d1x = torch.FloatTensor(n1, 1)
d1y = torch.FloatTensor(n1, 1)
d1x.normal_(center, sigma * 5)
d1y.normal_(center, sigma * 5)
d2x = torch.FloatTensor(n2, 1)
d2y = torch.FloatTensor(n2, 1)
d2x.normal_(-center, sigma * 1)
d2y.normal_(-center, sigma * 1)
d1 = torch.cat((d1x, d1y), 1)
d2 = torch.cat((d2x, d2y), 1)
d = torch.cat((d1, d2), 0)
# label
label = torch.IntTensor(num_data).zero_()
label[0:n1] = 0
label[n1:] = 1
# shuffle
#shuffle = torch.randperm(d.size()[0])
#d = torch.index_select(d, 0, shuffle)
#label = torch.index_select(label, 0, shuffle)
# pdf
rv1 = multivariate_normal([ center, center], [[math.pow(sigma * 5, 2), 0.0], [0.0, math.pow(sigma * 5, 2)]])
rv2 = multivariate_normal([-center, -center], [[math.pow(sigma * 1, 2), 0.0], [0.0, math.pow(sigma * 1, 2)]])
def pdf(x):
prob = (float(n1) / float(num_data)) * rv1.pdf(x) + (float(n2) / float(num_data)) * rv2.pdf(x)
return prob
def sumloglikelihood(x):
return np.sum(np.log((pdf(x) + 1e-10)))
return d, label, sumloglikelihood
# exp4: grid shapes
def exp4(num_data=1000):
var = 0.1
max_x = 21
max_y = 21
min_x = -max_x
min_y = -max_y
n = 5
# init
nx, ny = (n, n)
x = np.linspace(min_x, max_x, nx)
y = np.linspace(min_y, max_y, ny)
xv, yv = np.meshgrid(x, y)
N = xv.size
if num_data % N != 0:
raise ValueError('num_data should be multiple of {} (num_data = {})'.format(N, num_data))
# data and label
mu = np.concatenate((xv.reshape(N,1), yv.reshape(N,1)), axis=1)
mu = torch.FloatTensor(mu)
num_data_per_mixture = num_data / N
sigma = math.sqrt(var)
x = torch.zeros(num_data, 2)
label = torch.IntTensor(num_data).zero_()
for i in range(N):
xx = x[i*num_data_per_mixture:(i+1)*num_data_per_mixture, :]
xx.copy_(torch.cat(
(torch.FloatTensor(num_data_per_mixture).normal_(mu[i,0], sigma).view(num_data_per_mixture, 1),
torch.FloatTensor(num_data_per_mixture).normal_(mu[i,1], sigma).view(num_data_per_mixture, 1)), 1))
label[i*num_data_per_mixture:(i+1)*num_data_per_mixture] = i
# shuffle
#shuffle = torch.randperm(x.size()[0])
#x = torch.index_select(x, 0, shuffle)
#label = torch.index_select(label, 0, shuffle)
# pdf
rv_list = []
for i in range(N):
rv = multivariate_normal([mu[i,0], mu[i,1]], [[math.pow(sigma, 2), 0.0], [0.0, math.pow(sigma, 2)]])
rv_list.append(rv)
def pdf(x):
        prob = 1.0 / float(N) * rv_list[0].pdf(x)
for i in range(1, N):
prob += (1.0 / float(N)) * rv_list[i].pdf(x)
return prob
def sumloglikelihood(x):
return np.sum(np.log((pdf(x) + 1e-10)))
return x, label, sumloglikelihood
# exp5: mixture of 2 gaussians with high bias
def exp5(num_data=1000):
if num_data < 2:
raise ValueError('num_data should be larger than 2. (num_data = {})'.format(num_data))
center = -5
sigma_x = 0.5
sigma_y = 7
n1 = num_data
# init data
d1x = torch.FloatTensor(n1, 1)
d1y = torch.FloatTensor(n1, 1)
d1x.normal_(center, sigma_x)
d1y.normal_(center, sigma_y)
d1 = torch.cat((d1x, d1y), 1)
d = d1
# label
label = torch.IntTensor(num_data).zero_()
label[:] = 0
# shuffle
#shuffle = torch.randperm(d.size()[0])
#d = torch.index_select(d, 0, shuffle)
#label = torch.index_select(label, 0, shuffle)
# pdf
rv1 = multivariate_normal([ center, center], [[math.pow(sigma_x, 2), 0.0], [0.0, math.pow(sigma_y, 2)]])
def pdf(x):
prob = (float(n1) / float(num_data)) * rv1.pdf(x)
return prob
def sumloglikelihood(x):
return np.sum(np.log((pdf(x) + 1e-10)))
return d, label, sumloglikelihood
# exp6: mixture of 2 gaussians with high bias
def exp6(num_data=1000):
if num_data < 2:
raise ValueError('num_data should be larger than 2. (num_data = {})'.format(num_data))
center = -5
sigma_x = 7
sigma_y = 7
n1 = num_data
# init data
d1x = torch.FloatTensor(n1, 1)
d1y = torch.FloatTensor(n1, 1)
d1x.normal_(center, sigma_x)
d1y.normal_(center, sigma_y)
d1 = torch.cat((d1x, d1y), 1)
d = d1
# label
label = torch.IntTensor(num_data).zero_()
label[:] = 0
# shuffle
#shuffle = torch.randperm(d.size()[0])
#d = torch.index_select(d, 0, shuffle)
#label = torch.index_select(label, 0, shuffle)
# pdf
rv1 = multivariate_normal([ center, center], [[math.pow(sigma_x, 2), 0.0], [0.0, math.pow(sigma_y, 2)]])
def pdf(x):
prob = (float(n1) / float(num_data)) * rv1.pdf(x)
return prob
def sumloglikelihood(x):
return np.sum(np.log((pdf(x) + 1e-10)))
return d, label, sumloglikelihood
def exp(exp_num='toy1', num_data=1000):
if exp_num == 'toy1':
return exp1(num_data)
elif exp_num == 'toy2':
return exp2(num_data)
elif exp_num == 'toy3':
return exp3(num_data)
elif exp_num == 'toy4':
return exp4(num_data)
elif exp_num == 'toy5':
return exp5(num_data)
elif exp_num == 'toy6':
return exp6(num_data)
else:
raise ValueError('unknown experiment {}'.format(exp_num))
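# Minimal usage sketch (illustrative; mirrors how main.py consumes this module):
#
#   data, label, sumloglikelihood = exp('toy1', num_data=1000)
#   # data is a (1000, 2) FloatTensor, label a (1000,) IntTensor
#   print(sumloglikelihood(data.numpy()))
#   save_image_real(data.numpy(), 'toy1_real.png')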
def save_image_fake(fake_data, filename):
#import warnings
#warnings.filterwarnings("ignore", category=FutureWarning)
#import numpy as np
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
fig, ax = plt.subplots()
#plt.scatter(real_data[:,0], real_data[:,1], color='blue', label='real')
plt.scatter(fake_data[:,0], fake_data[:,1], color='red', label='fake')
plt.axis('equal')
#plt.legend(loc='upper right', fancybox=True, shadow=True, fontsize=11)
plt.grid(True)
plt.xlim(-25, 25)
plt.ylim(-25, 25)
plt.minorticks_on()
plt.xlabel('x', fontsize=14, color='black')
plt.ylabel('y', fontsize=14, color='black')
#plt.title('Toy dataset')
plt.savefig(filename)
plt.close()
def save_image_real(real_data, filename):
#import warnings
#warnings.filterwarnings("ignore", category=FutureWarning)
#import numpy as np
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
fig, ax = plt.subplots()
plt.scatter(real_data[:,0], real_data[:,1], color='blue', label='real')
#plt.scatter(fake_data[:,0], fake_data[:,1], color='red', label='fake')
plt.axis('equal')
#plt.legend(loc='upper right', fancybox=True, shadow=True, fontsize=11)
plt.grid(True)
plt.xlim(-25, 25)
plt.ylim(-25, 25)
plt.minorticks_on()
plt.xlabel('x', fontsize=14, color='black')
plt.ylabel('y', fontsize=14, color='black')
#plt.title('Toy dataset')
plt.savefig(filename)
plt.close()
def save_image(real_data, fake_data, filename):
#import warnings
#warnings.filterwarnings("ignore", category=FutureWarning)
#import numpy as np
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
fig, ax = plt.subplots()
plt.scatter(real_data[:,0], real_data[:,1], color='blue', label='real')
plt.scatter(fake_data[:,0], fake_data[:,1], color='red', label='fake')
#plt.axis('equal')
plt.legend(loc='upper right', fancybox=True, shadow=True, fontsize=11)
plt.grid(True)
plt.xlim(-25, 25)
plt.ylim(-25, 25)
plt.minorticks_on()
plt.xlabel('x', fontsize=14, color='black')
plt.ylabel('y', fontsize=14, color='black')
plt.title('Toy dataset')
plt.savefig(filename)
plt.close()
def save_contour(netD, filename, cuda=False):
#import warnings
#warnings.filterwarnings("ignore", category=FutureWarning)
#import numpy as np
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.cm as cm
#import matplotlib.mlab as mlab
#import matplotlib.pyplot as plt
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
# gen grid
delta = 0.1
x = np.arange(-25.0, 25.0, delta)
y = np.arange(-25.0, 25.0, delta)
X, Y = np.meshgrid(x, y)
# convert numpy array to to torch variable
(h, w) = X.shape
XY = np.concatenate((X.reshape((h*w, 1, 1, 1)), Y.reshape((h*w, 1, 1, 1))), axis=1)
input = torch.Tensor(XY)
input = Variable(input)
if cuda:
input = input.cuda()
# forward
output = netD(input)
# convert torch variable to numpy array
Z = output.data.cpu().view(-1).numpy().reshape(h, w)
# plot and save
plt.figure()
CS1 = plt.contourf(X, Y, Z)
CS2 = plt.contour(X, Y, Z, alpha=.7, colors='k')
plt.clabel(CS2, inline=1, fontsize=10, colors='k')
plt.title('Simplest default with labels')
plt.savefig(filename)
plt.close()
'''
### test
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
num_data = 10000
exp_name = 'exp6'
if exp_name == 'exp1':
data, label, sumloglikelihood = exp1(num_data)
elif exp_name == 'exp2':
data, label, sumloglikelihood = exp2(num_data)
elif exp_name == 'exp3':
data, label, sumloglikelihood = exp3(num_data)
elif exp_name == 'exp4':
data, label, sumloglikelihood = exp4(num_data)
elif exp_name == 'exp5':
data, label, sumloglikelihood = exp5(num_data)
elif exp_name == 'exp6':
data, label, sumloglikelihood = exp6(num_data)
else:
raise ValueError('known exp: {}'.format(exp_name))
data = data.numpy()
label = label.numpy()
colors = ['red','purple','green','blue']
#print(data)
#print(data.shape)
#print(label)
#print(label.shape)
fig, ax = plt.subplots()
#plt.scatter(data[:,0], data[:,1], c=label, alpha=0.01, label=exp_name, cmap=matplotlib.colors.ListedColormap(colors))
plt.scatter(data[:,0], data[:,1], c=label, alpha=0.1, label=exp_name, cmap=matplotlib.colors.ListedColormap(colors))
plt.axis('equal')
plt.minorticks_on()
plt.grid(True)
plt.xlabel('x', fontsize=14, color='black')
plt.ylabel('y', fontsize=14, color='black')
plt.title('Toy dataset')
plt.savefig('toy.png')
'''
```
#### File: jongcye/GeometricGAN/main.py
```python
from __future__ import print_function
import argparse
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
import os
import time
from scipy.stats import multivariate_normal
import numpy as np
import models.dcgan as dcgan
import models.mlp as mlp
import models.toy as toy
import models.toy4 as toy4
import losses.SumLoss as sumloss
import losses.HingeLoss as hingeloss
import losses.LeakyHingeLoss as leakyhingeloss
import losses.BCELoss as bceloss
import utils.plot as plt
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument('--dataset', required=True, help='cifar10 | lsun | imagenet | folder | lfw | mnist | toy1~toy6')
parent_parser.add_argument('--dataroot', required=True, help='path to dataset')
parent_parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
parent_parser.add_argument('--batchSize', type=int, default=64, help='input batch size')
parent_parser.add_argument('--loadSize', type=int, default=64, help='the height / width of the input image (it will be cropped)')
parent_parser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network')
parent_parser.add_argument('--nc', type=int, default=3, help='number of channels in input (image)')
parent_parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parent_parser.add_argument('--ngf', type=int, default=64)
parent_parser.add_argument('--ndf', type=int, default=64)
parent_parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')
parent_parser.add_argument('--nsave', type=int, default=1, help='number of epochs to save models')
parent_parser.add_argument('--lrD', type=float, default=0.00005, help='learning rate for Critic, default=0.00005')
parent_parser.add_argument('--lrG', type=float, default=0.00005, help='learning rate for Generator, default=0.00005')
parent_parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parent_parser.add_argument('--weight_decay_D', type=float, default=0, help='weight_decay for discriminator. default=0')
parent_parser.add_argument('--weight_decay_G', type=float, default=0, help='weight_decay for generator. default=0')
parent_parser.add_argument('--cuda' , action='store_true', help='enables cuda')
parent_parser.add_argument('--ngpu' , type=int, default=1, help='number of GPUs to use')
parent_parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parent_parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parent_parser.add_argument('--Diters', type=int, default=1, help='number of D iters per loop')
parent_parser.add_argument('--Giters', type=int, default=1, help='number of G iters per loop')
parent_parser.add_argument('--noBN', action='store_true', help='use batchnorm or not (only for DCGAN)')
parent_parser.add_argument('--model_G', default='dcgan', help='model for G: dcgan | mlp | toy')
parent_parser.add_argument('--model_D', default='dcgan', help='model for D: dcgan | mlp | toy')
parent_parser.add_argument('--n_extra_layers', type=int, default=0, help='Number of extra layers on gen and disc')
parent_parser.add_argument('--experiment', default=None, help='Where to store samples and models')
parent_parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is rmsprop)')
# arguments for weight clipping
parent_parser.add_argument('--wclip_lower', type=float, default=-0.01)
parent_parser.add_argument('--wclip_upper', type=float, default=0.01)
wclip_parser = parent_parser.add_mutually_exclusive_group(required=False)
wclip_parser.add_argument('--wclip', dest='wclip', action='store_true', help='flag for wclip. for wgan, it is required.')
wclip_parser.add_argument('--no-wclip', dest='wclip', action='store_false', help='flag for wclip. for wgan, it is required.')
parent_parser.set_defaults(wclip=False)
# arguments for weight projection
parent_parser.add_argument('--wproj_upper', type=float, default=1.0)
wproj_parser = parent_parser.add_mutually_exclusive_group(required=False)
wproj_parser.add_argument('--wproj', dest='wproj', action='store_true', help='flag for wproj. for wgan, it is required.')
wproj_parser.add_argument('--no-wproj', dest='wproj', action='store_false', help='flag for wproj. for wgan, it is required.')
parent_parser.set_defaults(wproj=False)
# display setting
display_parser = parent_parser.add_mutually_exclusive_group(required=False)
display_parser.add_argument('--display', dest='display', action='store_true', help='flag for display. for toy1~toy4, it should be off.')
display_parser.add_argument('--no-display', dest='display', action='store_false', help='flag for display. for toy1~toy4, it should be off.')
parent_parser.set_defaults(display=True)
parent_parser.add_argument('--ndisplay', type=int, default=500, help='number of epochs to display samples')
# arguments for training criterion
def add_criterion(mode_parser, parent_parser):
    criterion_subparser = mode_parser.add_subparsers(title='criterion method: gan | wgan | meangan | geogan | ebgan',
dest='criterion')
# wgan
wgan_parser = criterion_subparser.add_parser('wgan', help='train using WGAN',
parents=[parent_parser])
# meangan
meangan_parser = criterion_subparser.add_parser('meangan', help='train using mean matching GAN',
parents=[parent_parser])
# geogan
geogan_parser = criterion_subparser.add_parser('geogan', help='train using geoGAN',
parents=[parent_parser])
    geogan_parser.add_argument('--C', type=float, default=1, help='tuning parameter C in 0.5 * ||w||^2 + C * hinge_loss(x)')
geogan_parser.add_argument('--margin', type=float, default=1, help='margin size in max(0, m - c * x), hinge loss, for generator loss')
gtrain_parser = geogan_parser.add_mutually_exclusive_group()
gtrain_parser.add_argument('--theory', action='store_const', dest='gtrain', const='theory',
help='For D, real_label = 1, fake_label = -1, and minimize svm primal loss. For G, fake_label = -1, and move perpendicular to hyperplane')
gtrain_parser.add_argument('--leaky', action='store_const', dest='gtrain', const='leaky',
help='For D, real_label = 1, fake_label = -1, and minimize svm primal loss. For G, fake_label = 1, and minize leaky svm primal loss with flipped labels.')
geogan_parser.set_defaults(gtrain='theory')
# ebgan
ebgan_parser = criterion_subparser.add_parser('ebgan', help='train using EBGAN',
parents=[parent_parser])
ebgan_parser.add_argument('--margin', type=float, default=1, help='slack margin constant in discriminator loss for fake data.')
# gan
gan_parser = criterion_subparser.add_parser('gan', help='train using GAN',
parents=[parent_parser])
gtrain_parser = gan_parser.add_mutually_exclusive_group()
gtrain_parser.add_argument('--theory', action='store_const', dest='gtrain', const='theory',
help='real_label = 1, fake_label = 0; thus, for D, min_D E_data[-log(D(x)] + E_gen[-log(1-D(G(z)))]. for G, min_G E_gen[log(1-D(G(z)))]')
gtrain_parser.add_argument('--practice', action='store_const', dest='gtrain', const='practice',
help='for D, min_D E_data[-log(D(x)] + E_gen[-log(1-D(G(z)))]. for G, min_G E_gen[-log(D(G(z)))]')
gtrain_parser.add_argument('--flip', action='store_const', dest='gtrain', const='flip',
help='real_label = 0, fake_label = 1.')
gan_parser.set_defaults(gtrain='practice')
# main parser and training mode
main_parser = argparse.ArgumentParser()
mode_subparsers = main_parser.add_subparsers(title='training mode: standard | bigan | ali',
dest='mode')
mode_standard_parser = mode_subparsers.add_parser('standard', help='train as standard implicit modeling')
add_criterion(mode_standard_parser, parent_parser)
#mode_bigan_parser = mode_subparsers.add_parser('bigan', help='train as BiGAN')
#add_criterion(mode_bigan_parser, parent_parser)
#mode_ali_parser = mode_subparsers.add_parser('ali', help='train as ALI')
#add_criterion(mode_ali_parser, parent_parser)
# parse arguments
opt = main_parser.parse_args()
print(opt)
# generate cache folder
os.system('mkdir samples')
if opt.experiment is None:
opt.experiment = 'samples/experiment'
os.system('mkdir -p {0}'.format(opt.experiment))
# set random seed
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
# apply cudnn option
cudnn.benchmark = True
# diagnose cuda option
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# load dataset
if opt.dataset in ['imagenet', 'folder', 'lfw']:
# folder dataset
dataset = dset.ImageFolder(root=opt.dataroot,
transform=transforms.Compose([
transforms.Scale(opt.loadSize),
transforms.CenterCrop(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif opt.dataset == 'lsun':
dataset = dset.LSUN(db_path=opt.dataroot, classes=['bedroom_train'],
transform=transforms.Compose([
transforms.Scale(opt.loadSize),
transforms.CenterCrop(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif opt.dataset == 'cifar10':
dataset = dset.CIFAR10(root=opt.dataroot, download=True,
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif opt.dataset == 'mnist':
dataset = dset.MNIST(root=opt.dataroot, download=True,
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif 'toy' in opt.dataset: #opt.dataset in ['toy1', 'toy2', 'toy3', 'toy4', 'toy5', 'toy6']:
if opt.nc != 2:
raise ValueError('nc should be 2 for simulated dataset. (opt.nc = {})'.format(opt.nc))
import datasets.toy as tdset
num_data = 100000
data_tensor, target_tensor, x_sumloglikelihood = tdset.exp(opt.dataset, num_data)
data_tensor = data_tensor.view(num_data, 2, 1, 1).contiguous()
dataset = torch.utils.data.TensorDataset(data_tensor, target_tensor)
assert dataset
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
# init model parameters
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = opt.nc
n_extra_layers = int(opt.n_extra_layers)
# custom function for weight projection onto the l2-norm unit ball
def weight_proj_l2norm(param):
norm = torch.norm(param.data, p=2) + 1e-8
coeff = min(opt.wproj_upper, 1.0/norm)
param.data.mul_(coeff)
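# Illustrative example (numbers are hypothetical): with wproj_upper = 1.0, a
# parameter tensor of L2 norm 4.0 gets coeff = min(1.0, 1/4.0) = 0.25 and is
# rescaled onto the unit ball, while a tensor of norm 0.5 gets
# coeff = min(1.0, 1/0.5) = 1.0 and is left unchanged.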
# custom weights initialization called on netG and netD
def weights_init_dcgan(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def weights_init_mlp(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.01)
m.bias.data.fill_(0)
def weights_init_toy(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.01)
if m.bias:
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.01)
m.bias.data.fill_(0)
# model initialization: generator
if opt.model_G == 'dcgan':
if opt.noBN:
netG = dcgan.DCGAN_G_nobn(opt.imageSize, nz, nc, ngf, ngpu, n_extra_layers)
else:
netG = dcgan.DCGAN_G(opt.imageSize, nz, nc, ngf, ngpu, n_extra_layers)
netG.apply(weights_init_dcgan)
elif opt.model_G == 'mlp':
netG = mlp.MLP_G(opt.imageSize, nz, nc, ngf, ngpu)
netG.apply(weights_init_mlp)
elif opt.model_G == 'toy':
netG = toy.MLP_G(1, nz, 2, ngf, ngpu)
netG.apply(weights_init_toy)
elif opt.model_G == 'toy4':
netG = toy4.MLP_G(1, nz, 2, ngf, ngpu)
netG.apply(weights_init_toy)
else:
    raise ValueError('unknown model: {}'.format(opt.model_G))
if opt.netG != '': # load checkpoint if needed
netG.load_state_dict(torch.load(opt.netG))
print(netG)
# model initialization: discriminator
if opt.model_D == 'dcgan':
netD = dcgan.DCGAN_D(opt.imageSize, nz, nc, ndf, ngpu, n_extra_layers)
netD.apply(weights_init_dcgan)
elif opt.model_D == 'mlp':
netD = mlp.MLP_D(opt.imageSize, nz, nc, ndf, ngpu)
netD.apply(weights_init_mlp)
elif opt.model_D == 'toy':
netD = toy.MLP_D(1, nz, 2, ndf, ngpu)
netD.apply(weights_init_toy)
elif opt.model_D == 'toy4':
netD = toy4.MLP_D(1, nz, 2, ndf, ngpu)
netD.apply(weights_init_toy)
else:
    raise ValueError('unknown model: {}'.format(opt.model_D))
if opt.criterion == 'gan':
# add sigmoid activation function for gan
netD.main.add_module('sigmoid',
nn.Sigmoid())
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netD)
# set type of adversarial training
if opt.criterion == 'gan':
criterion_R = nn.BCELoss()
criterion_F = nn.BCELoss()
if opt.gtrain == 'theory' or opt.gtrain == 'flip':
criterion_G = bceloss.BCELoss(-1)
else: #opt.gtrain == 'practice':
criterion_G = nn.BCELoss()
elif opt.criterion == 'wgan' or opt.criterion == 'meangan':
criterion_R = sumloss.SumLoss()
criterion_F = sumloss.SumLoss(-1)
criterion_G = sumloss.SumLoss()
elif opt.criterion == 'geogan':
criterion_R = hingeloss.HingeLoss()
criterion_F = hingeloss.HingeLoss()
if opt.gtrain == 'theory':
criterion_G = sumloss.SumLoss(sign=-1.0)
elif opt.gtrain == 'leaky':
criterion_G = leakyhingeloss.LeakyHingeLoss(margin=opt.margin)
else:
raise NotImplementedError('unknown opt.gtrain: {}'.format(opt.gtrain))
elif opt.criterion == 'ebgan':
criterion_R = sumloss.SumLoss(sign=1.0)
criterion_F = hingeloss.HingeLoss(margin=opt.margin)
criterion_G = sumloss.SumLoss(sign=1.0)
else:
raise ValueError('unknown criterion: {}'.format(opt.criterion))
# init variables
input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
label = torch.FloatTensor(opt.batchSize)
if opt.criterion == 'gan' and opt.gtrain == 'theory':
real_label = 1
fake_label = 0
gen_label = fake_label
elif opt.criterion == 'gan' and opt.gtrain == 'flip':
real_label = 0
fake_label = 1
gen_label = fake_label
elif opt.criterion == 'geogan' and opt.gtrain == 'theory':
real_label = 1
fake_label = -1
gen_label = fake_label
elif opt.criterion == 'geogan' and opt.gtrain == 'leaky':
real_label = 1
fake_label = -1
gen_label = real_label
elif opt.criterion == 'ebgan':
real_label = -1
fake_label = 1
gen_label = fake_label
else: # opt.gtrain == 'practice'
real_label = 1
fake_label = 0
gen_label = real_label
# init cuda
if opt.cuda:
netD.cuda()
netG.cuda()
criterion_R.cuda()
criterion_F.cuda()
criterion_G.cuda()
input, label = input.cuda(), label.cuda()
noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
# convert to autograd variable
input = Variable(input)
label = Variable(label)
noise = Variable(noise)
fixed_noise = Variable(fixed_noise)
# setup optimizer
if opt.criterion == 'geogan':
paramsD = [
{'params': filter(lambda p: p.cls_weight, netD.parameters()), 'weight_decay': 1.0 / (float(opt.batchSize) * float(opt.C)) }, # assign weight decay for geogan to cls layer only
{'params': filter(lambda p: p.cls_bias, netD.parameters()) }, # no weight decay to the bias of cls layer
{'params': filter(lambda p: not p.cls, netD.parameters()), 'weight_decay': opt.weight_decay_D }
]
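    # Interpretation (assuming criterion_R/criterion_F average the hinge loss over
    # the batch): dividing the SVM objective 0.5*||w||^2 + C*sum_i hinge_i by
    # batchSize*C gives (1/(batchSize*C))*0.5*||w||^2 + mean_i hinge_i, so the
    # weight decay of 1/(batchSize*C) on the cls weights together with the
    # batch-mean hinge loss reproduces that objective up to a constant factor.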
else:
paramsD = [
        {'params': filter(lambda p: p.cls, netD.parameters()) }, # no weight decay for the cls layer
{'params': filter(lambda p: not p.cls, netD.parameters()), 'weight_decay': opt.weight_decay_D }
]
#paramsD = [
# {'params': netD.parameters(), 'weight_decay': opt.weight_decay_D },
#]
if opt.adam:
optimizerD = optim.Adam(paramsD, lr=opt.lrD, betas=(opt.beta1, 0.999))#, weight_decay=opt.weight_decay_D)
optimizerG = optim.Adam(netG.parameters(), lr=opt.lrG, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay_G)
else:
optimizerD = optim.RMSprop(paramsD, lr=opt.lrD)#, weight_decay=opt.weight_decay_D)
optimizerG = optim.RMSprop(netG.parameters(), lr = opt.lrG, weight_decay=opt.weight_decay_G)
# training
gen_iterations = 0
disc_iterations = 0
errM_print = -float('inf')
errM_real_print = -float('inf')
errM_fake_print = -float('inf')
for epoch in range(opt.niter):
data_iter = iter(dataloader)
i = 0
while i < len(dataloader):
tm_start = time.time()
############################
# (1) Update D network
############################
for p in netD.parameters(): # reset requires_grad
p.requires_grad = True # they are set to False below in netG update
for p in netG.parameters():
p.requires_grad = False # to avoid computation
# train the discriminator Diters times
if opt.wclip and (gen_iterations < 25 or gen_iterations % 500 == 0):
Diters = 100
else:
Diters = opt.Diters
j = 0
while j < Diters and i < len(dataloader):
j += 1
disc_iterations += 1
##### weight clipping
# wclip parameters to a cube
if opt.wclip:
for p in netD.parameters():
if not p.cls:# or opt.criterion != 'geogan':
p.data.clamp_(opt.wclip_lower, opt.wclip_upper)
# wclip parameters to a cube for the last linear layer of disc if opt.criterion == 'wgan'
if opt.criterion == 'wgan':
for p in netD.parameters():
if p.cls:
p.data.clamp_(opt.wclip_lower, opt.wclip_upper)
##### weight projection
# weight projection to a cube for parameters
if opt.wproj:
for p in netD.parameters():
if not p.cls:# or opt.criterion != 'geogan':
weight_proj_l2norm(p)
# wproj parameters to a cube for the last linear layer of disc if opt.criterion == 'meangan'
if opt.criterion == 'meangan':
for p in netD.parameters():
if p.cls:
weight_proj_l2norm(p)
data_tm_start = time.time()
data = data_iter.next()
data_tm_end = time.time()
i += 1
# train with real
real_cpu, _ = data
netD.zero_grad()
batch_size = real_cpu.size(0)
input.data.resize_(real_cpu.size()).copy_(real_cpu)
label.data.resize_(batch_size).fill_(real_label)
outD_real = netD(input)
errD_real = criterion_R(outD_real, label)
errD_real.backward()
# train with fake
noise.data.resize_(batch_size, nz, 1, 1)
noise.data.normal_(0, 1)
fake = netG(noise)
label.data.fill_(fake_label)
input.data.copy_(fake.data)
outD_fake = netD(input)
errD_fake = criterion_F(outD_fake, label)
errD_fake.backward()
errD = errD_real + errD_fake
optimizerD.step()
############################
# (2) Update G network
############################
for p in netD.parameters():
p.requires_grad = False # to avoid computation
for p in netG.parameters():
p.requires_grad = True # reset requires_grad
j = 0
while j < opt.Giters:
j += 1
gen_iterations += 1
netG.zero_grad()
# in case our last batch was the tail batch of the dataloader,
# make sure we feed a full batch of noise
label.data.resize_(opt.batchSize).fill_(gen_label)
noise.data.resize_(opt.batchSize, nz, 1, 1)
noise.data.normal_(0, 1)
# forward G
fake = netG(noise)
# forward D (backward from D)
outG = netD(fake)
errG = criterion_G(outG, label)
errG.backward()
# update G
optimizerG.step()
############################
# Display results
############################
if opt.display and (gen_iterations % opt.ndisplay == 0):
if 'toy' in opt.dataset:
fake = netG(fixed_noise)
tdset.save_image(real_cpu.view(-1,2).numpy(),
fake.data.cpu().view(-1,2).numpy(),
'{0}/real_fake_samples_{1}.png'.format(opt.experiment, gen_iterations))
#tdset.save_contour(netD,
# '{0}/disc_contour_{1}.png'.format(opt.experiment, gen_iterations),
# cuda=opt.cuda)
else:
vutils.save_image(real_cpu, '{0}/real_samples.png'.format(opt.experiment), normalize=True)
fake = netG(fixed_noise)
vutils.save_image(fake.data, '{0}/fake_samples_{1}.png'.format(opt.experiment, gen_iterations), normalize=True)
tm_end = time.time()
if 'toy' in opt.dataset:
print('Epoch: [%d][%d/%d][%d]\t Time: %.3f DataTime: %.3f Loss_G: %f Loss_D: %f Loss_D_real: %f Loss_D_fake: %f x_real_sll: %f x_fake_sll: %f'
% (epoch, i, len(dataloader), gen_iterations,
tm_end-tm_start, data_tm_end-data_tm_start,
errG.data[0], errD.data[0], errD_real.data[0], errD_fake.data[0],
x_sumloglikelihood(real_cpu.view(-1,2).numpy()), x_sumloglikelihood(fake.data.cpu().view(-1,2).numpy())))
else:
print('Epoch: [%d][%d/%d][%d]\t Time: %.3f DataTime: %.3f Loss_G: %f Loss_D: %f Loss_D_real: %f Loss_D_fake: %f'
% (epoch, i, len(dataloader), gen_iterations,
tm_end-tm_start, data_tm_end-data_tm_start,
errG.data[0], errD.data[0], errD_real.data[0], errD_fake.data[0]))
############################
# Detect errors
############################
if np.isnan(errG.data[0]) or np.isnan(errD.data[0]) or np.isnan(errD_real.data[0]) or np.isnan(errD_fake.data[0]):
raise ValueError('nan detected.')
if np.isinf(errG.data[0]) or np.isinf(errD.data[0]) or np.isinf(errD_real.data[0]) or np.isinf(errD_fake.data[0]):
raise ValueError('inf detected.')
# do checkpointing
if (epoch+1) % opt.nsave == 0:
torch.save(netG.state_dict(), '{0}/netG_epoch_{1}.pth'.format(opt.experiment, epoch))
torch.save(optimizerG.state_dict(), '{0}/optG_epoch_{1}.pth'.format(opt.experiment, epoch))
torch.save(netD.state_dict(), '{0}/netD_epoch_{1}.pth'.format(opt.experiment, epoch))
torch.save(optimizerD.state_dict(), '{0}/optD_epoch_{1}.pth'.format(opt.experiment, epoch))
```
#### File: jongcye/GeometricGAN/plot_log.py
```python
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from parse import *
import progressbar
import math
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import pickle
import os.path
import scipy
import scipy.signal
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("output_prefix", help="output prefix. output images will be <output prefix>_disc_loss.png, <output prefix>_real_loss.png, <output prefix>_fake_loss.png, <output prefix>_gen_loss.png")
parser.add_argument("-d", "--data", nargs=2, action='append',
help="<label> <log_filename> pairs. multiple data are available. if it is the case, all the logs will be drawed in each corresponding plot (disc, real, fake, gen)")
parser.add_argument("-m", "--med", help="median filter size",
type=int,
default=101)
args = parser.parse_args()
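# parse_logs() below assumes the log lines follow the training script's print
# format, e.g.
#   Epoch: [0][12/938][34]  Time: 0.123 DataTime: 0.045 Loss_G: ... Loss_D: ... Loss_D_real: ... Loss_D_fake: ...
# so tokens[7], tokens[9], tokens[11] and tokens[13] hold the generator,
# discriminator, real and fake losses, and tokens[1] holds the
# [epoch][iter/max_iter][gen_iter] counters parsed further below.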
def parse_logs(log_path):
# Open log_path
with open(log_path, 'rt') as f:
lines = f.readlines()
num_data = len(lines)-1
# Init necessary variables
daxis = np.zeros(num_data)
gaxis = np.zeros(num_data)
real_loss = np.zeros(num_data)
fake_loss = np.zeros(num_data)
disc_loss = np.zeros(num_data)
gen_loss = np.zeros(num_data)
# Init bar and do parsing
print "progress: "
bar = progressbar.ProgressBar(maxval=num_data, widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
bar.start()
for i in xrange(num_data):
tokens = lines[i].split()
disc_loss[i] = float(tokens[9])
real_loss[i] = float(tokens[11])
fake_loss[i] = float(tokens[13])
gen_loss[i] = float(tokens[7])
buffers = parse("[{}][{}/{}][{}]", tokens[1])
epoch = int(buffers[0])+1
cur_diter = int(buffers[1])
max_diter = int(buffers[2])
giter = int(buffers[3])
daxis[i] = (float(epoch)-1) + float(cur_diter)/float(max_diter)
gaxis[i] = giter
bar.update(i+1)
bar.finish()
return {'daxis':daxis, 'gaxis':gaxis,
'real':real_loss, 'fake':fake_loss , 'disc':disc_loss, 'gen':gen_loss }
###################################### process data
# init input arguments
num_files = len(args.data)
logs = []
output_prefix = args.output_prefix
# load logs
for i in range(0, num_files):
log_filename = args.data[i][1] #log_filenames[i]
log_path = log_filename
log_cache_path = '{}.{}'.format(log_path, 'pkl')
if not os.path.exists(log_cache_path):
print 'parse log (label: {})'.format(args.data[i][0])
logs.append(parse_logs(log_path))
pickle.dump(logs[i], open(log_cache_path , "wb"))
else:
logs.append(pickle.load(open(log_cache_path, "rb")))
###################################### plot gen loss
fig, ax = plt.subplots()
for i in range(0, num_files):
plt.plot(logs[i]['gaxis'], logs[i]['gen'], label=args.data[i][0])
plt.legend(loc='lower right', fancybox=True, shadow=True, fontsize=11)
plt.grid(True)
plt.minorticks_on()
plt.xlabel('generator iterations', fontsize=14, color='black')
plt.ylabel('gen loss', fontsize=14, color='black')
plt.title('Generator Loss')
plt.savefig('{}_gen_loss'.format(output_prefix))
###################################### plot real loss
fig, ax = plt.subplots()
for i in range(0, num_files):
plt.plot(logs[i]['gaxis'], logs[i]['real'], label=args.data[i][0])
plt.legend(loc='upper right', fancybox=True, shadow=True, fontsize=11)
plt.grid(True)
plt.minorticks_on()
plt.xlabel('generator iterations', fontsize=14, color='black')
plt.ylabel('real loss', fontsize=14, color='black')
plt.title('Real Loss')
plt.savefig('{}_real_loss'.format(output_prefix))
###################################### plot fake loss
fig, ax = plt.subplots()
for i in range(0, num_files):
plt.plot(logs[i]['gaxis'], logs[i]['fake'], label=args.data[i][0])
plt.legend(loc='upper right', fancybox=True, shadow=True, fontsize=11)
plt.grid(True)
plt.minorticks_on()
plt.xlabel('generator iterations', fontsize=14, color='black')
plt.ylabel('fake loss', fontsize=14, color='black')
plt.title('Fake Loss')
plt.savefig('{}_fake_loss'.format(output_prefix))
###################################### plot disc loss
fig, ax = plt.subplots()
for i in range(0, num_files):
plt.plot(logs[i]['gaxis'], logs[i]['disc'], label=args.data[i][0])
plt.legend(loc='upper right', fancybox=True, shadow=True, fontsize=11)
plt.grid(True)
plt.minorticks_on()
plt.xlabel('generator iterations', fontsize=14, color='black')
plt.ylabel('disc loss', fontsize=14, color='black')
plt.title('Discriminator Loss (real + fake)')
plt.savefig('{}_disc_loss'.format(output_prefix))
###################################### plot disc (medfilt) loss
fig, ax = plt.subplots()
for i in range(0, num_files):
med_filtered_loss = scipy.signal.medfilt(logs[i]['disc'], args.med)
plt.plot(logs[i]['gaxis'], med_filtered_loss, label=args.data[i][0])
plt.legend(loc='upper right', fancybox=True, shadow=True, fontsize=11)
plt.grid(True)
plt.minorticks_on()
plt.xlabel('generator iterations', fontsize=14, color='black')
plt.ylabel('disc loss', fontsize=14, color='black')
plt.title('Discriminator Loss (median filtered, size: {})'.format(args.med))
plt.savefig('{}_disc_medfilt_loss'.format(output_prefix))
print 'Done.'
``` |
{
"source": "jonge-democraten/dyonisos",
"score": 2
} |
#### File: dyonisos/events/tests.py
```python
from django.conf import settings
from django.test import TestCase
from django.test import Client
from django.urls import reverse
from subscribe.models import Event
from subscribe.models import Registration
from subscribe.forms import SubscribeForm
class TestUserPages(TestCase):
"""Test case with client for pages for normal user"""
fixtures = ['test_data.json']
def setUp(self):
self.client = Client()
def test_view_homepage(self):
response = self.client.get(reverse('homepage'))
self.assertEqual(response.status_code, 200)
def test_view_subscribe_page(self):
events = Event.objects.all()
self.assertTrue(events.exists())
for event in events:
response = self.client.get(reverse('subscribe', kwargs={'slug': event.slug}))
self.assertEqual(response.status_code, 200)
def test_subscribe_form(self):
events = Event.objects.all()
self.assertTrue(events.exists())
for event in events:
form_data = {
'first_name': 'John',
'last_name': 'Doe',
'email': '<EMAIL>',
}
form = SubscribeForm(event=event, data=form_data)
self.assertTrue(form.is_valid())
def test_subscribe_form_empty_email(self):
events = Event.objects.all()
self.assertTrue(events.exists())
for event in events:
form_data = {
'first_name': '',
'last_name': '',
'email': '<EMAIL>',
}
form = SubscribeForm(event=event, data=form_data)
self.assertFalse(form.is_valid())
class TestCaseAdminLogin(TestCase):
"""Test case with client and login as admin function"""
fixtures = ['test_data.json']
def setUp(self):
self.client = Client()
self.login()
def login(self):
"""Login as admin."""
success = self.client.login(username='admin', password='<PASSWORD>')
self.assertTrue(success)
response = self.client.get('/admin/', follow=True)
self.assertEqual(response.status_code, 200)
return response
class TestAdminPages(TestCaseAdminLogin):
def test_events_page(self):
response = self.client.get('/admin/subscribe/event/')
self.assertEqual(response.status_code, 200)
def test_event_edit_pages(self):
events = Event.objects.all()
self.assertTrue(events.exists())
for event in events:
response = self.client.get('/admin/subscribe/event/' + str(event.id) + '/change/')
self.assertEqual(response.status_code, 200)
def test_registrations_page(self):
response = self.client.get('/admin/subscribe/registration/')
self.assertEqual(response.status_code, 200)
def test_registration_page(self):
registrations = Registration.objects.all()
self.assertTrue(registrations.exists())
for registration in registrations:
response = self.client.get('/admin/subscribe/registration/' + str(registration.id) + '/change/')
self.assertEqual(response.status_code, 200)
```
#### File: dyonisos/subscribe/admin.py
```python
from subscribe.models import Event, EventQuestion, EventOption, Answer, Registration
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.utils.html import format_html
from xlwt import Workbook
import io as BytesIO
def export_events(eventadmin, request, queryset):
"""
Helper function to export registrations to an excel file."""
wb = Workbook()
for event in queryset:
# Put each event in its own sheet
s = wb.add_sheet(event.slug[:30]) # this is the max number of characters for an excel tab
# Write header
s.write(0, 0, "Voornaam")
s.write(0, 1, "Achternaam")
s.write(0, 2, "Email")
s.write(0, 3, "Betaald")
s.write(0, 4, "Prijs")
s.write(0, 5, "Purchase ID")
col_count = 6
q_to_col = {}
for question in event.eventquestion_set.all():
if question.question_type == "TEXT":
continue
q_to_col[question.id] = col_count
s.write(0, col_count, question.name)
col_count += 1
# Write the data
row = 1
for reg in event.registrations.all():
s.write(row, 0, reg.first_name)
s.write(row, 1, reg.last_name)
s.write(row, 2, reg.email)
s.write(row, 3, reg.paid)
s.write(row, 4, float(reg.price) / 100)
s.write(row, 5, reg.id)
for ans in reg.answers.all():
s.write(row, q_to_col[ans.question.id], '{}'.format(ans.get_answer()))
row += 1
out = BytesIO.BytesIO()
wb.save(out)
response = HttpResponse(out.getvalue(), content_type="application/excel")
response['Content-Disposition'] = 'attachment; filename=events.xls'
return response
export_events.short_description = "Export event subscriptions to excel."
class EventQuestionInline(admin.TabularInline):
model = EventQuestion
extra = 1
fields = ['name', 'admin_link', 'order', 'question_type', 'radio', 'required', ]
readonly_fields = ('admin_link',)
show_change_link = True # Django 1.8
ordering = ('order',)
def admin_link(self, instance):
url = reverse('admin:subscribe_eventquestion_change', args=(instance.id,))
return format_html('<a href="{}">Edit</a>', url)
class EventAdmin(admin.ModelAdmin):
fieldsets = [
("Event", {
'fields': [
('name', 'slug'),
('start_registration', 'end_registration'),
'description', 'price', 'max_registrations',
]}),
("Email", {"fields": ["contact_email", "email_template"]}),
]
prepopulated_fields = {'slug': ('name',)}
date_hierarchy = 'end_registration'
inlines = [EventQuestionInline]
actions = [export_events, ] # XXX: export
list_display = ['name', 'form_link', 'subscribed', 'total_paid', 'is_full', 'start_registration', 'end_registration']
search_fields = ["name", ]
class EventOptionInline(admin.TabularInline):
model = EventOption
extra = 1
fields = ['order', 'name', 'price', 'limit', 'num_registrations', 'active', ]
readonly_fields = ('num_registrations',)
ordering = ('order',)
def has_delete_permission(self, request, obj=None):
return False
def num_registrations(self, instance):
return instance.num_registrations()
num_registrations.short_description = "Registrations"
class EventQuestionAdmin(admin.ModelAdmin):
readonly_fields = ('event', )
list_display = ["name", "event", "order", "question_type", ]
list_filter = ["event"]
inlines = [EventOptionInline]
class AnswerInline(admin.TabularInline):
model = Answer
fields = ['question', 'int_field', 'txt_field', 'bool_field', 'option']
readonly_fields = ['question', ]
extra = 0
def get_formset(self, request, obj=None, **kwargs):
self.parent_obj = obj
return super().get_formset(request, obj, **kwargs)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "option":
kwargs['queryset'] = db_field.rel.to.objects.filter(question__event=self.parent_obj.event)
return super().formfield_for_foreignkey(db_field, request, **kwargs)
class RegistrationAdmin(admin.ModelAdmin):
readonly_fields = ('registration_date', 'trxid')
list_display = ["id", "event", "first_name", "last_name", "status", "registration_date", "paid", "trxid", ]
list_filter = ["paid", "event"]
search_fields = ["first_name", "last_name"]
inlines = [AnswerInline]
admin.site.register(EventQuestion, EventQuestionAdmin)
admin.site.register(Event, EventAdmin)
admin.site.register(Registration, RegistrationAdmin)
```
#### File: dyonisos/subscribe/models.py
```python
import datetime
import logging
import traceback
from django.core.mail import EmailMessage
from django.db import models
from django.template import Context, Template
logger = logging.getLogger(__name__)
AFDELINGEN = (
("AMS", "Amsterdam"),
("AN", "Arnhem-Nijmegen"),
("BB", "Brabant"),
("FR", "Friesland"),
("GR", "Groningen"),
("LH", "Leiden-Haaglanden"),
("MS", "Limburg"),
("RD", "Rotterdam"),
("TW", "Overijssel"),
("UT", "Utrecht"),
("WN", "Wageningen"),
("INT", "Internationaal"),
)
def afdeling_text(afd):
for key, value in AFDELINGEN:
if key == afd:
return value
return None
QUESTION_TYPES = (
("INT", "Integer"),
("TXT", "Text Input"),
("AFD", "Afdeling"),
("BOOL", "Ja/Nee"),
("CHOICE", "Multiple Choice"),
("TEXT", "HTML Text"),
)
class Event(models.Model):
name = models.CharField(max_length=200)
slug = models.SlugField()
start_registration = models.DateTimeField()
end_registration = models.DateTimeField()
description = models.TextField()
contact_email = models.EmailField()
email_template = models.TextField(help_text="Enkele placeholders: {{voornaam}}, {{achternaam}}, {{inschrijf_opties}}")
price = models.IntegerField(help_text="Eurocenten", default=0)
max_registrations = models.IntegerField(default=0, help_text="Als groter dan 0, bepaalt maximaal aantal inschrijvingen")
class Meta:
ordering = ('-end_registration',)
def __str__(self):
return self.name
def subscribed(self):
return len(Registration.objects.filter(event=self))
def paid(self):
return len(Registration.objects.filter(event=self).filter(paid=True))
def total_paid(self):
return "\u20AC %.2f" % (sum([e.price for e in self.registrations.filter(paid=True)]) / 100.)
def form_link(self):
return "<a href=\"https://events.jongedemocraten.nl/inschrijven/%s/\">Inschrijven</a>" % (self.slug)
form_link.allow_tags = True
def all_free(self):
"""Are all event options free?"""
if self.price != 0:
return False
if len(EventOption.objects.filter(price__gt=0).filter(question__event=self)):
return False
return True
def active(self):
now = datetime.datetime.now()
if self.start_registration > now or self.end_registration < now:
return False
return True
# active.boolean = True
def price_str(self):
return "\u20AC %.2f" % (float(self.price) / 100)
def is_full(self):
if self.max_registrations <= 0:
return False
return self.registrations.count() >= self.max_registrations
is_full.boolean = True
def get_registrations_over_limit(self):
results = []
if self.max_registrations > 0:
results += self.registrations.order_by('pk')[int(self.max_registrations):]
for question in self.eventquestion_set.all():
for option in question.options.all():
results += option.get_registrations_over_limit()
return results
class EventQuestion(models.Model):
event = models.ForeignKey(Event)
name = models.CharField(max_length=64)
question_type = models.CharField(max_length=16, choices=QUESTION_TYPES)
required = models.BooleanField(default=False, help_text='Bij Ja/Nee: verplicht aanvinken; bij andere: verplicht invullen')
radio = models.BooleanField(default=False, help_text='Voor multiple-choice/afdeling: geen dropdown maar radio buttons')
order = models.IntegerField(default=0, help_text='Bepaalt volgorde op formulier; gebruik order<0 voor elementen vooraf aan voornaam, achternaam en email')
text = models.TextField(blank=True, default='', help_text='Voor "HTML Text"; geldige HTML tags: a, b/strong, code, em/i, h3, img, ul, ol, li, p, br; Geldige HTML attributen: class, style, a.href, a.target, img.src, img.alt')
def __str__(self):
return "%s (%s)" % (self.name, self.question_type)
def form_id(self):
return "q%d" % (self.id)
def delete_event_question(self):
return '<a href="/deleteEventQuestion/?optionId=%d">Delete</a>' % (self.id)
delete_event_question.allow_tags = True
class EventOption(models.Model):
question = models.ForeignKey('EventQuestion', related_name="options")
name = models.CharField(max_length=200)
price = models.IntegerField(help_text="Eurocenten", default=0)
active = models.BooleanField(default=True)
order = models.IntegerField(default=0)
limit = models.IntegerField(default=0, help_text="Aantal beschikbare plekken (0 = geen limiet)")
def __str__(self):
if self.price < 0:
return "%s: \u20AC %.2f korting" % (self.name, float(-self.price) / 100)
if self.price > 0:
return "%s: \u20AC %.2f" % (self.name, float(self.price) / 100)
else:
return "%s" % (self.name,)
def price_str(self):
return "\u20AC %.2f" % (float(self.price) / 100)
def delete_event_option(self):
return '<a href="/deleteEventOption/?optionId=%d">Delete</a>' % (self.id)
delete_event_option.allow_tags = True
def get_related_registrations(self):
return Registration.objects.filter(answers__option=self).order_by('pk')
def num_registrations(self):
registrations = self.get_related_registrations()
return registrations.count()
def is_full(self):
if self.limit <= 0:
return False
return self.num_registrations() >= self.limit
is_full.boolean = True
def limit_str(self):
if self.limit <= 0:
return "-"
return "{}/{}".format(self.num_registrations(), self.limit)
limit_str.short_description = "Limit usage"
def get_registrations_over_limit(self):
if self.limit <= 0:
return []
registrations = self.get_related_registrations()
return registrations[int(self.limit):]
def limit_reached(self):
return self.is_full()
limit_reached.boolean = True
class Registration(models.Model):
registration_date = models.DateTimeField(auto_now_add=True)
first_name = models.CharField(max_length=64)
last_name = models.CharField(max_length=64)
email = models.EmailField(blank=True)
event = models.ForeignKey(Event, related_name='registrations')
price = models.IntegerField(default=0)
paid = models.BooleanField(default=False)
status = models.CharField(max_length=64, default="", blank=True)
trxid = models.CharField(max_length=128, default="", blank=True)
def calculate_price(self):
self.price = self.event.price + sum([answer.option.price for answer in self.answers.exclude(option=None)])
def get_options_text(self):
results = []
added_default_fields = False
answers = {a.question: a.get_answer() for a in self.answers.all()}
for question in self.event.eventquestion_set.order_by('order'):
if question.order >= 0 and not added_default_fields:
results += ["Voornaam: {}".format(self.first_name)]
results += ["Achternaam: {}".format(self.last_name)]
results += ["Email: {}".format(self.email)]
added_default_fields = True
if question in answers:
results += ["{}: {}".format(question.name, answers[question])]
if not added_default_fields:
results += ["Voornaam: {}".format(self.first_name)]
results += ["Achternaam: {}".format(self.last_name)]
results += ["Email: {}".format(self.email)]
return '\n'.join(results)
def __str__(self):
return "%s %s - %s - %s" % (self.first_name, self.last_name, self.event, str(self.price))
def gen_subscription_id(self):
num_id = str(self.id)
safe = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
return num_id + "x" + "".join(c for c in self.get_options_text() if c in safe)[:15 - len(num_id)]
def send_confirmation_email(self):
t = Template(self.event.email_template)
c = Context({
"voornaam": self.first_name,
"achternaam": self.last_name,
"inschrijf_opties": self.get_options_text(),
})
rendered_mail = t.render(c)
email = EmailMessage(
subject="Inschrijfbevestiging: %s" % (self.event.name),
body=rendered_mail,
from_email=self.event.contact_email,
to=[self.email],
)
try:
email.send()
except:
logger.error("Could not send welcome mail to %s" % (self.email))
logger.error(traceback.format_exc())
raise
return rendered_mail
class Answer(models.Model):
# This should maybe be a "through" model
registration = models.ForeignKey(Registration, related_name='answers')
question = models.ForeignKey(EventQuestion)
int_field = models.IntegerField(default=0, null=True)
txt_field = models.CharField(max_length=256, blank=True)
bool_field = models.BooleanField(default=False)
option = models.ForeignKey(EventOption, default=None, null=True, blank=True)
def __str__(self):
return "%s - %s" % (self.question, self.get_answer())
def set_answer(self, ans):
if self.question.question_type == "INT":
self.int_field = ans
elif self.question.question_type == "TXT":
self.txt_field = ans
elif self.question.question_type == "AFD":
self.txt_field = ans
elif self.question.question_type == "BOOL":
self.bool_field = ans
if self.bool_field and len(self.question.options.all()):
self.option = self.question.options.all()[0]
else:
self.option = None
elif self.question.question_type == "CHOICE":
self.option = ans
def get_answer(self):
if self.question.question_type == "INT":
return self.int_field
elif self.question.question_type == "TXT":
return self.txt_field
elif self.question.question_type == "AFD":
return afdeling_text(self.txt_field)
elif self.question.question_type == "BOOL":
if self.option is not None:
return self.option
else:
return self.bool_field and 'Ja' or 'Nee'
elif self.question.question_type == "CHOICE":
return self.option
``` |
{
"source": "jonge-democraten/jdleden",
"score": 2
} |
#### File: management/commands/createdepartmentexcels.py
```python
from django.core.management.base import BaseCommand
from jdleden.ledenlijst import create_department_excels_from_file
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('members_file', nargs=1, type=str)
def handle(self, *args, **options):
create_department_excels_from_file(options['members_file'][0])
```
#### File: jdleden/jdleden/tests.py
```python
import os
import shutil
from django.test import TestCase
import jdleden.ledenlijst
import jdleden.afdelingrondschuif
from jdleden import afdelingen
from jdleden import afdelingenoud
from jdleden.regionmaillist import create_region_mail_list
from testdata import test_afdelingen
class TestCaseLedenlijst(TestCase):
oldfile = 'testdata/test_data_a.xls'
newfile = 'testdata/test_data_b.xls'
checksum_filename = 'testchecksum.txt'
def test_update(self):
output_dir = 'testoutput'
output_moved_dir = 'testoutput_moved'
try:
result = jdleden.ledenlijst.update(
self.oldfile,
self.newfile,
dryrun=False,
no_ldap=True,
out_dir=output_dir,
out_moved_dir=output_moved_dir,
checksum_file=self.checksum_filename
)
self.assertTrue(result is not None)
self.assertEqual(len(result['removed']), 1)
self.assertEqual(len(result['added']), 1)
self.assertEqual(len(result['updated']), 2)
self.assertEqual(len(result['changed_department']), 1)
self.assertTrue(os.path.exists(output_dir))
self.assertTrue(os.path.exists(output_moved_dir))
finally: # always remove the generated output
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
if os.path.exists(output_moved_dir):
shutil.rmtree(output_moved_dir)
os.remove(self.checksum_filename)
def test_checksum(self):
jdleden.ledenlijst.create_new_checksum(self.newfile, self.checksum_filename)
self.assertTrue(os.path.exists(self.checksum_filename))
is_same = jdleden.ledenlijst.check_oldfile(self.newfile, self.checksum_filename)
self.assertTrue(is_same)
is_same = jdleden.ledenlijst.check_oldfile(self.oldfile, self.checksum_filename)
self.assertFalse(is_same)
os.remove(self.checksum_filename)
class TestCaseChangedDepartments(TestCase):
members_file = 'testdata/test_data_a.xls'
def test_check_postcodes(self):
self.assertTrue(jdleden.afdelingrondschuif.check_postcode_indeling(afdelingen.AFDELINGEN))
self.assertTrue(jdleden.afdelingrondschuif.check_postcode_indeling(afdelingenoud.AFDELINGEN))
self.assertFalse(jdleden.afdelingrondschuif.check_postcode_indeling(test_afdelingen.AFDELINGEN))
self.assertFalse(jdleden.afdelingrondschuif.check_overlap_afdelingen(test_afdelingen.AFDELINGEN))
self.assertFalse(jdleden.afdelingrondschuif.check_postcode_ranges(test_afdelingen.AFDELINGEN))
def test_change_departments(self):
moved_members = jdleden.afdelingrondschuif.move_members(self.members_file, dryrun=True)
self.assertEqual(len(moved_members), 3) # this needs to be updated after afdelingen and afdelingenoud have changed
class TestCasePostcodeChecks(TestCase):
def test_check_postcode_overlap(self):
has_no_overlap = jdleden.afdelingrondschuif.check_overlap_afdelingen(afdelingen.AFDELINGEN)
self.assertTrue(has_no_overlap)
has_no_overlap = jdleden.afdelingrondschuif.check_overlap_afdelingen(afdelingenoud.AFDELINGEN)
self.assertTrue(has_no_overlap)
def test_check_postcode_ranges(self):
correct_ranges = jdleden.afdelingrondschuif.check_postcode_ranges(afdelingen.AFDELINGEN)
self.assertTrue(correct_ranges)
class TestCaseRegionMailList(TestCase):
members_file = 'testdata/test_data_a.xls'
def test_create_list_amsterdam(self):
maillist_filepath, list_length = create_region_mail_list(self.members_file, ['Amsterdam'])
self.assertTrue(os.path.exists(maillist_filepath))
self.assertEqual(list_length, 976)
os.remove(maillist_filepath)
maillist_filepath, list_length = create_region_mail_list(self.members_file, ['AMSTERDAM'])
self.assertTrue(os.path.exists(maillist_filepath))
self.assertEqual(list_length, 976)
os.remove(maillist_filepath)
def test_create_list_amsterdam_utrecht(self):
maillist_filepath, list_length = create_region_mail_list(self.members_file, ['Amsterdam', 'Utrecht'])
self.assertTrue(os.path.exists(maillist_filepath))
self.assertEqual(list_length, 1918)
os.remove(maillist_filepath)
def test_create_list_haarlem(self):
maillist_filepath, list_length = create_region_mail_list(self.members_file, ['Haarlem'])
self.assertTrue(os.path.exists(maillist_filepath))
self.assertEqual(list_length, 0)
os.remove(maillist_filepath)
``` |
{
"source": "jonge-democraten/website",
"score": 2
} |
#### File: website/jdpages/tests.py
```python
import logging
from django.conf import settings
from django.test import TestCase
from django.test import Client
from mezzanine.blog.models import BlogCategory
from mezzanine.blog.models import BlogPost
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED, CONTENT_STATUS_DRAFT
from mezzanine.pages.models import RichTextPage
from fullcalendar.models import Occurrence
logger = logging.getLogger(__name__)
class TestCaseAdminLogin(TestCase):
""" Test case with client and login as admin function. """
def setUp(self):
# Needed during tests with DEBUG=False (the default)
# to prevent a TemplateDoesNotExist error of a filebrowser template.
# Not sure what goes wrong here, but seems to work fine in manual tests.
settings.TEMPLATE_DEBUG = False
self.client = Client()
self.login()
def login(self):
""" Login as admin. """
response = self.client.post('/admin/login/?next=/admin/', {'username': 'admin', 'password': '<PASSWORD>'}, follow=True)
self.assertEqual(response.status_code, 200)
return response
class TestPage(TestCaseAdminLogin):
""" Tests the basic page structure and admin. """
fixtures = ['test_base.json', 'test_pages.json']
def test_edit_richtextpage_admin_view(self):
richtextpages = RichTextPage.objects.all()
self.assertEqual(richtextpages.count(), 4)
for page in richtextpages:
response = self.client.get('/admin/pages/richtextpage/' + str(page.id) + '/', follow=True)
self.assertEqual(response.status_code, 200)
def test_richtextpage_view(self):
richtextpages = RichTextPage.objects.all()
for page in richtextpages:
response = self.client.get(page.get_absolute_url(), follow=True)
self.assertEqual(response.status_code, 200)
class TestPageHeaderImage(TestCaseAdminLogin):
""" Tests the header image of pages. """
fixtures = ['test_base.json', 'test_pages.json']
def test_edit_header_admin_view(self):
richtextpages = RichTextPage.objects.all()
for page in richtextpages:
response = self.client.get('/admin/pages/richtextpage/' + str(page.id) + '/', follow=True)
self.assertEqual(response.status_code, 200)
def test_header_page_view(self):
richtextpages = RichTextPage.objects.all()
for page in richtextpages:
response = self.client.get(page.get_absolute_url(), follow=True)
self.assertEqual(response.status_code, 200)
page_header_image_widget = response.context['page_header']
if page.id == 17:
self.assertEqual(page_header_image_widget.page.id, 17)
self.assertEqual(str(page_header_image_widget.image), 'uploads/site-1/headerhome.jpg')
if page.id == 37:
self.assertEqual(page_header_image_widget.page.id, 17)
self.assertEqual(str(page_header_image_widget.image), 'uploads/site-1/headerhome.png')
if page.id == 29:
self.assertEqual(page_header_image_widget.page.id, 29)
self.assertEqual(str(page_header_image_widget.image), 'uploads/site-1/header.jpg')
class TestBlogCategoryPage(TestCaseAdminLogin):
""" Tests the blog category page rendering """
fixtures = ['test_base.json', 'test_blog.json']
blog_cat_1 = 'BlogCategory1'
blog_cat_2 = 'BlogCategory2'
def setUp(self):
super().setUp()
settings.BLOG_POST_PER_PAGE = 2
def test_active_in_menu(self):
""" Tests whether the page is part of the menu. """
response = self.client.get('/')
html = str(response.content)
self.assertTrue('<a href="/blogcategory1page/">blogcategory1page</a>' in html)
self.assertTrue('<a href="/blogcategory2page/">blogcategory2page</a>' in html)
def test_blogpost_titles(self):
""" Tests whether the blog post titles are shown on a blog category page. """
response = self.client.get('/blogcategory1page/', follow=True)
html = str(response.content)
self.assertTrue('<a class="button" href="/blog/blogpost3category1/">Lees verder</a>' in html)
self.assertTrue('<a class="button" href="/blog/blogpost2category1/">Lees verder</a>' in html)
def test_blogpost_contents(self):
""" Tests whether the blog post contents are shown on the page. """
response = self.client.get('/blogcategory1page/', follow=True)
html = str(response.content)
self.assertTrue('<p>Example content 3.</p>' in html)
self.assertTrue('<p>Example content 2.</p>' in html)
def test_blogpage_pagination(self):
""" Tests whether only a limited number of posts are shown on a page and pagination links are available. """
response = self.client.get('/blogcategory1page/', follow=True)
html = str(response.content)
self.assertFalse('<a class="button" href="/blog/blogpost1category1/">Lees verder</a>' in html)
blog_posts = response.context['blog_posts']
self.assertEqual(len(blog_posts), 2)
self.assertTrue('Pagina 1 van 2' in html)
class TestBlogListView(TestCaseAdminLogin):
""" Tests the blog post list view. """
fixtures = ['test_base.json', 'test_blog.json']
blog_cat_1 = 'BlogCategory1'
blog_cat_2 = 'BlogCategory2'
posts_per_page = 2
def setUp(self):
super().setUp()
settings.BLOG_POST_PER_PAGE = TestBlogListView.posts_per_page
def test_blogpost_titles(self):
""" Tests whether the titles of the last 2 blog posts are shown on the page. """
blog_categories = BlogCategory.objects.all()
for category in blog_categories:
url = category.get_absolute_url()
print(url)
response = self.client.get(url)
html = str(response.content)
posts = BlogPost.objects.filter(categories=category)
counter = 0
for post in posts:
if counter < TestBlogListView.posts_per_page:
self.assertTrue(post.get_absolute_url() in html)
else:
self.assertFalse(post.get_absolute_url() in html)
counter += 1
class TestEvent(object):
"""
Tests the integration with the fullcalendar app.
Tests the events column and sidebar widget, and the individual occurrence page.
Tests whether the events from the sites chosen in the admin are shown:
* Events from all sites in column element
* Events from this site in column element
* Events from this site and main site in column element
"""
def get_html(self, url):
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 200)
return str(response.content)
def test_all_site_events_visibility__user(self):
"""
Tests whether the agenda sidebar that is set to show events from all sites
actually shows these events, and whether the draft status of events is respected (draft events hidden).
"""
url = '/'
html = self.get_html(url)
occurrences = Occurrence.objects.all()
self.check_occurrence_visibility(occurrences, html, self.is_admin())
def test_this_site_events_visibility_user(self):
"""
Tests whether the agenda sidebar that is set to show events from this site only
actually shows only these events, and whether the draft status of events is respected (draft events hidden).
"""
url = '/eventsthissite/'
html = self.get_html(url)
occurrences = Occurrence.site_related.all()
self.check_occurrence_visibility(occurrences, html, self.is_admin())
def test_this_site_and_main_events_visibility_user(self):
"""
Tests whether the events column element that is set to show events from this site and the main site
actually shows only these events, and whether the draft status of events is respected (draft events hidden).
"""
settings.SITE_ID = 2 # set to a department site
url = '/'
html = self.get_html(url)
sites = {1, 2}
occurrences = Occurrence.site_related.filter(site_id__in=sites)
self.check_occurrence_visibility(occurrences, html, self.is_admin())
occurrences_site_3 = Occurrence.objects.filter(site_id=3)
for occurrence in occurrences_site_3:
self.assertFalse(str(occurrence.event.title) in html)
settings.SITE_ID = 1
def check_occurrence_visibility(self, occurrences, html, is_admin):
"""
Tests that draft occurrences are not shown in agenda sidebar, and that their pages are hidden.
:param occurrences: the occurrences to check for visibility based on published status
:param html: the html of the page
"""
for occurrence in occurrences:
if occurrence.status == CONTENT_STATUS_DRAFT and not is_admin:
self.assertFalse(str(occurrence.event.title) in html)
response = self.client.get(occurrence.get_absolute_url(), follow=True)
self.assertEqual(response.status_code, 404)
elif occurrence.status == CONTENT_STATUS_PUBLISHED:
self.assertTrue(str(occurrence.event.title) in html)
response = self.client.get(occurrence.get_absolute_url())
self.assertEqual(response.status_code, 200)
class TestEventAdmin(TestCase, TestEvent):
"""
Tests the draft/published status visibility in the sidebar and the occurrence page, for an admin (draft visible).
see TestEvent for actual tests
"""
fixtures = ['test_base.json', 'test_pages.json', 'test_events.json']
def setUp(self):
self.client = Client()
response = self.client.post('/admin/login/?next=/admin/', {'username': 'admin', 'password': '<PASSWORD>'}, follow=True)
self.assertEqual(response.status_code, 200)
def tearDown(self):
settings.SITE_ID = 1
def is_admin(self):
return True
class TestEventUser(TestCase, TestEvent):
"""
Tests the draft/published status visibility in the sidebar and the occurrence page, for a normal user (draft hidden).
see TestEvent for actual tests
"""
fixtures = ['test_base.json', 'test_pages.json', 'test_events.json']
def setUp(self):
self.client = Client()
def tearDown(self):
settings.SITE_ID = 1
def is_admin(self):
return False
``` |
{
"source": "Jongerr/vendor_receiving",
"score": 3
} |
#### File: Jongerr/vendor_receiving/inventory.py
```python
import json
import os
import random
import requests
from passlib.hash import pbkdf2_sha256 as pbk
from PyQt5.QtSql import QSqlDatabase, QSqlQuery
from pprint import pprint
ENCODING = 'utf-8'
DB_PATH = os.path.join(os.path.curdir, 'inventory.db')
def scrambleWord(word):
"""Randomize the letters in word and return the resulting string."""
word_list = list(word)
random.shuffle(word_list)
word = ''.join(word_list)
return word
def generateItems():
"""Generate a dictionary of retail products and store the data in items.json.
Pulls a list of items and artificially doubles it with scrambled item names.
Each item is given a random PLU, UPC, and department number.
Each dictionary key is the item's PLU.
"""
response = requests.get('https://www.randomlists.com/data/things.json')
json_data = response.json()
items = json_data['RandL']['items']
#double sample size by scrambling item names
scrambled_list = []
for item in items:
scrambled_item = scrambleWord(item)
scrambled_list.append(scrambled_item)
items = items + scrambled_list
data = {}
for item in items:
random.seed(item)
upc = random.randint(100000000000, 999999999999)
plu = random.randint(1000, 9999999)
department = (plu % 7) + 1
print('UPC:{0} | PLU:{1} | Item:{2} | D{3}'.format(upc, plu, item, department))
if plu in data:
print('Duplicate found: {}'.format(plu))
continue
data[plu] = {'upc':upc, 'department':department, 'model':item}
with open('items.json', 'w') as f:
json.dump(data, f)
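# Illustrative shape of items.json produced above (values here are hypothetical;
# the real ones are random):
#   {"1234567": {"upc": 123456789012, "department": 3, "model": "wallet"}}
# Keys are PLUs (strings once reloaded from JSON); departments range from 1 to 7.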
def generatePO():
"""Create dumby Purchase Orders and store them in pos.json.
Each PO is asigned one random vendor and department number,
along with a random length list of items belonging to said department.
Returns: True if items.json successfully opens, False otherwise.
"""
try:
with open('items.json', 'r') as f:
items_dict = json.load(f)
except FileNotFoundError:
return False
vendors = ['Dyson', 'Ingrammicro', 'LKG', 'Inland', 'Sandisk', 'Seagate', 'Hasbro', 'Mattel',\
'Gear Head', 'Logitech', 'NTE', 'Dell', 'Microsoft', 'Right Stuff', 'Alliance', 'Energizer']
po_dict = {}
for i in range(50):
po_num = 24000000 + random.randint(1, 999999)
if po_num in po_dict:
continue
po_dict[po_num] = {'department': (po_num % 7) + 1, 'items': {}, 'vendor': random.choice(vendors)}
for key in items_dict:
match_found = False
loops = 0
while not match_found:
loops += 1
if loops > 200:
print('\n\nToo many loops.\n\n')
break
po, department = random.choice(list(po_dict.items()))
department = department['department']
print('PO department: {}'.format(department))
print('item plu: {} department: {}'.format(key, items_dict[key]['department']))
if items_dict[key]['department'] == department:
max_count = random.randint(1, 20)
po_dict[po]['items'][key] = max_count
match_found = True
with open('pos.json', 'w') as f:
json.dump(po_dict, f)
return True
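# Illustrative shape of pos.json produced above (values here are hypothetical):
#   {"24123456": {"department": 5, "items": {"1234567": 12}, "vendor": "Dyson"}}
# Each PO maps item PLUs to a random maximum count, and an item is only added
# to a PO whose department matches the item's department.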
def fillDB():
"""Create a database and populate two tables(named items and purchase_order).
The 'items' and 'purchase_order' tables are populated with the data from items.json
and pos.json respectively.
"""
with open('items.json') as f:
data = json.load(f)
db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName(DB_PATH)
if not db.open():
print('DB could not be opened')
error = db.lastError()
print(error.text())
return False
query = QSqlQuery()
if query.exec_("drop table items"):
print('successfully dropped table')
else:
print('unsuccessfully dropped table')
print(query.lastError().text())
if query.exec_("create table items(plu int primary key, upc varchar(12) unique, "
"model varchar(20), department int)"):
print('success')
else:
print('failure')
print(query.lastError().text())
for key in data:
if query.exec_("insert into items values({}, '{}', '{}', {})".format(key, data[key]['upc'],
data[key]['model'], data[key]['department'])):
print("values({}, {}, {}, {}) successfully inserted.".format(key, data[key]['upc'], data[key]['model'], data[key]['department']))
else:
print("values({}, {}, {}, {}) unsuccessfully inserted.".format(key, data[key]['upc'], data[key]['model'], data[key]['department']))
print(query.lastError().text())
with open('pos.json') as f:
po_dict = json.load(f)
if query.exec_("drop table purchase_order"):
print('successfully dropped table')
else:
print('unsuccessfully dropped table')
print(query.lastError().text())
if query.exec_("create table purchase_order(po int primary key, vendor varchar(30), "
"department int, items blob)"):
print('success')
else:
print('failure')
print(query.lastError().text())
for key in po_dict:
item_string = json.dumps(po_dict[key]['items'])
item_blob = item_string.encode(ENCODING)
if query.exec_("insert into purchase_order values({}, '{}', {}, '{}')"\
.format(key, po_dict[key]['vendor'], po_dict[key]['department'], item_string)):
print("values({}, {}, {}, {}) successfully inserted."\
.format(key, po_dict[key]['vendor'], po_dict[key]['department'], item_string))
else:
print("values({}, {}, {}, {}) unsuccessfully inserted."\
.format(key, po_dict[key]['vendor'], po_dict[key]['department'], item_blob))
print(query.lastError().text())
def createEmployeeTable():
db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName(DB_PATH)
if not db.open():
print('DB could not be opened')
error = db.lastError()
print(error.text())
return False
query = QSqlQuery()
if not query.exec_("drop table employee"):
print(query.lastError().text())
if not query.exec_("create table employee(id int primary key, first_name varchar(10), "\
"last_name varchar(10), posistion int, pass_hash varchar(200))"):
print(query.lastError().text())
if not query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(162973, 'Jon', 'Michie', 2, pbk.hash('Michie'))):
print(query.lastError().text())
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(131901, 'Ben', 'Terry', 3, pbk.hash('Terry')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(150697, 'Daniel', 'Silva', 2, pbk.hash('Silva')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(68412, 'James', 'Hutchetson', 2, pbk.hash('Hutchetson')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(161844, 'MacKenly', 'Gamble', 1, pbk.hash('Gamble')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(141047, 'George', 'Huston', 1, pbk.hash('Huston')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(46045, 'Arthur', 'Art', 1, pbk.hash('Art')))
def testHashVerification(name):
db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName(DB_PATH)
if not db.open():
print('DB could not be opened')
error = db.lastError()
print(error.text())
return False
query = QSqlQuery()
if not query.exec_("select pass_hash from employee where last_name = '{}'".format(name)):
print(query.lastError().text())
elif not query.next():
print('Table values not found')
else:
pass_hash = query.value(0)
if pbk.verify(name, pass_hash):
print('It\'s a match!')
else:
print('Match not found.')
if __name__ == '__main__':
generateItems()
generatePO()
fillDB()
createEmployeeTable()
testHashVerification('Terry')
``` |
{
"source": "jonggurl96/Real-Time-Voice-Cloning",
"score": 3
} |
#### File: Real-Time-Voice-Cloning/a_data/tool.py
```python
import json
import numpy as np
from pathlib import Path
def get_input_paths_texts(path):
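# Assumed layout of the JSON file: {"input_text": ..., "output_text": ...}.
# Only output_text is returned, together with every *.wav collected one level
# below datasets_root/prototype/inputs/<speaker>/.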
with open(path, "r", encoding="utf-8") as json_file:
jf = json.load(json_file)
input_text = jf["input_text"]
output_text = jf["output_text"]
speakers_path = Path("datasets_root/prototype/inputs")
speaker_path = sorted([m for m in speakers_path.glob("*") if m.is_dir()])
wavpaths = []
for speaker in speaker_path:
wavs = [m for m in speaker.glob("*.wav")]
wavpaths.extend(wavs)
return wavpaths, output_text
def add_breaks(wav, breaks, Synthesizer):
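# breaks is assumed to hold segment lengths in spectrogram frames; multiplying by
# hop_size converts them to sample offsets, and roughly 0.15 s of silence is
# inserted between the resulting waveform segments before concatenation.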
b_ends = np.cumsum(np.array(breaks) * Synthesizer.hparams.hop_size)
b_starts = np.concatenate(([0], b_ends[:-1]))
wavs = [wav[start:end] for start, end, in zip(b_starts, b_ends)]
breaks = [np.zeros(int(0.15 * Synthesizer.sample_rate))] * len(breaks)
wav = np.concatenate([i for w, b in zip(wavs, breaks) for i in (w, b)])
return wav
``` |
{
"source": "jonggyujang0123/Early_exit",
"score": 3
} |
#### File: Early_exit/tools/train.py
```python
import numpy as np
import torch
from torchvision import transforms, datasets
import torch.nn as nn
import ssl
import matplotlib.pyplot as plt
from net.AlexNet import AlexNet, LeNet, ResNet18
from tqdm import tqdm
from tools.dataloader import dataloader
import argparse
#certification error
ssl._create_default_https_context = ssl._create_unverified_context
parser = argparse.ArgumentParser(description = 'Options')
parser.add_argument('--dataset',
help='Choose among MNIST, CIFAR10, CIFAR100, EMNIST')
parser.add_argument('--model',
help='Choose among AlexNet, LeNet, ResNet')
args = parser.parse_args()
data_name = args.dataset
net_name = args.model
batch_size=256
trainloader, testloader, num_classes, in_channel = dataloader(data_name = data_name,batch_size = batch_size)
## NETWORK LOADER
if net_name == 'AlexNet':
Net = AlexNet(num_classes = num_classes, in_channel =in_channel)
lr = 0.006
elif net_name == 'LeNet':
Net = LeNet(num_classes = num_classes, in_channel =in_channel)
lr = 0.006
elif net_name == 'ResNet':
Net = ResNet18(num_classes = num_classes, in_channel =in_channel)
lr = 0.001
else:
raise ValueError('Choose correct model')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
Net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(Net.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
## MAIN FUNCTION
def main():
epoches = tqdm(range(200))
for epoch in epoches:
loss_ep = list()
for batch_idx, (inputs, labels) in enumerate(trainloader):
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
output_1, output_2, output_3 = Net.forward(inputs)
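# Joint training of the three exit heads: their cross-entropy losses are combined
# with weights 1.0, 0.4 and 0.3 so that every exit receives a gradient signal at
# each step.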
loss = 1.0 * criterion(output_1, labels) + 0.4 * criterion(output_2, labels) + 0.3 * criterion(output_3, labels)
loss.backward()
optimizer.step()
loss_ep.append(loss.item())
scheduler.step()
if epoch % 20 == 19:
tqdm.write(f'{epoch}-th epoch loss is {np.mean(loss_ep)}')
torch.save(Net.state_dict(), f'./DATA/{data_name}_{net_name}.pth')
return
if __name__ == '__main__':
main()
``` |
{
"source": "jonggyujang0123/QoS-UARAPC",
"score": 3
} |
#### File: QoS-UARAPC/libs/actor_critic_net.py
```python
import tensorflow as tf
import numpy as np
import random
class GaussianNoise():
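# Exploration noise: with probability epsilon a zero-mean Gaussian perturbation of
# std sigma is added to the action; epsilon decays linearly from epsilon_init
# toward epsilon_end on each reset(). The mu/theta arguments appear to be kept for
# an OU-noise style interface and are not used when generating the noise.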
def __init__(self,action_dimension,epsilon_init = 0.7, epsilon_end = 0.3,mu=0, theta =0.15, sigma = 0.25):
self.action_dimension = action_dimension
self.mu = mu
self.theta = theta
self.sigma = sigma
self.state = np.ones(self.action_dimension) * self.mu
self.epsilon_decay = 0.9995
self.epsilon = epsilon_init
self.epsilon_end = epsilon_end
self.decay = (epsilon_init-epsilon_end)/10000.
def reset(self):
self.epsilon = np.maximum(self.epsilon - self.decay, self.epsilon_end)
def noise(self,step):
self.is_noise = (np.random.uniform() <self.epsilon)
noise = np.random.normal(size= [1,self.action_dimension])* self.sigma * self.is_noise
return noise
class Actor_USL():
def __init__(self, action_size, scope = 'DDPG_Actor'):
self.output_size = action_size
self.scope = scope
def forward(self,state):
with tf.variable_scope(self.scope, reuse = tf.AUTO_REUSE):
self.state = state
self.fcn1 = tf.contrib.layers.fully_connected(self.state, 2048, activation_fn = tf.nn.leaky_relu)
self.fcn2= tf.contrib.layers.fully_connected(self.fcn1, 2048, activation_fn = tf.nn.leaky_relu)
self.fcn3= tf.contrib.layers.fully_connected(self.fcn2, 2048, activation_fn =tf.nn.leaky_relu)
self.fcn4= tf.contrib.layers.fully_connected(self.fcn3, 2048, activation_fn =tf.nn.leaky_relu)
self.fcn5= tf.contrib.layers.fully_connected(self.fcn4, 2048, activation_fn =tf.nn.leaky_relu)
self.fcn6= tf.contrib.layers.fully_connected(self.fcn5, 1024, activation_fn =tf.nn.leaky_relu)
self.fcn7= tf.contrib.layers.fully_connected(self.fcn6, 1024, activation_fn =tf.nn.leaky_relu)
self.fcn8= tf.contrib.layers.fully_connected(self.fcn7, 1024, activation_fn =tf.nn.leaky_relu)
self.fcn9= tf.contrib.layers.fully_connected(self.fcn8, 1024, activation_fn =tf.nn.leaky_relu)
self.fcn10= tf.contrib.layers.fully_connected(self.fcn9, 512, activation_fn =tf.nn.leaky_relu)
self.fcn11= tf.contrib.layers.fully_connected(self.fcn10, 256, activation_fn =tf.nn.leaky_relu)
self.fcn12= tf.contrib.layers.fully_connected(self.fcn11, 128, activation_fn =tf.nn.leaky_relu)
self.action = 1+tf.nn.elu(tf.contrib.layers.fully_connected(self.fcn12,self.output_size, activation_fn = None))
return self.action
class Actor():
def __init__(self, action_size, thr, scope = 'DDPG_Actor', is_tanh = True):
self.output_size = action_size
self.scope = scope
self.is_tanh = is_tanh
self.thr = thr
def forward(self,state):
with tf.variable_scope(self.scope, reuse = tf.AUTO_REUSE):
## Actor
state_part1 = state[:,0:self.thr]
state_part2 = state[:, self.thr::]
state_part1_ = tf.contrib.layers.fully_connected(state_part1, 512, activation_fn = tf.nn.leaky_relu)
state_part2_ = tf.contrib.layers.fully_connected(state_part2, 512, activation_fn = tf.nn.leaky_relu)
state_post = tf.concat([state_part1_, state_part2_],axis=1)
self.fcn1 = tf.contrib.layers.fully_connected(state_post, 2048, activation_fn = tf.nn.leaky_relu)
self.fcn2= tf.contrib.layers.fully_connected(self.fcn1, 2048, activation_fn = tf.nn.leaky_relu)
self.fcn3= tf.contrib.layers.fully_connected(self.fcn2, 2048, activation_fn =tf.nn.leaky_relu)
self.fcn4= tf.contrib.layers.fully_connected(self.fcn3, 2048, activation_fn =tf.nn.leaky_relu)
self.fcn5= tf.contrib.layers.fully_connected(self.fcn4, 1024, activation_fn =tf.nn.leaky_relu)
self.fcn6= tf.contrib.layers.fully_connected(self.fcn5, 1024, activation_fn =tf.nn.leaky_relu)
self.fcn7= tf.contrib.layers.fully_connected(self.fcn6, 1024, activation_fn =tf.nn.leaky_relu)
self.fcn8= tf.contrib.layers.fully_connected(self.fcn7, 1024, activation_fn =tf.nn.leaky_relu)
self.fcn9= tf.contrib.layers.fully_connected(self.fcn8, 1024, activation_fn =tf.nn.leaky_relu)
self.fcn10= tf.contrib.layers.fully_connected(self.fcn9, 1024, activation_fn =tf.nn.leaky_relu)
self.fcn11= tf.contrib.layers.fully_connected(self.fcn10, 512, activation_fn =tf.nn.leaky_relu)
self.fcn12= tf.contrib.layers.fully_connected(self.fcn11, 512, activation_fn =tf.nn.leaky_relu)
self.action = tf.tanh(tf.contrib.layers.fully_connected(self.fcn12,self.output_size, activation_fn = None))
return self.action
class Critic():
def __init__(self, reward_size, BS, UE, scope = 'DDPG_Critic'):
self.scope = scope
self.reward_size = reward_size
self.BS = BS
self.UE = UE
self.renew = (np.arange(self.BS) != self.BS-1).astype(int) #np.array([1,1,1,0])
def state_action_to_PCstate(self, state, action):
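# Maps the raw action in [-1, 1] to per-BS transmit power (roughly [0.01, 0.70]
# for the BSs flagged in self.renew and [0.01, 1.00] for the last, grid-powered
# BS), then recomputes per-user SINR and rate (18 * log2(1 + SINR)) from the
# channel part of the state. The remaining slices of state are read here as QoS
# targets, available energy, grid power, harvested (RES) energy and backhaul
# capacity (see the indexing below).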
P = tf.reshape((self.renew * (0.01 + 0.69 * (action+1)/2) + (1-self.renew) * (0.01 + 0.99 * (action+1)/2)), [-1, 1, self.BS])
SNR_p = 2000*tf.reshape(state[:,0:self.UE*self.BS],[-1, self.UE,self.BS]) * P
SINR = SNR_p/ ( 1+ tf.reduce_sum(SNR_p,axis=2,keepdims=True)- SNR_p)
Rate = tf.log(1+SINR)/tf.log(2.0)*18 + 0.001
QoS = tf.reshape(state[:,self.UE*self.BS:self.UE*self.BS + self.UE ], [-1, self.UE, 1])
Avail_energy = state[:,self.UE*self.BS + self.UE : self.UE*self.BS + self.UE + self.BS]
grid_power= state[:, self.UE*self.BS + self.UE + 2 * self.BS : self.UE*self.BS + self.UE + 3 *self.BS]
RES= state[:, self.UE*self.BS + self.UE + 3 * self.BS : self.UE*self.BS + self.UE + 4 *self.BS]
Backhaul = state[:, self.UE*self.BS + self.UE + 4 * self.BS : self.UE*self.BS + self.UE + 5 *self.BS]
state_1 = tf.reshape(-tf.log(QoS/Rate), [-1, self.BS * self.UE]) # QoS-Rate Ratio [-1, BS*UE]
state_2 = tf.reshape( -tf.log(QoS / 10 /tf.reshape(Backhaul,[-1, 1, self.BS])), [-1, self.BS * self.UE]) # QoS-Bh Ratio [-1, BS * UE]
state_3 = -tf.log(self.renew * Avail_energy * tf.reshape(1-P, [-1,self.BS]) +RES + grid_power) # Remaining energy [-1, BS]
state_4 = tf.reduce_max(Rate, axis=1)/100.0 # Max_Rate [-1,BS]
state_5 = RES + 0.0 # RES [-1, BS]
return tf.concat([state_1, state_2],axis=1), tf.concat([state_3, state_4, state_5], axis=1)
def forward(self,state, action):
with tf.variable_scope(self.scope, reuse = tf.AUTO_REUSE):
state_part1, state_part2 = self.state_action_to_PCstate(state, action)
state_part1_ = tf.contrib.layers.fully_connected(state_part1, 512, activation_fn = tf.nn.leaky_relu)
state_part2_ = tf.contrib.layers.fully_connected(state_part2, 512, activation_fn = tf.nn.leaky_relu)
state_post = tf.concat([state_part1_, state_part2_],axis=1)
self.fcn1 = tf.contrib.layers.fully_connected(state_post, 2048, activation_fn =tf.nn.leaky_relu)
self.fcn2= tf.contrib.layers.fully_connected(self.fcn1, 2048, activation_fn =tf.nn.leaky_relu)
self.fcn3= tf.contrib.layers.fully_connected(self.fcn2, 2048, activation_fn =tf.nn.leaky_relu)
self.fcn4= tf.contrib.layers.fully_connected(self.fcn3, 2048, activation_fn =tf.nn.leaky_relu)
self.fcn5= tf.contrib.layers.fully_connected(self.fcn4, 1024, activation_fn =tf.nn.leaky_relu)
self.fcn6= tf.contrib.layers.fully_connected(self.fcn5, 1024, activation_fn =tf.nn.leaky_relu)
self.fcn7= tf.contrib.layers.fully_connected(self.fcn6, 1024, activation_fn =tf.nn.leaky_relu)
self.fcn8= tf.contrib.layers.fully_connected(self.fcn7, 1024, activation_fn =tf.nn.leaky_relu)
self.fcn9= tf.contrib.layers.fully_connected(self.fcn8, 1024, activation_fn =tf.nn.leaky_relu)
self.fcn10= tf.contrib.layers.fully_connected(self.fcn9, 1024, activation_fn =tf.nn.leaky_relu)
self.fcn11= tf.contrib.layers.fully_connected(self.fcn10, 512, activation_fn =tf.nn.leaky_relu)
self.fcn12= tf.contrib.layers.fully_connected(self.fcn11, 512, activation_fn =tf.nn.leaky_relu)
self.Qval = tf.contrib.layers.fully_connected(self.fcn12,self.reward_size,activation_fn = None)
return self.Qval
class DDPG():
def __init__(self, scope, sess, BS, UE, Actor , Critic,Actor_target , Critic_target, OUNoise, replay_buffer, state_size, action_size,reward_size, gamma, lr_actor, lr_critic, batch_size,tau,is_tanh):
self.sess = sess
self.batch_size = batch_size
self.lr_actor = lr_actor
self.lr_critic = lr_critic
self.scope = scope
self.is_tanh = is_tanh
self.gamma = gamma
self.Actor = Actor
self.Critic = Critic
self.Actor_target = Actor_target
self.Critic_target = Critic_target
self.noise = OUNoise
self.replay_buffer = replay_buffer
self.state_size = state_size
self.action_size = action_size
self.tau = tau
self.reward_size = reward_size
self.state = np.zeros([1,state_size])
self.action = np.zeros([1, action_size])
self.state_next = np.zeros([1,state_size])
self.reward = np.zeros([1,self.reward_size])
self.state_ph = tf.placeholder(shape = [None,state_size], dtype = tf.float32)
self.action_ph = tf.placeholder(shape = [None,action_size], dtype = tf.float32)
self.state_ph_next = tf.placeholder(shape = [None,state_size], dtype= tf.float32)
self.reward_ph = tf.placeholder(shape = [None,self.reward_size], dtype = tf.float32)
self.BS = BS
self.UE = UE
# Network models + Actor network update
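# Actor update: the critic's action-gradient builds a slightly improved target
# action, target_a = clip(a + 0.03 * dQ/da, -0.99, 0.99), and the actor is
# regressed toward it with an MSE loss weighted by loss_weight, which is meant to
# be fed with the importance weights returned by PER.sample().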
self.action_tf = self.Actor.forward(self.state_ph)
self.qval = self.Critic.forward(self.state_ph, self.action_tf)
self.gradient_action = tf.reshape(tf.gradients(tf.reduce_sum(self.qval),self.action_tf),[-1,self.action_size])
self.target_action = tf.clip_by_value(tf.stop_gradient(self.action_tf + 0.03*self.gradient_action),-0.99,0.99)
self.loss_weight = tf.placeholder(shape= [None,1], dtype = tf.float32)
self.policy_loss = tf.reduce_mean(self.loss_weight*tf.reduce_mean((self.target_action-self.action_tf)**2,axis=1,keepdims=True))
self.train_policy = tf.train.AdamOptimizer(learning_rate = self.lr_actor).minimize(self.policy_loss)
## Critic network update
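# Critic update: TD target r + gamma * Q_target(s', Actor_target(s')), again
# weighted by the PER importance weights; TD_error below is the per-sample error
# magnitude used to refresh the replay-buffer priorities via PER.update_weight().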
self.action_next_tf = self.Actor_target.forward(self.state_ph_next)
self.target_qval = tf.stop_gradient(self.Critic_target.forward(self.state_ph_next, self.action_next_tf))
self.target_critic = self.reward_ph + self.gamma * self.target_qval
self.loss_critic = tf.reduce_mean(self.loss_weight * tf.reduce_mean((self.target_critic - self.Critic.forward(self.state_ph, self.action_ph))**2,axis=1,keepdims=True))
self.TD_error = tf.sqrt(tf.reduce_sum(tf.abs(self.target_critic - self.Critic.forward(self.state_ph, self.action_ph))**2,axis=1,keepdims=True))
self.loss_critic_wo_noise = tf.reduce_mean(tf.reduce_mean((self.target_critic - self.Critic.forward(self.state_ph, self.action_ph))**2,axis=1,keepdims=True))
self.train_critic = tf.train.AdamOptimizer(learning_rate = self.lr_critic).minimize(self.loss_critic)
self.Actor_noiseless_tf = self.Actor_target.forward(self.state_ph)
tfVars = tf.trainable_variables(scope = self.scope )
tau = self.tau
total_vars = len(tfVars)
self.op_holder =[]
for index, var in enumerate(tfVars[0:int(total_vars/2)]):
self.op_holder.append(tfVars[index+int(total_vars/2)].assign((var.value()*tau)+((1-tau)*tfVars[index+int(total_vars/2)].value())))
def add_exp(self, state, state_next, action, reward):
self.replay_buffer.add(state, state_next, action, reward)
def forward_test_action(self,state):
return self.sess.run(self.Actor_noiseless_tf, feed_dict = {self.state_ph : state})
def forward_noiseless_action(self,state):
return self.sess.run(self.action_tf, feed_dict = {self.state_ph : state})
def forward_noise_action(self,state, step):
if self.is_tanh == True:
output = np.clip(self.sess.run(self.action_tf, feed_dict = {self.state_ph : state}) + self.noise.noise(step), -1., 1.)
else:
output = np.clip(self.sess.run(self.action_tf, feed_dict = {self.state_ph : state}) + self.noise.noise(), 0.00, 1000.)
return output
def forward_loss(self,s,s_1,a,r):
return self.sess.run(self.loss_critic_wo_noise, feed_dict = {self.state_ph : s, self.action_ph: a, self.state_ph_next: s_1, self.reward_ph : r})
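# Prioritized Experience Replay (PER) buffer: transitions are sampled with probability
# proportional to priority**alpha, importance-sampling weights are computed with beta, and
# priorities are refreshed from the critic's TD errors via update_weight().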
class PER():
def __init__(self, buffer_size = 10000, alpha = 0.4, epsilon_per = 0.001, beta = 0.7):
self.alpha = alpha
self.beta = beta
self.epsilon = epsilon_per
self.buffer_size = buffer_size
self.buffer = []
self.prob_bean = np.zeros([0])
self.alpha_decay = (self.alpha- 0.0)/15000
self.beta_increasing = (1.0-self.beta)/15000
def add(self, s,s_1,a,r, ):
self.buffer.append((s,s_1,a,r))
if self.prob_bean.shape[0] == 0:
self.prob_bean = np.concatenate([self.prob_bean,[self.epsilon]],axis=0)
else:
self.prob_bean = np.concatenate([self.prob_bean,[max(self.prob_bean)]],axis=0)
if len(self.buffer) == self.buffer_size +1 :
self.prob_bean = self.prob_bean[1:self.buffer_size+1]
del self.buffer[0]
def sample(self, batch_size):
self.alpha = np.maximum(self.alpha-self.alpha_decay, 0.0)
self.beta = np.minimum(self.beta +self.beta_increasing, 1.0)
batch =list()
idx = np.random.choice(range(len(self.buffer)),size = batch_size, replace = False, p = self.prob_bean**self.alpha/sum(self.prob_bean**self.alpha))
for i in range(batch_size):
batch.append(self.buffer[idx[i]])
s, s_1, a, r = zip(*batch)
s = np.concatenate(s)
s_1 = np.concatenate(s_1)
a = np.concatenate(a)
r = np.concatenate(r)
loss_weight = (1/self.prob_bean[idx]**self.alpha * sum(self.prob_bean**self.alpha)/ len(self.buffer) )**self.beta
loss_weight = loss_weight/max(loss_weight)
return s, s_1, a, r, loss_weight, idx
def update_weight(self, idx, TD_error):
self.prob_bean[idx] = (TD_error.reshape([-1]) + self.epsilon)
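# Supervised-style trainer for the actor (named USL in this repo): the actor network is fitted
# to externally supplied target actions with a mean-squared-error loss.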
class USL():
def __init__(self, scope, sess, BS, UE, Actor , replay_buffer, state_size, action_size, lr_actor, batch_size, alpha_init):
self.sess = sess
self.batch_size = batch_size
self.lr_actor = lr_actor
self.scope = scope
self.Actor = Actor
self.replay_buffer = replay_buffer
self.state_size = state_size
self.action_size = action_size
self.state = np.zeros([1,state_size])
self.action = np.zeros([1, action_size])
self.state_ph = tf.placeholder(shape = [None,state_size], dtype = tf.float32)
self.BS = BS
self.UE = UE
self.radius = 4 * self.BS**0.5
self.mu_ind = np.concatenate([np.ones([1,self.BS]), np.zeros([1,self.BS])],axis=1)
# Network models + Actor network update
self.action_tf = self.Actor.forward(self.state_ph)
self.target_action= tf.placeholder(shape = [None,action_size], dtype = tf.float32)
self.loss = tf.reduce_mean((self.target_action - self.action_tf)**2)
self.train_weights = tf.train.AdamOptimizer(learning_rate = self.lr_actor).minimize(self.loss)
self.alpha = alpha_init
def add_exp(self, state, Rate, QoS, Backhaul):
self.replay_buffer.add(state, Rate, QoS, Backhaul)
def forward_action(self,state):
return self.sess.run(self.action_tf, feed_dict = {self.state_ph : state})
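# Replay buffer for the USL trainer: stores (state, rate, QoS, backhaul) tuples and returns
# uniformly sampled, concatenated mini-batches.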
class USL_replay():
def __init__(self, buffer_size = 10000):
self.buffer_size = buffer_size
self.buffer = []
def add(self, State, Rate, QoS, Backhaul):
Rate = np.expand_dims(Rate,0)
QoS = np.expand_dims(QoS,0)
Backhaul = np.expand_dims(Backhaul,0)
self.buffer.append((State, Rate,QoS,Backhaul))
if len(self.buffer) == self.buffer_size +1 :
del self.buffer[0]
def sample(self, batch_size):
batch =list()
idx = np.random.choice(range(len(self.buffer)),size = batch_size, replace = False)
for i in range(batch_size):
batch.append(self.buffer[idx[i]])
State, Rate, QoS, Backhaul = zip(*batch)
State = np.concatenate(State)
Rate = np.concatenate(Rate)
QoS = np.concatenate(QoS)
Backhaul = np.concatenate(Backhaul)
return State, Rate, QoS, Backhaul
```
#### File: QoS-UARAPC/libs/environment.py
```python
import numpy as np
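# Network environment for joint user association and power control: models per-BS transmit
# power, battery/renewable energy levels, per-user QoS (rate) requirements and per-BS
# backhaul limits over a set of base stations (BS) and user equipments (UE).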
class ENV_net():
def __init__(self,SNR_mat, UE, BS, Bh):
self.UE = UE
self.BS = BS
self.episode = 0
self.step = 0
self.max_level = 500
self.power_default = 37
self.renew_max = 60
self.renew_min = 37
self.tx_dBm = 30
self.tx_w = np.power(10, self.tx_dBm/10)/1000
self.delta = 2.6
self.grid_power_min = 200
self.grid_power_max = 200
self.QoS_pool = np.array([0.192,2.22,1.5,0.6,4.44]) ## Mbps: [Audio, Video, Image, Web browsing, Email]
self.SNR_mat = SNR_mat
self.renew = (np.arange(self.BS) != self.BS-1).astype(int) #np.array([1,1,1,0])
self.grid_power = self.grid_power_min + (self.grid_power_max-self.grid_power_min)*np.random.uniform(size= [self.BS])
self.grid_power = self.grid_power * (1-self.renew)
self.Backhaul_lim = Bh
self.Backhaul = self.Backhaul_lim + (50-self.Backhaul_lim )*(1-self.renew)
self.RB = 100
self.BW = 2e+7
self.action_size_PC = self.BS
self.action_size_UA = 2*self.BS
self.reward_size_PC = 2 * self.BS #3*self.BS
self.reward_size_UA = 4*self.BS #+ 2*self.UE
self.state_size_PC = self.UE*self.BS + 5*self.BS + self.UE
self.state_size_UA = self.UE*self.BS + 2* self.BS + self.UE
self.QoS_penalty = 4.0
self.Backhaul_penalty =100
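# Start an episode: pick a channel realisation, build an initial user association by greedily
# matching UEs to BSs, sample per-user QoS targets and initialise the battery, renewable-energy
# and power-control state vector.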
def reset(self,is_test=False):
if is_test:
self.UA_set = np.arange(self.UE)
self.H_mat = self.SNR_mat[self.episode, :, :,:].copy()
else:
self.UA_set = np.random.permutation(self.SNR_mat.shape[2])[0:self.UE]
self.H_mat = self.SNR_mat[np.mod(self.episode+int(np.random.uniform(0,self.SNR_mat.shape[0])) ,self.SNR_mat.shape[0]), :, :,:].copy()
self.H_mat = self.H_mat[:,self.UA_set,:].copy()
H = self.H_mat[0,:,:].copy()
UA = np.zeros([self.UE]).astype(int)
for i in range(self.UE):
BS_ind = np.mod(i, self.BS)
UE_ind = np.argmax(H[:,BS_ind])
H[UE_ind,:] = -1.0
UA[BS_ind * int(self.UE/self.BS) + int(i/self.BS)] = UE_ind
self.H_mat = self.H_mat[:,UA,:].copy()
self.H = self.H_mat[0, :,:].copy()
self.QoS = np.random.choice(self.QoS_pool.shape[0],[self.UE,1])+0.0
for i in range(self.QoS_pool.shape[0]):
self.QoS[self.QoS == i] = self.QoS_pool[i]
self.QoS[self.QoS==2.22] = (0.6 + (1.4-0.6)*np.random.uniform(size= [np.sum(self.QoS==2.22)]) )
self.QoS[self.QoS==4.44] = (2.0 + (6.0-2.0)*np.random.uniform(size= [np.sum(self.QoS==4.44)]) )
self.b_level = 100 * self.renew
self.res_source =(self.renew_min+ (self.renew_max - self.renew_min)*np.random.uniform(size = [self.BS]))*self.renew
self.state_PC = np.concatenate([(self.H*(self.b_level + self.grid_power - self.power_default)/260).reshape([1,-1])/2000.0, self.QoS.reshape([1,-1]), (np.maximum(self.b_level+self.grid_power - self.power_default,0.0).reshape([1,-1])/260.0), self.b_level.reshape([1,-1])/260, self.grid_power.reshape([1,-1])/260, (self.res_source).reshape([1,-1])/260,self.Backhaul.reshape([1,-1])/10.0], axis=1)
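# Map the power-control action to per-BS transmit power, compute SINR-based per-UE rates and
# assemble the state vector used by the user-association (UA) agent.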
def get_state_UA(self):
self.P = np.clip( (self.renew * (0.01 + 0.69 * (self.action[0,0:self.BS]+1)/2) + (1-self.renew) * (0.01 + 0.99 * (self.action[0,0:self.BS]+1)/2))*(self.b_level + self.grid_power -self.power_default)/self.delta/self.RB,
0, 1).reshape([1,-1])
SNR = self.H * self.P
SINR = SNR/ ( 1+ np.tile(np.sum(SNR,axis=1,keepdims=True),[1, self.BS]) - SNR)
self.Rate = np.log2(1+SINR)*100*0.18 + 0.001
self.state_UA = np.concatenate([np.max(self.Rate,axis=0).reshape([1,-1]),-np.log(1+self.QoS/self.Rate).reshape([1,-1]),self.Backhaul.reshape([1,-1]), self.QoS.reshape([1,-1])],axis=1)
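# Greedy user-association search: given rates, QoS demands, backhaul limits and the
# multiplier-like variables (mu, rho) produced by the UA agent, iteratively reassign UEs to BSs
# and return the association matrix X together with an estimated QoS failure rate.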
def get_X(self, Rate, QoS, Backhaul, mu, rho, is_print=False):
mu = np.expand_dims(mu,axis=1)
rho = np.expand_dims(rho,axis=1)
Backhaul = np.expand_dims(Backhaul,axis=1)
X = (np.expand_dims(np.argmax(Rate,axis=2),2) == np.arange(self.BS).reshape([1,1,self.BS]))+0.0
lamb = np.max(Rate*X,axis=1,keepdims=True)
count = 0
while 1:
lamb_old = lamb.copy()
if X.shape[0] > 0:
UE_order = np.random.permutation(self.UE)
else:
UE_order = np.argsort(np.min(np.maximum(QoS/Rate, QoS/Backhaul)[0,:,:],axis=1))
for UE_ind in UE_order:
X[:,UE_ind,:] = 0
lamb = np.max(Rate*X,axis=1,keepdims=True)
UE_opt = -(1+mu)*QoS * lamb/Rate - rho * QoS
## Tie Break
UE_select = np.argmax(UE_opt[:,UE_ind,:],axis=1)[0]
BB= -UE_opt[0,UE_ind,:].copy()
indices = np.argsort(BB,axis=0)
R_remain = 1-np.sum(np.sum(QoS/Rate*X,axis=1),axis=0)
B_remain = Backhaul[0,0,:] - np.sum(np.sum(QoS*X,axis=1),axis=0)
if R_remain[UE_select] < QoS[0,UE_ind,0]/Rate[0,UE_ind,UE_select] or B_remain[UE_select] < QoS[0,UE_ind,0]:
X[:,UE_ind,:] = 0.0
X[:,UE_ind,:] = (UE_select == np.arange(self.BS).reshape([1,self.BS]))+0.0
Y = self.get_Y(X[0,:,:],mu[0,:,:],rho[0,:,:])
reward_org = np.sum(self.Rate * X[0,:,:] * Y)/40 - np.sum(self.QoS > np.sum(self.Rate * X[0,:,:]*Y,axis=1,keepdims=True)+1e-7)/self.UE * 40
for B_ind in indices:
if abs(np.log(abs(BB[UE_select] / BB[B_ind])))<0.5:
X[:,UE_ind,:] = 0
X[:,UE_ind,:] = (B_ind == np.arange(self.BS).reshape([1,self.BS]))+0.0
Y=self.get_Y(X[0,:,:],mu[0,:,:],rho[0,:,:])
reward_new = np.sum(self.Rate * X[0,:,:] * Y)/40 - np.sum(self.QoS > np.sum(self.Rate * X[0,:,:]*Y,axis=1,keepdims=True)+1e-7)/self.UE * 40
if reward_new >reward_org:
UE_select = B_ind
break
X[:,UE_ind,:] = 0.0
X[:,UE_ind,:] = (UE_select == np.arange(self.BS).reshape([1,self.BS]))+0.0
lamb = np.max(Rate*X,axis=1,keepdims=True)
if np.sum(abs(lamb_old-lamb)>1e-7) == 0:
count = count+1
if count > 1:
break
Y = QoS / Rate * X #[Batch, UE, BS]
Y_opt = Y.copy()
Y_opt[Y_opt==0] = 9999999.9
Y_s = np.sort(Y_opt,axis=1)
QoS_tile = np.tile(QoS, [1,1,self.BS])
ind = np.argsort(Y_opt,axis=1)
QoS_s = np.take_along_axis(QoS_tile, ind, axis=1)
fail_rate = 1-np.sum((np.cumsum(Y_s,axis=1) < 1) * (np.cumsum(QoS_s,axis=1)<Backhaul) )/self.UE
return X.copy(), fail_rate
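# Compute resource-sharing fractions Y for a given association X: drop users that would exceed
# a BS's resource budget or backhaul limit, then grant any leftover resources to the best-rate
# user of each BS.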
def get_Y(self,X,mu,rho):
Z = (np.argmax(self.Rate*X,axis=0).reshape([1,-1]) == np.arange(self.UE).reshape([-1,1]))* X+0.0
Y = self.QoS/(self.Rate)*X
for BS_ind in range(self.BS):
while np.sum(Y[:,BS_ind]) > 1+1e-11 :
ind = np.argmax(Y[:,BS_ind])
Y[ind,BS_ind] = 0 #np.maximum(0, Y[ind,BS_ind] - (-1+ np.sum(Y[:,BS_ind])))
while self.Backhaul[BS_ind] < np.sum(X[:,BS_ind]*self.Rate[:,BS_ind]*Y[:,BS_ind])-1e-11:
ind = np.argmax((self.Rate * X * Y)[:,BS_ind])
Y[ind,BS_ind] = 0# np.maximum(0, Y[ind,BS_ind]- (np.sum(X[:,BS_ind]*self.Rate[:,BS_ind]*Y[:,BS_ind])-self.Backhaul[BS_ind])/self.Rate[ind,BS_ind] )
Y = Y*(1-Z)
Y = Y + Z* np.minimum( 1-np.sum(Y,axis=0,keepdims=True), np.tile(((self.Backhaul - np.sum(self.Rate*X*Y,axis=0))/np.sum(self.Rate*Z+0.00000001,axis=0)).reshape([1,self.BS]),[self.UE,1]))
return Y
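# Environment step: use the UA action (mu, rho) to derive the association X and resource shares Y,
# compute per-BS rate and QoS-violation rewards, update battery levels from renewable arrivals
# and build the next power-control state.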
def proceed_master(self, is_train = False):
mu = (self.action_UA[0,0: self.BS]).reshape([1,-1]).copy() # [1,BS]
rho = (self.action_UA[0,self.BS: 2*self.BS]).reshape([1,-1]).copy() # [1,BS]
if is_train:
X = (np.argmax(self.Rate,axis=1).reshape([-1,1]) == np.arange(self.BS).reshape([1,-1]))
fail_rate = 99
else:
X, fail_rate = self.get_X(self.Rate.reshape([1,self.UE,self.BS]).copy(), self.QoS.reshape([1,self.UE,1]).copy(), self.Backhaul.reshape([1,self.BS]).copy(), mu.copy(), rho.copy())
X.shape = [self.UE, self.BS]
Y = self.get_Y(X,mu,rho)
####################
lamb = np.max(self.Rate*X,axis = 0,keepdims=True)
fail_true = np.sum(self.QoS*X - np.sum(self.Rate *X*Y, axis=1,keepdims=True) - 1e-5 > 0, axis = 0).reshape([1,self.BS])
self.sum_rate = np.sum(self.Rate*X*Y)
self.QoS_unsatisfactory = np.sum(self.QoS > np.sum(self.Rate *X*Y, axis=1,keepdims=True)+1e-7)/self.UE
self.reward_PC_part1 = np.sum(self.Rate*X*Y,axis=0).reshape([1,self.BS])/40
self.reward_PC_part2 = -fail_true *40 / self.UE
self.reward = np.concatenate([self.reward_PC_part1,self.reward_PC_part2],axis=1)
self.QoS_over_amount = np.maximum(self.QoS.reshape([-1]) - np.sum(self.Rate*X*Y,axis=1).reshape([-1]),0.0)
self.Lag = np.sum((1+mu) * lamb *( 1- np.sum(self.QoS/self.Rate*X,axis=0,keepdims=True))) + np.sum( rho * (self.Backhaul.reshape([1,-1]) - np.sum(self.QoS*X,axis=0,keepdims=True)))
## New State
Net_b_loss = np.maximum( self.power_default + self.RB * self.tx_w * self.P * np.sum(X*Y,axis=0) * self.delta - self.grid_power,0)
self.X_save = X.copy()
self.RES_save = self.res_source.copy()
self.b_level = np.maximum(np.minimum( self.b_level - Net_b_loss + self.res_source,self.max_level ),0)
self.res_source = np.clip(3*np.random.normal(size=[self.BS]) +self.res_source, 37, 60) * self.renew
self.H = self.H_mat[self.step+1, : ,:].copy()
self.state_next = np.concatenate([(self.H*(self.b_level + self.grid_power - self.power_default)/260).reshape([1,-1])/2000.0, self.QoS.reshape([1,-1]), ((self.b_level + self.grid_power - self.power_default)/260).reshape([1,self.BS]), self.b_level.reshape([1,-1])/260, self.grid_power.reshape([1,-1])/260, (self.res_source).reshape([1,self.BS])/260,self.Backhaul.reshape([1,-1])/10.0], axis=1)
``` |
{
"source": "jongha/python-finance-exchange-notify",
"score": 3
} |
#### File: python-finance-exchange-notify/notify/notify.py
```python
from smtplib import SMTP
import datetime
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
import json, requests
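# Simple SMTP notifier: logs in to the configured server (when credentials are given) and sends
# the message as both plain-text and HTML MIME parts.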
class Notify(object):
username = None
password = <PASSWORD>
server = None
port = None
def __init__(self, username, password, server, port=587):
self.username = username
self.password = password
self.server = server
self.port = port
def sendmail(self, subject='', content='', recipient=None):
conn = SMTP(host=self.server, port=self.port)
conn.set_debuglevel(False)
if self.username and self.password:
conn.login(self.username, self.password)
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = self.username
msg['To'] = recipient
part1 = MIMEText(content, 'plain', 'utf-8')
part2 = MIMEText(content, 'html', 'utf-8')
msg.attach(part1)
msg.attach(part2)
try:
conn.sendmail(self.username, recipient.split(','), msg.as_string())
finally:
conn.close()
print('Sending...')
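# Example usage (illustrative sketch only; the server, credentials and addresses below are
# placeholders, not values from this repository):
# notifier = Notify('alerts@example.com', 'app-password', 'smtp.example.com')
# notifier.sendmail(subject='Exchange rate alert',
#                   content='<b>Rate changed</b>',
#                   recipient='you@example.com,teammate@example.com')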
``` |