metadata (dict) | text (string, lengths 60–3.49M chars)
---|---
{
"source": "jctaillandier/ethical-adversaries",
"score": 3
}
|
#### File: ethical-adversaries/scripts/attack_model.py
```python
import numpy as np
import pandas as pd
import os
from sklearn import preprocessing
import logging
logger = logging.getLogger(__name__)
def transform_dataset(df):
"""
:param df: the raw COMPAS "two year recidivism" dataframe
:return: Tuple of the transformed dataset and the labels Y, S and Y_true
"""
df_binary = df[(df["race"] == "Caucasian") | (df["race"] == "African-American")]
del df_binary['c_jail_in']
del df_binary['c_jail_out']
## separate the class label from the rest of the features
# remove unnecessary dimensions from Y -> only the decile_score remains
Y = df_binary['decile_score']
del df_binary['decile_score']
Y_true = df_binary['two_year_recid']
del df_binary['two_year_recid']
del df_binary['score_text']
S = df_binary['race']
#del df_binary['race']
#del df_binary['is_recid']
print(df_binary.shape)
# set sparse to False to return dense matrix after transformation and keep all dimensions homogeneous
encod = preprocessing.OneHotEncoder(sparse=False)
data_to_encode = df_binary.to_numpy()
feat_to_encode = data_to_encode[:, 0]
# print(feat_to_encode)
# transposition
feat_to_encode = feat_to_encode.reshape(-1, 1)
# print(feat_to_encode)
encoded_feature = encod.fit_transform(feat_to_encode)
df_binary_encoded = pd.DataFrame(encoded_feature)
feat_to_encode = data_to_encode[:, 1]
feat_to_encode = feat_to_encode.reshape(-1, 1)
encoded_feature = encod.fit_transform(feat_to_encode)
df_binary_encoded = pd.concat([df_binary_encoded, pd.DataFrame(encoded_feature)], axis=1)
feat_to_encode = data_to_encode[:, 2] == "Caucasian"
feat_to_encode = feat_to_encode.reshape(-1, 1)
encoded_feature = encod.fit_transform(feat_to_encode)
df_binary_encoded = pd.concat([df_binary_encoded, pd.DataFrame(encoded_feature)], axis=1)
# features at indices [3] through [9] are numerical and are min-max scaled to [0, 1]
for i in range(3, 10):
encoded_feature = data_to_encode[:, i]
ma = np.amax(encoded_feature)
mi = np.amin(encoded_feature)
encoded_feature = (encoded_feature - mi) / (ma - mi)
df_binary_encoded = pd.concat([df_binary_encoded, pd.DataFrame(encoded_feature)], axis=1)
feat_to_encode = data_to_encode[:, 10]
feat_to_encode = feat_to_encode.reshape(-1, 1)
encoded_feature = encod.fit_transform(feat_to_encode)
df_binary_encoded = pd.concat([df_binary_encoded, pd.DataFrame(encoded_feature)], axis=1)
feat_to_encode = data_to_encode[:, 11]
feat_to_encode = feat_to_encode.reshape(-1, 1)
encoded_feature = encod.fit_transform(feat_to_encode)
df_binary_encoded = pd.concat([df_binary_encoded, pd.DataFrame(encoded_feature)], axis=1)
return df_binary_encoded, Y, S, Y_true
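# Illustrative usage of transform_dataset (a sketch; mirrors the __main__ block at
# the bottom of this file):
#   raw = pd.read_csv("path/to/compas_two_years.csv")  # hypothetical path
#   X, Y, S, Y_true = transform_dataset(raw)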
def transform_dataset_census(df):
"""
:param df: the dataset "census income" from a csv file with reduced features, heterogeneous types and missing values, no header
:return: Tuple of the transformed dataset and the labels Y, S and (empty) Y_true
"""
label_encoder = preprocessing.LabelEncoder()
oh_encoder = preprocessing.OneHotEncoder(sparse=False)
##Y_true is the vector containing labels, at this point, labels (initially strings) have been transformed into integer (0 and 1) -> -5000 is now '0' and 5000+ is now '+1'
# Y_true is the true outcome, in this case we're not using a future predictor (vs. compas)
Y_true=[]
#remove examples with missing values
df_replace = df.replace(to_replace="?",value=np.nan)
df_replace.dropna(inplace=True, axis=0)
if df_replace.shape == df.shape:
raise AssertionError("The removal of na values failed")
df_label = df_replace.iloc[:,-1]
Y = label_encoder.fit_transform(df_label)
#remove last column from df
del df_replace[df_replace.columns[-1]]
#S is the protected attribute
# could also be feature 7 (sex) or feature 13 (citizenship)
S=df_replace["sex"]
del df_replace["sex"]
#remove feature fnlwgt
del df_replace["fnlwgt"]
print(df_replace.shape)
#transform other features
#feature age to normalize
# df_replace.reset_index(inplace=True)
encoded_feature = df_replace.to_numpy()[:, 0]
mi = np.amin(encoded_feature)
ma = np.amax(encoded_feature)
encoded_feature = (encoded_feature - mi) / (ma - mi)
#df_binary_encoded is the data frame containing encoded features
df_binary_encoded = pd.DataFrame(encoded_feature)
#feature 1 to 7 (after removal) are categorical
for i in range(1,8):
encod_feature = df_replace.iloc[:,i]
encoded_feature = pd.get_dummies(encod_feature)
df_binary_encoded = pd.concat([df_binary_encoded, pd.DataFrame(encoded_feature).reset_index(drop=True)], axis=1)
#feature 8 and 9 are numerical
for i in range(8,10):
encod_feature = df_replace.iloc[:,i]
mi = np.amin(encod_feature)
ma = np.amax(encod_feature)
encoded_feature = (encod_feature - mi) / (ma - mi)
df_binary_encoded = pd.concat([df_binary_encoded, pd.DataFrame(encoded_feature).reset_index(drop=True)], axis=1)
#feature 10 and 11 are categorical
for i in range(10,12):
encod_feature = df_replace.iloc[:,i]
encoded_feature = pd.get_dummies(encod_feature)
df_binary_encoded = pd.concat([df_binary_encoded, pd.DataFrame(encoded_feature).reset_index(drop=True)], axis=1)
return df_binary_encoded, Y, S, Y_true
def transform_dataset_credit(df):
"""
For more info on the features:
https://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29
:param df: the dataset "german credit" from a space separated file
:return: Tuple of the transformed dataset and the labels Y, S and (empty) Y_true
"""
label_encoder = preprocessing.LabelEncoder()
oh_encoder = preprocessing.OneHotEncoder(sparse=False)
Y = np.array(df.iloc[:,-1] == 2)
##Y_true is the vector containing labels, at this point, labels (initially strings) have been transformed into integer (0 and 1) -> -5000 is now '0' and 5000+ is now '+1'
#remove last column from df
del df[df.columns[-1]]
# Y_true is the true outcome, in this case we're not using a future predictor (vs. compas)
Y_true=[]
#S is the protected attribute
S=df.iloc[:,12] > 25
#del df["Age"]
#remove examples with missing values
df_replace = df.replace(to_replace="?",value=np.nan)
df_replace.dropna(inplace=True, axis=1)
print(df_replace.shape)
#transform other features
#feature age to normalize
encoded_feature = df_replace.to_numpy()[:, 1]
mi = np.amin(encoded_feature)
ma = np.amax(encoded_feature)
encoded_feature = (encoded_feature - mi) / (ma - mi)
#df_binary_encoded is the data frame containing encoded features
df_binary_encoded = pd.DataFrame(encoded_feature)
# categorical attributes
for i in [0, 2, 3, 5, 6, 8, 9, 11, 13, 14, 16, 18,19]:
encod_feature = df_replace.iloc[:,i]
encoded_feature = pd.get_dummies(encod_feature)
df_binary_encoded = pd.concat([df_binary_encoded, pd.DataFrame(encoded_feature)], axis=1)
# Numerical attributes
for i in [1, 7, 10, 15, 17]:
encod_feature = df_replace.iloc[:,i]
mi = np.amin(encod_feature)
ma = np.amax(encod_feature)
encoded_feature = (encod_feature - mi) / (ma - mi)
df_binary_encoded = pd.concat([df_binary_encoded, pd.DataFrame(encoded_feature)], axis=1)
print(S)
return df_binary_encoded, Y, S, Y_true
def attack_keras_model(X, Y, S, nb_attack=25, dmax=0.1):
"""
Generates an adversarial attack on a general model.
:param X: Original inputs on which the model is trained
:param Y: Original outputs on which the model is trained
:param S: Original protected attributes on which the model is trained
:param nb_attack: number of adversarial points to generate
:param dmax: maximum perturbation size allowed for each attack
:return: Tuple of (adversarial points, their predicted classes, labels of the first nb_attack test samples)
"""
from secml.data import CDataset
from secml.array import CArray
# secML wants all dimensions to be homogeneous (we had previously float and int in X)
data_set_encoded_secML = CArray(X, dtype=float, copy=True)
data_set_encoded_secML = CDataset(data_set_encoded_secML, Y)
n_tr = round(0.66 * X.shape[0])
n_ts = X.shape[0] - n_tr
logger.debug(X.shape)
logger.debug(n_tr)
logger.debug(n_ts)
from secml.data.splitter import CTrainTestSplit
splitter = CTrainTestSplit(train_size=n_tr, test_size=n_ts)
# Use training set for the classifier and then pick points from an internal test set.
tr_set_secML, ts_set_secML = splitter.split(data_set_encoded_secML)
# tr_set_secML = CDataset(X_train,Y_train)
# ts_set_secML = CDataset(X_test,Y_test)
# Create a surrogate classifier
# Creation of the multiclass classifier
from secml.ml.classifiers import CClassifierSVM
from secml.ml.classifiers.multiclass import CClassifierMulticlassOVA
from secml.ml.kernel import CKernelRBF
clf = CClassifierMulticlassOVA(CClassifierSVM, kernel=CKernelRBF())
# Parameters for the Cross-Validation procedure
xval_params = {'C': [1e-4, 1e-3, 1e-2, 0.1, 1], 'kernel.gamma': [0.01, 0.1, 1, 10, 100, 1e3]}
# Let's create a 3-Fold data splitter
random_state = 999
from secml.data.splitter import CDataSplitterKFold
xval_splitter = CDataSplitterKFold(num_folds=3, random_state=random_state)
# Select and set the best training parameters for the classifier
logger.debug("Estimating the best training parameters...")
best_params = clf.estimate_parameters(
dataset=tr_set_secML,
parameters=xval_params,
splitter=xval_splitter,
metric='accuracy',
perf_evaluator='xval'
)
logger.debug("The best training parameters are: ", best_params)
logger.debug(clf.get_params())
logger.debug(clf.num_classifiers)
# Metric to use for training and performance evaluation
from secml.ml.peval.metrics import CMetricAccuracy
metric = CMetricAccuracy()
# Train the classifier
clf.fit(tr_set_secML)
logger.debug(clf.num_classifiers)
# Compute predictions on a test set
y_pred = clf.predict(ts_set_secML.X)
# Evaluate the accuracy of the classifier
acc = metric.performance_score(y_true=ts_set_secML.Y, y_pred=y_pred)
logger.debug("Accuracy on test set: {:.2%}".format(acc))
# Prepare attack configuration
noise_type = 'l2' # Type of perturbation 'l1' or 'l2'
lb, ub = 0, 1 # Bounds of the attack space. Can be set to `None` for unbounded
y_target = None # None if `error-generic` or a class label for `error-specific`
# Should be chosen depending on the optimization problem
solver_params = {
'eta': 0.1, # grid search resolution
'eta_min': 0.1,
'eta_max': None, # None should be ok
'max_iter': 1000,
'eps': 1e-2 # Tolerance on the stopping crit.
}
# Run attack
from secml.adv.attacks.evasion import CAttackEvasionPGDLS
pgd_ls_attack = CAttackEvasionPGDLS(
classifier=clf,
surrogate_classifier=clf,
surrogate_data=tr_set_secML,
distance=noise_type,
dmax=dmax,
lb=lb, ub=ub,
solver_params=solver_params,
y_target=y_target)
nb_feat = X.shape[1]
result_pts = np.empty([nb_attack, nb_feat])
result_class = np.empty([nb_attack, 1])
# take a point at random being the starting point of the attack and run the attack
import random
for nb_iter in range(0, nb_attack):
rn = random.randint(0, ts_set_secML.num_samples - 1)
x0, y0 = ts_set_secML[rn, :].X, ts_set_secML[rn, :].Y,
try:
y_pred_pgdls, _, adv_ds_pgdls, _ = pgd_ls_attack.run(x0, y0)
adv_pt = adv_ds_pgdls.X.get_data()
# np.asarray([np.asarray(row, dtype=float) for row in y_tr], dtype=float)
result_pts[nb_iter] = adv_pt
result_class[nb_iter] = y_pred_pgdls.get_data()[0]
except ValueError:
logger.warning("value error on {}".format(nb_iter))
return result_pts, result_class, ts_set_secML[:nb_attack, :].Y
if __name__ == '__main__':
df = pd.read_csv(os.path.join("..", "data", "csv", "scikit", "compas_recidive_two_years_sanitize_age_category_jail_time_decile_score.csv"))
df, Y, S, Y_true = transform_dataset(df)
result = attack_keras_model(df, Y=Y, S=S)
# number of attack for which the classifier gives a different response than y0
#print(np.count_nonzero(result_class != y0))
```
|
{
"source": "jctanner/ansible-tools",
"score": 2
}
|
#### File: ansible-tools/scripts/modules_changelog.py
```python
import os
import sys
import subprocess
from optparse import OptionParser
from pprint import pprint
def run_command(cmd):
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(so, se) = p.communicate()
return (p.returncode, so, se)
def list_module_files(dirpath):
''' Make a list of current modules in the checkout dir '''
moduledir = os.path.join(dirpath, 'lib/ansible/modules')
cmd = "find %s -type f -name '*.py'" % moduledir
(rc, so, se) = run_command(cmd)
files = [x.strip() for x in so.split('\n') if x.strip()]
return files
def get_commits_for_files(dirpath,
paths=['lib/ansible/modules/core', 'lib/ansible/modules/extras']):
''' Associate a commitid to every file under given paths '''
files = {}
for path in paths:
full_path = os.path.join(dirpath, path)
files[full_path] = {}
# use git to get commits with files added
cmd = "cd %s ; git log --name-status | egrep -e ^commit -e ^'A'" % full_path
(rc, so, se) = run_command(cmd)
lines = [x.strip() for x in so.split('\n') if x.strip()]
commitid = None
for idx,x in enumerate(lines):
if x.strip().startswith('commit '):
commitid = x.split()[1].strip()
if x.strip().startswith('A\t'):
thisfile = x.split('\t',1)[1].strip()
files[full_path][thisfile] = commitid
return files
def branches_for_commit(dirpath, commitid):
''' Return a list of branches for a commitid '''
# http://stackoverflow.com/questions/1419623/how-to-list-branches-that-contain-a-given-commit
# git branch --contains <commit>
#print dirpath, commitid
cmd = "cd %s; git branch -r --contains %s" % (dirpath, commitid)
(rc, so, se) = run_command(cmd)
branches = [x.strip() for x in so.split('\n') if x.strip()]
branches = [x for x in branches if not x.startswith('*')]
#pprint(branches)
return branches
def parse_changelog(dirpath):
''' Convert the changelog into a datastructure '''
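# Illustrative CHANGELOG.md shape this parser assumes (not taken from a specific release):
#   ## 2.1 "Codename"
#   ####New Modules:
#   - cloud
#   * ec2_vpc_net
#   * ec2_group_facts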
changelog = {}
changelogfile = os.path.join(dirpath, 'CHANGELOG.md')
fdata = None
with open(changelogfile, 'rb') as f:
fdata = f.read()
lines = [x for x in fdata.split('\n') if x.strip()]
thisversion = None
thissection = None
thismodule = None
thismoduletopic = None
#for idx, line in enumerate(lines[0:55]):
for idx, line in enumerate(lines):
if line.startswith('## '):
thisversion = line.split()[1]
changelog[thisversion] = {}
changelog[thisversion]['newmodules'] = {}
changelog[thisversion]['newmodules']['orphaned'] = []
if line.startswith('####') and line[4] != '#':
thissection = line.replace('####', '').replace(':', '').lower()
###########################################
# MODULES
###########################################
if thissection == 'new modules':
thismodule = None
if line.strip().startswith('-'):
# either a section or a module, depends on next line
if lines[idx+1].strip().startswith('*'):
thismoduletopic = line.replace('-', '', 1).strip()
else:
thismoduletopic = None
thismodule = line.replace('-', '', 1).strip()
elif line.strip().startswith('*'):
thismodule = line.replace('*', '').strip()
if thismodule and not thismoduletopic:
changelog[thisversion]['newmodules']['orphaned'].append(thismodule)
elif thismodule and thismoduletopic:
if not thismoduletopic in changelog[thisversion]['newmodules']:
changelog[thisversion]['newmodules'][thismoduletopic] = []
changelog[thisversion]['newmodules'][thismoduletopic].append(thismodule)
return changelog
def main():
parser = OptionParser()
(options, args) = parser.parse_args()
dirpath = args[0]
# Make a list of all current module files
current_files = list_module_files(dirpath)
# Make a datastructure representing the modules added in each version
# by parsing the current changelog file.
changelog = parse_changelog(dirpath)
# Make a list of ansible versions
ansible_versions = sorted(changelog.keys())
# Mark the last version as the current devel version
ansible_devel_version = ansible_versions[-1]
# Make a list of the commitid for every module file
file_commits = get_commits_for_files(dirpath)
# Iterate each module directory (core|extras)
for k,v in file_commits.iteritems():
# Iterate through each file in the module directory
for kfile,commitid in v.iteritems():
# Skip non-python files
if not kfile.endswith('.py'):
continue
# join the module dir and the filepath
fullpath = os.path.join(k, kfile)
# Make sure it is a directory
if '.' in os.path.basename(fullpath):
fullpath = os.path.dirname(fullpath)
if not os.path.isdir(fullpath) or (os.path.basename(kfile) == '__init__.py'):
continue
branches = branches_for_commit(fullpath, commitid)
# check if commit only in devel and if in changelog
non_devel_branches = [x for x in branches if not 'origin/devel' in x]
if len(non_devel_branches) == 0:
module_name = os.path.basename(kfile).replace('.py', '')
inchangelog = False
for kc,vc in changelog[ansible_devel_version]['newmodules'].iteritems():
for vfile in vc:
if vfile == module_name:
inchangelog = True
break
if not inchangelog:
print "MODULE (%s) IS NOT IN THE %s CHANGELOG!!!" % (module_name, ansible_devel_version)
if __name__ == "__main__":
main()
```
#### File: ansible-tools/scripts/run_ssh_cmd.py
```python
import fcntl
import json
import mock
import os
import re
import subprocess
import time
from datetime import datetime
from optparse import OptionParser
from ansible.compat import selectors
from ansible.errors import AnsibleError
from ansible.plugins.loader import connection_loader
HAS_LOGZERO = False
try:
from logzero import logger
HAS_LOGZERO = True
except ImportError:
pass
class MockLogger(object):
level = 'DEBUG'
def setLevel(self, level):
self.level = level
def info(self, msg):
print(msg)
def error(self, msg):
print(msg)
def debug(self, msg):
if self.level == 'DEBUG':
print(msg)
@staticmethod
def debug(msg, host=None):
print(msg)
@staticmethod
def v(msg, host=None):
print(msg)
@staticmethod
def vv(msg, host=None):
print(msg)
@staticmethod
def vvv(msg, host=None):
print(msg)
@staticmethod
def vvvv(msg, host=None):
print(msg)
@staticmethod
def vvvvv(msg, host=None):
print(msg)
class MockPlayContext(object):
executable = '/bin/sh'
shell = 'sh'
ssh_executable = 'ssh'
port = 22
remote_user = 'vagrant'
password = '<PASSWORD>'
_load_name = 'ssh'
name = 'ssh'
timeout = 10
verbosity = 5
ssh_args = None
private_key_file = None
prompt = None
become = False
if not HAS_LOGZERO:
print('PLEASE INSTALL LOGZERO FOR BEST EXPERIENCE')
logger = MockLogger()
SSHCMD = [
"/usr/bin/ssh",
"-vvvvvv",
"-C",
"-o",
"ControlMaster=auto",
"-o",
"ControlPersist=60s",
"-o",
"IdentityFile=\"~/.ssh/id_rsa\"",
"-o",
"KbdInteractiveAuthentication=no",
"-o",
"PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
"-o",
"PasswordAuthentication=no",
"-o",
"User=vagrant",
"-o",
"ConnectTimeout=10",
"-o",
"ControlPath=~/.ansible/cp/testcp",
"el6host",
"/bin/sh -c 'echo ~vagrant && sleep 0'"
]
def validate_control_socket(SSHCMD):
# $ ssh -O check -o ControlPath=... vagrant@el6host
# Master running (pid=24779)
for idx, x in enumerate(SSHCMD):
if x.startswith('ControlPath'):
cppath = x.split('=')[1]
if not os.path.exists(cppath):
logger.info('%s does not exist' % cppath)
else:
cpcmd = SSHCMD[:-1]
checkcmd = cpcmd[:]
checkcmd.insert(-1, '-O')
checkcmd.insert(-1, 'check')
print('# %s' % ' '.join(checkcmd))
(rc, so, se) = run_ssh_cmd(
checkcmd,
use_selectors=False
)
logger.debug('rc: %s' % rc)
logger.debug('so: %s' % so)
logger.debug('se: %s' % se)
if rc != 0 or so.strip():
logger.info('checkcmd rc != 0 or has stdout')
logger.info(so)
logger.info(se)
def set_vcount(SSHCMD, count=None):
if count is None:
return SSHCMD
isset = False
for idx, x in enumerate(SSHCMD):
if x.startswith('-v'):
isset = True
SSHCMD[idx] = '-' + ''.join(['v' for x in range(0, count)])
if not isset:
SSHCMD.insert(1, '-' + ''.join(['v' for x in range(0, count)]))
return SSHCMD
def set_hostname(SSHCMD, hostname):
SSHCMD[-2] = hostname
return SSHCMD
def set_username(SSHCMD, username):
for idx, x in enumerate(SSHCMD):
if x.startswith('User='):
SSHCMD[idx] = 'User=%s' % username
if 'echo ~' in x:
orig = re.search(r'~\w+', x).group()
new = '~%s' % username
SSHCMD[idx] = x.replace(orig, new, 1)
return SSHCMD
def set_keyfile(SSHCMD, keyfile):
# "IdentityFile=\"~/.ssh/id_rsa\"",
for idx, x in enumerate(SSHCMD):
if x.startswith('IdentityFile'):
SSHCMD[idx] = 'IdentityFile="%s"' % keyfile
break
return SSHCMD
def remove_control_persist(SSHCMD):
while True:
if not [x for x in SSHCMD if x.startswith('Control')]:
break
for idx, x in enumerate(SSHCMD):
if x.startswith('Control'):
print('popping %s' % x)
SSHCMD.pop(idx)
SSHCMD.pop(idx-1)
print(' '.join(SSHCMD))
break
return SSHCMD
def extract_speed_from_stdout(so):
'''Parse the ssh transfer statistics out of the debug output (stderr/stdout)'''
# Transferred: sent 3192, received 2816 bytes, in 1.6 seconds
# Bytes per second: sent 1960.0, received 1729.1
data = {}
for line in so.split('\n'):
if 'Transferred' in line:
sent = re.search(r'sent \d+', line).group()
received = re.search(r'received \d+', line).group()
duration = re.search(r'in \d+\.\d+', line).group()
data['transferred'] = {
'sent': float(sent.split()[1]),
'received': float(received.split()[1]),
'duration': float(duration.split()[1]),
}
elif 'Bytes per second' in line:
sent = re.search(r'sent \d+', line).group()
received = re.search(r'received \d+', line).group()
data['speeds'] = {
'sent': float(sent.split()[1]),
'received': float(received.split()[1]),
}
return data
def run_ssh_exec(command=None, hostname=None, username=None, keyfile=None):
'''Use ansible's connection plugin to execute the command'''
with mock.patch('ansible.plugins.connection.ssh.display', MockLogger):
pc = MockPlayContext()
if hostname:
pc.remote_addr = hostname
if username:
pc.remote_user = username
if keyfile:
pc.private_key_file = keyfile
ssh = connection_loader.get('ssh', pc, None)
(rc, so, se) = ssh.exec_command(command)
return (
rc,
so.decode('utf-8'),
se.decode('utf-8')
)
def run_ssh_cmd(SSHCMD, command=None, hostname=None, username=None, use_selectors=False):
'''Run the command with subprocess and communicate or selectors'''
if not use_selectors:
p = subprocess.Popen(
' '.join(SSHCMD),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
(so, se) = p.communicate()
return (p.returncode, so.decode('utf-8'), se.decode('utf-8'))
else:
# This is kinda how ansible runs ssh commands ...
logger.info('using selectors ...')
p = subprocess.Popen(
' '.join(SSHCMD),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
for fd in (p.stdout, p.stderr):
fcntl.fcntl(
fd,
fcntl.F_SETFL,
fcntl.fcntl(
fd,
fcntl.F_GETFL
) | os.O_NONBLOCK
)
states = [
'awaiting_prompt',
'awaiting_escalation',
'ready_to_send',
'awaiting_exit'
]
state = states.index('ready_to_send')
state += 1
selector = selectors.DefaultSelector()
selector.register(p.stdout, selectors.EVENT_READ)
selector.register(p.stderr, selectors.EVENT_READ)
timeout = 0
events = None
b_stdout = b_stderr = b''
b_tmp_stdout = b_tmp_stderr = b''
try:
counter = 0
while True:
counter += 1
if counter == 1:
time.sleep(2)
poll = p.poll()
events = selector.select(timeout)
if not events:
if state <= states.index('awaiting_escalation'):
if poll is not None:
break
p.terminate()
raise AnsibleError('timeout')
for key, event in events:
if key.fileobj == p.stdout:
b_chunk = p.stdout.read()
logger.debug('b_chunk %s' % b_chunk)
if b_chunk == b'':
selector.unregister(p.stdout)
timeout = 1
b_tmp_stdout += b_chunk
elif key.fileobj == p.stderr:
b_chunk = p.stderr.read()
logger.debug('b_chunk %s' % b_chunk)
if b_chunk == b'':
selector.unregister(p.stderr)
b_tmp_stderr += b_chunk
if state < states.index('ready_to_send'):
if b_tmp_stdout:
b_stdout += b_tmp_stdout
if b_tmp_stderr:
b_stderr += b_tmp_stderr
else:
b_stdout += b_tmp_stdout
b_stderr += b_tmp_stderr
b_tmp_stdout = b_tmp_stderr = b''
if states[state] == 'awaiting_prompt':
state += 1
if states[state] == 'awaiting_escalation':
state += 1
if states[state] == 'ready_to_send':
state += 1
if poll is not None:
if not selector.get_map() or not events:
break
timeout = 0
continue
elif not selector.get_map():
p.wait()
break
logger.debug(counter)
logger.debug(state)
logger.debug(states[state])
logger.debug(poll)
logger.debug(selector.get_map())
logger.debug(events)
finally:
selector.close()
return (
p.returncode,
b_stdout.decode('utf-8'),
b_stderr.decode('utf-8')
)
##########################################
# MAIN
##########################################
def main():
global SSHCMD
parser = OptionParser()
parser.add_option('--iterations', type=int, default=10)
parser.add_option('--controlpersist', action='store_true')
parser.add_option('--selectors', action='store_true')
parser.add_option('--use_plugin', action='store_true')
parser.add_option('--vcount', type=int, default=None)
parser.add_option('--debug', action='store_true')
parser.add_option('--hostname', default=None)
parser.add_option('--username', default=None)
parser.add_option('--keyfile', default=None)
parser.add_option('--command', default=None)
(options, args) = parser.parse_args()
if not options.debug:
logger.setLevel('INFO')
# munge the example ssh command if not using the connection plugin
if not options.use_plugin:
validate_control_socket(SSHCMD)
if not options.controlpersist:
SSHCMD = remove_control_persist(SSHCMD)
if options.hostname:
SSHCMD = set_hostname(SSHCMD, options.hostname)
if options.username:
SSHCMD = set_username(SSHCMD, options.username)
if options.keyfile:
SSHCMD = set_keyfile(SSHCMD, options.keyfile)
if options.vcount is not None:
SSHCMD = set_vcount(SSHCMD, count=options.vcount)
if options.command is not None:
SSHCMD[-1] = '/bin/sh -c "%s"' % options.command
logger.info(SSHCMD)
# run the command X times and record the durations + speeds
durations = []
for x in range(0, options.iterations):
logger.info('iteration %s' % x)
start = datetime.now()
if options.use_plugin:
(rc, so, se) = run_ssh_exec(
command=options.command,
hostname=options.hostname,
username=options.username,
keyfile=options.keyfile,
)
else:
(rc, so, se) = run_ssh_cmd(
SSHCMD,
hostname=options.hostname,
username=options.username,
use_selectors=options.selectors
)
stop = datetime.now()
durations.append(stop - start)
stats = extract_speed_from_stdout(se)
logger.info('transfer stats ...')
for k, v in stats.items():
for k2, v2 in v.items():
logger.info('%s.%s = %s' % (k, k2, v2))
logger.info('rc: %s' % rc)
logger.info('so:%s' % so.strip())
if rc != 0:
logger.error(se)
logger.error('sshcmd: %s' % ' '.join(SSHCMD))
durations = [x.total_seconds() for x in durations]
logger.info('durations ...')
for idx, x in enumerate(durations):
logger.info('%s. %s' % (idx, x))
logger.info('duration min: %s' % min(durations))
logger.info('duration max: %s' % max(durations))
avg = sum(durations) / float(len(durations))
logger.info('duration avg: %s' % avg)
if __name__ == "__main__":
main()
```
#### File: playbooks/files/log_aggregator.py
```python
import datetime
import os
import json
from flask import Flask
from flask import request
app = Flask(__name__)
fixtures = '/tmp/fixtures'
def get_ts():
ts = datetime.datetime.now().isoformat()
ts = ts.replace('-','_')
ts = ts.replace(':', '_')
ts = ts.replace('.', '_')
return ts
@app.route('/', methods=['GET', 'POST'])
def index():
print('/')
print(request.headers)
print(request.data)
'''
ts = datetime.datetime.now().isoformat()
ts = ts.replace('-','_')
ts = ts.replace(':', '_')
ts = ts.replace('.', '_')
'''
ts = get_ts()
logfile = os.path.join(fixtures, 'entries.log')
if request.data:
logger_name = 'null_logger'
host_name = 'null'
for x in [(request.data, 'data'), (request.headers, 'headers')]:
isjson = False
data = x[0]
if x[1] == 'data':
data = json.loads(data)
else:
data = dict(data)
if 'logger_name' in data:
logger_name = data['logger_name']
if 'host_name' in data:
host_name = data['host_name']
dd = os.path.join(fixtures, logger_name)
if not os.path.isdir(dd):
os.makedirs(dd)
df = os.path.join(dd, '%s_%s.%s.json' % (host_name, ts, x[1]))
with open(df, 'w') as f:
f.write(json.dumps(data, indent=2, sort_keys=True))
f.write('\n')
with open(logfile, 'a') as f:
f.write('%s %s\n' % (datetime.datetime.now().isoformat(), df))
return ''
@app.route('/checkpoint/<path:path>', methods=['GET', 'POST'])
def checkpoint(path):
print(path)
ts = get_ts()
logfile = os.path.join(fixtures, 'entries.log')
with open(logfile, 'a') as f:
f.write('%s checkpoint %s\n' % (datetime.datetime.now().isoformat(), path))
return ''
@app.route('/<path:path>', methods=['GET', 'POST'])
def abstract_path(path):
print(path)
return ''
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
```
|
{
"source": "jctanner/github-test-proxy",
"score": 2
}
|
#### File: github-test-proxy/github_test_proxy/cacher.py
```python
import datetime
import glob
import gzip
import hashlib
import json
import os
import subprocess
import requests
from logzero import logger
#BASEURL = 'http://localhost:5000'
ERROR_TIMER = 0
TOKENS = {
'AAA': '<PASSWORD>'
}
ANSIBLE_PROJECT_ID = u'573f79d02a8192902e20e34b'
SHIPPABLE_URL = u'https://api.shippable.com'
ANSIBLE_PROVIDER_ID = u'562dbd9710c5980d003b0451'
ANSIBLE_RUNS_URL = u'%s/runs?projectIds=%s&isPullRequest=True' % (
SHIPPABLE_URL,
ANSIBLE_PROJECT_ID
)
DEFAULT_ETAG = 'a00049ba79152d03380c34652f2cb612'
# https://elasticread.eng.ansible.com/ansible-issues/_search
# https://elasticread.eng.ansible.com/ansible-pull-requests/_search
# ?q=lucene_syntax_here
# _search accepts POST
########################################################
# MOCK
########################################################
class RequestNotCachedException(Exception):
pass
def get_timestamp():
# 2018-10-15T21:21:48.150184
# 2018-10-10T18:25:49Z
ts = datetime.datetime.now().isoformat()
ts = ts.split('.')[0]
ts += 'Z'
return ts
def run_command(cmd):
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(so, se) = p.communicate()
return (p.returncode, so, se)
def read_gzip_json(cfile):
try:
with gzip.open(cfile, 'r') as f:
jdata = json.loads(f.read())
except json.decoder.JSONDecodeError as e:
logger.error(e)
import epdb; epdb.st()
return jdata
def write_gzip_json(cfile, data):
with gzip.open(cfile, 'wb') as f:
f.write(json.dumps(data).encode('utf-8'))
class ProxyCacher:
BASEURL = 'http://localhost:5000'
TOKEN = None
SHIPPABLE_TOKEN = None
# make remote calls to github for uncached data
proxy = False
# use local ondisk cache from fixtures+deltas
usecache = False
# where to store and load the data fetched from github
fixturedir = '/tmp/bot.fixtures'
# where to store the new events created by POST
deltadir = '/tmp/bot.deltas'
def __init__(self):
pass
@property
def is_proxy(self):
if self.proxy:
return True
return False
def tokenized_request(
self,
url,
data=None,
method='GET',
headers=None,
pages=None,
paginate=True,
pagecount=0
):
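# Fetch `url` from the GitHub API with token auth and the preview Accept headers
# below, following 'Link' pagination (optionally capped by `pages`) unless
# paginate=False; returns (response_headers, json_data).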
logger.info('(FETCH) [%s] %s' % (method, url))
_headers = {}
if self.TOKEN:
_headers['Authorization'] = 'token %s' % self.TOKEN
# reactions
accepts = [
u'application/json',
u'application/vnd.github.mockingbird-preview',
u'application/vnd.github.sailor-v-preview+json',
u'application/vnd.github.starfox-preview+json',
u'application/vnd.github.v3+json',
u'application/vnd.github.squirrel-girl-preview+json'
]
_headers['Accept'] = ','.join(accepts)
if headers is not None:
for k, v in headers.items():
_headers[k] = v
if method == 'GET':
logger.info('GET %s' % url)
rr = requests.get(url, headers=_headers)
elif method == 'POST':
logger.info('POST %s' % url)
rr = requests.post(url, data=data, headers=_headers)
if rr.headers.get('Status') == '204 No Content':
data = None
else:
try:
data = rr.json()
except json.decoder.JSONDecodeError as e:
logger.error(e)
import epdb; epdb.st()
rheaders = dict(rr.headers)
if not paginate:
return (rheaders, data)
# exit early if enough pages were collected
pagecount += 1
if pages and pagecount >= pages:
return (rheaders, data)
if 'Link' in rheaders:
links = self.extract_header_links(rheaders)
if links.get('next'):
logger.debug('NEXT: %s' % links.get('next'))
(_headers, _data) = self.tokenized_request(links['next'], pagecount=pagecount)
data += _data
return (rheaders, data)
# CACHED PROXY
def cached_tokenized_request(
self,
url,
data=None,
method='GET',
headers=None,
pages=None,
pagecount=0,
context='api.github.com'
):
'''fetch a raw github api url, cache the result, munge it and send it back'''
rdata = None
loaded = False
path = url.replace('https://%s/' % context, '')
path = path.split('/')
if path[-1] != 'graphql':
dtype = path[-1]
path = '/'.join(path[:-1])
fixdir = os.path.join(self.fixturedir, context, path)
else:
fixdir = os.path.join(self.fixturedir, context, 'graphql')
m = hashlib.md5()
m.update(data)
dtype = m.hexdigest()
if self.usecache:
try:
rheaders, rdata = self.read_fixture(fixdir, dtype)
loaded = True
except RequestNotCachedException:
pass
# add new data locally
if method in ['POST', 'UPDATE', 'DELETE'] and path[-1] != 'graphql':
jdata = data
try:
jdata = json.loads(data)
except json.decoder.JSONDecodeError:
pass
self.handle_change(context, url, headers, data, method=method)
#import epdb; epdb.st()
return {}, {}
if not loaded and method == 'GET' and not self.is_proxy:
# issues without labels won't have a labels file, so we have to return empty data
# get the issue data for verification
if url.endswith('/labels'):
iheaders, idata = self.get_cached_issue_data(url=url)
if not idata['labels']:
return {}, []
else:
print('HUH?')
import epdb; epdb.st()
# merge in the deltas
#if loaded and not self.is_proxy and method == 'GET':
# rdata = self.get_changes(context, url, rdata)
if self.usecache:
rdata = self.get_changes(context, url, rdata)
if not loaded and self.is_proxy:
rheaders, rdata = self.tokenized_request(
url,
data=data,
method=method,
headers=headers,
pages=pages,
pagecount=pagecount,
paginate=False
)
if not os.path.exists(fixdir):
os.makedirs(fixdir)
self.write_fixture(fixdir, dtype, rdata, rheaders, compress=True)
loaded = True
if not loaded:
raise Exception(
'%s was not cached and the server is not in proxy mode' % url
)
new_headers = self.replace_data_urls(rheaders)
new_data = self.replace_data_urls(rdata)
logger.debug('returning from cached_tokenized_request')
return new_headers, new_data
def get_cached_issue_data(self, namespace=None, repo=None, number=None, url=None):
# https://api.github.com/repos/ansible/ansible/issues/55062/labels
urlparts = url.split('/')
numix = None
for idx, urlpart in enumerate(urlparts):
if urlpart.isdigit():
numix = idx
break
diskpath = urlparts[2:numix]
fixdir = os.path.join(self.fixturedir, '/'.join(diskpath))
(headers, data) = self.read_fixture(fixdir, urlparts[numix])
return (headers, data)
def get_changes(self, context, url, data):
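# Merge locally recorded delta events (written by handle_change below) into the
# cached issue/PR payload so labels, comment counts and updated_at reflect any
# POST/DELETE calls previously made against this proxy.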
path = url.replace('https://%s/' % context, '')
path = path.split('/')
if not 'issues' in path and not 'issue' in path and not 'pull' in path and not 'pulls' in path:
return data
numix = None
for idx, _path in enumerate(path):
if _path.isdigit():
numix = idx
break
if numix is None:
import epdb; epdb.st()
inumber = path[numix]
dtype = path[-1]
_path = '/'.join(path[:numix+1])
fixdir = os.path.join(self.deltadir, context, _path)
if not os.path.exists(fixdir):
return data
efile = os.path.join(fixdir, 'events.json')
if not os.path.exists(efile):
return data
with open(efile, 'r') as f:
events = json.loads(f.read())
dtype = None
if url.endswith(inumber):
dtype = 'issue'
elif url.endswith('events'):
dtype = 'events'
elif url.endswith('comments'):
dtype = 'comments'
for event in events:
if dtype == 'events':
data.append(event)
continue
if dtype == 'comments' and event['event'] == 'commented':
data.append(event)
continue
if dtype == 'comments' and event['event'] != 'commented':
continue
if dtype == 'issue':
data['updated_at'] = event['created_at']
if event['event'] == 'labeled':
found = False
for label in data['labels']:
if label['name'] == event['label']['name']:
found = True
break
if not found:
data['labels'].append({'name': event['label']['name']})
elif event['event'] == 'unlabeled':
found = False
for label in data['labels']:
if label['name'] == event['label']['name']:
found = label
break
if found:
data['labels'].remove(found)
elif event['event'] == 'commented':
data['comments'] += 1
#else:
# import epdb; epdb.st()
continue
#import epdb; epdb.st()
#import epdb; epdb.st()
return data
def handle_change(self, context, url, headers, data, method=None):
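# Record POST/UPDATE/DELETE label and comment operations as synthetic events in
# <deltadir>/.../events.json; get_changes above replays them on top of the fixtures.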
# GET POST UPDATE DELETE
path = url.replace('https://%s/' % context, '')
path = path.split('/')
jdata = None
try:
jdata = json.loads(data)
except Exception:
pass
if method.lower() == 'delete':
if path[-2] == 'labels':
jdata = [path[-1]]
path = path[:-1]
else:
import epdb; epdb.st()
dtype = path[-1]
_path = '/'.join(path[:-1])
fixdir = os.path.join(self.deltadir, context, _path)
if not os.path.exists(fixdir):
os.makedirs(fixdir)
#fixfile = os.path.join(fixdir, '%s.json' % path[-1])
efile = os.path.join(fixdir, 'events.json')
#ldata = []
#if os.path.exists(fixfile):
# with open(fixfile, 'r') as f:
# ldata = json.loads(f.read())
edata = []
if os.path.exists(efile):
with open(efile, 'r') as f:
edata = json.loads(f.read())
if path[-1] == 'labels':
#jdata = json.loads(data)
if isinstance(jdata, dict) and 'labels' in jdata:
labels = jdata['labels']
else:
labels = jdata[:]
for label in labels:
thisevent = self.get_new_event()
thisevent['actor']['login'] = 'ansibot'
thisevent['actor']['url'] = 'https://api.github.com/users/ansibot'
thisevent['user']['login'] = 'ansibot'
thisevent['user']['url'] = 'https://api.github.com/users/ansibot'
if method.lower() == 'post':
thisevent['event'] = 'labeled'
elif method.lower() == 'delete':
thisevent['event'] = 'unlabeled'
thisevent['label'] = {'name': label}
edata.append(thisevent)
elif path[-1] == 'comments':
#jdata = json.loads(data)
thisevent = self.get_new_event()
thisevent['actor']['login'] = 'ansibot'
thisevent['actor']['url'] = 'https://api.github.com/users/ansibot'
thisevent['user']['login'] = 'ansibot'
thisevent['user']['url'] = 'https://api.github.com/users/ansibot'
thisevent['event'] = 'commented'
thisevent['body'] = jdata['body']
edata.append(thisevent)
else:
import epdb; epdb.st()
with open(efile, 'w') as f:
f.write(json.dumps(edata, indent=2))
def get_new_event(self):
thisevent = {
'id': None,
'node_id': None,
'url': None,
'actor': {
'login': None,
'url': None,
},
'user': {
'login': None,
'url': None
},
'event': None,
'commit_id': None,
'commit_url': None,
'created_at': datetime.datetime.now().isoformat(),
}
return thisevent
def extract_header_links(self, headers):
links = {}
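# A GitHub 'Link' header looks roughly like (illustrative):
# <https://api.github.com/...?page=2>; rel="next", <https://api.github.com/...?page=34>; rel="last"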
for line in headers['Link'].split(','):
parts = line.split(';')
rel = parts[-1].split('"')[1]
link = parts[0].replace('<', '').replace('>', '').strip()
links[rel] = link
#import epdb; epdb.st()
return links
def fetch_first_issue_number(self, org, repo):
iurl = 'https://api.github.com/repos/%s/%s/issues' % (org, repo)
(issues_headers, issues) = self.tokenized_request(iurl, pages=1)
return issues[0]['number']
def get_issue_fixture(self, org, repo, number, ftype=None):
'''Read the fixture(s) from disk and send them back'''
logger.info('load %s %s %s' % (org, repo, number))
number = int(number)
bd = os.path.join(self.fixturedir, 'repos', org, repo, str(number))
fns = sorted(glob.glob('%s/*' % bd))
fns = [x for x in fns if ftype in os.path.basename(x)]
result = None
headers = None
for fn in fns:
if fn.endswith('.gz'):
data = read_gzip_json(fn)
else:
with open(fn, 'r') as f:
try:
data = json.loads(f.read())
except ValueError as e:
logger.error('unable to parse %s' % fn)
raise Exception(e)
data = self.replace_data_urls(data)
if '.headers' in fn:
headers = data.copy()
else:
result = data.copy()
return headers, result
def replace_data_urls(self, data):
'''Point ALL urls back to this instance instead of the origin'''
data = json.dumps(data)
data = data.replace('https://api.github.com', self.BASEURL)
data = data.replace('https://github.com', self.BASEURL)
data = data.replace('https://api.shippable.com', self.BASEURL)
data = data.replace('https://app.shippable.com', self.BASEURL)
data = json.loads(data)
return data
def read_fixture(self, directory, fixture_type):
hfn = os.path.join(directory, '%s.headers.json' % fixture_type)
if not os.path.exists(hfn):
hfn += '.gz'
if not os.path.exists(hfn):
raise RequestNotCachedException
logger.debug('read %s' % hfn)
headers = read_gzip_json(hfn)
else:
logger.debug('read %s' % hfn)
with open(hfn, 'r') as f:
headers = json.loads(f.read())
dfn = os.path.join(directory, '%s.json' % fixture_type)
if not os.path.exists(dfn):
dfn += '.gz'
if not os.path.exists(dfn):
raise RequestNotCachedException
logger.debug('read %s' % dfn)
data = read_gzip_json(dfn)
else:
logger.debug('read %s' % dfn)
with open(dfn, 'r') as f:
data = json.loads(f.read())
return headers, data
def write_fixture(self, directory, fixture_type, data, headers, compress=False):
if not os.path.exists(directory):
os.makedirs(directory)
if compress:
hfn = os.path.join(directory, '%s.headers.json.gz' % fixture_type)
write_gzip_json(hfn, headers)
dfn = os.path.join(directory, '%s.json.gz' % fixture_type)
write_gzip_json(dfn, data)
else:
with open(os.path.join(directory, '%s.json' % fixture_type), 'w') as f:
f.write(json.dumps(data, indent=2, sort_keys=True))
with open(os.path.join(directory, '%s.headers.json' % fixture_type), 'w') as f:
f.write(json.dumps(headers, indent=2, sort_keys=True))
```
#### File: github-test-proxy/github_test_proxy/webapp.py
```python
import argparse
import datetime
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
import requests
import six
import subprocess
import time
from logzero import logger
from pprint import pprint
from flask import Flask
from flask import jsonify
from flask import request
from github_test_proxy.cacher import ProxyCacher
GM = ProxyCacher()
#app = Flask(__name__)
app = Flask('test')
########################################################
# ROUTES
########################################################
@app.route('/rate_limit')
def rate_limit():
reset = int(time.time()) + 10
rl = {
'resources': {
'core': {
'limit': 5000,
'remaining': 5000,
'reset': reset
}
},
'rate': {
'limit': 5000,
'remaining': 5000,
'reset': reset
}
}
return jsonify(rl)
@app.route('/<path:path>', methods=['GET', 'POST', 'DELETE', 'UPDATE'])
def abstract_path(path):
# 127.0.0.1 - - [12/Apr/2019 13:54:04] "GET /repos/ansible/ansible/git/commits/6a7ba80b421da4e3fe70badb67c1164e6ea5d75e HTTP/1.1" 200 -
# 127.0.0.1 - - [12/Apr/2019 13:54:06] "DELETE /repos/ansible/ansible/issues/55055/labels/needs_revision HTTP/1.1" 405 -
logger.info('# ABSTRACT PATH! - %s' % path)
path_parts = path.split('/')
logger.info(six.text_type((len(path_parts),path_parts)))
logger.info(request.path)
# context defines the baseurl
thiscontext = None
if path_parts[0] in ['jobs', 'runs']:
thiscontext = 'api.shippable.com'
else:
thiscontext = 'api.github.com'
# tell the mocker what the real url should be
thisurl = request.url.replace(
'http://localhost:5000',
'https://%s' % thiscontext
)
thisurl = thisurl.replace(
'http://localhost:6000',
'https://%s' % thiscontext
)
logger.debug('thisurl: %s' % thisurl)
headers, data = GM.cached_tokenized_request(
thisurl,
method=request.method.upper(),
data=request.data,
context=thiscontext
)
logger.info('finished cached_tokenized_request')
pprint(data)
resp = jsonify(data)
whitelist = ['ETag', 'Link']
for k,v in headers.items():
if not k.startswith('X-') and k not in whitelist:
continue
resp.headers.set(k, v)
logger.debug('response data: %s' % data)
#pprint(dict(resp.headers))
return resp
def main():
action_choices = [
'load', # use fixtures but do not make requests
'proxy', # make requests and cache results
'smart', # use fixtures when possible
]
parser = argparse.ArgumentParser()
parser.add_argument('action', choices=action_choices,
help="which mode to run the proxy in")
parser.add_argument('--port', default=5000, type=int)
parser.add_argument('--debug', action='store_true')
parser.add_argument('--token', '--github_token', default=None)
parser.add_argument('--shippable_token', default=None)
parser.add_argument('--fixtures', '--fixturedir',
default='/tmp/github.proxy/fixtures',
help="where the fixtures are stored and loaded from")
parser.add_argument('--deltas', '--deltadir',
default='/tmp/github/deltas',
help="where to store changes from POST data")
args = parser.parse_args()
GM.deltadir = os.path.expanduser(args.deltas)
GM.fixturedir = os.path.expanduser(args.fixtures)
if args.action == 'proxy':
GM.proxy = True
GM.usecache = False
GM.TOKEN = args.token
GM.SHIPPABLE_TOKEN = args.shippable_token
elif args.action == 'smart':
GM.proxy = True
GM.usecache = True
GM.TOKEN = args.token
GM.SHIPPABLE_TOKEN = args.shippable_token
else:
GM.proxy = False
GM.usecache = True
GM.writedeltas = True
GM.BASEURL = 'http://localhost:%s' % args.port
app.run(debug=args.debug, host='0.0.0.0', port=args.port)
if __name__ == "__main__":
main()
```
|
{
"source": "jctanner/mockme",
"score": 3
}
|
#### File: jctanner/mockme/test_foo.py
```python
import unittest
from mock import patch
from lib.foo import foodoo
class TestFoo(unittest.TestCase):
def test_foodoo(self):
assert foodoo() == None
# make this pass!
def test_foodoo_withpatch(self):
assert foodoo() == "gotmilk?"
```
|
{
"source": "jctanner/odp-sandbox",
"score": 2
}
|
#### File: ansible/ambari_scripts/post-repo-url.py
```python
import json
import os
import requests
import shlex
import socket
import sys
# https://cwiki.apache.org/confluence/display/AMBARI/Blueprints#Blueprints-Step4:SetupStackRepositories%28Optional%29
'''
PUT /api/v1/stacks/:stack/versions/:stackVersion/operating_systems/:osType/repositories/:repoId
{
"Repositories" : {
"base_url" : "<CUSTOM_REPO_BASE_URL>",
"verify_base_url" : true
}
}
'''
'''
[vagrant@sandbox ~]$ curl -u admin:admin http://$(hostname -f):8080/api/v1/stacks/OD/versions/0.9/operating_systems/redhat7
{
"href" : "http://sandbox.odp.org:8080/api/v1/stacks/ODP/versions/0.9/operating_systems/redhat7",
"OperatingSystems" : {
"os_type" : "redhat7",
"stack_name" : "ODP",
"stack_version" : "0.9"
},
"repositories" : [
{
"href" : "http://sandbox.odp.org:8080/api/v1/stacks/ODP/versions/0.9/operating_systems/redhat7/repositories/ODP-0.9",
"Repositories" : {
"os_type" : "redhat7",
"repo_id" : "ODP-0.9",
"stack_name" : "ODP",
"stack_version" : "0.9"
}
},
{
"href" : "http://sandbox.odp.org:8080/api/v1/stacks/ODP/versions/0.9/operating_systems/redhat7/repositories/ODP-UTILS-1.1.0.20",
"Repositories" : {
"os_type" : "redhat7",
"repo_id" : "ODP-UTILS-1.1.0.20",
"stack_name" : "ODP",
"stack_version" : "0.9"
}
}
]
'''
def post_repo(stackname, stackversion, ostype, repoid, repourl):
# ODP 0.9 redhat7 ODP-0.9 http://repo.opendataplatform.org/repository/ODP/centos7/2.x/BUILDS/0.9.0.1-70
data = {
"Repositories" : {
"base_url" : "%s" % repourl,
"verify_base_url" : True
}
}
hostname = socket.gethostname()
headers = {'X-Requested-By': 'FOOBAR'}
baseurl = "http://%s:8080/api/v1" % (hostname)
baseurl += "/stacks/%s/versions/%s/operating_systems/%s/repositories/%s" % (stackname, stackversion, ostype, repoid)
print "# PUT --> %s" % baseurl
r = requests.put(baseurl, auth=('admin', 'admin'), data=json.dumps(data), headers=headers)
print "# %s" % r.status_code
for x in r.text.split('\n'):
print "# %s" % x
if __name__ == "__main__":
# ODP 0.9 redhat7 ODP-0.9 http://repo.opendataplatform.org/repository/ODP/centos7/2.x/BUILDS/0.9.0.1-70
# ODP 0.9 redhat7 ODP-UTILS-1.1.0.20 http://repo.opendataplatform.org/repository/ODP-UTILS-1.1.0.20/repos/centos7
print sys.argv
assert len(sys.argv) >= 6, "Usage: <SCRIPT> <stackname> <stackversion> <ostype> <repoid> <repourl>"
stackname = sys.argv[1]
stackversion = sys.argv[2]
ostype = sys.argv[3]
repoid = sys.argv[4]
repourl = sys.argv[5]
post_repo(stackname, stackversion, ostype, repoid, repourl)
```
#### File: ansible/ambari_scripts/stop_services.py
```python
import json
import os
import requests
import shlex
import socket
import sys
import time
from api_common import get_services
from api_common import poll_request
def stop_services(cluster_name):
services = get_services(cluster_name, output=False)
headers = {'X-Requested-By': 'AMBARI'}
payload = {"ServiceInfo": {"state" : "INSTALLED"}}
for x in services:
# skip services without rest endpoints
if not 'href' in x:
continue
thispayload = {'RequestInfo': {'context': 'Stop %s from API' % x['ServiceInfo']['service_name']},
'Body': payload}
print x
r = requests.put(x['href'], auth=('admin', 'admin'),
data=json.dumps(thispayload), headers=headers)
print "# %s" % r.status_code
print "# %s" % r.text
# null text means the service is already running
if not r.text:
continue
rdict = json.loads(r.text)
#"href" : "http://sandbox.odp.org:8080/api/v1/clusters/ODP_Sandbox/requests/7"
#import pdb; pdb.set_trace()
print "# Polling %s shutdown" % x['ServiceInfo']['service_name']
poll_request(rdict['href'])
if __name__ == "__main__":
assert len(sys.argv) >= 2, "Usage: <scriptname> <cluster-name>"
clustername = sys.argv[1]
stop_services(clustername)
```
|
{
"source": "jctanner/odp-scripts",
"score": 3
}
|
#### File: odp-scripts/jartools/jardumper.py
```python
import json
import os
import sys
import subprocess
import tempfile
def run_command(cmd):
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(so, se) = p.communicate()
return (p.returncode, so, se)
def which(cmd):
''' Get the path for a command '''
cmd = "which %s" % cmd
(rc, so, se) = run_command(cmd)
return so.strip()
def listjarcontents(jarfile):
# jar tf ~/jars/commons-io-2.4.jar
jarfiles = []
jarcmd = which('jar')
thiscmd = "%s tf %s" % (jarcmd, jarfile)
(rc, so, se) = run_command(thiscmd)
jarfiles = so.split('\n')
jarfiles = [x.strip() for x in jarfiles if x.strip()]
return jarfiles
def processjar(jarfile):
classes = {}
javap = which('javap')
# list files
jarfiles = listjarcontents(jarfile)
for jf in jarfiles:
if not jf.endswith('.class'):
continue
print jf
thiscmd = javap + ' -classpath ' + jarfile
thiscmd += ' ' + jf.replace('.class', '')
(rc, so, se) = run_command(thiscmd)
classes[jf] = so
#import pdb; pdb.set_trace()
#import pdb; pdb.set_trace()
return classes
def main():
print "hello world"
print sys.argv
jarA = sys.argv[1]
classes = processjar(jarA)
outfile = os.path.basename(jarA)
outfile = outfile.replace('.jar', '.data')
with open(outfile, 'wb') as f:
f.write(json.dumps(classes,indent=2))
if __name__ == "__main__":
main()
```
|
{
"source": "jctanner/python-examples",
"score": 3
}
|
#### File: jctanner/python-examples/multiprocessing_subprocess.py
```python
import os
import sys
import subprocess
from multiprocessing import Process, Queue
def run_command_live(args, cwd=None, shell=True, checkrc=False, workerid=None):
""" Show realtime output for a subprocess """
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=cwd,
shell=shell)
pid = p.pid
so = ""
se = ""
while p.poll() is None:
lo = p.stdout.readline() # This blocks until it receives a newline.
sys.stdout.write('worker[' + str(workerid) + '] (' + str(pid) + ') ' + lo)
so += lo
print p.stdout.read()
return (p.returncode, so, "", pid)
def mp_worker(input, output, options):
""" A worker is forked per command """
for command in iter(input.get, 'STOP'):
thispid = os.getpid()
print "worker[%s] --> command: %s" % (thispid, command)
(rc, so, se, pid) = run_command_live(command, workerid=thispid)
rdict = {
'command': command,
'rc': rc,
'so': so,
'se': se,
'pid': pid
}
output.put(rdict)
def mp_processor(commands, options={}):
""" Spawn processes for each command in a list and return the results """
NUMBER_OF_PROCESSES = len(commands)
# Create queues
task_queue = Queue()
done_queue = Queue()
# Add each command to the queue
for command in commands:
task_queue.put(command)
# Fork the processes
for i in range(NUMBER_OF_PROCESSES):
Process(target=mp_worker, args=(task_queue, done_queue, options)).start()
# Collect results
results = []
for i in range(NUMBER_OF_PROCESSES):
results.append(done_queue.get())
# End the queue
for i in range(NUMBER_OF_PROCESSES):
task_queue.put('STOP')
return results
if __name__ == "__main__":
cmd1 = "whoami"
cmd2 = "uname -a"
cmd3 = "last | head"
cmd4 = "for x in $(seq 1 10); do echo $x; sleep 1; done;"
cmd5 = "for x in $(seq 1 10); do echo $x; sleep 2; done;"
cmd6 = "for x in $(seq 1 10); do echo $x; sleep 3; done;"
commands = [cmd1, cmd2, cmd3, cmd4, cmd5, cmd6]
rdata = mp_processor(commands, options={})
```
|
{
"source": "JCTec/CourseTaker",
"score": 2
}
|
#### File: CourseTaker/question/serializers.py
```python
from rest_framework import serializers
from .models import *
class LessonSerializer(serializers.ModelSerializer):
class Meta:
model = Lesson
fields = ('id', 'title', 'description', 'course')
class AnswersSerializer(serializers.ModelSerializer):
class Meta:
model = Answers
fields = '__all__'
class QuestionSerializer(serializers.ModelSerializer):
class Meta:
model = Question
depth = 1
fields = '__all__'
class QuestionListSerializer(serializers.ModelSerializer):
class Meta:
model = Question
depth = 1
fields = '__all__'
def to_representation(self, obj):
serializers = QuestionSerializer(obj)
answers_object = Answers.objects.filter(question=obj)
answers = AnswersSerializer(answers_object, many=True)
response = serializers.data
response['answers'] = answers.data
return response
class AnswersSerializerStudent(serializers.ModelSerializer):
class Meta:
model = Answers
fields = ('id', 'value')
class QuestionSerializerStudent(serializers.ModelSerializer):
class Meta:
model = Question
depth = 1
fields = ('id', 'value', 'score', 'lesson')
class QuestionListSerializerStudent(serializers.ModelSerializer):
class Meta:
model = Question
depth = 1
fields = '__all__'
def to_representation(self, obj):
serializers = QuestionSerializerStudent(obj)
answers_object = Answers.objects.filter(question=obj)
answers = AnswersSerializerStudent(answers_object, many=True)
response = serializers.data
response['answers'] = answers.data
return response
```
#### File: CourseTaker/student/views.py
```python
from django.views.decorators.csrf import csrf_exempt
from rest_framework.status import (
HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND,
HTTP_200_OK
)
from rest_framework.decorators import api_view, renderer_classes
from rest_framework.response import Response
from rest_framework.views import APIView
from django.shortcuts import get_object_or_404
from django.db.models import Subquery
from django.db.models import Q
from .models import *
from course.models import Course
from course.serializers import CourseSerializer
from question.models import Lesson
from question.serializers import LessonSerializer
from question.models import Question
from question.serializers import QuestionListSerializerStudent
from question.models import Answers
from question.models import LogQuestionUser
@csrf_exempt
@api_view(('GET',))
def courses(request):
user = request.user
courses_log = FinishedCourses.objects.filter(user=user.id)
last_log = courses_log.last()
if last_log is not None:
course_objects = Course.objects.filter(id=Subquery(courses_log.values('course')))
last_cours = Course.objects.get(pk=last_log.course.id)
courses_data = CourseSerializer(course_objects, many=True).data
next_courses_data = None
if last_cours.next is not None:
next_courses_data = CourseSerializer(last_cours.next).data
response = {'taken': courses_data, 'next': next_courses_data}
return Response(response, status=HTTP_200_OK, content_type="application/json")
else:
next = Course.objects.filter(prev=None).first()
courses_data = CourseSerializer(next).data
response = {'next': courses_data}
return Response(response, status=HTTP_200_OK, content_type="application/json")
@csrf_exempt
@api_view(('GET',))
def lessons(request, course):
user = request.user
course_object = get_object_or_404(Course, pk=course)
prev = course_object.prev
if prev is not None:
courses_log = FinishedCourses.objects.filter(user=user.id, course=prev.id).first()
if courses_log is not None:
lessons = Lesson.objects.filter(course=course_object.id)
lessons_data = LessonSerializer(lessons, many=True).data
return Response(lessons_data, status=HTTP_200_OK, content_type="application/json")
else:
return Response({'Error': 'Blocked Course'}, status=HTTP_200_OK, content_type="application/json")
else:
lessons = Lesson.objects.filter(course=course_object.id)
lessons_data = LessonSerializer(lessons, many=True).data
return Response(lessons_data, status=HTTP_200_OK, content_type="application/json")
@csrf_exempt
@api_view(('GET',))
def questions(request, lesson):
user = request.user
lesson_object = get_object_or_404(Lesson, pk=lesson)
prev = lesson_object.prev
if prev is not None:
lessons_log = LogScoreUser.objects.filter(user=user.id, lesson=prev.id).first()
if lessons_log is not None:
questions = Question.objects.filter(lesson=lesson_object.id)
question_data = QuestionListSerializerStudent(questions, many=True).data
return Response(question_data, status=HTTP_200_OK, content_type="application/json")
else:
return Response({'Error': 'Blocked Lesson'}, status=HTTP_200_OK, content_type="application/json")
else:
questions = Question.objects.filter(lesson=lesson_object.id)
question_data = QuestionListSerializerStudent(questions, many=True).data
return Response(question_data, status=HTTP_200_OK, content_type="application/json")
@csrf_exempt
@api_view(('POST',))
def answer(request, lesson):
user = request.user
lesson_object = get_object_or_404(Lesson, pk=lesson)
prev = lesson_object.prev
def check_questions(response):
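# `response` is expected to be a list of dicts shaped like
# {"question": <question_id>, "correct": [<answer_id>, ...]}
# (shape inferred from the lookups below).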
count = 0
print(response)
for question in response:
question_object = get_object_or_404(Question, pk=question['question'])
answers = Answers.objects.filter(question=question_object, correct=True).values('id')
            if question_object.type in (Question.A, Question.B, Question.D):
correct = False
array_to_compare = [item['id'] for item in answers]
if array_to_compare == question['correct']:
count += question_object.score
correct = True
loging = LogQuestionUser(user=user, question=question_object, lesson=lesson_object,
correct=correct, points=question_object.score)
loging.save()
elif question_object.type == Question.C:
correct = False
for item in question['correct']:
if item in answers:
count += question_object.score
correct = True
break
loging = LogQuestionUser(user=user, question=question_object, lesson=lesson_object,
correct=correct, points=question_object.score)
loging.save()
else:
return 0
return count
if prev is not None:
lessons_log = LogScoreUser.objects.filter(user=user.id, lesson=prev.id).first()
if lessons_log is not None:
response = request.data.get("response")
score = check_questions(response)
log = LogScoreUser(user=user, lesson=lesson_object, points=score)
log.save()
if lesson_object.next is None:
fin = FinishedCourses(user=user, course=lesson_object.course)
fin.save()
data = lesson_object.get_score(user)
return Response(data, status=HTTP_200_OK, content_type="application/json")
else:
return Response({'Error': 'Blocked Lesson'}, status=HTTP_200_OK, content_type="application/json")
else:
response = request.data.get("response")
score = check_questions(response)
log = LogScoreUser(user=user, lesson=lesson_object, points=score)
log.save()
if lesson_object.next is None:
fin = FinishedCourses(user=user, course=lesson_object.course)
fin.save()
data = lesson_object.get_score(user)
return Response(data, status=HTTP_200_OK, content_type="application/json")
```
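For reference, a hedged sketch of the payload the `answer` view above appears to expect: a `response` list pairing each question id with the chosen answer ids. The URL path, the ids and the host below are placeholders, and authentication is omitted.

```python
import requests  # any HTTP client works; requests is assumed to be installed

# Shape inferred from check_questions(): for single-answer question types the
# submitted list must equal the set of correct answer ids.
payload = {
    "response": [
        {"question": 12, "correct": [45]},       # hypothetical question/answer ids
        {"question": 13, "correct": [51, 52]},
    ]
}

# Hypothetical host and lesson id; a real call also needs a logged-in session or token.
resp = requests.post("http://localhost:8000/student/answer/7", json=payload)
print(resp.status_code, resp.json())
```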
|
{
"source": "JCTec/Recomender",
"score": 2
}
|
#### File: api/recomend/views.py
```python
import json
from common.ContentKNN import ContentKNN
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from django.conf import settings
from rest_framework.permissions import IsAuthenticated
from .models import *
from .serializers import *
class QuestionAPI(APIView):
def get(self, request):
try:
courses = Question.objects.all()
serializer = QuestionSerializer(courses, many=True)
return Response(dict(status="Success", data=serializer.data))
except Exception as e:
return Response(dict(status="Error", errors=['Not found', str(e)]), status.HTTP_500_INTERNAL_SERVER_ERROR)
class AnswersAPI(APIView):
def post(self, request):
try:
data = json.loads(request.body.decode('utf-8'))
answers = data["quiz"]
history = History.objects.all()
historic = [(json.loads(item.value), item.result.name) for item in history]
# TODO: Fit with historic
questions = Question.objects.filter(id__in=answers)
tags = {questions.get(pk=key).tag.name: val for key, val in answers.items()}
knn = ContentKNN()
# knn.fit()
prediction = knn.predict(tags)
try:
History(value=json.dumps(tags),
result=Subject.objects.filter(name=prediction[0]["name"]).first()).save()
except Exception as e:
print(e)
return Response(dict(status="Success", data=prediction))
except Exception as e:
return Response(dict(status="Error", errors=['Not found', str(e)]), status.HTTP_500_INTERNAL_SERVER_ERROR)
```
#### File: Recomender/common/ContentKNN.py
```python
from surprise import AlgoBase
from surprise import PredictionImpossible
from sklearn.linear_model import LogisticRegression
from Carreras import Carreras
import pandas as pd
import json
import math
import numpy as np
import heapq
class ContentKNN(AlgoBase):
def __init__(self, k=5, sim_options={}):
AlgoBase.__init__(self)
self.k = k
def fit(self, trainset):
AlgoBase.fit(self,trainset)
#Modelo de regresion Logit
datax = Carreras()
datax.loadCarreras()
habilidades = datax.getHabilidades()
train_df = pd.DataFrame(columns=datax.getHabilidadesList())
train_labels = []
for data in trainset:
train_df = train_df.append(data[0], ignore_index=True)
train_labels.append(datax.getCarreraID(data[1]))
#train_df.to_html('prueba.html')
train_df = train_df.dropna(axis=1)
print(train_df.shape)
self.model = LogisticRegression(multi_class='multinomial',solver='newton-cg')
self.model.fit(train_df,train_labels)
def computeHabilidadesSimilarity(self, carrera1, carrera2, habilidades):
habilidades1 = habilidades[carrera1]
habilidades2 = habilidades[carrera2]
sumxx, sumxy, sumyy = 0, 0, 0
for i in range(len(habilidades1)):
x = habilidades1[i]
y = habilidades2[i]
sumxx += x * x
sumyy += y * y
sumxy += x * y
return sumxy/math.sqrt(sumxx*sumyy)
def calculateSimHabilidades(self, carrera1, u, habilidades,habilidadesDict):
habilidades1 = habilidades[carrera1]
habilidades2 = u
sumxx, sumxy, sumyy = 0, 0, 0
for habilidad in habilidades2:
index = habilidadesDict[habilidad]
x = habilidades1[index]
y = 1 if habilidades2[habilidad] else 0
sumxx += x * x
sumyy += y * y
sumxy += x * y
if sumxx*sumyy == 0:
return 0
else:
return sumxy/math.sqrt(sumxx*sumyy)
def predict(self, array_usuarios, limit=3):
neighbors = []
data = Carreras()
data.loadCarreras()
habilidades = data.getHabilidades()
habilidades_dict = data.getHabiliadesdDict()
valuesToPredict = []
print(array_usuarios)
for habilidad in habilidades_dict:
if (habilidad in array_usuarios) and array_usuarios[habilidad]:
valuesToPredict.append(1)
else:
valuesToPredict.append(0)
# valuesToPredict = valuesToPredict[:len(valuesToPredict)-14]
# valuesToPredict = np.array(valuesToPredict).reshape(1,22)
valuesToPredict = np.array(valuesToPredict).reshape(1,len(list(habilidades_dict.keys())))
print(valuesToPredict.shape)
#print(self.model.predict_proba(valuesToPredict))
#print("Carrera recomendada por LOGIT model: " + data.getCarreraName(self.model.predict(valuesToPredict)[0]))
for carrera in range(1, len(habilidades) + 1):
user_x_carrera_similarity = self.calculateSimHabilidades(carrera,
array_usuarios,
habilidades,
habilidades_dict)
neighbors.append((user_x_carrera_similarity, carrera))
neighbors.sort(reverse=True)
k_neighbors = neighbors
k_neighbors = k_neighbors[:int(limit)]
predictedByLogit = self.model.predict(valuesToPredict)[0]
probabilities = self.model.predict_proba(valuesToPredict)
probabilities = probabilities.tolist()
probabilities.sort(reverse=True)
#print(predictedByLogit)
#print(probabilities[0][0])
copia_k_neighbors = []
# for i in range(len(k_neighbors)):
# if k_neighbors[i][1] == predictedByLogit:
# copia_k_neighbors[i][0] = k_neighbors[i][0] + (k_neighbors[i][0] + probabilities[0][0])/3
return [{'name': data.getCarreraName(item[1]), 'percentage': item[0]} for item in k_neighbors]
```
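A hedged usage sketch of ContentKNN, assuming the Carreras data files ship with the repo; the two career names and the synthetic training rows are placeholders (real rows come from the History table, as in AnswersAPI above).

```python
from common.ContentKNN import ContentKNN
from Carreras import Carreras

data = Carreras()
data.loadCarreras()
skills = data.getHabilidadesList()    # full skill list used as model features

# Two synthetic training rows; each must cover every skill so the feature
# columns line up, and the career names must exist in the Carreras data.
historic = [
    ({s: (i % 2 == 0) for i, s in enumerate(skills)}, "Ingenieria en Sistemas"),
    ({s: (i % 2 == 1) for i, s in enumerate(skills)}, "Arquitectura"),
]

knn = ContentKNN()
knn.fit(historic)                      # trains the internal logistic regression
print(knn.predict({skills[0]: True}))  # -> [{'name': ..., 'percentage': ...}, ...]
```

Note that AnswersAPI above currently calls predict() while the fit() call is left as a TODO, so the logistic-regression branch of predict() relies on the model having been fitted beforehand.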
|
{
"source": "JCThomas4214/ad2dispatch",
"score": 2
}
|
#### File: ad2dispatch/events/views.py
```python
from __future__ import print_function
import json
import urllib
from datetime import timedelta, datetime
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.shortcuts import render, redirect
from django.utils import timezone
from django.conf import settings
from pages.models import get_top_pages
from .models import Event, EventVolunteer, VolunteerType, do_unvolunteer, \
has_upcoming_vol, get_volunteers, get_running_events, position_is_full
from .forms import EventManageDateSelectForm
def upcoming_list(request):
upcoming_events = \
Event.objects.filter(
Q(date_time__lt=timezone.now() + timedelta(days=30),
date_time__gt=timezone.now()) |
Q(list_date__lte=timezone.now()),
date_time__gt=timezone.now()).order_by('date_time')
top_pages = get_top_pages()
context = {
'events': upcoming_events,
'top_pages': top_pages,
}
if request.method == 'POST':
# Verify user is logged in if volunteering
if not request.user.is_authenticated:
return redirect('/accounts/login/?next=%s' % request.path)
from userprofiles.models import Volunteer
try:
if not Volunteer.objects.get(user=request.user).is_populated():
context['flash_type'] = 'warning'
context['flash_message'] = \
'''You must first populate your
<a href="/accounts/profile/">profile</a>.'''
elif not Volunteer.objects.get(user=request.user).accepted_waiver:
context['flash_type'] = 'warning'
context['flash_message'] = \
'''Before volunteering you must accept the waiver in your
<a href="/accounts/profile/">profile</a>.'''
else:
# Parse and verify type
data = json.loads(
urllib.parse.unquote(
request.body.decode()
.split('&')[1]
.split('=')[0]
)
)
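                # The request body is assumed to look roughly like
                #   "csrfmiddlewaretoken=...&%7B%22event%22%3A12%2C%22type%22%3A1%7D=",
                # i.e. the name of the second form field is URL-encoded JSON such as
                # {"event": 12, "type": 1} (ids here are examples); the split/unquote
                # chain above recovers and decodes that JSON document.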
submitted_event = data['event']
if not isinstance(submitted_event, int):
raise TypeError
submitted_type = data['type']
if not isinstance(submitted_type, int):
raise TypeError
# Verify selected option is one that was presented
presented = False
for upcoming_event in upcoming_events:
if upcoming_event.pk == submitted_event:
presented = True
break
if not presented:
raise Exception('Option unavailable')
# Handle volunteer/unvolunteer
vol_event = EventVolunteer(
volunteer=request.user,
event=Event.objects.get(pk=submitted_event),
type=VolunteerType.objects.get(pk=submitted_type))
if not vol_event.has_volunteered():
if position_is_full(submitted_event, submitted_type):
context['flash_type'] = 'warning'
context['flash_message'] = \
'This position is already filled.'
else:
if vol_event.event.date_time.date() + timedelta(days=-1) == timezone.localdate():
context['flash_type'] = 'warning'
context['flash_message'] = \
'''If you volunteer on the same day as a shift,
please call or text the on-call phone:
<a href="{}">
{}</a>.'''.format(
settings.ORG_PHONE_ALT,
settings.ORG_PHONE_ALT_DISPLAY
)
vol_event.save()
else:
do_unvolunteer(vol_event)
except Volunteer.DoesNotExist:
context['flash_type'] = 'warning'
context['flash_message'] = \
'''You must first populate your
<a href="/accounts/profile/">profile</a>.'''
for event in upcoming_events:
if request.user.has_perm('events.view_hidden_events_volunteertype'):
event.type = VolunteerType.objects.all()
else:
event.type = VolunteerType.objects.filter(hidden=False)
for voltype in event.type:
voltype.volnum = Event.num_volunteers_type(event, voltype=voltype)
if request.user.is_authenticated:
if EventVolunteer.objects.filter(
volunteer=request.user,
event=event,
type=voltype).count() > 0:
voltype.me = True
if request.user.is_authenticated:
volunteer = {
'is_driver': has_upcoming_vol(
user=request.user, type='driver'),
'is_dispatcher': has_upcoming_vol(
user=request.user, type='dispatcher'),
}
context['volunteer'] = volunteer
return render(request, 'upcoming_list.html', context)
@login_required
def driver(request):
instructions = VolunteerType.objects.get(type='Driver').instructions
top_pages = get_top_pages()
volunteer = {
'is_driver': has_upcoming_vol(
user=request.user, type='driver'),
'is_dispatcher': has_upcoming_vol(
user=request.user, type='dispatcher'),
}
context = {
'instructions': instructions,
'top_pages': top_pages,
'volunteer': volunteer,
}
return render(request, 'driver.html', context)
@login_required
def dispatcher(request):
instructions = VolunteerType.objects.get(type='Dispatcher').instructions
top_pages = get_top_pages()
volunteer = {
'is_driver': has_upcoming_vol(
user=request.user, type='driver'),
'is_dispatcher': has_upcoming_vol(
user=request.user, type='dispatcher'),
}
volunteers = get_volunteers(get_running_events())
context = {
'instructions': instructions,
'volunteers': volunteers,
'top_pages': top_pages,
'volunteer': volunteer,
}
return render(request, 'dispatcher.html', context)
@login_required
def manage(request, start_date=None, end_date=None):
context = {}
if not request.user.has_perm('events.change_eventvolunteer'):
from django.core.exceptions import PermissionDenied
raise PermissionDenied
try:
start_date = datetime.strptime(start_date, "%d%b%y")
end_date = datetime.strptime(end_date, "%d%b%y")
except (ValueError, TypeError) as vtex:
start_date = None
end_date = None
print("Invalid date recieved:\n{}".format(vtex))
if start_date is None or end_date is None:
start_date = timezone.now() - timedelta(days=30)
end_date = timezone.now()
if request.method == 'POST':
filter_form = EventManageDateSelectForm(request.POST)
if filter_form.is_valid():
try:
start_date = datetime.strptime(
filter_form.cleaned_data['start_date'], "%d%b%y")
end_date = datetime.strptime(
filter_form.cleaned_data['end_date'], "%d%b%y")
except (ValueError, TypeError) as vtex:
print("Invalid date recieved:\n{}".format(vtex))
return redirect(
"/volunteer/manage/{}/{}".format(
start_date.strftime("%d%b%y"),
end_date.strftime("%d%b%y"))
)
else:
context['flash_type'] = 'warning'
context['flash_message'] = 'Could not parse dates'
else:
filter_form = EventManageDateSelectForm(initial={
'start_date': start_date.strftime("%d%b%y"),
'end_date': end_date.strftime("%d%b%y")
})
upcoming_events = Event.objects.filter(
Q(date_time__lt=end_date + timedelta(days=1),
date_time__gt=start_date)
).order_by('date_time')[:50]
# This is stupid-inefficient
for event in upcoming_events:
event.type = VolunteerType.objects.all()
for voltype in event.type:
voltype.vol = EventVolunteer.objects.filter(
event=event, type=voltype)
# RIP performance
for vol in voltype.vol:
vol.profile = vol.get_profile()
voltype.volnum = Event.num_volunteers_type(event, voltype=voltype)
# if request.user.is_authenticated:
# if EventVolunteer.objects.filter(
# volunteer=request.user,
# event=event,
# type=voltype).count() > 0:
# voltype.me = True
top_pages = get_top_pages()
context['events'] = upcoming_events
context['top_pages'] = top_pages
context['form'] = filter_form
return render(request, 'manage.html', context)
@login_required
def event(request, event_id):
if not request.user.has_perm('events.change_eventvolunteer'):
from django.core.exceptions import PermissionDenied
raise PermissionDenied
try:
selected = Event.objects.filter(id=event_id)
volunteers = get_volunteers(selected)
except Event.DoesNotExist:
from django.http import Http404
raise Http404("Event does not exist.")
top_pages = get_top_pages()
context = {
'top_pages': top_pages,
'selected': selected,
'volunteers': volunteers,
}
return render(request, 'event.html', context)
```
#### File: ad2dispatch/userprofiles/models.py
```python
import functools
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from events.models import EventVolunteer
class Volunteer(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
service = models.CharField(max_length=24, null=True,
blank=True,
default='USAF')
rank = models.CharField(max_length=16, null=True, blank=True)
phone_number = models.CharField(max_length=14, null=True)
phone_number_2 = models.CharField(max_length=14, null=True, blank=True)
vehicle_desc = models.CharField(max_length=128, null=True)
ready = models.BooleanField(default=False, editable=True,
help_text='If driver has marked themselves ' +
'as \'ready\' for the event duration.')
dispatched = models.BooleanField(default=False, editable=False)
council_pos = models.CharField(max_length=24, null=True, blank=True)
sup_name = models.CharField(max_length=64, null=True)
sup_phone = models.CharField(max_length=14, null=True)
location = models.DecimalField(max_digits=9, decimal_places=6,
null=True,
blank=True)
accepted_waiver = models.BooleanField(default=False)
@property
def hours_total(self):
hours = None
try:
hours = functools \
.reduce(lambda x, y: x+y,
[e_vols.event.duration.seconds // 3600 for e_vols in
EventVolunteer.objects.filter(volunteer=self.user)
if e_vols.event.date_time <= timezone.now()])
except TypeError as terr:
pass # Empty set
return hours
# @property
# def hours_yearly(self):
# return [e_vols.event.duration for e_vols in
# EventVolunteer.objects.get(volunteer=self)
# if e_vols.date_time > #the start date of the current year]
def is_populated(self):
if all([self.user, self.phone_number, self.vehicle_desc,
self.sup_name, self.sup_phone]):
return True
else:
return False
def is_ready(self):
if self.ready and not self.dispatched:
return True
return False
```
|
{
"source": "JCThomas4214/docker_secure_network",
"score": 2
}
|
#### File: JCThomas4214/docker_secure_network/setup.py
```python
import os
import sys
import argparse
import socket
import fcntl
import struct
import getpass
import configparser
from subprocess import run, Popen, PIPE
from dotenv import load_dotenv
from collections import OrderedDict
host_iface = None
class MultiOrderedDict(OrderedDict):
def __setitem__(self, key, value):
if isinstance(value, list) and key in self:
self[key].extend(value)
else:
super().__setitem__(key, value)
def restart_wireguard():
'''
Restarts the wireguard container
'''
with Popen(['docker-compose', 'ps'], stdout=PIPE, stderr=PIPE, stdin=PIPE) as pub:
running_containers = pub.communicate()
if 'wireguard' in str(running_containers):
run(['docker-compose', 'stop', 'wireguard'])
run(['docker-compose', 'start', 'wireguard'])
def update_services():
'''
Takes down containers, pulls the latest images, and brings them back up
'''
run(['docker-compose', 'down'])
run(['docker-compose', 'pull'])
run(['docker-compose', 'up', '-d'])
def next_ip_addr(ip_addrs: list = None, first_three_octets: list = None, forth_octet: int = 2):
    '''
    This only increments the last octet and does not account for anything else
    '''
    # None defaults avoid sharing (and mutating) a default list across calls
    if ip_addrs is None:
        ip_addrs = []
    if first_three_octets is None:
        first_three_octets = ['10', '200', '200']
    if len(ip_addrs) > 0:
first_three_octets = ip_addrs[0].split('.')[:-1]
forth_octet = int(ip_addrs.pop(0).split('.')[-1:][0]) + 1
for ip_addr in ip_addrs:
curr_forth_octet = ip_addr.split('.')[-1:][0]
if curr_forth_octet == str(forth_octet):
forth_octet += 1
first_three_octets.append(str(forth_octet))
return '.'.join(first_three_octets)
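# Worked example (values illustrative):
#   next_ip_addr()                                  -> '10.200.200.2'
#   next_ip_addr(['10.200.200.2'])                  -> '10.200.200.3'
#   next_ip_addr(['10.200.200.2', '10.200.200.3'])  -> '10.200.200.4'
# Only the last octet is incremented; octet overflow is not handled.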
def get_ip_address(ifname: str):
'''
Gets the ip address from a interface on the host
'''
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915,
struct.pack('256s', ifname[:15])
)[20:24])
def get_wireguard_public_key(private_key: bytearray):
'''
Generates a public key for wireguard and returns it
'''
with Popen(['wg', 'pubkey'], stdout=PIPE, stderr=PIPE, stdin=PIPE) as pub:
public_key = pub.communicate(input=private_key)[0].strip()
return public_key
def get_wireguard_keys():
'''
Generates a private key for wireguard and returns it
'''
with Popen(['wg', 'genkey'], stdout=PIPE, stderr=PIPE) as priv:
private_key = priv.communicate()[0].strip()
public_key = get_wireguard_public_key(private_key)
return dict(public_key=public_key, private_key=private_key)
def list_peers(iface: str):
'''
Wireguard keeps all peers listed in the interface config file. This function will read
this file for the interface passed in and print out all peers to stdout
'''
server_conf = configparser.RawConfigParser(dict_type=MultiOrderedDict, strict=False, empty_lines_in_values=False)
server_conf.read([f'etc-wireguard/{iface}.conf'])
publickeys = server_conf.get('Peer', 'PublicKey').split(os.linesep)
allowedips = server_conf.get('Peer', 'AllowedIPs').split(os.linesep)
print(f'\nThe following peers are allowed on interface {iface}\n')
for i, key in enumerate(publickeys):
print('[Peer]')
print(f'PublicKey = {key}')
print(f'AllowedIPs = {allowedips[i]}\n')
def delete_peer(iface: str, public_keys: list):
'''
    Wireguard keeps all peers listed in the interface config file. This function will read
    this file for the interface passed in and remove all public keys passed with the --delete-peer
    flag.
'''
load_dotenv()
new_config = configparser.ConfigParser()
new_config.optionxform = str
server_conf = configparser.RawConfigParser(dict_type=MultiOrderedDict, strict=False, empty_lines_in_values=False)
server_conf.read([f'etc-wireguard/{iface}.conf'])
try:
publickeys = server_conf.get('Peer', 'PublicKey').split(os.linesep)
allowedips = server_conf.get('Peer', 'AllowedIPs').split(os.linesep)
except configparser.NoSectionError as e:
sys.exit(e)
    # iterate over all public keys and delete them
for public_key in public_keys:
try:
i = publickeys.index(public_key)
except ValueError:
sys.exit('The public key was not found.')
del publickeys[i]
del allowedips[i]
try:
new_config['Interface'] = {
'PrivateKey': server_conf.get('Interface', 'PrivateKey'),
'Address': server_conf.get('Interface', 'Address'),
'ListenPort': os.getenv('WG_PORT'),
'PostUp': server_conf.get('Interface', 'PostUp'),
'PostDown': server_conf.get('Interface', 'PostDown')
}
except configparser.NoOptionError as e:
sys.exit(e)
with open(f'etc-wireguard/{iface}.conf', 'w') as new_conf_file:
new_config.write(new_conf_file)
for i, key in enumerate(publickeys):
with open(f'etc-wireguard/{iface}.conf', 'a') as new_conf_file:
new_config = configparser.ConfigParser()
new_config.optionxform = str
new_config['Peer'] = {
'PublicKey': key,
'AllowedIPs': allowedips[i]
}
new_config.write(new_conf_file)
restart_wireguard()
def create_peer_conf(iface:str, name: str):
'''
Create a client wireguard configuration file
'''
config = configparser.ConfigParser()
config.optionxform = str
keys = get_wireguard_keys()
private_key = keys['private_key']
server_conf = configparser.RawConfigParser(dict_type=MultiOrderedDict, strict=False, empty_lines_in_values=False)
server_conf.read([f'etc-wireguard/{iface}.conf'])
try:
allowedips = server_conf.get('Peer', 'AllowedIPs').split(os.linesep)
next_addr = next_ip_addr(allowedips)
except configparser.NoSectionError:
next_addr = next_ip_addr()
config['Interface'] = {
'PrivateKey': private_key.decode(),
'Address': input(f' - Input tunnel interface IP for {name} (default {next_addr}): ') or next_addr,
'DNS': server_conf['Interface']['Address'].replace('/24', '')
}
config['Peer'] = {
'PublicKey': get_wireguard_public_key(server_conf['Interface']['PrivateKey'].encode()).decode(),
'Endpoint': f'{os.getenv("PublicIP")}:{os.getenv("WG_PORT")}',
'AllowedIPs': '0.0.0.0/0, ::/0'
}
with open(f'{iface}-cli-{name}.conf', 'w') as conf_file:
config.write(conf_file)
return config
def add_wireguard_peer(iface: str, names: list):
'''
Creates a Peer section in the wireguard interface config file and creates a client
wireguard config file.
'''
load_dotenv()
config = configparser.ConfigParser()
config.optionxform = str
for name in names:
peer_conf = create_peer_conf(iface, name)
config['Peer'] = {
'PublicKey': get_wireguard_public_key(peer_conf['Interface']['PrivateKey'].encode()).decode(),
'AllowedIPs': peer_conf['Interface']['Address']
}
try:
with open(f'etc-wireguard/{iface}.conf', 'a') as conf_file:
config.write(conf_file)
except:
sys.exit('This interface config file does not exist.')
restart_wireguard()
def create_wireguard_conf():
'''
Create interface wireguard configuration file
'''
global host_iface
config = configparser.ConfigParser()
config.optionxform = str
wg_iface = input(' - WireGuard interface name? (default wg0) ') or 'wg0'
keys = get_wireguard_keys()
private_key = keys['private_key']
config['Interface'] = {
'PrivateKey': private_key.decode(),
'Address': input(' - WireGuard interface tunnel IPv4 address? (default 10.200.200.1/24) ') or '10.200.200.1/24',
        'ListenPort': input(' - WireGuard interface port? (default 51820) ') or '51820',
'PostUp': f'iptables -A FORWARD -i {wg_iface} -j ACCEPT; iptables -t nat -A POSTROUTING -o {host_iface} -j MASQUERADE; ip6tables -A FORWARD -i {wg_iface} -j ACCEPT; ip6tables -t nat -A POSTROUTING -o {host_iface} -j MASQUERADE',
'PostDown': f'iptables -D FORWARD -i {wg_iface} -j ACCEPT; iptables -t nat -D POSTROUTING -o {host_iface} -j MASQUERADE; ip6tables -D FORWARD -i {wg_iface} -j ACCEPT; ip6tables -t nat -D POSTROUTING -o {host_iface} -j MASQUERADE'
}
os.makedirs(os.path.dirname(f'etc-wireguard/{wg_iface}.conf'), exist_ok=True)
with open(f'etc-wireguard/{wg_iface}.conf', 'w') as conf_file:
config.write(conf_file)
# run(['sudo', 'chown', '-v', 'root:root', f'etc-wireguard/{wg_iface}.conf'])
run(['sudo', 'chmod', '660', f'etc-wireguard/{wg_iface}.conf'])
with open('.env', 'a+') as env_file:
env_str = (
f'INTERFACE={host_iface}\n'
F'WG_PORT={config["Interface"]["ListenPort"]}\n\n'
)
env_file.write(env_str)
def create_env_file():
'''
Creates the .env file used with docker-compose to stage container environment vars
'''
global host_iface
tmp = getpass.getpass(prompt=' - Pihole Web Password: ', stream=None)
webpass = tmp if tmp == getpass.getpass(prompt=' - Verify Pihole Web Password: ', stream=None) else sys.exit('Passwords do not match!')
pub_address = input(' - Input your public IP address: ') or sys.exit('Public IP is needed for WireGuard.')
    host_iface = input(' - Host interface name: ')
    try:
        host_ipv4 = get_ip_address(host_iface.encode())
    except OSError:
        sys.exit('The interface does not exist!')
with open('.env', 'w') as env_file:
env_str = (
            f'WEBPASSWORD={webpass}\n'
f'PublicIP={pub_address}\n'
f'ServerIP={host_ipv4}\n'
# f'ServerIPv6={host_ipv6}\n'
'IPv6=False\n'
f'TZ=America/Chicago\n'
f'DNS1=127.0.0.1#5053\n'
f'DNS2=127.0.0.1#5054\n'
f'DNSMASQ_USER=pihole\n'
f'DNSMASQ_LISTENING=local\n'
)
env_file.write(env_str)
def setup():
create_env_file()
# Create the WireGuard config file and pass wg port to env file
create_wireguard_conf()
def main():
parser = argparse.ArgumentParser(
description='Script to setup your containers and manage WireGuard', epilog='NOTE: start with \'./setup.py -i\' to stage initial settings')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-i', '--initialize', help='initialize .env file with pihole and WireGuard settings',
action="store_true")
group.add_argument('-u', '--update', help='bring down containers / update containers / bring containers back up ',
action="store_true")
group.add_argument('-a', '--add-peer', dest='add', nargs='+', metavar=('WG_INTERFACE', 'PEER'),
help='add WireGuard peer to your instance (outputs [WG_INTERFACE]-cli-[PEER].conf) the WireGuard container will restart automatically')
group.add_argument('-d', '--delete-peer', dest='delete', nargs='+', metavar=('WG_INTERFACE', 'PUBLIC_KEY'),
help='delete WireGuard peers with the interface and PublicKeys listed with --list-peers the WireGuard container will restart automatically')
group.add_argument('-l', '--list-peers', dest='list', nargs=1, metavar='WG_INTERFACE',
help='list all WireGuard peers on specified interface')
parsed_args = parser.parse_args()
try:
if parsed_args.initialize:
setup()
elif parsed_args.add:
iface = parsed_args.add.pop(0)
add_wireguard_peer(iface, parsed_args.add)
elif parsed_args.delete:
iface = parsed_args.delete.pop(0)
delete_peer(iface, parsed_args.delete)
elif parsed_args.list:
list_peers(parsed_args.list[0])
elif parsed_args.update:
update_services()
except KeyboardInterrupt:
print('\n\n\tGoodbye!\n')
if __name__ == "__main__":
main()
```
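For reference, a sketch of how the script above is typically invoked, taken from the argparse definitions in main(); interface and peer names are placeholders.

```python
# ./setup.py -i                     # prompt for pihole/WireGuard settings; writes .env and etc-wireguard/wg0.conf
# ./setup.py -a wg0 laptop phone    # add peers; writes wg0-cli-laptop.conf and wg0-cli-phone.conf, restarts wireguard
# ./setup.py -l wg0                 # list the peers configured on interface wg0
# ./setup.py -d wg0 <PublicKey>     # delete a peer by public key (see -l for the keys), restarts wireguard
# ./setup.py -u                     # docker-compose down, pull, up -d
```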
|
{
"source": "jcthomassie/euler",
"score": 3
}
|
#### File: euler/euler/problem_27.py
```python
import functools
import numpy as np
from .utils import prime_mask, print_result
def func(n: int, a: int, b: int) -> int:
return n ** 2 + a * n + b
@functools.lru_cache
def primes() -> np.ndarray:
return prime_mask(func(1000, 1000, 1000))
@functools.lru_cache()
def depth(a: int, b: int) -> int:
"""Return the number of consecutive N that produce a prime for func(n, a, b)."""
n = 0
while primes()[func(n, a, b)]:
n += 1
return n
@print_result
def solve() -> int:
d_max = 0
best = None
for b in range(-999, 1001, 2):
# B must be prime to satisfy f(n=0)
if not primes()[b]:
continue
for a in range(-999, 1000):
if depth(a, b) > d_max:
best = (a, b)
d_max = depth(a, b)
if best is None:
raise RuntimeError("Failed to find solution")
return best[0] * best[1]
if __name__ == "__main__":
solve()
```
#### File: euler/euler/problem_34.py
```python
from .problem_24 import factorial
from .utils import print_result
def get_upper_bound() -> int:
"""
An easy upper bound for the largest number that is the sum of the factorial
of its digits can be found using the following logic:
let B be the bound, and N be the number of digits in B...
9! * N < B
The bound can then be improved to:
9! * (N - 1)
"""
digits = 2
while 10 ** (digits - 1) < factorial(9) * digits:
digits += 1
return factorial(9) * (digits - 1)
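# Worked example: 9! = 362880.
#   digits = 7: 10**6 = 1,000,000  <  7 * 362880 = 2,540,160  -> keep looping
#   digits = 8: 10**7 = 10,000,000 >= 8 * 362880 = 2,903,040  -> stop
# so the returned bound is 7 * 362880 = 2,540,160.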
@print_result
def solve() -> int:
digit_factorials = {f"{d}": factorial(d) for d in range(10)}
total = 0
for n in range(10, get_upper_bound()):
if n == sum(digit_factorials[d] for d in f"{n}"):
total += n
return total
if __name__ == "__main__":
solve()
```
#### File: euler/euler/problem_35.py
```python
from typing import Iterator
from .utils import prime_mask, print_result
MAX = 1_000_000
def rotations(word: str) -> Iterator[str]:
"""Generate all rotations of the input word."""
for i in range(1, len(word)):
yield word[i:] + word[:i]
@print_result
def solve() -> int:
primes = prime_mask(MAX)
count = 1 # include 2
for n in range(3, MAX, 2):
# Check number
if not primes[n]:
continue
# Check rotations
for rot in rotations(f"{n}"):
if not primes[int(rot)]:
break
else:
count += 1
return count
if __name__ == "__main__":
solve()
```
#### File: euler/euler/problem_37.py
```python
from typing import Iterator
from .utils import prime_mask, print_result
MAX = 750_000
def truncations(word: str) -> Iterator[str]:
"""Generate all truncations of the input word."""
for i in range(1, len(word)):
yield word[i:] # left truncation
yield word[:-i] # right truncation
@print_result
def solve() -> int:
results: list[int] = []
primes = prime_mask(MAX)
for n in range(11, MAX, 2):
# Check number
if not primes[n]:
continue
# Check truncations
for trunc in truncations(f"{n}"):
if not primes[int(trunc)]:
break
else:
results.append(n)
if len(results) == 11:
return sum(results)
raise RuntimeError("Failed to find solution")
if __name__ == "__main__":
solve()
```
#### File: euler/euler/problem_38.py
```python
import itertools
from .utils import print_result
def concatenated_product(n_str: str) -> bool:
"""Return True if the input number is a 'concatenated product'."""
# Check all prefixes
for i in range(1, len(n_str) // 2 + 1):
seed = int(n_str[:i])
term = 1
rhs = n_str[i:]
while rhs:
term += 1
lhs = f"{term * seed}"
if rhs.startswith(lhs):
rhs = rhs[len(lhs) :]
else:
break
else:
return True
return False
@print_result
def solve() -> int:
for perm in itertools.permutations("987654321"):
pandigital = "".join(perm)
if concatenated_product(pandigital):
return int(pandigital)
raise ValueError("Failed to find solution")
if __name__ == "__main__":
solve()
```
#### File: euler/euler/problem_44.py
```python
import math
from .problem_45 import generate_pentagonals
from .utils import print_result
def is_pentagonal(p: int) -> bool:
"""
P = n * (3n - 1) / 2
If P is pentagonal, the above equation will have a positive integer solution
for n. We use the quadratic formula to check if either solution for n is a
positive integer
"""
root = math.sqrt(24 * p + 1)
return root.is_integer() and ((1 + root) / 6).is_integer()
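# Worked example: the pentagonal numbers start 1, 5, 12, 22, 35, ...
#   p = 22: 24 * 22 + 1 = 529, sqrt = 23.0, (1 + 23) / 6 = 4.0 -> pentagonal (n = 4)
#   p = 20: 24 * 20 + 1 = 481, which is not a perfect square   -> not pentagonal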
@print_result
def solve() -> int:
pentagonals: list[int] = []
for p_j in generate_pentagonals(1):
for p_k in pentagonals:
if is_pentagonal(p_j + p_k) and is_pentagonal(p_j - p_k):
return p_j - p_k
pentagonals.append(p_j)
raise RuntimeError("Failed to find solution")
if __name__ == "__main__":
solve()
```
#### File: euler/euler/problem_49.py
```python
from collections import defaultdict
from typing import Iterator
from .utils import prime_mask, print_result
def four_digit_primes() -> Iterator[int]:
"""Generate all four-digit prime numbers."""
primes = prime_mask(9999)
for n in range(1001, 9999, 2):
if primes[n]:
yield n
@print_result
def solve() -> int:
# Find all permutation groups
perms = defaultdict(list)
for prime in four_digit_primes():
digits = tuple(sorted(f"{prime}"))
perms[digits].append(prime)
# Drop example group
del perms[tuple("1478")]
# Find evenly spaced 3-group
for group in perms.values():
if len(group) < 3:
continue
# Check all 3-groups
for i, a in enumerate(group, start=1):
for j, b in enumerate(group[i:], start=1):
for c in group[i + j :]:
# Evenly spaced
if (b - a) == (c - b):
return int(f"{a}{b}{c}")
raise RuntimeError("Failed to find solution")
if __name__ == "__main__":
solve()
```
#### File: euler/euler/problem_4.py
```python
from .utils import print_result
def is_palindrome(s: str) -> bool:
"""Return True if input string is palindrome."""
return s == s[::-1]
@print_result
def solve() -> int:
res = 0
for n in range(999, 100, -1):
for m in range(n, 100, -1):
prod = m * n
if is_palindrome(f"{prod}"):
res = max(res, prod)
return res
if __name__ == "__main__":
solve()
```
#### File: euler/euler/problem_54.py
```python
from __future__ import annotations
from collections import Counter
from pathlib import Path
from typing import Iterator, Optional
from . import DATA_DIR
from .utils import print_result
class Card:
"""Playing card representation.
Allows ordering operations between cards, and instantiation from a string
representation.
"""
# Suit values
suits = {char: val for val, char in enumerate("CDHS")}
suits_inv = {val: char for char, val in suits.items()}
# Face values
faces = {char: val for val, char in enumerate("23456789TJQKA")}
faces_inv = {val: char for char, val in faces.items()}
__slots__ = ("face_value", "suit_value")
def __init__(self, face_value: int, suit_value: int) -> None:
self.face_value = face_value
self.suit_value = suit_value
def __eq__(self, other: object) -> bool:
if not isinstance(other, Card):
return NotImplemented
return (
self.face_value == other.face_value and self.suit_value == other.suit_value
)
def __gt__(self, other: object) -> bool:
if other is None:
return True
if not isinstance(other, Card):
return NotImplemented
return (
self.face_value > other.face_value
or self.face_value == other.face_value
and self.suit_value > other.suit_value
)
def __lt__(self, other: object) -> bool:
if other is None:
return False
if not isinstance(other, Card):
return NotImplemented
return (
self.face_value < other.face_value
or self.face_value == other.face_value
and self.suit_value < other.suit_value
)
def __repr__(self) -> str:
return f"{self.__class__.__name__}" f"({self.face_value}, {self.suit_value})"
def __str__(self) -> str:
return f"{self.faces_inv[self.face_value]}" f"{self.suits_inv[self.suit_value]}"
@classmethod
def from_str(cls, string: str) -> Card:
return cls(cls.faces[string[0]], cls.suits[string[-1]])
class Hand:
"""Poker hand representation.
Contains a list of 5 Card objects. Allows ordering operations between hands.
"""
_faces_descending = range(len(Card.faces))[::-1]
_suits_descending = range(len(Card.suits))[::-1]
__slots__ = ("cards", "_face_counts", "_suit_counts")
def __init__(self, *cards: Card) -> None:
# Cards are sorted in descending order; max is always first card
self.cards = sorted(cards, reverse=True)
self._face_counts: Optional[Counter[int]] = None
self._suit_counts: Optional[Counter[int]] = None
@property
def face_counts(self) -> Counter[int]:
if self._face_counts is None:
self._face_counts = Counter(c.face_value for c in self.cards)
return self._face_counts
@property
def suit_counts(self) -> Counter[int]:
if self._suit_counts is None:
self._suit_counts = Counter(c.suit_value for c in self.cards)
return self._suit_counts
def get_where(
self, face: Optional[int] = None, suit: Optional[int] = None
) -> Iterator[Card]:
"""Get cards that match the input face and suit.
Yields:
Matches in descending order.
"""
for card in self.cards:
if (face is None or card.face_value == face) and (
suit is None or card.suit_value == suit
):
yield card
def eval_high(self) -> Card:
return self.cards[0]
def eval_pair(self) -> Optional[Card]:
for face in self._faces_descending:
if self.face_counts[face] == 2:
return next(self.get_where(face=face))
return None
def eval_two_pair(self) -> Optional[Card]:
counts = list(self.face_counts.values())
if counts.count(2) >= 2:
return self.eval_pair()
return None
def eval_three_of_a_kind(self) -> Optional[Card]:
for face in self._faces_descending:
if self.face_counts[face] == 3:
return next(self.get_where(face=face))
return None
def eval_straight(self) -> Optional[Card]:
faces = sorted((c.face_value for c in self.cards), reverse=True)
for a, b in zip(faces, faces[1:]):
if b != (a - 1):
return None
return self.cards[0]
def eval_flush(self) -> Optional[Card]:
for suit in self._suits_descending:
if self.suit_counts[suit] == 5:
return self.cards[0]
return None
def eval_full_house(self) -> Optional[Card]:
counts = self.face_counts.values()
if 3 in counts and 2 in counts:
return self.eval_three_of_a_kind()
return None
def eval_four_of_a_kind(self) -> Optional[Card]:
for face in self._faces_descending:
if self.face_counts[face] == 4:
return next(self.get_where(face=face))
return None
def eval_straight_flush(self) -> Optional[Card]:
if self.eval_straight() and self.eval_flush():
return self.cards[0]
return None
def eval_royal_flush(self) -> Optional[Card]:
if self.eval_straight_flush():
if self.cards[0].face_value == Card.faces["A"]:
return self.cards[0]
return None
hierarchy = [
eval_royal_flush,
eval_straight_flush,
eval_four_of_a_kind,
eval_full_house,
eval_flush,
eval_straight,
eval_three_of_a_kind,
eval_two_pair,
eval_pair,
eval_high,
]
def get_best(self) -> str:
"""Get best card set from hand."""
for method in self.hierarchy:
if method(self) is not None:
                return method.__name__[len("eval_"):]
raise RuntimeError("Failed to get best card set")
def __gt__(self, other: object) -> bool:
if not isinstance(other, Hand):
return NotImplemented
for method in self.hierarchy:
res_s = method(self)
res_o = method(other)
if res_s is None:
if res_o is None:
continue
return False
if res_s > res_o:
return True
if res_s < res_o:
return False
return False
def __bool__(self) -> bool:
return bool(self.cards)
def __str__(self) -> str:
return f"{self.__class__.__name__}" f"({' '.join(str(c) for c in self.cards)})"
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}" f"({', '.join(repr(c) for c in self.cards)})"
)
def scrape_data(path: Path) -> Iterator[tuple[Hand, Hand]]:
"""Scrape card data from text file and load it into two Hands.
Yields:
Pair of hands for one round.
"""
with path.open() as h:
for line in h:
cards = [Card.from_str(card_str) for card_str in line.strip().split()]
yield Hand(*cards[:5]), Hand(*cards[5:])
@print_result
def solve() -> int:
scores = [0, 0]
for p1_hand, p2_hand in scrape_data(DATA_DIR / "p054_poker.txt"):
if p1_hand > p2_hand:
scores[0] += 1
else:
scores[1] += 1
return scores[0]
if __name__ == "__main__":
solve()
```
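A minimal usage sketch of the Card and Hand classes above; the two hands are the example hands from the problem statement, and the import path follows the package layout used by the tests.

```python
from euler.problem_54 import Card, Hand

p1 = Hand(*(Card.from_str(c) for c in "5H 5C 6S 7S KD".split()))  # pair of fives
p2 = Hand(*(Card.from_str(c) for c in "2C 3S 8S 8D TD".split()))  # pair of eights
print(p1.get_best())  # "pair"
print(p1 > p2)        # False: the pair of eights wins
```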
#### File: euler/euler/problem_67.py
```python
from pathlib import Path
from . import DATA_DIR
from .problem_18 import max_path_sum
from .utils import print_result
def scrape_pyramid(path: Path) -> list[list[int]]:
"""Scrape pyramid from text file into nested list of integers."""
with path.open() as h:
pyramid = []
for line in h:
pyramid.append([int(node) for node in line.strip().split()])
return pyramid
@print_result
def solve() -> int:
return max_path_sum(scrape_pyramid(DATA_DIR / "p067_triangle.txt"))
if __name__ == "__main__":
solve()
```
#### File: euler/euler/problem_75.py
```python
import collections
import math
from .utils import generate_triples, print_result
@print_result
def solve() -> int:
length = 1_500_000
perims = collections.Counter(
(sum(sides) for sides in generate_triples(math.ceil(length / 2)))
)
return sum(1 for p, count in perims.items() if p <= length and count == 1)
if __name__ == "__main__":
solve()
```
#### File: euler/tests/test_problem_33.py
```python
from euler.problem_33 import solve
from .utils import validate_solution
def test_solution() -> None:
validate_solution(solve, answer=100)
```
|
{
"source": "jctincan/tsadm-webapp",
"score": 2
}
|
#### File: tsadm-webapp/libexec/jobq.xinetd.py
```python
import sys
import re
import os
import os.path
import subprocess
BASE_DIR = '/opt/tsadmdev'
sys.path.insert(0, os.path.join(BASE_DIR, 'libexec'))
sys.path.insert(0, BASE_DIR)
import tsadm.config as tsadm_conf
import tsadm.log
import runner
at_cmd = '/usr/bin/at'
runbg_cmd = os.path.join(BASE_DIR, 'libexec', 'jobq.runbg.py')
re_job_id = re.compile(r'^[a-f0-9]+$')
resp_headers = {
'ERROR': '500 INTERNAL ERROR\n',
'BADREQ': '400 BAD REQUEST\n',
'BADCMD': '401 BAD COMMAND\n',
'NOTCMD': '402 COMMAND NOT FOUND\n',
'BADJID': '403 BAD JOB ID\n',
'NOSITE': '404 SITE NOT FOUND\n',
'OK': '200 OK'
}
def _exit(status):
tsadm.log.dbg('END')
tsadm.log.log_close()
sys.exit(status)
def _exit_badreq(req_line):
tsadm.log.err('bad request: ', req_line)
print(resp_headers['BADREQ'])
_exit(1)
# --- start log
tsadm.log.log_open(tsadm_conf.get('JOBQ_SYSLOG_TAG', 'tsadmdev-jobqd'))
tsadm.log.dbg('START')
tsadm.log.dbg('sys.path: ', sys.path)
tsadm.log.dbg('os.environ: ', os.environ)
# --- read request
req_line = sys.stdin.readline().strip()
tsadm.log.dbg('req_line: ', req_line)
line_items = req_line.split(' ')
try:
req = line_items[0]
req_args = line_items[1:]
except IndexError:
tsadm.log.dbg('bad args')
_exit_badreq(req_line)
tsadm.log.dbg('req: ', req)
tsadm.log.dbg('req_args: ', req_args)
# --- check request
if req != '.run' and req != '.runbg' or len(req_args) < 1:
_exit_badreq(req_line)
# --- run requested job
if req == '.run':
# -- get args
try:
sname = req_args[0]
senv = req_args[1]
cmd_name = req_args[2]
except IndexError:
_exit_badreq(req_line)
try:
cmd_args = req_args[3:]
except:
cmd_args = []
# -- cd to site's env home
if not runner.chdir(sname, senv):
print(resp_headers['NOSITE'])
_exit(1)
# -- check cmd name
if not runner.check_cmd_name(cmd_name):
print(resp_headers['BADCMD'])
_exit(1)
# -- check cmd path
cmd_path = runner.cmd_path(BASE_DIR, cmd_name)
if cmd_path is None:
print(resp_headers['BADCMD'])
_exit(1)
# -- run command
cmd_rtrn, cmd_out = runner.run(cmd_path, cmd_args)
tsadm.log.dbg('cmd_rtrn: ', cmd_rtrn)
print(resp_headers['OK'])
print('CMD-RTRN:', cmd_rtrn)
print()
if cmd_out is None:
tsadm.log.wrn('cmd_name: ', cmd_name, ' - cmd_out: ', cmd_out)
else:
for l in cmd_out.readlines():
print(l.decode('utf-8', 'replace'), end='')
cmd_out.close()
tsadm.log.inf('{}:{}'.format(cmd_name, cmd_rtrn))
_exit(0)
elif req == '.runbg':
if not os.path.exists(at_cmd) or not os.access(at_cmd, os.X_OK):
        tsadm.log.err(at_cmd, ': not found or not executable')
print(resp_headers['ERROR'])
_exit(1)
if not os.path.exists(runbg_cmd) or not os.access(runbg_cmd, os.X_OK):
        tsadm.log.err(runbg_cmd, ': not found or not executable')
print(resp_headers['ERROR'])
_exit(1)
job_id = req_args[0]
if not re_job_id.match(job_id):
tsadm.log.err('bad jobid: ', job_id)
print(resp_headers['BADJID'])
_exit(1)
at_input = '{} --job-id={}'.format(runbg_cmd, job_id)
at = subprocess.Popen([at_cmd, 'now'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
try:
at_out, at_err = at.communicate(at_input.encode('utf-8', 'replace'))
at_rtrn = at.wait()
at_out = at_out.decode('utf-8', 'replace').replace('warning: commands will be executed using /bin/sh', '')
except subprocess.TimeoutExpired as e:
        tsadm.log.err('at comm: ', e)
at_out = 'at comm failed'
at_rtrn = 128
print(resp_headers['OK'])
print('CMD-RTRN:', at_rtrn)
print()
print('START:', job_id, sep='')
print(at_out)
tsadm.log.inf('{}[{}]: runbg'.format(job_id, at_rtrn))
_exit(0)
tsadm.log.err('end of program reached!')
print(resp_headers['ERROR'])
_exit(128)
```
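A hedged sketch of the line protocol this service speaks, mirroring how the webapp's TSAdmJobQ._req (further down in this repo) talks to it; the host, site/env names and command are placeholders.

```python
import telnetlib

# Placeholder host; the port default of 6100 matches the webapp's JOBQ_SERVER_PORT fallback.
tn = telnetlib.Telnet("jobq.example.org", 6100, 15)
tn.write(b".run mysite prod some.cmd arg1\n")  # ".runbg <job_id>" queues a background job instead
reply = tn.read_all().decode("utf-8", "replace").splitlines()
tn.close()

# On success the reply starts with "200 OK", then "CMD-RTRN: <exit code>",
# a blank line, and finally the command output.
print(reply[:2])
```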
#### File: tsadm-webapp/tsadmcli/site.py
```python
import argparse
import tsadm.db
from tsadm import log
parser = argparse.ArgumentParser()
db = tsadm.db.TSAdmDB()
def new():
parser.add_argument('name', help="site name")
parser.add_argument('--child-of', metavar='parent_name', help="name of the parent site", default="")
args = parser.parse_args()
sid = db.site_id(args.name)
if sid != 0:
print("ERROR: a site called '{}' already exists: {}".format(args.name, sid))
return 1
parent_id = 0
if args.child_of != "":
parent_id = db.site_id(args.child_of)
if parent_id == 0:
print("ERROR: parent site not found:", args.child_of)
return 2
db.site_add(args.name, parent_id)
log.inf("site '", args.name, "' was created")
return 0
def list():
for sinfo in db.site_all():
print(sinfo[0], sinfo[1])
return 0
def remove():
parser.add_argument('name', help="site name")
args = parser.parse_args()
site_id = db.site_id(args.name)
if site_id == 0:
print("ERROR: site does not exists: {}".format(args.name))
return 1
site_envs = db.siteenv_all(site_id)
if site_envs:
print("ERROR: remove associated environments first!")
return 2
# TODO: check if it has child sites before to remove
db.site_remove(site_id)
log.inf("site removed: {} {}".format(site_id, args.name))
return 0
```
#### File: tsadm-webapp/tsadmcli/slave.py
```python
import argparse
import tsadm.db
from tsadm import log
parser = argparse.ArgumentParser()
db = tsadm.db.TSAdmDB()
def list():
for slv in db.slave_all():
print(slv['id'], slv['fqdn'])
return 0
def new():
parser.add_argument('fqdn', help="host fqdn")
args = parser.parse_args()
for slv in db.slave_all():
if slv['fqdn'] == args.fqdn:
print("ERROR: slave already exists:", args.fqdn)
return 1
db.slave_add(args.fqdn)
log.inf("new slave added: ", args.fqdn)
return 0
def remove():
parser.add_argument('fqdn', help="host fqdn")
args = parser.parse_args()
slave = None
for slv in db.slave_all():
if slv['fqdn'] == args.fqdn:
slave = slv
break
if slave is None:
print("ERROR: slave not found:", args.fqdn)
return 1
envs = db.slave_envs(slave['id'])
if envs:
print("ERROR: remove associated site envs first")
return 2
db.slave_remove(args.fqdn)
log.inf("slave removed: ", args.fqdn)
return 0
```
#### File: tsadm/db/config.py
```python
import tsadm.config
from mysql.connector.constants import ClientFlag
class Config(object):
HOST = tsadm.config.get('DB_HOST', 'localhost')
DATABASE = tsadm.config.get('DB_NAME', 'tsadmdb')
USER = tsadm.config.get('DB_USER', 'tsadm')
    PASSWORD = tsadm.config.get('DB_PASS', None)
PORT = tsadm.config.get('DB_HOST_PORT', 3306)
CHARSET = tsadm.config.get('DB_CHARSET', 'utf8')
UNICODE = True
WARNINGS = True
TIMEOUT = tsadm.config.get('DB_HOST_TIMEOUT', 7)
@classmethod
def dbinfo(self):
return {
'host': self.HOST,
'database': self.DATABASE,
'user': self.USER,
            'password': self.PASSWORD,
'charset': self.CHARSET,
'use_unicode': self.UNICODE,
'get_warnings': self.WARNINGS,
'raise_on_warnings': self.WARNINGS,
'port': self.PORT,
'connection_timeout': self.TIMEOUT
}
```
#### File: tsadm/jobq/__init__.py
```python
import hashlib
import time
import telnetlib
import os
import gzip
from base64 import b64encode
import tsadm.log
class TSAdmJobQ:
_db = None
_user_id = None
_senv_id = None
_site_name = None
_site_env = None
_conf = None
_cmd_hooks = None
_server_addr = None
def __init__(self, db, user_id, senv_id, sname, senv, conf):
self._db = db
self._user_id = user_id
self._senv_id = senv_id
self._site_name = sname
self._site_env = senv
self._conf = conf
self._cmd_hooks = dict()
def idgen(self):
d = hashlib.sha1()
d.update(str(time.time()).encode('utf-8', 'replace'))
d.update(str(self._user_id).encode('utf-8', 'replace'))
d.update(str(self._senv_id).encode('utf-8', 'replace'))
d.update(self._site_name.encode('utf-8', 'replace'))
d.update(self._site_env.encode('utf-8', 'replace'))
hd = d.hexdigest()
del d
return hd
def start(self, cmd_name, cmd_args, senv_id=None, adm_log=False):
if senv_id is None:
senv_id = self._senv_id
job_id = self.idgen()
self._db.jobq_start(job_id, self._user_id, senv_id, cmd_name, cmd_args, int(time.time()), adm_log)
return job_id
def end(self, job_id, cmd_exit, cmd_out, compress=True, encode=True):
if compress:
cmd_out = gzip.compress(cmd_out.encode('utf-8', 'replace'))
tsadm.log.dbg('jobq.end: cmd_out compressed')
if encode:
cmd_out = b64encode(cmd_out)
tsadm.log.dbg('jobq.end: cmd_out encoded')
# 9999 and 9090 cmd_exit are used internally
if cmd_exit == 9999 or cmd_exit == 9090:
cmd_exit = 9000
self._db.jobq_end(job_id, int(time.time()), cmd_exit, cmd_out)
return job_id
def status_update(self, job_id, status):
return self._db.jobq_status_update(job_id, status)
def _req(self, req_line, senv_id=None):
if senv_id is None:
senv_id = self._senv_id
cmd_rtrn = 128
cmd_out = []
jobq_server = self._server_addr
if self._server_addr is None:
jobq_server = self._db.jobq_server(senv_id)
tsadm.log.dbg('jobq_server: ', jobq_server)
if jobq_server is None or jobq_server == '__NOT_SET__':
ts = str(time.time())
tsadm.log.err('NO_JOBQ_SERVER[{}]'.format(ts))
return (64, ['NO_JOBQ_SERVER[{}]'.format(ts)])
try:
jobq_port = self._conf.get('JOBQ_SERVER_PORT', 6100)
            jobq_timeout = self._conf.get('JOBQ_SERVER_TIMEOUT', 15)  # timeout in seconds, not the port
tn = telnetlib.Telnet(jobq_server, jobq_port, jobq_timeout)
tn.write('{}\n'.format(req_line).encode('utf-8', 'replace'))
reply = tn.read_all()
rlines = reply.decode('utf-8', 'replace').splitlines()
tsadm.log.dbg('rlines: ', rlines)
req_status = int(rlines[0].split(' ')[0])
if req_status == 200:
cmd_rtrn = int(rlines[1].split(' ')[1])
cmd_out = rlines[3:]
else:
cmd_out = rlines
tn.close()
except Exception as e:
ts = str(time.time())
            tsadm.log.err('JOBQ_REQ_EXCEP[{}]: '.format(ts), e)
cmd_out = ['JOBQ_REQ_EXCEP[{}]'.format(ts)]
cmd_rtrn = 64
return (cmd_rtrn, cmd_out)
def cmd(self, cmd_name, args_s='', senv_id=None):
if senv_id is None:
site_name = self._site_name
site_env = self._site_env
else:
site_name = self._db.siteenv_site_name(senv_id)
site_env = self._db.siteenv_name(senv_id)
req_line = '.run {} {} {} {}'.format(site_name, site_env, cmd_name, args_s)
tsadm.log.dbg('jobq.cmd: ', req_line)
cmd_rtrn, cmd_out = self._req(req_line, senv_id)
self._hooks_run(cmd_name, cmd_rtrn)
return (cmd_rtrn, cmd_out)
def run(self, cmd_name, cmd_args, runbg=False, senv_id=None, adm_log=False):
args_s = ''
        if isinstance(cmd_args, list):
args_s = ' '.join(cmd_args)
else:
args_s = cmd_args
# log start
job_id = self.start(cmd_name, args_s, senv_id=senv_id, adm_log=adm_log)
if runbg:
reql = '.runbg {}'.format(job_id)
# do request
cmd_rtrn, cmd_out = self._req(reql, senv_id=senv_id)
cmd_out = os.linesep.join(cmd_out)
self._hooks_run(cmd_name, cmd_rtrn)
# log end is done by runbg command
else:
cmd_rtrn, cmd_out = self.cmd(cmd_name, args_s)
cmd_out = os.linesep.join(cmd_out)
# log end
self.end(job_id, cmd_rtrn, cmd_out)
return (job_id, cmd_rtrn, cmd_out)
def cmd_hook(self, cmd_name, when, fcall, fargs=None):
hkey = cmd_name
if not hkey in self._cmd_hooks:
self._cmd_hooks[hkey] = dict()
if not when in self._cmd_hooks[hkey]:
self._cmd_hooks[hkey][when] = list()
self._cmd_hooks[hkey][when].append((fcall, fargs))
def _hooks_run(self, cmd_name, cmd_rtrn):
tsadm.log.dbg('jobq.cmd_hooks: ', self._cmd_hooks)
if cmd_rtrn == 0:
hkey = cmd_name
if hkey in self._cmd_hooks:
if 'post' in self._cmd_hooks[hkey]:
for hd in self._cmd_hooks[hkey]['post']:
f = hd[0]
args = hd[1]
tsadm.log.dbg('jobq.cmd_hooks post: ', cmd_name)
f(args)
else:
tsadm.log.dbg('jobq.cmd_hooks: not a post hook ', hkey)
else:
tsadm.log.dbg('jobq.cmd_hooks post: no hook named ', hkey)
else:
tsadm.log.dbg('jobq.cmd_hooks post: not running hooks, cmd failed')
def env_id(self):
return self._senv_id
def server_addr(self, addr=None):
if addr is not None:
self._server_addr = addr
return self._server_addr
```
#### File: tsadm-webapp/tsadm/log.py
```python
import os
import syslog as sl
from . import config
def log_open(iden='tsadmdev'):
sl.openlog(iden, sl.LOG_PID, sl.LOG_LOCAL3)
def log_close():
sl.closelog()
def dbg(*msg):
__log(sl.LOG_DEBUG, 'DBG: ', *msg)
def inf(*msg):
    __log(sl.LOG_INFO, 'INF: ', *msg)
def err(*msg):
    __log(sl.LOG_ERR, 'ERR: ', *msg)
def wrn(*msg):
    __log(sl.LOG_WARNING, 'WRN: ', *msg)
def __log(prio, *msg):
line = ''
for m in msg:
if type(m) == type(''):
line += m
else:
line += str(m)
sl.syslog(prio, line)
```
#### File: tsadm/slave/__init__.py
```python
from tsadm.jobq.req import TSAdmJobQReqInvoke
class TSAdmSlave:
_wapp = None
id = None
_info = None
fqdn = None
slug = None
def __init__(self, wapp, slave_id):
self._wapp = wapp
self.id = slave_id
self._info = self._wapp.db.slave_info(slave_id)
self.fqdn = self._info.get('fqdn')
self.slug = self._info.get('slug')
def hostinfo(self):
cmd_hostinfo = TSAdmJobQReqInvoke('slave.hostinfo', self._wapp)
return cmd_hostinfo.request_lines()
def softinfo(self):
cmd_softinfo = TSAdmJobQReqInvoke('slave.softinfo', self._wapp)
return cmd_softinfo.request_lines()
def tmpl_data(self, hostinfo=True, softinfo=False):
self._wapp.jobq.server_addr(self.fqdn)
self._info['ipaddr'] = self._wapp.nsresolve(self.fqdn)
if hostinfo:
self._info['hostinfo'] = '\n'.join(self.hostinfo())
if softinfo:
self._info['softinfo'] = '\n'.join(self.softinfo())
self._info['graphs_base_url'] = self._wapp.conf.get('SLAVE_GRAPHS_BASE_URL', '/slave/graphs')
return {
'server': self._info
}
```
|
{
"source": "jctissier/304Project",
"score": 3
}
|
#### File: app/db/create_tables.py
```python
from app.db.database import db
def create():
db.engine.execute('''CREATE TABLE Athlete (
ID INT,
teamID INT ,
Salary INT ,
Name VARCHAR ,
DOB VARCHAR ,
Status VARCHAR ,
placeOfBirth VARCHAR ,
countryID VARCHAR ,
Goals INT ,
Assists INT ,
Wins INT ,
Losses INT)''')
db.engine.execute('''CREATE TABLE Coach (
ID INT,
Salary INT,
Name VARCHAR,
DOB VARCHAR,
Status VARCHAR,
placeOfBirth VARCHAR,
countryID VARCHAR)''')
db.engine.execute('''CREATE TABLE Competition (
Name VARCHAR ,
Winner VARCHAR)''')
db.engine.execute('''CREATE TABLE Game (
Score VARCHAR ,
gameID INT ,
Round INT ,
WinningTeamID INT ,
LosingTeamID INT ,
competitionID INT ,
seasonID INT)''')
db.engine.execute('''CREATE TABLE GameGoal (
gameID INT ,
athleteID INT)''')
db.engine.execute('''CREATE TABLE Season (
SeasonID INT ,
Year INT ,
Location VARCHAR )''')
db.engine.execute('''CREATE TABLE Stadium (
Name VARCHAR ,
Location VARCHAR)''')
db.engine.execute('''CREATE TABLE Team (
TeamID INT ,
Location VARCHAR ,
DateCreated VARCHAR,
Goals INT ,
Assists INT ,
Wins INT ,
Losses INT)''')
print("Tables have been created\n")
```
#### File: app/db/drop_data.py
```python
from app.db.database import db
tables = ['GameGoal', 'Athlete', 'Coach', 'Competition', 'Game', 'Season', 'Stadium', 'Team']
def drop():
for table in tables:
try:
db.engine.execute('''DROP TABLE ''' + table)
except Exception:
print('Table ' + table + ' may have already been dropped, continuing to next drop')
```
#### File: app/queries/main.py
```python
from app.util.util import gzipped
from flask import Blueprint, request, render_template, jsonify
from app.db.database import Athlete, Coach, Competition, Game, Season, Stadium, Team, GameGoal, db
from sqlalchemy import text
import app.queries.models as helper
# Define the blueprint: 'queries'
queries = Blueprint('queries', __name__)
@queries.route('/')
@gzipped
def load():
"""
Loads the HTML template
"""
return render_template("dashboard.html")
""" SELECT QUERIES """
@queries.route('/db_tables', methods=['GET'])
def get_tables():
"""
Gets all the rows for a table in the DB
:return: JSON string containing all the rows in a particular table
"""
tables_map = {
"athlete": Athlete.__table__.columns.keys(),
"coach": Coach.__table__.columns.keys(),
"competition": Competition.__table__.columns.keys(),
"game": Game.__table__.columns.keys(),
"gamegoal": GameGoal.__table__.columns.keys(),
"season": Season.__table__.columns.keys(),
"stadium": Stadium.__table__.columns.keys(),
"team": Team.__table__.columns.keys()
}
table_name = request.args.get('table_name').lower()
available_tables = ['athlete', 'coach', 'competition', 'game', 'gamegoal', 'season', 'stadium', 'team']
if table_name in available_tables:
sql = text('''SELECT * FROM ''' + table_name)
rows = db.engine.execute(sql)
headers = tables_map[table_name]
data = []
if table_name == "game":
for r in rows:
data.append([r[6], r[9], r[3], r[4], r[5], r[7]])
elif table_name == "competition":
for r in rows:
data.append([r[1], r[2], r[3]])
else:
data = [list(row[1:]) for row in rows]
return jsonify({'code': 200, 'table': table_name, 'entries': data, 'headers': headers})
return jsonify({'code': 400, 'error': 'Table Name was not valid'})
""" INSERT QUERIES """
@queries.route('/insert_query', methods=['GET', 'POST'])
@gzipped
def insert_query():
"""Insert data for Athlete, Team or Coach"""
insert_table = request.form['table_name']
# Athlete fields
a_name = request.form['a_name']
a_status = request.form['a_status']
# Team fields
t_name = request.form['t_name']
t_loc = request.form['t_location']
if insert_table == "Athlete":
last_id = int(db.session.query(Athlete).order_by(Athlete.id.desc()).first().id)
table = 'Athlete (id, Salary, Name, DOB, Status, placeOfBirth, countryID, goals, assists, wins, losses)'
vals = 'VALUES (' + str(last_id + 1) + ', 999999, "' + a_name + '", "1970-01-05", "' + a_status + '", "Canada", "Canada", 15, 10, 10, 0)'
elif insert_table == "Team":
team_exists = db.session.query(Team).filter_by(teamID=t_name).count()
table = 'Team (teamID, location, dateCreated, goals, assists, wins, losses)'
vals = 'VALUES ("' + t_name + '", "' + t_loc + '", "2018-01-01", 168, 153, 55, 20)'
else:
return jsonify({'error': "Invalid Table Name"})
sql = text('''INSERT INTO ''' + table + vals)
if insert_table == "Team" and team_exists > 0:
pass
else:
db.engine.execute(sql) # Runs the SQL INSERT
if insert_table == "Athlete":
row = db.session.query(Athlete).order_by(Athlete.id.desc()).limit(5).all()
last_vals = [[r.id, r.salary, r.name, r.dob, r.status, r.placeOfBirth, r.countryID, r.goals, r.assists, r.wins, r.losses]
for r in row]
elif insert_table == "Team":
row = db.session.query(Team).order_by(Team.teamID.desc()).all()
last_vals = [[r.teamID, r.location, r.dateCreated, r.goals, r.assists, r.wins, r.losses] for r in row]
return jsonify({
'code': 200,
'query_type': 'INSERT',
'table': insert_table,
'entries': last_vals
})
""" DELETE QUERIES """
@queries.route('/delete_query', methods=['GET', 'POST'])
@gzipped
def delete_query():
"""Delete from Stadium or Team"""
delete_table = request.form['table_name']
# Stadium fields
s_name = request.form['s_name'].replace("-", " ") + " "
s_location = request.form['s_location']
# Team fields
t_name = request.form['t_name']
if delete_table == "Stadium":
sql = text('''DELETE FROM Stadium WHERE name ="''' + s_name + '''" AND location ="''' + s_location + '"')
elif delete_table == "Team":
sql = text('''DELETE FROM Team WHERE teamID ="''' + t_name + '"')
else:
return jsonify({'error': "Invalid Table Name"})
db.engine.execute(sql)
if delete_table == "Stadium":
row = db.session.query(Stadium).order_by(Stadium.name.asc()).all()
last_vals = [[r.name, r.location] for r in row]
elif delete_table == "Team":
row = db.session.query(Team).order_by(Team.teamID.asc()).all()
last_vals = [[r.teamID, r.location, r.dateCreated, r.goals, r.assists, r.wins, r.losses] for r in row]
return jsonify({
'code': 200,
'query_type': 'DELETE',
'table': delete_table,
'entries': last_vals
})
""" UPDATE QUERIES """
@queries.route('/update_player_stats', methods=['GET', 'POST'])
@gzipped
def update_query_salary():
"""Updates the salary of a certain player"""
p_keys = {
"Messi": 238,
"Ronaldo": 190,
"Neymar": 200
}
player_salary = request.args.get('new_salary')
player_key = request.args.get('player_name')
sql = text('''UPDATE Athlete SET salary = ''' + player_salary + ''' WHERE id = ''' + str(p_keys[player_key]))
db.engine.execute(sql)
# Select player with updated salary
sql = text('''SELECT Athlete.name, Athlete.salary FROM Athlete WHERE id = ''' + str(p_keys[player_key]))
data = db.engine.execute(sql)
json_data = [list(row) for row in data]
return jsonify({
'code': 200,
'query_type': 'UPDATE',
'table': "Athlete",
'entries': json_data
})
@queries.route('/update_player_country', methods=['GET', 'POST'])
@gzipped
def update_query_country():
"""Updates the country of a certain player"""
p_keys = {
"Messi": 238,
"Ronaldo": 190,
"Neymar": 200
}
player_country = request.args.get('new_country')
player_key = request.args.get('player_name')
sql = text('''UPDATE Athlete SET countryID = "''' + player_country + '''" WHERE id = ''' + str(p_keys[player_key]))
db.engine.execute(sql)
# Select player with updated salary
sql = text('''SELECT Athlete.name, Athlete.countryID FROM Athlete WHERE id = ''' + str(p_keys[player_key]))
data = db.engine.execute(sql)
json_data = [list(row) for row in data]
return jsonify({
'code': 200,
'query_type': 'UPDATE',
'table': "Athlete",
'entries': json_data
})
""" JOIN QUERIES """
@queries.route('/join_query', methods=['GET'])
@gzipped
def join_query():
sql = ''
qry_num = int(request.args.get('qry'))
if qry_num == 1:
sql = helper.join_2_query1()
elif qry_num == 2:
sql = helper.join_2_query2()
elif qry_num == 3:
sql = helper.join_3_query()
data = db.engine.execute(sql)
json_data = [list(row) for row in data]
return jsonify({
'code': 200,
'query_type': 'JOIN',
'entries': json_data
})
""" GROUP BY QUERIES """
@queries.route('/groupby_query', methods=['GET'])
@gzipped
def groupby_query():
"""Find the number of players in each club team who are not born in the country the club is located in"""
sql = text('''SELECT t.teamID, count(*) FROM Athlete a, Team t
WHERE a.teamID = t.teamID AND t.location <> a.countryID
GROUP BY t.teamID''')
data = db.engine.execute(sql)
a_data = [list(row) for row in data]
json_data = helper.select_groupby_table(data=a_data)
return jsonify({
'code': 200,
'query_type': 'GROUP BY',
'table': 'Team',
'entries': json_data
})
""" CREATE VIEW QUERY """
@queries.route('/create_view_query', methods=['GET'])
@gzipped
def create_view():
"""Manager Performance view -
- Only care about Name, position and current stats
"""
tb_exists = "SELECT count(*) FROM sqlite_master WHERE type='view' AND name='AthletePerformanceView'"
row = db.engine.execute(tb_exists)
if str(row.fetchone()) == '(1,)':
print("Create View already created")
else:
sql = text('''CREATE VIEW AthletePerformanceView AS
SELECT Athlete.Name, Athlete.Position, Athlete.Goals, Athlete.Assists, Athlete.Wins, Athlete.Losses
FROM Athlete''')
db.engine.execute(sql)
qry_view = text('''SELECT * FROM AthletePerformanceView''') # View Table
rows = db.engine.execute(qry_view)
json_data = [list(row) for row in rows]
return jsonify({
'code': 200,
'query_type': 'CREATE VIEW',
'entries': json_data
})
```
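The insert, delete and update routes above build their SQL by concatenating form values directly into the statement text, which leaves them open to SQL injection. Below is a minimal sketch of the same pattern with bound parameters; it assumes the SQLAlchemy 1.x-style `db.engine.execute` used in this blueprint, and the table and column names are taken from the queries above.
```python
# Hypothetical rewrite of two of the queries above using bound parameters,
# so form values are never spliced into the SQL string itself.
from sqlalchemy import text

def delete_team(db, team_id):
    # :tid is a bind parameter; the driver handles quoting/escaping
    sql = text("DELETE FROM Team WHERE teamID = :tid")
    db.engine.execute(sql, tid=team_id)

def update_salary(db, athlete_id, new_salary):
    sql = text("UPDATE Athlete SET salary = :salary WHERE id = :aid")
    db.engine.execute(sql, salary=new_salary, aid=athlete_id)
```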
#### File: app/queries/models.py
```python
import collections
from sqlalchemy import text
""" SELECT Query Helpers """
def select_athlete_table(data):
json_data = collections.OrderedDict({})
for i in data:
json_data.update({
i[0]:
[{
'id': i[0],
'salary': i[1], # index of list used is determined by SQL query statement
'name': i[2], # athlete.salary = i[1] because it's the second field selected
'dob': i[3],
'status': i[4],
'placeOfBirth': i[5],
'countryID': i[6],
'goals': i[7],
'assists': i[8],
'wins': i[9],
'losses': i[10]
}]
})
return json_data
def select_team_table(data):
json_data = collections.OrderedDict({})
for i in data:
json_data.update({
i[0]:
[{
'name': i[0],
'location': i[1],
'dateCreated': i[2],
'goals': i[3],
'assists': i[4],
'wins': i[5],
'losses': i[6],
}]
})
return json_data
def select_coach_table(data):
json_data = collections.OrderedDict({})
for i in data:
json_data.update({
i[0]:
[{
'id': i[0],
'salary': i[1],
'name': i[2],
'dob': i[3],
'placeOfBirth': i[4],
'status': i[5],
'countryID': i[6]
}]
})
return json_data
def select_groupby_table(data):
json_data = collections.OrderedDict({})
for num, i in enumerate(data):
json_data.update({
num:
[{
'Team ID': i[0],
'Number Players': i[1]
}]
})
return json_data
""" JOIN Query Helpers """
def join_2_query1():
"""Find all the teams that play in the 2017 edition of the Champions League that have scored at least 5 goals"""
desired_year = 2017
desired_leaguename = "UEFA Champions League"
desired_goals = 4
sql = text('''SELECT distinct t.teamID, t.location, t.dateCreated
FROM Team t, Game g, Competition c, Season s
WHERE g.seasonID = s.seasonID AND g.competitionID = c.competitionID AND
(g.winningTeamID = t.teamID OR g.losingTeamID = t.teamID) AND t.goals > ''' +
str(desired_goals) + ''' AND c.name = "''' + desired_leaguename + '''"''' +
''' AND s.seasonID = ''' + str(desired_year))
return sql
def join_2_query2():
"""Find all the players who have scored at least 10 goals and won a trophy in a certain city"""
desired_goals = 2
desired_city = "Europe"
sql = text('''SELECT distinct a.name, a.teamID, a.status, a.salary
FROM Athlete a, Competition c, Game g, Season s
WHERE c.winner = a.teamID AND g.competitionID = c.competitionID AND g.seasonID = s.seasonID AND
a.goals > ''' + str(desired_goals) + ''' AND s.location = "''' + desired_city + '''"''')
return sql
def join_3_query():
"""Finds all players from country X who scored at least one goal in a game played in Y city and Z year."""
desired_country = "Brazil"
desired_gameDest = "Europe"
desired_gameYear = 2017
sql = text('''SELECT distinct a.name, a.teamID, a.status, a.salary
FROM Athlete a, GameGoal gg, Game g, Season s
WHERE a.id = gg.athleteID AND gg.gameID = g.gameID AND s.seasonID = g.seasonID AND a.countryID
LIKE "''' + desired_country + '''"''' + ''' AND s.seasonID = ''' + str(desired_gameYear) +
''' AND s.location LIKE "''' + desired_gameDest + '''"''')
return sql
```
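The three `select_*_table` helpers above differ only in their field lists. A possible generic version is sketched below, under the assumption that every table keys its rows by the first column.
```python
# Hypothetical generic replacement for the select_*_table helpers above:
# zip a list of column names with each row and key the result by the first value.
import collections

def rows_to_ordered_dict(data, fields):
    json_data = collections.OrderedDict()
    for row in data:
        json_data[row[0]] = [dict(zip(fields, row))]
    return json_data

# e.g. the athlete variant becomes:
# rows_to_ordered_dict(rows, ['id', 'salary', 'name', 'dob', 'status',
#                             'placeOfBirth', 'countryID', 'goals',
#                             'assists', 'wins', 'losses'])
```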
|
{
"source": "jctissier/Jarvis-Speech-Recognition",
"score": 3
}
|
#### File: jctissier/Jarvis-Speech-Recognition/jarvis.py
```python
import speech_recognition as sr
import time
import webbrowser
import os
from gtts import gTTS
import subprocess
import pyowm
import json
import datetime
from itertools import islice
import warnings
import pyautogui
import pyaudio
import imaplib
from termcolor import cprint
import vlc_ctrl
#Globals
warnings.filterwarnings("ignore")
#API Keys
owm = pyowm.OWM('<KEY>') #API Key for weather data
def speak(audioString):
print(audioString)
tts = gTTS(text=audioString, lang='en')
tts.save("audio.mp3")
subprocess.call(['xdg-open', 'audio.mp3'])
def greeting():
tts = gTTS(text="Hey JC, I'm listening", lang='en')
tts.save("greeting.mp3")
subprocess.call(['xdg-open', 'greeting.mp3'])
# Use at the beginning
def login():
greeting()
voice()
def voice():
try:
# obtain audio from the microphone
r = sr.Recognizer()
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source)
print("I'm listening...")
audio = r.listen(source)
speech = r.recognize_google(audio)
print("You said: " + r.recognize_google(audio))
speech_search = ['google', 'directions', 'youtube']
speech = speech.lower().split(" ")
print(speech)
#Gets web searches
if speech[0] in speech_search:
searching(speech)
voice()
#Runs my scripts
elif "script" and "run" in speech:
scripts(speech)
voice()
#Control messaging apps
elif "send" in speech:
messaging_app(speech)
voice()
        # set calendar events or reminders
elif 'set' in speech:
set_calendar(speech)
voice()
#cast media to TV
elif speech[0] == "cast":
google_cast(speech)
voice()
#close applications
elif 'close' in speech:
close_apps(speech)
voice()
#open applications
elif 'open' in speech:
open_apps(speech)
voice()
#Mac controls
elif 'mac' in speech:
control_mac(speech)
voice()
#Current time
elif 'time' in speech:
speak(datetime.datetime.now().strftime("%I:%M %p"))
voice()
#provides date information
elif 'date' in speech:
date(speech)
voice()
#Gets weather data
elif 'weather' in speech:
choose_weather(speech)
voice()
#Gets temperature data
elif 'temperature' in speech:
choose_weather(speech)
voice()
#Sunrise time
elif 'sunrise' in speech:
choose_weather(speech)
voice()
#Sunset time
elif 'sunset' in speech:
choose_weather(speech)
voice()
#pause & restart program
elif 'jarvis' in speech:
echo(speech)
voice()
#provides a cheatsheet for all the voice commands
elif 'help' in speech:
cheatsheet()
voice()
#checks if any new emails have arrived in my inbox
elif "mail" or "email" in speech:
check_mail(speech)
voice()
else:
voice()
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
voice()
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
voice()
'''Putting jarvis to sleep'''
def echo(speech):
if "sleep" in speech:
speak("I'm going to nap")
wake_up = input("Type anything to wake me up...\n")
if len(wake_up) > 0:
speak("Hey, I'm awake, do you need anything?")
else:
speak("I'm here, what's up?")
'''Main web searches'''
def searching(audio):
audio_sentence = " ".join(audio)
# search google maps
if "google maps" in audio_sentence:
#General Maps search
print("Entering Google Maps search")
location = audio[2:]
webbrowser.open('https://www.google.nl/maps/place/' + "+".join(location) + "/&")
speak("I opened Google Maps for " + " ".join(location))
#search google
elif "google" in audio:
#Google search
search_phrase = "+".join(audio)
webbrowser.open('https://www.google.ca/#q=' + search_phrase)
#full google maps directions from location to destination
elif "directions from" in audio_sentence:
#Maps directions from [location] to [destination]
audio = audio_sentence.split(" ")
index_to = audio.index("to")
location = audio[2:index_to]
destination = audio[index_to + 1:]
speak_location = " ".join(location)
location = "+".join(location)
speak_destination = " ".join(destination)
destination = "+".join(destination)
webbrowser.open('https://www.google.ca/maps/dir/' + location + "/" + destination)
speak("Directions from " + speak_location + " to " + speak_destination)
#find directions to google maps destination with location missing
elif "directions" in audio:
#Maps directions to destination, requires location
location = audio[1:]
location = "+".join(location)
webbrowser.open('https://www.google.nl/maps/dir//' + location )
speak("Please enter your current location")
#play next youtube video
elif "next" in audio:
print("I'm here")
pyautogui.hotkey('shift', 'command', 'right')
#searches youtube
elif "search" in audio:
print("searching youtube")
# Searches a youtube video
search_phrase = audio_sentence.replace("youtube", "").replace("search", "").replace(" ", "+")
webbrowser.open('https://www.youtube.com/results?search_query=' + search_phrase)
#Pause/play youtube videos
elif "play" or "pause" in audio:
pyautogui.hotkey('shift', 'command', ' ')
'''Running python scripts'''
def scripts(speech):
if "soccer" in speech:
os.system("cd /Users/Add_Folder_Path/ && python3 Soccer_streams.py")
else:
os.system("cd /Users/Add_Folder_Path/ && python3 Instalinks.py")
'''Check if any new emails'''
def check_mail(speech):
obj = imaplib.IMAP4_SSL('imap.gmail.com', '993')
obj.login('<EMAIL>', 'password')
obj.select()
obj.search(None, 'UnSeen')
unseen_message = len(obj.search(None, 'UnSeen')[1][0].split()) - 5351
if unseen_message > 1:
speak("You have " + str(unseen_message) + " new messages!")
webbrowser.open('mail.google.com')
else:
speak("There isn't any new emails!")
'''Google casting media to ChromeCast'''
def google_cast(speech):
#format = cast [media] to [monitor or laptop]
if "monitor" in speech:
if "youtube" in speech:
monitor_cast(media='youtube')
elif "netflix" in speech:
monitor_cast(media='netflix')
else:
monitor_cast(media= speech[1])
elif "laptop" in speech:
if "youtube" in speech:
laptop_cast(media='youtube')
elif "netflix" in speech:
laptop_cast(media='netflix')
else:
laptop_cast(media=speech[1])
def monitor_cast(media):
# Cast for 34-inch UltraWide Monitor
subprocess.call(["/usr/bin/open", "/Applications/Google Chrome.app"])
time.sleep(0.5)
pyautogui.hotkey('shift', 'up')
time.sleep(0.5)
pyautogui.hotkey('command', 'e')
pyautogui.typewrite(media, interval=0.02)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.click(2150, 50)
time.sleep(1.1)
pyautogui.moveTo(1200, 150)
pyautogui.moveTo(1200, 160)
pyautogui.click(1200, 150)
time.sleep(0.5)
pyautogui.press('esc')
pyautogui.hotkey('command', 'tab')
def laptop_cast(media):
# Cast for 15-inch macbook
subprocess.call(["/usr/bin/open", "/Applications/Google Chrome.app"])
time.sleep(0.5)
pyautogui.hotkey('shift', 'up')
time.sleep(0.5)
pyautogui.hotkey('command', 'e')
pyautogui.typewrite(media, interval=0.02)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.click(1030, 50)
time.sleep(1.5)
pyautogui.moveTo(640, 160)
pyautogui.click(650, 160)
time.sleep(0.3)
pyautogui.press('esc')
pyautogui.hotkey('command', 'tab')
'''Controlling Messaging Apps'''
def messaging_app(speech):
try:
if "messenger" in speech:
if speech[1] == "new":
receiver = speech[4]
message = " ".join(speech[5:])
messenger_automator(receiver, message)
else:
message = " ".join(speech[2:])
messenger_same_convo(message)
elif "whatsapp" in speech:
receiver = speech[3]
message = " ".join(speech[4:])
whatsapp(receiver, message)
except IndexError:
print("Index Error just occured, repeat what you were saying..")
#New Messenger = send new messenger to [recipient] [message_string]
def messenger_automator(receiver,message):
#Getting Messenger to new person
subprocess.call(["/usr/bin/open", "/Applications/Messenger.app"])
time.sleep(1.5)
pyautogui.press('tab',presses=1)
pyautogui.typewrite(receiver, interval=0.2)
pyautogui.press('down', presses=1)
pyautogui.press('enter',presses=1)
time.sleep(1)
pyautogui.typewrite(message, interval=0.02)
time.sleep(0.5)
pyautogui.hotkey('command', 'tab')
# pyautogui.press('enter')
speak("Message has been sent to " + receiver)
#Same Messenger = send messenger [message_string]
def messenger_same_convo(message):
subprocess.call(["/usr/bin/open", "/Applications/Messenger.app"])
time.sleep(1)
pyautogui.typewrite(message, interval=0.02)
time.sleep(0.5)
pyautogui.hotkey('command', 'tab')
# pyautogui.press('enter')
# Message on Whatsapp = send whatsapp to [receiver] [message_string]
def whatsapp(receiver, message):
subprocess.call(["/usr/bin/open", "/Applications/Whatsapp.app"])
time.sleep(1.6)
pyautogui.press('tab',presses=2,interval=0.5)
pyautogui.typewrite(receiver, interval=0.2)
time.sleep(1)
pyautogui.press('enter', presses=1)
time.sleep(1)
pyautogui.typewrite(message, interval=0.02)
pyautogui.press('enter')
time.sleep(1)
pyautogui.press('tab',presses=1)
time.sleep(0.4)
pyautogui.hotkey('command' , 'tab')
speak("Whatsapp has been sent to " + receiver)
'''Control Fantastical and set calendar events'''
#set calendar [entry_name] at [location] on the [date] at [time]
def set_calendar(speech):
if "calendar" in speech:
pyautogui.hotkey('ctrl', 'c')
time.sleep(0.2)
pyautogui.typewrite(" ".join(speech[2:]), interval=0.03)
pyautogui.press('enter')
time.sleep(0.7)
pyautogui.hotkey('ctrl', 'c')
speak("I have created your calendar event")
else:
# Creating a new reminder
# set reminder [message_string]
subprocess.call(["/usr/bin/open", "/Applications/Reminders.app"])
time.sleep(1)
pyautogui.hotkey('command', 'n')
time.sleep(0.2)
pyautogui.typewrite(" ".join(speech[2:]), interval=0.02)
time.sleep(0.1)
pyautogui.press('enter')
pyautogui.hotkey('command', 'tab')
speak("I have created a new reminder")
'''Control Macbook Functions'''
def control_mac(speech):
if "mute" in speech:
cmd ="""osascript -e "set volume 0"
"""
os.system(cmd)
#TODO - add extra functionalities if needed
'''Close Mac apps'''
def close_apps(speech):
#Closing mac apps with applescript
print("Chosing method...")
if "itunes" in speech:
close ="""osascript -e 'quit app "iTunes"'"""
os.system(close)
elif "skype" in speech:
close = """osascript -e 'quit app "Skype"'"""
os.system(close)
elif "evernote" in speech:
close = """osascript -e 'quit app "Evernote"'"""
os.system(close)
elif "spotify" in speech:
close = """osascript -e 'quit app "Spotify"'"""
os.system(close)
elif "messenger" in speech:
close = """osascript -e 'quit app "Messenger"'"""
os.system(close)
elif "trello" in speech:
close = """osascript -e 'quit app "Paws for Trello"'"""
os.system(close)
elif "chrome" in speech:
close = """osascript -e 'quit app "Google Chrome"'"""
os.system(close)
elif "feedly" in speech:
close = """osascript -e 'quit app "Feedly"'"""
os.system(close)
elif "preview" in speech:
close = """osascript -e 'quit app "Preview"'"""
os.system(close)
'''Open Mac apps'''
def open_apps(speech):
#Opening mac apps
if "itunes" in speech:
subprocess.call(["/usr/bin/open", "/Applications/iTunes.app"])
elif "skype" in speech:
subprocess.call(["/usr/bin/open", "/Applications/Skype.app"])
elif "evernote" in speech:
subprocess.call(["/usr/bin/open", "/Applications/Evernote.app"])
elif "spotify" in speech:
subprocess.call(["/usr/bin/open", "/Applications/Spotify.app"])
elif "messenger" in speech:
subprocess.call(["/usr/bin/open", "/Applications/Messenger.app"])
elif "trello" in speech:
subprocess.call(["/usr/bin/open", "/Applications/Paws for Trello.app"])
elif "text" in speech:
subprocess.call(["/usr/bin/open", "/Applications/TextEdit.app"])
elif "feedly" in speech:
subprocess.call(["/usr/bin/open", "/Applications/feedly.app"])
elif "whatsapp" in speech:
subprocess.call(["/usr/bin/open", "/Applications/WhatsApp.app"])
elif "fantastical" in speech:
subprocess.call(["/usr/bin/open", "/Applications/Fantastical 2.app"])
elif "facebook" in speech:
webbrowser.open("https://www.facebook.com/jean.c.tissier")
elif "reddit" in speech:
webbrowser.open("https://www.reddit.com")
elif "livescore" in speech:
webbrowser.open("https://www.livescore.com")
elif "gmail" in speech:
webbrowser.open("https://www.gmail.com")
'''Weather API data'''
def sunrise(data):
# sunrise time
print("Sunrise: " + datetime.datetime.fromtimestamp(data['sunrise_time']).strftime('%B %d %H:%M'))
speak("Sunrise will be at " + datetime.datetime.fromtimestamp(data['sunrise_time']).strftime('%I:%M %p'))
def sunset(data):
# sunset time
print("Sunset: " + datetime.datetime.fromtimestamp(data['sunset_time']).strftime('%B %d %H:%M'))
speak("Sunset will be at " + datetime.datetime.fromtimestamp(data['sunset_time']).strftime('%I:%M %p'))
def weather(speech, data, temp):
# includes today, tomorrow and forecast
weather_status = data['detailed_status'].strip("''")
if "weather" and "today" in speech:
# Today's weather
speak("Today's weather: " + weather_status)
speak("Temperature will average at " + str(round(temp['temp'])) + " Celcius")
elif "weather" and "forecast" in speech:
# Get Forecast for the next week
forecast_week = owm.daily_forecast("Vancouver,ca", limit=7)
f = forecast_week.get_forecast()
print("\nForecast for the next 7 days: ")
for weather in islice(f, 1, None):
unix_time = weather.get_reference_time('unix')
print("Date: " + datetime.datetime.fromtimestamp(unix_time).strftime('%B-%d') +
" Weather: " + weather.get_detailed_status())
elif "weather" and "tomorrow" in speech:
# Tomorrow's weather
forecast_week = owm.daily_forecast("Vancouver,ca", limit=2)
f = forecast_week.get_forecast()
print("\nTomorrow's Weather: ")
for weather in f:
unix_time = weather.get_reference_time('unix')
tomorrow_weather = (datetime.datetime.fromtimestamp(unix_time).strftime('%B-%d') +
" " + weather.get_detailed_status())
speak(tomorrow_weather)
def temperature(temp):
#TODO - add temp for today and tomorrow
# Temperature status
speak("Temperature will average at " + str(round(temp['temp'])) + " Celcius")
speak("Max Temperature will be " + str(round(temp['temp_max'])) + " Celcius")
def choose_weather(speech):
# weather report for Vancouver
observation = owm.weather_at_place('Vancouver,ca')
w = observation.get_weather()
data = json.loads(w.to_JSON())
temperature_data = w.get_temperature(unit='celsius')
# pick the right method
if "weather" in speech:
weather(speech, data, temperature_data)
elif "temperature" in speech:
temperature(temperature_data)
elif "sunrise" in speech:
sunrise(data)
elif "sunset" in speech:
sunset(data)
'''Provides dates information'''
def date(speech):
days = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
dayNumber = datetime.datetime.today().weekday()
if "today" in speech:
speak(days[dayNumber])
speak(datetime.datetime.now().strftime("%B %d"))
if "tomorrow" in speech:
dayNumber = dayNumber + 1
if dayNumber == 7:
speak("Monday")
speak((datetime.datetime.now() + datetime.timedelta(days=1)).strftime("%B %d"))
else:
speak(days[dayNumber])
speak((datetime.datetime.now() + datetime.timedelta(days=1)).strftime("%B %d"))
# Run these methods
if __name__ == "__main__":
run = login()
```
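The long if/elif chain in `voice()` dispatches on keywords; one way to flatten it is a keyword-to-handler table. This is only a sketch: the handler names are the functions defined above, and it assumes each takes the tokenized speech list as its single argument.
```python
# Hypothetical dispatch table for the keyword routing done in voice() above.
def dispatch(speech, handlers):
    """Call the first handler whose keyword appears in the tokenized speech."""
    for keyword, handler in handlers.items():
        if keyword in speech:
            handler(speech)
            return True
    return False

# Example wiring with functions from this script:
# handlers = {"send": messaging_app, "set": set_calendar, "cast": google_cast,
#             "close": close_apps, "open": open_apps, "weather": choose_weather,
#             "date": date}
# dispatch(speech, handlers)
```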
|
{
"source": "jctissier/myfootylinks",
"score": 2
}
|
#### File: app/stats/controllers.py
```python
import os
import json
import requests
import requests_cache
from flask import Blueprint, request, render_template, make_response, jsonify
from app.stats.standings import Standings
from app.stats.fixtures import Fixtures
from app.stats.topstats import TopStats
import app.content_management as content
from app.util import log, gzipped
# Define the blueprint: 'highlights', set its url prefix: app.url/highlights
stat = Blueprint('stats', __name__, url_prefix='/stats/')
""" Fixtures Static Template """
@stat.route("fixtures", methods=['GET'])
@gzipped
def fixtures_load_page():
"""
Static template for fixtures
:return: HTML template
"""
cards = content.cards()
response = make_response(render_template("stats/fixtures.html", cards=cards))
response.headers['Cache-Control'] = 'public, max-age=2628000'
return response
""" Fixtures """
@stat.route('fixtures-ajax', methods=['POST'])
@gzipped
def fixtures_ajax():
"""
AJAX requests to find fixtures
:return: JSON response with the list of fixtures content
"""
cache_db = "fixtures_cache.sqlite"
    requests_cache.install_cache('fixtures_cache', expire_after=1800)  # Cached for 30 mins
if os.path.isfile(cache_db) and request.form.get('scheduler'): # and value of league = epl? first one in the worker's recaching call
log("Replacing Fixtures Cache")
os.remove(cache_db)
league = request.form['league']
html = requests.get("https://footylinks.herokuapp.com/stats/rest-api/fixtures?league=" + league)
log("Used Cache for Fixtures " + league + ": {0}".format(html.from_cache))
try:
fixtures = html.json() # Retrieve JSON string from GET request
except json.JSONDecodeError: # If url/rest-api/highlights is down for some reason
        fixtures = Fixtures(fixture=league)  # Provide content without caching
fixtures = fixtures.get_fixtures()
# Create Flask response and add headers to optimize delivery
response = make_response(json.dumps(fixtures))
    response.headers['Cache-Control'] = 'public, max-age=1800'  # Cached for 30 min
return response
@stat.route('rest-api/fixtures', methods=['GET'])
@gzipped
def fixtures_api():
if request.args.get('league', None) is not None:
league = request.args.get('league', None)
fixtures = Fixtures(fixture=league)
if fixtures.check_valid_league():
return fixtures.get_fixtures()
else:
return jsonify({'status': 400, 'error': 'Wrong league argument found, Please try again.'})
else:
return jsonify({'status': 400, 'error': 'No league argument found. '
'Format should be "/rest-api/fixtures?league=league_name"'})
""" Stats Static Template"""
@stat.route("home", methods=['GET'])
@gzipped
def stats_homepage():
"""
Return information about a specific league: Standings, Top Scorers, Fixtures
:return: Stats static template page HTML
"""
response = make_response(render_template("stats/stat_home.html"))
return response
""" Stats -> Standings """
@stat.route('standings-ajax', methods=['POST'])
@gzipped
def standings_ajax():
"""
AJAX requests to find Standings
:return: JSON response with the list of fixtures content
"""
cache_db = "highlights_cache.sqlite"
requests_cache.install_cache('standings_cache', expire_after=1800) # Cached for 15 mins
# request.form.get('scheduler') # Make this true
if os.path.isfile(cache_db) and request.form.get('scheduler'):
log("Replacing Standings Cache")
os.remove(cache_db)
league = request.form['league']
html = requests.get("https://footylinks.herokuapp.com/stats/rest-api/standings?league=" + league)
log("Used Cache for Standings " + league + ": {0}".format(html.from_cache))
try:
standing = html.json() # Retrieve JSON string from GET request
except json.JSONDecodeError: # If url/rest-api/highlights is down for some reason
stats = Standings(league=league) # Provide content without caching
standing = stats.get_standings()
# Create Flask response and add headers to optimize delivery
response = make_response(json.dumps(standing))
    response.headers['Cache-Control'] = 'public, max-age=1800'  # Cached for 30 min
return response
@stat.route('rest-api/standings', methods=['GET'])
@gzipped
def standings_api():
if request.args.get('league', None) is not None:
league = request.args.get('league')
standing = Standings(league=league)
if standing.league_link is not None:
return standing.get_standings() # Already loaded into a JSON string
else:
return jsonify({'status': 400, 'error': 'Wrong league argument found, Please try again.'})
else:
return jsonify({
'status': 400,
'error': 'Missing the league argument, URL should look like "/rest-api/standings?league=league_name"'
})
""" Stats -> Top Scorers & Top Assists """
@stat.route('topstats-ajax', methods=['POST'])
@gzipped
def topstats_ajax():
"""
AJAX requests to find Top Scorers & Assists
:return: JSON response with the list of top scorers & assists
"""
league = request.form['league']
scorer = TopStats(league=league, scorer=True)
assist = TopStats(league=league, assist=True)
return jsonify({
'scorers': scorer.get_topstats(),
'assists': assist.get_topstats()
})
@stat.route('rest-api/topstats', methods=['GET'])
@gzipped
def topstats_api():
if request.args.get('league', None) is not None:
league = request.args.get('league')
scorer = TopStats(league=league, scorer=True)
assist = TopStats(league=league, assist=True)
if scorer.league_name is not None and assist.league_name is not None:
return jsonify({
'scorers': scorer.get_topstats(),
'assists': assist.get_topstats()
})
# One or both of the league arguments were not valid
else:
return jsonify({'status': 400, 'error': 'Wrong league argument found, Please try again.'})
else:
# No league arguments were included in the request
return jsonify({
'status': 400,
'error': 'Missing the league argument, URL should look like "/rest-api/topstats?league=league_name"'
})
```
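`fixtures_ajax` and `standings_ajax` repeat the same install-cache / maybe-invalidate / fetch sequence. A shared helper could look like the sketch below; the cache name, endpoint and 1800-second expiry are taken from the code above, and the function name is illustrative.
```python
# Hypothetical shared helper for the caching pattern repeated in the AJAX routes above.
import os
import requests
import requests_cache

def cached_league_request(cache_name, endpoint, league, force_refresh=False, expire_after=1800):
    cache_db = cache_name + ".sqlite"
    requests_cache.install_cache(cache_name, expire_after=expire_after)
    if force_refresh and os.path.isfile(cache_db):
        os.remove(cache_db)  # drop the backing file so the next call re-populates the cache
    return requests.get(endpoint + "?league=" + league)
```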
#### File: app/stats/fixtures.py
```python
import collections
import os.path
from datetime import datetime, timedelta
import json
import requests
import requests_cache
from bs4 import BeautifulSoup
from flask import jsonify
from app.util import log
# Fixtures request gets cached for 12 hours
# requests_cache.install_cache('get_fixtures', expire_after=43200)
class Fixtures(object):
"""
Retrieve fixtures about a specific league/competition
Options:
England
premierleague
fa-cup
efl-cup
Competitions
championsleague
uefa-europa-league
Leagues
laligafootball
ligue1football
bundesligafootball
serieafootball
International
euro-2016-qualifiers
friendlies
copa-america
world-cup-2018
"""
PRE_LINK = "https://www.theguardian.com/football/"
POST_LINK = "/fixtures"
CACHE_DB = 'get_fixtures.sqlite'
CHOICES = ['euro-2016-qualifiers', 'world-cup-2018-qualifiers', 'friendlies', 'serieafootball', 'bundesligafootball',
'ligue1football', 'laligafootball', 'uefa-europa-league', 'champions-league-qualifying', 'premierleague']
def __init__(self, fixture, **kwargs):
self.fixture_name = fixture
# self.scheduled_job = kwargs.get('scheduler', False)
def get_fixtures(self):
"""
Generate dynamic JSON with upcoming fixtures about a specific league/competition
:return: JSON content
"""
# If request is coming in for scheduler job (cached results) and db still exists and it's element 1 in list:
# - remove db file and re-cache request
# if self.scheduled_job and self.fixture_name == "premierleague":
# log("In cache removal!")
# if os.path.isfile(self.CACHE_DB):
# log("Removing cache")
# os.remove(self.CACHE_DB)
# Check if cache file exists
# log("Fixtures Cache Exists Pre-Request: {0}".format(os.path.isfile(self.CACHE_DB)))
html = requests.get(self.PRE_LINK + self.fixture_name + self.POST_LINK)
# Check if cache was used & if the cache file exists (True True or False False)
# log("Used Cache: {0}".format(html.from_cache))
# log("File Exists Post-Cache: {0}".format(os.path.isfile(self.CACHE_DB)))
soup = BeautifulSoup(html.content, "lxml")
divs = soup.find_all('div', {'class': 'football-matches__day'})
if len(divs) != 0:
data = collections.OrderedDict({})
for i, date in enumerate(divs):
if i < len(divs):
game_date = date.find('div', {'class': 'date-divider'}).text
# for each 'tr' extract all of the td's and extract what's necessary
temp_data = []
for x, game in enumerate(date.find_all('tr')):
if x > 0:
temp_data.append(self.extract_games_per_gameday(game))
# update the dict with scraped content
data.update({str(i): [{
"no_games": str(len(date.find_all('tr')) - 1),
"game_date": game_date.replace('2017', '').strip(),
"content": temp_data
}]
})
return json.dumps(data)
else:
# if there are no games scheduled
return jsonify({'status': 200, 'message': 'No Fixtures were found. Try again later.', 'error': True})
def check_valid_league(self):
return True if self.fixture_name in self.CHOICES else False
def extract_games_per_gameday(self, data):
"""
Extract relevant data from each game divs
:param data:
# - date of game --> td[0] Scraping Vancouver time, need UTC+1 for europe
# - home team crest --> td[1]
# - home team name --> td[2]
# - away team name --> td[2]
# - away team crest --> td[3]
:return: String (game date, home logo, home team, away team, away logo)
"""
game_detail = data.find_all('td')
g_date = self.get_game_date(game_detail[0])
h_crest = self.get_team_crests(game_detail[1])
h_name, a_name = self.get_team_names(game_detail[2])
a_crest = self.get_team_crests(game_detail[3])
return g_date, h_crest, h_name, a_name, a_crest
@staticmethod
def get_game_date(game_date):
"""
Extract game's date and time
"""
extract_date = game_date.time.get('datetime') # parse this
date = extract_date.split('T')[1].split('+')[0]
game = datetime.strptime(date, "%H:%M:%S") + timedelta(hours=-8) # Converts to Vancouver Time
return str(format(game, "%H:%M %p"))
@staticmethod
def get_team_names(game_info):
"""
Extract team names
"""
team_names = game_info.find_all('span')
h_name = team_names[0].text
a_name = team_names[1].text
return h_name, a_name
@staticmethod
def get_team_crests(team_crest):
"""
Extract team logo
"""
return team_crest.span.get('style')[22:-2] # extract team logo
# Creating a Fixture object instance
# if __name__ == "__main__":
# test = Fixtures(fixture="premierleague", scheduler=True)
# test.get_fixtures()
```
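`get_game_date` converts kick-off times to Vancouver with a fixed `timedelta(hours=-8)`, which is off by an hour during daylight saving time. A DST-aware sketch using the standard-library `zoneinfo` module (Python 3.9+) is shown below; it assumes the scraped `datetime` attribute is an ISO-8601 string with an offset, as in the parsing above.
```python
# Hypothetical DST-aware variant of Fixtures.get_game_date using zoneinfo.
from datetime import datetime
from zoneinfo import ZoneInfo

def game_time_local(iso_datetime, tz_name="America/Vancouver"):
    # e.g. iso_datetime = "2017-08-12T15:00:00+01:00" (as scraped from the time tag)
    kickoff = datetime.fromisoformat(iso_datetime)
    return kickoff.astimezone(ZoneInfo(tz_name)).strftime("%I:%M %p")
```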
#### File: jctissier/myfootylinks/locustfile.py
```python
from locust import HttpLocust, TaskSet, task
class UserBehavior(TaskSet):
@task
def get_something(self):
self.client.get("")
class WebsiteUser(HttpLocust):
task_set = UserBehavior
```
|
{
"source": "jctissier/Youtube_Vid_Downloader",
"score": 3
}
|
#### File: jctissier/Youtube_Vid_Downloader/download_playlist.py
```python
import webbrowser
from termcolor import cprint, colored
import requests
import warnings
from bs4 import BeautifulSoup
from pprint import pprint
import time
# Globals
main_link = 'http://www.youtube.com'
mp3_download_link = 'http://www.youtubeinmp3.com/download/?video='
mp3_download_info = 'http://www.youtubeinmp3.com/fetch/?format=JSON&bitrate=1&filesize=1&video='
extension = '&autostart=1'
def main():
    # Ask for the playlist/video URL up front; it is used throughout main()
    playlist_link = input('YouTube playlist or video link: ')
    r = requests.get(playlist_link)
soup = BeautifulSoup(r.content, "lxml")
if "list" in playlist_link:
print("Request to download whole list")
download_playlist_info(soup)
else:
print("Go to single download")
def download_playlist_info(soup):
try:
# Playlist info
length = soup.find_all("span", {"id": "playlist-length"})[0] # Get playlist length
list_length = int(length.text.replace("videos", ""))
print("{:<20}{:<20}".format("Playlist Length: ", colored(str(list_length) + " videos",'red')))
title = soup.find_all("h3", {"class": "playlist-title"})[0].text
print("{:<20}{:<20}".format('Playlist Title: ', colored(title.strip(),'red')))
author = soup.find_all("li", {"class": "author-attribution"})[0].text
print("{:<20}{:<20}".format('Playlist Author: ', colored(author.strip(),'red')))
download_playlist(soup, start=0, playlist_size=list_length)
except IndexError:
print("This is not a playlist, redirect to method that downloads one song")
def download_playlist(soup, start, playlist_size):
try:
for x in range(start, playlist_size):
href = soup.find_all("a", {"class": "yt-uix-sessionlink spf-link playlist-video clearfix spf-link "})[x]
# print(href.get('href'))
song_info(href_string=href, index=x)
download_songs(href_string=href)
except IndexError:
print("Video #" + str(x+1) + " in the playlist has been removed or is not valid...")
download_playlist(soup, start=x+1, playlist_size=playlist_size)
def song_info(href_string, index):
# JSON parser to get media info
r = requests.get(mp3_download_info + str(href_string))
data = r.json()
title = data['title']
length = time.strftime("%M:%S", time.gmtime(int(data['length'])))
file_size = int(data['filesize']) / 1000000
bitrate = data['bitrate']
cprint("\nDownloading Song: #" + str(index + 1), 'yellow')
print("{:<20}".format("Title:") + colored(title, 'blue'))
print("{:<20}".format("Length:") + colored(length, 'blue'))
print("{:<20}".format("Size:") + colored("{0:.1f}".format(file_size) + "MB", 'blue'))
print("{:<20}".format("Bitrate:") + colored(bitrate + "kbps", 'blue'))
if index == 0:
pass
elif index == 10 or index == 20 or index == 30 or index == 40:
time.sleep(10)
else:
time.sleep(3)
def download_songs(href_string):
full_link = mp3_download_link + str(href_string)
r = requests.get(full_link)
soup = BeautifulSoup(r.content, "lxml")
href = soup.find_all("a", {"id": "download"})
href_link = href[0].get('href')
print(main_link + href_link)
webbrowser.open_new_tab(main_link + href_link)
main()
```
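The script decides between a playlist and a single video by checking whether the substring `"list"` appears in the URL. A slightly more robust check parses the query string; the helper below is a sketch using only the standard library.
```python
# Hypothetical helper: extract the playlist ID from a YouTube URL instead of
# relying on a substring check like `"list" in playlist_link`.
from urllib.parse import urlparse, parse_qs

def playlist_id(url):
    ids = parse_qs(urlparse(url).query).get("list")
    return ids[0] if ids else None

# playlist_id("https://www.youtube.com/watch?v=abc&list=PL123")  -> "PL123"
# playlist_id("https://www.youtube.com/watch?v=abc")             -> None
```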
|
{
"source": "jctkorp/sslcommerce",
"score": 2
}
|
#### File: templates/pages/sslcommerz_payment_failed.py
```python
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, cint
import json
from six import string_types
from frappe.integrations.utils import make_post_request
no_cache = 1
def get_context(context):
args = frappe._dict(frappe.local.form_dict)
frappe.log_error(args,"response")
reference_docname = frappe.db.get_value("Integration Request", args.get("tran_id"),"reference_docname")
if frappe.session.user == "Guest":
payment_request = frappe.get_doc("Payment Request",reference_docname)
reference_doc = frappe.get_doc(payment_request.reference_doctype,payment_request.reference_name)
if payment_request.reference_doctype == "Sales Order":
user = frappe.db.get_value("Contact",reference_doc.contact_person,"user")
frappe.local.login_manager.user = user
frappe.local.login_manager.post_login()
```
|
{
"source": "JCTLearning/Project-Runner",
"score": 3
}
|
#### File: Project-Runner/beta/fetchDb.py
```python
from oauth2client.service_account import ServiceAccountCredentials
import gspread
import sqlite3 as lite
class gdrive:
def __init__(self):
pass
def getValues(self):
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name('authfile.json', scope)
gc = gspread.authorize(credentials)
sheet = '13UTcj1AKMIZ-cCYlKQVIxaYdr8TmOuX43HVw0l0KYmE'
wks = gc.open_by_key(sheet)
worksheet = wks.worksheet("VDOT")
conn = lite.connect('vdotDb.db')
c = conn.cursor()
c.execute("create table vdot(vdot str, m1500 str, m1600 str, mile str, m3000 str, m3200 str, mile2 str, m5000 str)")
x = 1
y = 58
#values
while(x!=y):
data = worksheet.row_values(x)
if(data[0]=="VDOT"):
pass
else:
vdotNum = data[0]
data1500 = data[1]
data1600 = data[2]
dataMile = data[3]
data3000M = data[4]
data3200 = data[5]
data2mile = data[6]
data5000 = data[7]
c.execute("insert into vdot(vdot, m1500, m1600, mile, m3000, m3200, mile2, m5000) values (?, ?, ?, ?, ?, ?, ?, ?)", (vdotNum,data1500,data1600,dataMile,data3000M, data3200 ,data2mile,data5000 ))
print("VDOT: "+vdotNum+" m1500: "+data1500+" m1600: "+data1600+" Mile: "+dataMile+" m3000: "+data3000M+" m3200"+data3200+" Two Mile: "+data2mile+" m5000: "+data5000)
x = x + 1
conn.commit()
gdriveC = gdrive()
gdriveC.getValues()
```
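`getValues()` issues one Sheets API request per row and one INSERT per row. gspread's `get_all_values()` fetches the worksheet in a single call and sqlite3's `executemany()` batches the inserts; a sketch combining the two is below, assuming the same eight-column layout as the table created above.
```python
# Hypothetical batched variant of gdrive.getValues(): one API call, one executemany().
def get_values_batched(worksheet, conn):
    rows = worksheet.get_all_values()                      # single Sheets API request
    data = [r[:8] for r in rows if r and r[0] != "VDOT"]   # skip the header row
    c = conn.cursor()
    c.executemany(
        "insert into vdot(vdot, m1500, m1600, mile, m3000, m3200, mile2, m5000) "
        "values (?, ?, ?, ?, ?, ?, ?, ?)", data)
    conn.commit()
```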
#### File: old/Basics/exampleButtons.py
```python
from tkinter import *
isOpen = True
x = True
"""This Script is the Basic logic for the GUI (It will obviously have
custom buttons and not be default buttons. Just created this to refer
to creating buttons. We aren't using Datalists. We'll be putting their
inputs into a .json file so we can call it back and average the numbers.
"""
class Window(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.master = master
self.init_window()
def init_window(self):
self.master.title("Track Thingy")
self.pack(fill=BOTH, expand=1)
#Creating Buttons for the GUI
addData = Button(self, text="Inputs", command=self.cstop)
stopProcess = Button(self, text = "Are you done?", command=self.output)
#Setting Position of Buttons on the geom.
addData.place(x = 5, y=0)
stopProcess.place(x = 450, y = 0)
def cstop(self):
"""Minor error (Forever loops and you can't clikck the
gui
"""
        x = False
        self.rTime = []
        self.rName = []
        while isOpen == True:
            self.runnerName = input("[Runner Name]: ")
            self.runnerTime = input("[Runner Time]: ")
            self.rTime.append(self.runnerTime)
            self.rName.append(self.runnerName)
print(self.rTime)
print(self.rName)
    def output(self):
        global isOpen
        if x == False:
            isOpen = False
root = Tk()
#size of the window
root.geometry("500x500")
app = Window(root)
root.mainloop()
```
#### File: old/Basics/testFileCreation.py
```python
import sqlite3 as lite
#Script to write our test data
conn = lite.connect('test.db')
c = conn.cursor()
def start():
print("Starting the program that allows you to insert data into a test data base file")
fileCreation()
def fileCreation():
print("Creating the database file")
print("Connected to file")
c.execute("CREATE TABLE Identificaiton(RunnerID TEXT, RunnerName TEXT)")
c.execute("CREATE TABLE Stats(Race1 TEXT, Race2 TEXT, Race3 TEXT, Race4 TEXT, Race5)")
c.execute("CREATE TABLE Team(Avg TEXT)")
print("Done with file creation")
fileManipulation()
def fileManipulation():
#It's a test file you don't need this many damn variables but ok
print("Starting file manipulation")
a = str(1)
b = "<NAME>"
c.execute("INSERT INTO Identification(RunnerID, RunnerName) VALUES('"+a+"', '"+b+"')")
d = "5:50"
e = "9:50"
f = "10:30"
pb = "9:50"
mile = "9:02"
c.execute("INSERT INTO Stats(Race1, Race2, Race3, Race4, Race5) VALUES('"+d+"', '"+e+"', '"+f+"', '"+pb+"', '"+mile+"')")
ravg = "10:02"
mavg = "8:30"
c.execute("INSERT INTO Team(Avg) VALUES('"+ravg+"', '"+mavg+"')")
print("done")
start()
```
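The inserts above splice values into the SQL string by hand. sqlite3 supports `?` placeholders, which handle quoting for you; a minimal sketch for the Identification table:
```python
# Hypothetical parameterized insert for the Identification table created above.
import sqlite3 as lite

def insert_runner(conn, runner_id, runner_name):
    c = conn.cursor()
    c.execute("INSERT INTO Identification(RunnerID, RunnerName) VALUES (?, ?)",
              (str(runner_id), runner_name))
    conn.commit()
```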
#### File: Project-Runner/Server/vdotTest.py
```python
import sqlite3 as lite
import gspread
from oauth2client.service_account import ServiceAccountCredentials
class vdot:
def __init__(self):
print('[-- VDOT CALLED --]')
def vdotMiles(self, runnerTime):
x, y = runnerTime.split(':')
runnerTime = int(x) * 60
runnerTime = runnerTime + int(y)
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name('authfile.json', scope)
gc = gspread.authorize(credentials)
sheet = '13UTcj1AKMIZ-cCYlKQVIxaYdr8TmOuX43HVw0l0KYmE'
wks = gc.open_by_key(sheet)
worksheet = wks.worksheet("VDOT")
db = []
vdotNum = 85
loopNum = 0
for items in worksheet.col_values(4): #4 is miles
if(items):
if(items=='Mile'): # gets rid of row one -- mile
pass
else:
#print(items)
listV = []
listV.insert(0, items)
listV.insert(1, vdotNum)
db.insert(loopNum, listV)
vdotNum = vdotNum - 1
loopNum = loopNum + 1
else:
break
Vmiles = runnerTime # User mile time
output = []
x = 0
for miles in db:
#print(miles)
#print('x'+miles[0])
x, y = miles[0].split(':')
mileTime = int(x)*60
mileTime = mileTime + int(y)
print('VDOTNUMBER: '+str(miles[1])+' ITS SECONDS: '+str(mileTime))
list = []
"""
x is the math
"""
x = int(Vmiles) - int(mileTime)
x = str(x)
x = x.replace('-', '')
print('USERTIME: '+str(Vmiles)+' - VDOT SECONDS: '+ str(mileTime)+' THE DISTANCE BETWEEN THE TWO: '+str(x))
x = int(x)
list.insert(0, x) #subtracted time
list.insert(1, miles[1]) #vdot
output.insert(x, list)
x = x + 1
"""
        Sorting -- what needs to happen here is that each file is deleted when the script runs, but we store the 1500.db in the user file. Let's also name it mile or km depending on the value.
"""
conn = lite.connect('miles.db')
c = conn.cursor()
c.execute("create table data(vdot int, time int)")
for items in output:
#each item is now a list that contains time and vdot soooo
c.execute("insert into data(vdot, time) values (?, ?)", (items[1], items[0]) )
conn.commit()
c.close()
#grab Data
conn = lite.connect('miles.db')
c = conn.cursor()
c.execute('select vdot, time from data order by time asc')
returnSql = c.fetchall()
vdot = returnSql[0] #selects the first value
return vdot[0] #this is the vdot for that number -- just returns the vdot number
"""
runnerTime = input('Mile Time: ')
x, y = runnerTime.split(':')
runnerTime = int(x) * 60
runnerTime = runnerTime + int(y)
vdotC = vdot()
selfs = None
vdotNum = vdot.vdotMiles(selfs, runnerTime)
print(vdotNum)
print('The VDOT for time: '+str(runnerTime)+' is: '+str(vdotNum[0]))
"""
def vdot1500(self, runnerTime):
x, y = runnerTime.split(':')
runnerTime = int(x) * 60
runnerTime = runnerTime + int(y)
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name('authfile.json', scope)
gc = gspread.authorize(credentials)
sheet = '<KEY>'
wks = gc.open_by_key(sheet)
worksheet = wks.worksheet("VDOT")
db = []
vdotNum = 85
loopNum = 0
        for items in worksheet.col_values(2):  # column 2 holds the 1500 m times
if(items):
if(items=='Mile'): # gets rid of row one -- mile
pass
else:
#print(items)
listV = []
listV.insert(0, items)
listV.insert(1, vdotNum)
db.insert(loopNum, listV)
vdotNum = vdotNum - 1
loopNum = loopNum + 1
else:
break
Vmiles = runnerTime # User mile time
output = []
x = 0
for miles in db:
#print(miles)
#print('x'+miles[0])
x, y = miles[0].split(':')
mileTime = int(x)*60
mileTime = mileTime + int(y)
print('VDOTNUMBER: '+str(miles[1])+' ITS SECONDS: '+str(mileTime))
list = []
"""
x is the math
"""
x = int(Vmiles) - int(mileTime)
x = str(x)
x = x.replace('-', '')
print('USERTIME: '+str(Vmiles)+' - VDOT SECONDS: '+ str(mileTime)+' THE DISTANCE BETWEEN THE TWO: '+str(x))
x = int(x)
list.insert(0, x) #subtracted time
list.insert(1, miles[1]) #vdot
output.insert(x, list)
x = x + 1
"""
Sorting -- what need to happen here is that each file is deleted upon the script running, but we store the 1500.db in the user fiie. Lets also name it mile or km depending on value.
"""
conn = lite.connect('1500.db')
c = conn.cursor()
c.execute("create table data(vdot int, time int)")
for items in output:
#each item is now a list that contains time and vdot soooo
c.execute("insert into data(vdot, time) values (?, ?)", (items[1], items[0]) )
conn.commit()
c.close()
#grab Data
conn = lite.connect('1500.db')
c = conn.cursor()
c.execute('select vdot, time from data order by time asc')
returnSql = c.fetchall()
vdot = returnSql[0] #selects the first value
return vdot[0] #this is the vdot for that number -- just returns the vdot number
"""
runnerTime = input('Mile Time: ')
x, y = runnerTime.split(':')
runnerTime = int(x) * 60
runnerTime = runnerTime + int(y)
vdotC = vdot()
selfs = None
vdotNum = vdot.vdotMiles(selfs, runnerTime)
print(vdotNum)
print('The VDOT for time: '+str(runnerTime)+' is: '+str(vdotNum[0]))
"""
def vdot1600(self, runnerTime):
x, y = runnerTime.split(':')
runnerTime = int(x) * 60
runnerTime = runnerTime + int(y)
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name('authfile.json', scope)
gc = gspread.authorize(credentials)
sheet = '13UTcj1AKMIZ-cCYlKQVIxaYdr8TmOuX43HVw0l0KYmE'
wks = gc.open_by_key(sheet)
worksheet = wks.worksheet("VDOT")
db = []
vdotNum = 85
loopNum = 0
        for items in worksheet.col_values(3):  # column 3 holds the 1600 m times
if(items):
if(items=='Mile'): # gets rid of row one -- mile
pass
else:
#print(items)
listV = []
listV.insert(0, items)
listV.insert(1, vdotNum)
db.insert(loopNum, listV)
vdotNum = vdotNum - 1
loopNum = loopNum + 1
else:
break
Vmiles = runnerTime # User mile time
output = []
x = 0
for miles in db:
#print(miles)
#print('x'+miles[0])
x, y = miles[0].split(':')
mileTime = int(x)*60
mileTime = mileTime + int(y)
print('VDOTNUMBER: '+str(miles[1])+' ITS SECONDS: '+str(mileTime))
list = []
"""
x is the math
"""
x = int(Vmiles) - int(mileTime)
x = str(x)
x = x.replace('-', '')
print('USERTIME: '+str(Vmiles)+' - VDOT SECONDS: '+ str(mileTime)+' THE DISTANCE BETWEEN THE TWO: '+str(x))
x = int(x)
list.insert(0, x) #subtracted time
list.insert(1, miles[1]) #vdot
output.insert(x, list)
x = x + 1
"""
Sorting -- what need to happen here is that each file is deleted upon the script running, but we store the 1600.db in the user fiie. Lets also name it mile or km depending on value.
"""
conn = lite.connect('1600.db')
c = conn.cursor()
c.execute("create table data(vdot int, time int)")
for items in output:
#each item is now a list that contains time and vdot soooo
c.execute("insert into data(vdot, time) values (?, ?)", (items[1], items[0]) )
conn.commit()
c.close()
#grab Data
conn = lite.connect('1600.db')
c = conn.cursor()
c.execute('select vdot, time from data order by time asc')
returnSql = c.fetchall()
vdot = returnSql[0] #selects the first value
return vdot[0] #this is the vdot for that number -- just returns the vdot number
def vdot3000M(self, runnerTime):
x, y = runnerTime.split(':')
runnerTime = int(x) * 60
runnerTime = runnerTime + int(y)
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name('authfile.json', scope)
gc = gspread.authorize(credentials)
sheet = '13UTcj1AKMIZ-cCYlKQVIxaYdr8TmOuX43HVw0l0KYmE'
wks = gc.open_by_key(sheet)
worksheet = wks.worksheet("VDOT")
db = []
vdotNum = 85
loopNum = 0
        for items in worksheet.col_values(5):  # column 5 holds the 3000 m times (per the sheet layout in fetchDb.py)
if(items):
if(items=='Mile'): # gets rid of row one -- mile
pass
else:
#print(items)
listV = []
listV.insert(0, items)
listV.insert(1, vdotNum)
db.insert(loopNum, listV)
vdotNum = vdotNum - 1
loopNum = loopNum + 1
else:
break
Vmiles = runnerTime # User mile time
output = []
x = 0
for miles in db:
#print(miles)
#print('x'+miles[0])
x, y = miles[0].split(':')
mileTime = int(x)*60
mileTime = mileTime + int(y)
print('VDOTNUMBER: '+str(miles[1])+' ITS SECONDS: '+str(mileTime))
list = []
"""
x is the math
"""
x = int(Vmiles) - int(mileTime)
x = str(x)
x = x.replace('-', '')
print('USERTIME: '+str(Vmiles)+' - VDOT SECONDS: '+ str(mileTime)+' THE DISTANCE BETWEEN THE TWO: '+str(x))
x = int(x)
list.insert(0, x) #subtracted time
list.insert(1, miles[1]) #vdot
output.insert(x, list)
x = x + 1
"""
Sorting -- what need to happen here is that each file is deleted upon the script running, but we store the 1600.db in the user fiie. Lets also name it mile or km depending on value.
"""
conn = lite.connect('3000M.db')
c = conn.cursor()
c.execute("create table data(vdot int, time int)")
for items in output:
#each item is now a list that contains time and vdot soooo
c.execute("insert into data(vdot, time) values (?, ?)", (items[1], items[0]) )
conn.commit()
c.close()
#grab Data
conn = lite.connect('3000M.db')
c = conn.cursor()
c.execute('select vdot, time from data order by time asc')
returnSql = c.fetchall()
vdot = returnSql[0] #selects the first value
return vdot[0] #this is the vdot for that number -- just returns the vdot number
def vdot3200(self, runnerTime):
x, y = runnerTime.split(':')
runnerTime = int(x) * 60
runnerTime = runnerTime + int(y)
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name('authfile.json', scope)
gc = gspread.authorize(credentials)
sheet = '13UTcj1AKMIZ-cCYlKQVIxaYdr8TmOuX43HVw0l0KYmE'
wks = gc.open_by_key(sheet)
worksheet = wks.worksheet("VDOT")
db = []
vdotNum = 85
loopNum = 0
        for items in worksheet.col_values(6):  # column 6 holds the 3200 m times
if(items):
if(items=='Mile'): # gets rid of row one -- mile
pass
else:
#print(items)
listV = []
listV.insert(0, items)
listV.insert(1, vdotNum)
db.insert(loopNum, listV)
vdotNum = vdotNum - 1
loopNum = loopNum + 1
else:
break
Vmiles = runnerTime # User mile time
output = []
x = 0
for miles in db:
#print(miles)
#print('x'+miles[0])
x, y = miles[0].split(':')
mileTime = int(x)*60
mileTime = mileTime + int(y)
print('VDOTNUMBER: '+str(miles[1])+' ITS SECONDS: '+str(mileTime))
list = []
"""
x is the math
"""
x = int(Vmiles) - int(mileTime)
x = str(x)
x = x.replace('-', '')
print('USERTIME: '+str(Vmiles)+' - VDOT SECONDS: '+ str(mileTime)+' THE DISTANCE BETWEEN THE TWO: '+str(x))
x = int(x)
list.insert(0, x) #subtracted time
list.insert(1, miles[1]) #vdot
output.insert(x, list)
x = x + 1
"""
Sorting -- what need to happen here is that each file is deleted upon the script running, but we store the 1600.db in the user fiie. Lets also name it mile or km depending on value.
"""
conn = lite.connect('3200.db')
c = conn.cursor()
c.execute("create table data(vdot int, time int)")
for items in output:
#each item is now a list that contains time and vdot soooo
c.execute("insert into data(vdot, time) values (?, ?)", (items[1], items[0]) )
conn.commit()
c.close()
#grab Data
conn = lite.connect('3200.db')
c = conn.cursor()
c.execute('select vdot, time from data order by time asc')
returnSql = c.fetchall()
vdot = returnSql[0] #selects the first value
return vdot[0] #this is the vdot for that number -- just returns the vdot number
def vdotMileTwo(self, runnerTime):
x, y = runnerTime.split(':')
runnerTime = int(x) * 60
runnerTime = runnerTime + int(y)
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name('authfile.json', scope)
gc = gspread.authorize(credentials)
sheet = '13UTcj1AKMIZ-cCYlKQVIxaYdr8TmOuX43HVw0l0KYmE'
wks = gc.open_by_key(sheet)
worksheet = wks.worksheet("VDOT")
db = []
vdotNum = 85
loopNum = 0
        for items in worksheet.col_values(7):  # column 7 holds the two-mile times
if(items):
if(items=='Mile'): # gets rid of row one -- mile
pass
else:
#print(items)
listV = []
listV.insert(0, items)
listV.insert(1, vdotNum)
db.insert(loopNum, listV)
vdotNum = vdotNum - 1
loopNum = loopNum + 1
else:
break
Vmiles = runnerTime # User mile time
output = []
x = 0
for miles in db:
#print(miles)
#print('x'+miles[0])
x, y = miles[0].split(':')
mileTime = int(x)*60
mileTime = mileTime + int(y)
print('VDOTNUMBER: '+str(miles[1])+' ITS SECONDS: '+str(mileTime))
list = []
"""
x is the math
"""
x = int(Vmiles) - int(mileTime)
x = str(x)
x = x.replace('-', '')
print('USERTIME: '+str(Vmiles)+' - VDOT SECONDS: '+ str(mileTime)+' THE DISTANCE BETWEEN THE TWO: '+str(x))
x = int(x)
list.insert(0, x) #subtracted time
list.insert(1, miles[1]) #vdot
output.insert(x, list)
x = x + 1
"""
Sorting -- what need to happen here is that each file is deleted upon the script running, but we store the 1600.db in the user fiie. Lets also name it mile or km depending on value.
"""
conn = lite.connect('mileTwo.db')
c = conn.cursor()
c.execute("create table data(vdot int, time int)")
for items in output:
#each item is now a list that contains time and vdot soooo
c.execute("insert into data(vdot, time) values (?, ?)", (items[1], items[0]) )
conn.commit()
c.close()
#grab Data
conn = lite.connect('mileTwo.db')
c = conn.cursor()
c.execute('select vdot, time from data order by time asc')
returnSql = c.fetchall()
vdot = returnSql[0] #selects the first value
return vdot[0] #this is the vdot for that number -- just returns the vdot number
def vdot5000M(self, runnerTime):
x, y = runnerTime.split(':')
runnerTime = int(x) * 60
runnerTime = runnerTime + int(y)
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name('authfile.json', scope)
gc = gspread.authorize(credentials)
sheet = '13UTcj1AKMIZ-cCYlKQVIxaYdr8TmOuX43HVw0l0KYmE'
wks = gc.open_by_key(sheet)
worksheet = wks.worksheet("VDOT")
db = []
vdotNum = 85
loopNum = 0
        for items in worksheet.col_values(8):  # column 8 holds the 5000 m times
if(items):
if(items=='Mile'): # gets rid of row one -- mile
pass
else:
#print(items)
listV = []
listV.insert(0, items)
listV.insert(1, vdotNum)
db.insert(loopNum, listV)
vdotNum = vdotNum - 1
loopNum = loopNum + 1
else:
break
Vmiles = runnerTime # User mile time
output = []
x = 0
for miles in db:
#print(miles)
#print('x'+miles[0])
x, y = miles[0].split(':')
mileTime = int(x)*60
mileTime = mileTime + int(y)
print('VDOTNUMBER: '+str(miles[1])+' ITS SECONDS: '+str(mileTime))
list = []
"""
x is the math
"""
x = int(Vmiles) - int(mileTime)
x = str(x)
x = x.replace('-', '')
print('USERTIME: '+str(Vmiles)+' - VDOT SECONDS: '+ str(mileTime)+' THE DISTANCE BETWEEN THE TWO: '+str(x))
x = int(x)
list.insert(0, x) #subtracted time
list.insert(1, miles[1]) #vdot
output.insert(x, list)
x = x + 1
"""
Sorting -- what need to happen here is that each file is deleted upon the script running, but we store the 1600.db in the user fiie. Lets also name it mile or km depending on value.
"""
conn = lite.connect('5000m.db')
c = conn.cursor()
c.execute("create table data(vdot int, time int)")
for items in output:
#each item is now a list that contains time and vdot soooo
c.execute("insert into data(vdot, time) values (?, ?)", (items[1], items[0]) )
conn.commit()
c.close()
#grab Data
conn = lite.connect('5000m.db')
c = conn.cursor()
c.execute('select vdot, time from data order by time asc')
returnSql = c.fetchall()
vdot = returnSql[0] #selects the first value
return vdot[0] #this is the vdot for that number -- just returns the vdot number
```
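The seven `vdot*` methods above are copies of the same lookup with a different spreadsheet column and scratch database. A single parametrized version is sketched below; the column numbers follow the layout written by fetchDb.py (2 = 1500 m, 3 = 1600 m, 4 = mile, 5 = 3000 m, 6 = 3200 m, 7 = two mile, 8 = 5000 m), and skipping the first row as the header is an assumption.
```python
# Hypothetical single lookup replacing the per-distance vdot* methods above.
def vdot_lookup(worksheet, runner_time, column):
    minutes, seconds = runner_time.split(':')
    target = int(minutes) * 60 + int(seconds)
    best_vdot, best_diff = None, None
    vdot = 85                                  # the table starts at VDOT 85, as above
    for i, value in enumerate(worksheet.col_values(column)):
        if not value:
            break                              # stop at the first empty cell
        if i == 0:
            continue                           # assumed header row
        m, s = value.split(':')
        diff = abs(target - (int(m) * 60 + int(s)))
        if best_diff is None or diff < best_diff:
            best_vdot, best_diff = vdot, diff
        vdot -= 1
    return best_vdot
```
This keeps the "closest time wins" behaviour of the originals without the intermediate sqlite files.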
|
{
"source": "JCTLearning/W4lT0R-APA",
"score": 3
}
|
#### File: W4lT0R-APA/testAndExamples/exampleBrain.py
```python
from xml.etree import ElementTree
class start:
def __init__(self):
self.xs = '0'
def main(self):
self.userInput = input('[WALTOR]: ')
print('This is a test to see how the brain *could* work')
print('importing the xml sheet')
with open('data.xml', 'rt') as files:
tree = ElementTree.parse(files)
#Check if input is in command list
brainC = brain()
self.x = brainC.checkCommand(self.userInput)
if(self.x=='True'):
#input is a command
self.command = brainC.fetchCommand(self.userInput)
            print('exeCommand: ' + self.userInput)
#toss the command into a command execution script? or maybe execute a file with said name
if(self.x=='False'):
# is just a word
self.resp = brainC.fetchResp(self.userInput)
            if(self.resp != 'Not Found'):
print(self.resp)
else:
print('Command and responses not found')
class brain:
def __init__(self):
self.xs = 1
#print('I should build xml here but ehhhhh')
def checkCommand(self, userInput):
with open('data.xml', 'rt') as f:
tree = ElementTree.parse(f)
for path in[ './commands/resp' ]:
node = tree.find(path)
if(userInput==node.text):
self.check = 't'
return "True"
else:
self.check = 'f'
if(self.check=='f'): # only report a miss after the loop finishes; a match returns immediately above
return "False"
def fetchResp(self, userInput):
with open('data.xml', 'rt') as f:
tree = ElementTree.parse(f)
for path in[ './responses/resp' ]:
node = tree.find(path)
if(userInput==node.text):
self.check = 't'
return node.tail
else:
self.check = 'f'
if(self.check=='f'):
return "Not Found"
startC = start()
startC.main()
```
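The example never shows `data.xml`, so the layout below is a guess inferred from the paths it queries: `./commands/resp` elements hold command words in their text, and `./responses/resp` elements hold trigger words in their text with the reply stored in the element tail. The sketch also uses `findall()` so every entry is checked, whereas `find()` in the code above only ever inspects the first match.
```python
# Hypothetical data.xml consistent with the lookups above; the real file may differ.
from xml.etree import ElementTree

DATA = """<data>
  <commands><resp>shutdown</resp></commands>
  <responses><resp>hello</resp>Hi there!</responses>
</data>"""

root = ElementTree.fromstring(DATA)

def fetch_resp(user_input):
    # findall() walks every <resp> entry instead of only the first one
    for node in root.findall('./responses/resp'):
        if user_input == node.text:
            return node.tail
    return "Not Found"

print(fetch_resp("hello"))  # -> Hi there!
```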
|
{
"source": "jctt1983/flask-widgets",
"score": 2
}
|
#### File: flask-widgets/example/app.py
```python
from flask import Flask, render_template
from flask.ext.widgets import Widgets
from flask.ext.cache import Cache
from datetime import datetime
app = Flask(__name__)
app.config.update({
'DEBUG': True,
'CACHE_TYPE': 'redis',
'CACHE_REDIS_URL': 'redis://localhost:6379/2'
})
cache = Cache(app)
widgets = Widgets(app, cache=cache)
@widgets.widget('title')
def title():
return 'Flask-Widget example'
@widgets.widget('say')
def say(msg):
return 'says %s!<br>Last cached time for this widget was: %s' % (msg, datetime.now())
@widgets.position('header', order=100)
def hello_world():
return {'greeting': 'HELLO WORLD'}
@widgets.position('header')
def welcome():
return '<h1>Welcome to Flask-Widgets</h1>'
@widgets.position('footer')
def welcome():
return 'Flask-Widgets by <NAME><br>Last cached time for this widget was: %s' % datetime.now()
@app.route('/')
def index():
return render_template('index.html')
if __name__ == '__main__':
app.run()
```
|
{
"source": "JCube001/calc-grako-example",
"score": 2
}
|
#### File: calc-grako-example/calc/__main__.py
```python
from calc import CalcParser
from grako.exceptions import FailedParse
from decimal import Decimal
__version__ = '1.0.1'
class CalcSemantics:
def expression(self, ast):
result = ast.head
for node in ast.tail:
op = node[0]
rhs = node[1]
if '+' == op:
result += rhs
elif '-' == op:
result -= rhs
return result
def term(self, ast):
result = ast.head
for node in ast.tail:
op = node[0]
rhs = node[1]
if '*' == op:
result *= rhs
elif '/' == op:
result /= rhs
return result
def power(self, ast):
result = ast.head
for node in ast.tail:
result = result ** node[1]
return result
def factor(self, ast):
return ast
def negative(self, ast):
return -ast
def number(self, ast):
return Decimal(ast)
def main():
print('Welcome to Calc v{}'.format(__version__))
try:
parser = CalcParser(semantics=CalcSemantics())
while True:
try:
text = input('> ')
if text:
print(parser.parse(text))
except FailedParse:
print('Invalid syntax')
except EOFError:
pass
except KeyboardInterrupt:
print()
print('Bye')
if __name__ == '__main__':
main()
```
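Each semantics method above receives an AST with a `head` operand and a `tail` of (operator, operand) pairs and folds them left to right. The stand-alone snippet below illustrates that fold with a mocked-up AST; it is not taken from the repository, and the exact AST objects grako produces may differ.
```python
# Stand-alone illustration of the head/tail fold used by CalcSemantics.expression.
from collections import namedtuple
from decimal import Decimal

AST = namedtuple('AST', 'head tail')

def fold_expression(ast):
    result = ast.head
    for op, rhs in ast.tail:
        result = result + rhs if op == '+' else result - rhs
    return result

# Roughly what parsing "1 + 2 - 0.5" would feed into expression()
print(fold_expression(AST(Decimal('1'), [('+', Decimal('2')), ('-', Decimal('0.5'))])))  # 2.5
```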
|
{
"source": "j-cube/oidn",
"score": 2
}
|
#### File: oidn/scripts/test.py
```python
import sys
import shutil
from glob import glob
from shutil import which
import argparse
from common import *
MODEL_VERSION='v1.4.0'
# Parse the command-line arguments
parser = argparse.ArgumentParser(description='Runs all tests, including comparing images produced by the library with generated baseline images.')
parser.usage = '\rIntel(R) Open Image Denoise - Test\n' + parser.format_usage()
parser.add_argument('command', type=str, nargs='?', choices=['baseline', 'run'], default='run')
parser.add_argument('--filter', '-f', type=str, nargs='*', choices=['RT', 'RTLightmap'], default=None, help='filters to test')
parser.add_argument('--build_dir', '-B', type=str, help='build directory')
parser.add_argument('--data_dir', '-D', type=str, help='directory of datasets (e.g. training, validation, test)')
parser.add_argument('--results_dir', '-R', type=str, help='directory of training results')
parser.add_argument('--baseline_dir', '-G', type=str, help='directory of generated baseline images')
parser.add_argument('--arch', '-a', type=str, nargs='*', choices=['native', 'pnr', 'hsw', 'skx', 'knl'], default=['native'], help='CPU architectures to test (requires Intel SDE)')
parser.add_argument('--log', '-l', type=str, default=os.path.join(root_dir, 'test.log'), help='output log file')
cfg = parser.parse_args()
training_dir = os.environ.get('OIDN_TRAINING_DIR_' + OS.upper())
if training_dir is None:
training_dir = os.path.join(root_dir, 'training')
if cfg.data_dir is None:
cfg.data_dir = os.path.join(training_dir, 'data')
if cfg.results_dir is None:
cfg.results_dir = os.path.join(training_dir, 'results')
if cfg.baseline_dir is None:
cfg.baseline_dir = os.path.join(training_dir, 'baseline_' + MODEL_VERSION)
if cfg.command == 'run':
# Detect the OIDN binary directory
if cfg.build_dir is None:
cfg.build_dir = os.path.join(root_dir, 'build')
else:
cfg.build_dir = os.path.abspath(cfg.build_dir)
bin_dir = os.path.join(cfg.build_dir, 'install', 'bin')
if not os.path.isdir(bin_dir):
bin_dir = os.path.join(root_dir, 'build')
# Detect the Intel(R) Software Development Emulator (SDE)
# See: https://software.intel.com/en-us/articles/intel-software-development-emulator
sde = 'sde.exe' if OS == 'windows' else 'sde64'
sde_dir = os.environ.get('OIDN_SDE_DIR_' + OS.upper())
if sde_dir is not None:
sde = os.path.join(sde_dir, sde)
# Prints the name of a test
def print_test(name, kind='Test'):
print(kind + ':', name, '...', end='', flush=True)
# Runs a test command
def run_test(cmd, arch='native'):
# Run test through SDE if required
if arch != 'native':
cmd = f'{sde} -{arch} -- ' + cmd
# Write command and redirect output to log
run(f'echo >> "{cfg.log}"')
run(f'echo "{cmd}" >> "{cfg.log}"')
cmd += f' >> "{cfg.log}" 2>&1'
# Run the command and check the return value
if os.system(cmd) == 0:
print(' PASSED')
else:
print(' FAILED')
print(f'Error: test failed, see "{cfg.log}" for details')
exit(1)
# Runs main tests
def test():
if cfg.command == 'run':
# Iterate over architectures
for arch in cfg.arch:
print_test(f'oidnTest.{arch}')
run_test(os.path.join(bin_dir, 'oidnTest'), arch)
# Gets the option name of a feature
def get_feature_opt(feature):
if feature == 'calb':
return 'alb'
elif feature == 'cnrm':
return 'nrm'
else:
return feature
# Gets the file extension of a feature
def get_feature_ext(feature):
if feature == 'dir':
return 'sh1x'
else:
return get_feature_opt(feature)
# Runs regression tests for the specified filter
def test_regression(filter, feature_sets, dataset):
dataset_dir = os.path.join(cfg.data_dir, dataset)
# Convert the input images to PFM
if cfg.command == 'baseline':
image_filenames = sorted(glob(os.path.join(dataset_dir, '**', '*.exr'), recursive=True))
for input_filename in image_filenames:
input_name = os.path.relpath(input_filename, dataset_dir).rsplit('.', 1)[0]
print_test(f'{filter}.{input_name}', 'Convert')
output_filename = input_filename.rsplit('.', 1)[0] + '.pfm'
convert_cmd = os.path.join(root_dir, 'training', 'convert_image.py')
convert_cmd += f' "{input_filename}" "{output_filename}"'
run_test(convert_cmd)
# Iterate over the feature sets
for features, full_test in feature_sets:
# Get the result name
result = filter.lower()
for f in features:
result += '_' + f
features_str = result.split('_', 1)[1]
if cfg.command == 'baseline':
# Generate the baseline images
print_test(f'{filter}.{features_str}', 'Infer')
infer_cmd = os.path.join(root_dir, 'training', 'infer.py')
infer_cmd += f' -D "{cfg.data_dir}" -R "{cfg.results_dir}" -O "{cfg.baseline_dir}" -i {dataset} -r {result} -F pfm -d cpu'
run_test(infer_cmd)
elif cfg.command == 'run':
main_feature = features[0]
main_feature_ext = get_feature_ext(main_feature)
# Gather the list of images
image_filenames = sorted(glob(os.path.join(dataset_dir, '**', f'*.{main_feature_ext}.pfm'), recursive=True))
if not image_filenames:
print('Error: baseline input images missing (run with "baseline" first)')
exit(1)
image_names = [os.path.relpath(filename, dataset_dir).rsplit('.', 3)[0] for filename in image_filenames]
# Iterate over architectures
for arch in cfg.arch:
# Iterate over images
for image_name in image_names:
# Iterate over in-place mode
for inplace in ([False, True] if full_test else [False]):
# Run test
test_name = f'{filter}.{features_str}.{arch}.{image_name}'
if inplace:
test_name += '.inplace'
print_test(test_name)
denoise_cmd = os.path.join(bin_dir, 'oidnDenoise')
ref_filename = os.path.join(cfg.baseline_dir, dataset, f'{image_name}.{result}.{main_feature_ext}.pfm')
if not os.path.isfile(ref_filename):
print('Error: baseline output image missing (run with "baseline" first)')
exit(1)
denoise_cmd += f' -f {filter} -v 2 --ref "{ref_filename}"'
for feature in features:
feature_opt = get_feature_opt(feature)
feature_ext = get_feature_ext(feature)
feature_filename = os.path.join(dataset_dir, image_name) + f'.{feature_ext}.pfm'
denoise_cmd += f' --{feature_opt} "{feature_filename}"'
if set(features) & {'calb', 'cnrm'}:
denoise_cmd += ' --clean_aux'
if inplace:
denoise_cmd += ' --inplace'
run_test(denoise_cmd, arch)
# Main tests
test()
# Regression tests: RT
if not cfg.filter or 'RT' in cfg.filter:
test_regression(
'RT',
[
(['hdr', 'alb', 'nrm'], True),
(['hdr', 'alb'], True),
(['hdr'], True),
(['hdr', 'calb', 'cnrm'], False),
(['ldr', 'alb', 'nrm'], False),
(['ldr', 'alb'], False),
(['ldr'], True),
(['ldr', 'calb', 'cnrm'], False),
(['alb'], True),
(['nrm'], True)
],
'rt_regress'
)
# Regression tests: RTLightmap
if not cfg.filter or 'RTLightmap' in cfg.filter:
test_regression(
'RTLightmap',
[
(['hdr'], True),
(['dir'], True)
],
'rtlightmap_regress'
)
# Done
if cfg.command == 'run':
print('Success: all tests passed')
```
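For non-native architectures, `run_test()` above prefixes the command with Intel SDE and appends a log redirection before handing it to `os.system`. The snippet below just traces that string assembly with assumed paths; it is not part of the test script.
```python
# Assumed example values; mirrors the command assembly done by run_test().
sde = 'sde64'
log = '/tmp/test.log'
cmd = './build/install/bin/oidnTest'
arch = 'skx'
if arch != 'native':
    cmd = f'{sde} -{arch} -- ' + cmd
cmd += f' >> "{log}" 2>&1'
print(cmd)  # sde64 -skx -- ./build/install/bin/oidnTest >> "/tmp/test.log" 2>&1
```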
|
{
"source": "j-cube/OpenPype",
"score": 2
}
|
#### File: flame/api/lib.py
```python
import sys
import os
import re
import json
import pickle
import tempfile
import itertools
import contextlib
import xml.etree.cElementTree as cET
from copy import deepcopy
from xml.etree import ElementTree as ET
from pprint import pformat
from .constants import (
MARKER_COLOR,
MARKER_DURATION,
MARKER_NAME,
COLOR_MAP,
MARKER_PUBLISH_DEFAULT
)
import openpype.api as openpype
log = openpype.Logger.get_logger(__name__)
FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")
class CTX:
# singleton used for passing data between api modules
app_framework = None
flame_apps = []
selection = None
@contextlib.contextmanager
def io_preferences_file(klass, filepath, write=False):
try:
flag = "w" if write else "r"
yield open(filepath, flag)
except IOError as _error:
klass.log.info("Unable to work with preferences `{}`: {}".format(
filepath, _error))
class FlameAppFramework(object):
# flameAppFramework class takes care of preferences
class prefs_dict(dict):
def __init__(self, master, name, **kwargs):
self.name = name
self.master = master
if not self.master.get(self.name):
self.master[self.name] = {}
self.master[self.name].__init__()
def __getitem__(self, k):
return self.master[self.name].__getitem__(k)
def __setitem__(self, k, v):
return self.master[self.name].__setitem__(k, v)
def __delitem__(self, k):
return self.master[self.name].__delitem__(k)
def get(self, k, default=None):
return self.master[self.name].get(k, default)
def setdefault(self, k, default=None):
return self.master[self.name].setdefault(k, default)
def pop(self, *args, **kwargs):
return self.master[self.name].pop(*args, **kwargs)
def update(self, mapping=(), **kwargs):
self.master[self.name].update(mapping, **kwargs)
def __contains__(self, k):
return self.master[self.name].__contains__(k)
def copy(self): # don"t delegate w/ super - dict.copy() -> dict :(
return type(self)(self)
def keys(self):
return self.master[self.name].keys()
@classmethod
def fromkeys(cls, keys, v=None):
return cls.master[cls.name].fromkeys(keys, v)
def __repr__(self):
return "{0}({1})".format(
type(self).__name__, self.master[self.name].__repr__())
def master_keys(self):
return self.master.keys()
def __init__(self):
self.name = self.__class__.__name__
self.bundle_name = "OpenPypeFlame"
# self.prefs scope is limited to flame project and user
self.prefs = {}
self.prefs_user = {}
self.prefs_global = {}
self.log = log
try:
import flame
self.flame = flame
self.flame_project_name = self.flame.project.current_project.name
self.flame_user_name = flame.users.current_user.name
except Exception:
self.flame = None
self.flame_project_name = None
self.flame_user_name = None
import socket
self.hostname = socket.gethostname()
if sys.platform == "darwin":
self.prefs_folder = os.path.join(
os.path.expanduser("~"),
"Library",
"Caches",
"OpenPype",
self.bundle_name
)
elif sys.platform.startswith("linux"):
self.prefs_folder = os.path.join(
os.path.expanduser("~"),
".OpenPype",
self.bundle_name)
self.prefs_folder = os.path.join(
self.prefs_folder,
self.hostname,
)
self.log.info("[{}] waking up".format(self.__class__.__name__))
try:
self.load_prefs()
except RuntimeError:
self.save_prefs()
# menu auto-refresh defaults
if not self.prefs_global.get("menu_auto_refresh"):
self.prefs_global["menu_auto_refresh"] = {
"media_panel": True,
"batch": True,
"main_menu": True,
"timeline_menu": True
}
self.apps = []
def get_pref_file_paths(self):
prefix = self.prefs_folder + os.path.sep + self.bundle_name
prefs_file_path = "_".join([
prefix, self.flame_user_name,
self.flame_project_name]) + ".prefs"
prefs_user_file_path = "_".join([
prefix, self.flame_user_name]) + ".prefs"
prefs_global_file_path = prefix + ".prefs"
return (prefs_file_path, prefs_user_file_path, prefs_global_file_path)
def load_prefs(self):
(proj_pref_path, user_pref_path,
glob_pref_path) = self.get_pref_file_paths()
with io_preferences_file(self, proj_pref_path) as prefs_file:
self.prefs = pickle.load(prefs_file)
self.log.info(
"Project - preferences contents:\n{}".format(
pformat(self.prefs)
))
with io_preferences_file(self, user_pref_path) as prefs_file:
self.prefs_user = pickle.load(prefs_file)
self.log.info(
"User - preferences contents:\n{}".format(
pformat(self.prefs_user)
))
with io_preferences_file(self, glob_pref_path) as prefs_file:
self.prefs_global = pickle.load(prefs_file)
self.log.info(
"Global - preferences contents:\n{}".format(
pformat(self.prefs_global)
))
return True
def save_prefs(self):
# make sure the preference folder is available
if not os.path.isdir(self.prefs_folder):
try:
os.makedirs(self.prefs_folder)
except Exception:
self.log.info("Unable to create folder {}".format(
self.prefs_folder))
return False
# get all pref file paths
(proj_pref_path, user_pref_path,
glob_pref_path) = self.get_pref_file_paths()
with io_preferences_file(self, proj_pref_path, True) as prefs_file:
pickle.dump(self.prefs, prefs_file)
self.log.info(
"Project - preferences contents:\n{}".format(
pformat(self.prefs)
))
with io_preferences_file(self, user_pref_path, True) as prefs_file:
pickle.dump(self.prefs_user, prefs_file)
self.log.info(
"User - preferences contents:\n{}".format(
pformat(self.prefs_user)
))
with io_preferences_file(self, glob_pref_path, True) as prefs_file:
pickle.dump(self.prefs_global, prefs_file)
self.log.info(
"Global - preferences contents:\n{}".format(
pformat(self.prefs_global)
))
return True
def get_current_project():
import flame
return flame.project.current_project
def get_current_sequence(selection):
import flame
def segment_to_sequence(_segment):
track = _segment.parent
version = track.parent
return version.parent
process_timeline = None
if len(selection) == 1:
if isinstance(selection[0], flame.PySequence):
process_timeline = selection[0]
if isinstance(selection[0], flame.PySegment):
process_timeline = segment_to_sequence(selection[0])
else:
for segment in selection:
if isinstance(segment, flame.PySegment):
process_timeline = segment_to_sequence(segment)
break
return process_timeline
def rescan_hooks():
import flame
try:
flame.execute_shortcut('Rescan Python Hooks')
except Exception:
pass
def get_metadata(project_name, _log=None):
# TODO: can be replaced by MediaInfoFile class method
from adsk.libwiretapPythonClientAPI import (
WireTapClient,
WireTapServerHandle,
WireTapNodeHandle,
WireTapStr
)
class GetProjectColorPolicy(object):
def __init__(self, host_name=None, _log=None):
# Create a connection to the Backburner manager using the Wiretap
# python API.
#
self.log = _log or log
self.host_name = host_name or "localhost"
self._wiretap_client = WireTapClient()
if not self._wiretap_client.init():
raise Exception("Could not initialize Wiretap Client")
self._server = WireTapServerHandle(
"{}:IFFFS".format(self.host_name))
def process(self, project_name):
policy_node_handle = WireTapNodeHandle(
self._server,
"/projects/{}/syncolor/policy".format(project_name)
)
self.log.info(policy_node_handle)
policy = WireTapStr()
if not policy_node_handle.getNodeTypeStr(policy):
self.log.warning(
"Could not retrieve policy of '%s': %s" % (
policy_node_handle.getNodeId().id(),
policy_node_handle.lastError()
)
)
return policy.c_str()
policy_wiretap = GetProjectColorPolicy(_log=_log)
return policy_wiretap.process(project_name)
def get_segment_data_marker(segment, with_marker=None):
"""
Get openpype track item tag created by creator or loader plugin.
Attributes:
segment (flame.PySegment): flame api object
with_marker (bool)[optional]: if true it will return also marker object
Returns:
dict: openpype tag data
Returns(with_marker=True):
flame.PyMarker, dict
"""
for marker in segment.markers:
comment = marker.comment.get_value()
color = marker.colour.get_value()
name = marker.name.get_value()
if (name == MARKER_NAME) and (
color == COLOR_MAP[MARKER_COLOR]):
if not with_marker:
return json.loads(comment)
else:
return marker, json.loads(comment)
def set_segment_data_marker(segment, data=None):
"""
Set openpype track item tag to input segment.
Attributes:
segment (flame.PySegment): flame api object
Returns:
dict: json loaded data
"""
data = data or dict()
marker_data = get_segment_data_marker(segment, True)
if marker_data:
# get available openpype tag if any
marker, tag_data = marker_data
# update tag data with new data
tag_data.update(data)
# update marker with tag data
marker.comment = json.dumps(tag_data)
else:
# update tag data with new data
marker = create_segment_data_marker(segment)
# add tag data to marker's comment
marker.comment = json.dumps(data)
def set_publish_attribute(segment, value):
""" Set Publish attribute in input Tag object
Attribute:
segment (flame.PySegment)): flame api object
value (bool): True or False
"""
tag_data = get_segment_data_marker(segment)
tag_data["publish"] = value
# set data to the publish attribute
set_segment_data_marker(segment, tag_data)
def get_publish_attribute(segment):
""" Get Publish attribute from input Tag object
Attribute:
segment (flame.PySegment)): flame api object
Returns:
bool: True or False
"""
tag_data = get_segment_data_marker(segment)
if not tag_data:
set_publish_attribute(segment, MARKER_PUBLISH_DEFAULT)
return MARKER_PUBLISH_DEFAULT
return tag_data["publish"]
def create_segment_data_marker(segment):
""" Create openpype marker on a segment.
Attributes:
segment (flame.PySegment): flame api object
Returns:
flame.PyMarker: flame api object
"""
# get duration of segment
duration = segment.record_duration.relative_frame
# calculate start frame of the new marker
start_frame = int(segment.record_in.relative_frame) + int(duration / 2)
# create marker
marker = segment.create_marker(start_frame)
# set marker name
marker.name = MARKER_NAME
# set duration
marker.duration = MARKER_DURATION
# set colour
marker.colour = COLOR_MAP[MARKER_COLOR] # Red
return marker
def get_sequence_segments(sequence, selected=False):
segments = []
# loop versions in sequence
for ver in sequence.versions:
# loop track in versions
for track in ver.tracks:
# ignore all empty tracks and hidden too
if len(track.segments) == 0 and track.hidden:
continue
# loop all segment in remaining tracks
for segment in track.segments:
if segment.name.get_value() == "":
continue
if segment.hidden.get_value() is True:
continue
if (
selected is True
and segment.selected.get_value() is not True
):
continue
# add it to original selection
segments.append(segment)
return segments
@contextlib.contextmanager
def maintained_segment_selection(sequence):
"""Maintain selection during context
Attributes:
sequence (flame.PySequence): python api object
Yield:
list of flame.PySegment
Example:
>>> with maintained_segment_selection(sequence) as selected_segments:
... for segment in selected_segments:
... segment.selected = False
>>> print(segment.selected)
True
"""
selected_segments = get_sequence_segments(sequence, True)
try:
# do the operation on selected segments
yield selected_segments
finally:
# reset all selected clips
reset_segment_selection(sequence)
# select only original selection of segments
for segment in selected_segments:
segment.selected = True
def reset_segment_selection(sequence):
"""Deselect all selected nodes
"""
for ver in sequence.versions:
for track in ver.tracks:
if len(track.segments) == 0 and track.hidden:
continue
for segment in track.segments:
segment.selected = False
def _get_shot_tokens_values(clip, tokens):
old_value = None
output = {}
if not clip.shot_name:
return output
old_value = clip.shot_name.get_value()
for token in tokens:
clip.shot_name.set_value(token)
_key = str(re.sub("[<>]", "", token)).replace(" ", "_")
try:
output[_key] = int(clip.shot_name.get_value())
except ValueError:
output[_key] = clip.shot_name.get_value()
clip.shot_name.set_value(old_value)
return output
def get_segment_attributes(segment):
if segment.name.get_value() == "":
return None
# Add timeline segment to tree
clip_data = {
"shot_name": segment.shot_name.get_value(),
"segment_name": segment.name.get_value(),
"segment_comment": segment.comment.get_value(),
"tape_name": segment.tape_name,
"source_name": segment.source_name,
"fpath": segment.file_path,
"PySegment": segment
}
# head and tail with forward compatibility
if segment.head:
# `infinite` can also be returned
if isinstance(segment.head, str):
clip_data["segment_head"] = 0
else:
clip_data["segment_head"] = int(segment.head)
if segment.tail:
# `infinite` can also be returned
if isinstance(segment.tail, str):
clip_data["segment_tail"] = 0
else:
clip_data["segment_tail"] = int(segment.tail)
# add all available shot tokens
shot_tokens = _get_shot_tokens_values(segment, [
"<colour space>", "<width>", "<height>", "<depth>", "<segment>",
"<track>", "<track name>"
])
clip_data.update(shot_tokens)
# populate shot source metadata
segment_attrs = [
"record_duration", "record_in", "record_out",
"source_duration", "source_in", "source_out"
]
segment_attrs_data = {}
for attr_name in segment_attrs:
if not hasattr(segment, attr_name):
continue
attr = getattr(segment, attr_name)
segment_attrs_data[attr] = str(attr).replace("+", ":")
if attr_name in ["record_in", "record_out"]:
clip_data[attr_name] = attr.relative_frame
else:
clip_data[attr_name] = attr.frame
clip_data["segment_timecodes"] = segment_attrs_data
return clip_data
def get_clips_in_reels(project):
output_clips = []
project_desktop = project.current_workspace.desktop
for reel_group in project_desktop.reel_groups:
for reel in reel_group.reels:
for clip in reel.clips:
clip_data = {
"PyClip": clip,
"fps": float(str(clip.frame_rate)[:-4])
}
attrs = [
"name", "width", "height",
"ratio", "sample_rate", "bit_depth"
]
for attr in attrs:
val = getattr(clip, attr)
clip_data[attr] = val
version = clip.versions[-1]
track = version.tracks[-1]
for segment in track.segments:
segment_data = get_segment_attributes(segment)
clip_data.update(segment_data)
output_clips.append(clip_data)
return output_clips
def get_reformated_filename(filename, padded=True):
"""
Return fixed python expression path
Args:
filename (str): file name
Returns:
str: string with reformatted path
Example:
get_reformated_filename("plate.1001.exr") > plate.%04d.exr
"""
found = FRAME_PATTERN.search(filename)
if not found:
log.info("File name is not sequence: {}".format(filename))
return filename
padding = get_padding_from_filename(filename)
replacement = "%0{}d".format(padding) if padded else "%d"
start_idx, end_idx = found.span(1)
return replacement.join(
[filename[:start_idx], filename[end_idx:]]
)
def get_padding_from_filename(filename):
"""
Return padding number from Flame path style
Args:
filename (str): file name
Returns:
int: padding number
Example:
get_padding_from_filename("plate.0001.exr") > 4
"""
found = get_frame_from_filename(filename)
return len(found) if found else None
def get_frame_from_filename(filename):
"""
Return sequence number from Flame path style
Args:
filename (str): file name
Returns:
str: sequence frame number
Example:
get_frame_from_filename("plate.0001.exr") > 0001
"""
found = re.findall(FRAME_PATTERN, filename)
return found.pop() if found else None
@contextlib.contextmanager
def maintained_object_duplication(item):
"""Maintain input item duplication
Attributes:
item (any flame.PyObject): python api object
Yield:
duplicate input PyObject type
"""
import flame
# Duplicate the clip to avoid modifying the original clip
duplicate = flame.duplicate(item)
try:
# do the operation on selected segments
yield duplicate
finally:
# delete the item at the end
flame.delete(duplicate)
@contextlib.contextmanager
def maintained_temp_file_path(suffix=None):
_suffix = suffix or ""
try:
# Store dumped json to temporary file
temporary_file = tempfile.mktemp(
suffix=_suffix, prefix="flame_maintained_")
yield temporary_file.replace("\\", "/")
except IOError as _error:
raise IOError(
"Not able to create temp json file: {}".format(_error))
finally:
# Remove the temporary json
os.remove(temporary_file)
def get_clip_segment(flame_clip):
name = flame_clip.name.get_value()
version = flame_clip.versions[0]
track = version.tracks[0]
segments = track.segments
if len(segments) < 1:
raise ValueError("Clip `{}` has no segments!".format(name))
if len(segments) > 1:
raise ValueError("Clip `{}` has too many segments!".format(name))
return segments[0]
def get_batch_group_from_desktop(name):
project = get_current_project()
project_desktop = project.current_workspace.desktop
for bgroup in project_desktop.batch_groups:
if bgroup.name.get_value() in name:
return bgroup
class MediaInfoFile(object):
"""Class to get media info file clip data
Raises:
IOError: MEDIA_SCRIPT_PATH path doesn't exists
TypeError: Not able to generate clip xml data file
ET.ParseError: Missing clip in xml clip data
IOError: Not able to save xml clip data to file
Attributes:
str: `MEDIA_SCRIPT_PATH` path to flame binary
logging.Logger: `log` logger
TODO: add method for getting metadata to dict
"""
MEDIA_SCRIPT_PATH = "/opt/Autodesk/mio/current/dl_get_media_info"
log = log
_clip_data = None
_start_frame = None
_fps = None
_drop_mode = None
def __init__(self, path, **kwargs):
# replace log if any
if kwargs.get("logger"):
self.log = kwargs["logger"]
# test if `dl_get_media_info` path exists
self._validate_media_script_path()
# derive other feed variables
self.feed_basename = os.path.basename(path)
self.feed_dir = os.path.dirname(path)
self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower()
with maintained_temp_file_path(".clip") as tmp_path:
self.log.info("Temp File: {}".format(tmp_path))
self._generate_media_info_file(tmp_path)
# get clip data and reduce to a single clip
# if multiple clips are present
xml_data = self._make_single_clip_media_info(tmp_path)
self.log.debug("xml_data: {}".format(xml_data))
self.log.debug("type: {}".format(type(xml_data)))
# get all time related data and assign them
self._get_time_info_from_origin(xml_data)
self.log.debug("start_frame: {}".format(self.start_frame))
self.log.debug("fps: {}".format(self.fps))
self.log.debug("drop frame: {}".format(self.drop_mode))
self.clip_data = xml_data
@property
def clip_data(self):
"""Clip's xml clip data
Returns:
xml.etree.ElementTree: xml data
"""
return self._clip_data
@clip_data.setter
def clip_data(self, data):
self._clip_data = data
@property
def start_frame(self):
""" Clip's starting frame found in timecode
Returns:
int: number of frames
"""
return self._start_frame
@start_frame.setter
def start_frame(self, number):
self._start_frame = int(number)
@property
def fps(self):
""" Clip's frame rate
Returns:
float: frame rate
"""
return self._fps
@fps.setter
def fps(self, fl_number):
self._fps = float(fl_number)
@property
def drop_mode(self):
""" Clip's drop frame mode
Returns:
str: drop frame flag
"""
return self._drop_mode
@drop_mode.setter
def drop_mode(self, text):
self._drop_mode = str(text)
def _validate_media_script_path(self):
if not os.path.isfile(self.MEDIA_SCRIPT_PATH):
raise IOError("Media Script does not exist: `{}`".format(
self.MEDIA_SCRIPT_PATH))
def _generate_media_info_file(self, fpath):
# Create cmd arguments for getting the xml media info file
cmd_args = [
self.MEDIA_SCRIPT_PATH,
"-e", self.feed_ext,
"-o", fpath,
self.feed_dir
]
try:
# execute creation of clip xml template data
openpype.run_subprocess(cmd_args)
except TypeError as error:
raise TypeError(
"Error creating `{}` due: {}".format(fpath, error))
def _make_single_clip_media_info(self, fpath):
with open(fpath) as f:
lines = f.readlines()
_added_root = itertools.chain(
"<root>", deepcopy(lines)[1:], "</root>")
new_root = ET.fromstringlist(_added_root)
# find the clip that matches the input name
xml_clips = new_root.findall("clip")
matching_clip = None
for xml_clip in xml_clips:
if xml_clip.find("name").text in self.feed_basename:
matching_clip = xml_clip
if matching_clip is None:
# raise an error when the clip is missing
raise ET.ParseError(
"Missing clip in `{}`. Available clips {}".format(
self.feed_basename, [
xml_clip.find("name").text
for xml_clip in xml_clips
]
))
return matching_clip
def _get_time_info_from_origin(self, xml_data):
try:
for out_track in xml_data.iter('track'):
for out_feed in out_track.iter('feed'):
# start frame
out_feed_nb_ticks_obj = out_feed.find(
'startTimecode/nbTicks')
self.start_frame = out_feed_nb_ticks_obj.text
# fps
out_feed_fps_obj = out_feed.find(
'startTimecode/rate')
self.fps = out_feed_fps_obj.text
# drop frame mode
out_feed_drop_mode_obj = out_feed.find(
'startTimecode/dropMode')
self.drop_mode = out_feed_drop_mode_obj.text
break
else:
continue
except Exception as msg:
self.log.warning(msg)
@staticmethod
def write_clip_data_to_file(fpath, xml_element_data):
""" Write xml element of clip data to file
Args:
fpath (string): file path
xml_element_data (xml.etree.ElementTree.Element): xml data
Raises:
IOError: If data could not be written to file
"""
try:
# save it as new file
tree = cET.ElementTree(xml_element_data)
tree.write(
fpath, xml_declaration=True,
method='xml', encoding='UTF-8'
)
except IOError as error:
raise IOError(
"Not able to write data to file: {}".format(error))
```
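The three filename helpers above all revolve around `FRAME_PATTERN`. A self-contained sketch of what they compute for a typical frame-numbered name follows; the real functions additionally log and live inside the OpenPype flame module.
```python
# Minimal sketch of the FRAME_PATTERN helpers applied to "plate.1001.exr".
import re

FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")

filename = "plate.1001.exr"
match = FRAME_PATTERN.search(filename)
frame = match.group(1)              # "1001"  -> get_frame_from_filename
padding = len(frame)                # 4       -> get_padding_from_filename
start_idx, end_idx = match.span(1)
reformatted = filename[:start_idx] + "%0{}d".format(padding) + filename[end_idx:]
print(reformatted)                  # plate.%04d.exr -> get_reformated_filename
```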
#### File: plugins/create/create_multiverse_usd.py
```python
from openpype.hosts.maya.api import plugin, lib
class CreateMultiverseUsd(plugin.Creator):
"""Multiverse USD data"""
name = "usdMain"
label = "Multiverse USD"
family = "usd"
icon = "cubes"
def __init__(self, *args, **kwargs):
super(CreateMultiverseUsd, self).__init__(*args, **kwargs)
# Add animation data first, since it maintains order.
self.data.update(lib.collect_animation_data(True))
self.data["stripNamespaces"] = False
self.data["mergeTransformAndShape"] = False
self.data["writeAncestors"] = True
self.data["flattenParentXforms"] = False
self.data["writeSparseOverrides"] = False
self.data["useMetaPrimPath"] = False
self.data["customRootPath"] = ''
self.data["customAttributes"] = ''
self.data["nodeTypesToIgnore"] = ''
self.data["writeMeshes"] = True
self.data["writeCurves"] = True
self.data["writeParticles"] = True
self.data["writeCameras"] = False
self.data["writeLights"] = False
self.data["writeJoints"] = False
self.data["writeCollections"] = False
self.data["writePositions"] = True
self.data["writeNormals"] = True
self.data["writeUVs"] = True
self.data["writeColorSets"] = False
self.data["writeTangents"] = False
self.data["writeRefPositions"] = False
self.data["writeBlendShapes"] = False
self.data["writeDisplayColor"] = False
self.data["writeSkinWeights"] = False
self.data["writeMaterialAssignment"] = False
self.data["writeHardwareShader"] = False
self.data["writeShadingNetworks"] = False
self.data["writeTransformMatrix"] = True
self.data["writeUsdAttributes"] = False
self.data["timeVaryingTopology"] = False
self.data["customMaterialNamespace"] = ''
self.data["numTimeSamples"] = 1
self.data["timeSamplesSpan"] = 0.0
```
#### File: plugins/publish/extract_multiverse_usd_over.py
```python
import os
import openpype.api
from openpype.hosts.maya.api.lib import maintained_selection
from maya import cmds
class ExtractMultiverseUsdOverride(openpype.api.Extractor):
"""Extractor for USD Override by Multiverse."""
label = "Extract Multiverse USD Override"
hosts = ["maya"]
families = ["usdOverride"]
@property
def options(self):
"""Overridable options for Multiverse USD Export
Given in the following format
- {NAME: EXPECTED TYPE}
If the overridden option's type does not match,
the option is not included and a warning is logged.
"""
return {
"writeAll": bool,
"writeTransforms": bool,
"writeVisibility": bool,
"writeAttributes": bool,
"writeMaterials": bool,
"writeVariants": bool,
"writeVariantsDefinition": bool,
"writeActiveState": bool,
"writeNamespaces": bool,
"numTimeSamples": int,
"timeSamplesSpan": float
}
@property
def default_options(self):
"""The default options for Multiverse USD extraction."""
return {
"writeAll": False,
"writeTransforms": True,
"writeVisibility": True,
"writeAttributes": True,
"writeMaterials": True,
"writeVariants": True,
"writeVariantsDefinition": True,
"writeActiveState": True,
"writeNamespaces": False,
"numTimeSamples": 1,
"timeSamplesSpan": 0.0
}
def process(self, instance):
# Load plugin firstly
cmds.loadPlugin("MultiverseForMaya", quiet=True)
# Define output file path
staging_dir = self.staging_dir(instance)
file_name = "{}.usda".format(instance.name)
file_path = os.path.join(staging_dir, file_name)
file_path = file_path.replace("\\", "/")
# Parse export options
options = self.default_options
self.log.info("Export options: {0}".format(options))
# Perform extraction
self.log.info("Performing extraction ...")
with maintained_selection():
members = instance.data("setMembers")
members = cmds.ls(members,
dag=True,
shapes=True,
type="mvUsdCompoundShape",
noIntermediate=True,
long=True)
self.log.info("Collected object {}".format(members))
# TODO: Deal with asset, composition, override with options.
import multiverse
time_opts = None
frame_start = instance.data["frameStart"]
frame_end = instance.data["frameEnd"]
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
step = instance.data["step"]
fps = instance.data["fps"]
if frame_end != frame_start:
time_opts = multiverse.TimeOptions()
time_opts.writeTimeRange = True
time_opts.frameRange = (
frame_start - handle_start, frame_end + handle_end)
time_opts.frameIncrement = step
time_opts.numTimeSamples = instance.data["numTimeSamples"]
time_opts.timeSamplesSpan = instance.data["timeSamplesSpan"]
time_opts.framePerSecond = fps
over_write_opts = multiverse.OverridesWriteOptions(time_opts)
options_discard_keys = {
"numTimeSamples",
"timeSamplesSpan",
"frameStart",
"frameEnd",
"handleStart",
"handleEnd",
"step",
"fps"
}
for key, value in options.items():
if key in options_discard_keys:
continue
setattr(over_write_opts, key, value)
for member in members:
multiverse.WriteOverrides(file_path, member, over_write_opts)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
"name": "usd",
"ext": "usd",
"files": file_name,
"stagingDir": staging_dir
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance {} to {}".format(
instance.name, file_path))
```
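The `options` docstring above describes a `{NAME: EXPECTED TYPE}` contract where an override is applied only if its value matches the expected type, yet `process()` only ever reads `default_options`. The sketch below shows one way such a type-checked merge could look; it is illustrative and not the plugin's actual implementation.
```python
# Illustrative type-checked option merge matching the docstring's description.
def merge_options(defaults, expected_types, overrides, warn=print):
    merged = dict(defaults)
    for key, value in overrides.items():
        expected = expected_types.get(key)
        if expected is None:
            warn("Unknown option: {}".format(key))
            continue
        if not isinstance(value, expected):
            warn("Option {} expects {}, got {!r}".format(key, expected.__name__, value))
            continue
        merged[key] = value
    return merged
```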
#### File: plugins/create/create_write_prerender.py
```python
import nuke
from openpype.hosts.nuke.api import plugin
from openpype.hosts.nuke.api.lib import create_write_node
class CreateWritePrerender(plugin.AbstractWriteRender):
# change this to template preset
name = "WritePrerender"
label = "Create Write Prerender"
hosts = ["nuke"]
n_class = "Write"
family = "prerender"
icon = "sign-out"
defaults = ["Key01", "Bg01", "Fg01", "Branch01", "Part01"]
def __init__(self, *args, **kwargs):
super(CreateWritePrerender, self).__init__(*args, **kwargs)
def _create_write_node(self, selected_node, inputs, outputs, write_data):
reviewable = self.presets.get("reviewable")
write_node = create_write_node(
self.data["subset"],
write_data,
input=selected_node,
prenodes=[],
review=reviewable,
linked_knobs=["channels", "___", "first", "last", "use_limit"])
return write_node
def _modify_write_node(self, write_node):
# open group node
write_node.begin()
for n in nuke.allNodes():
# get write node
if n.Class() in "Write":
w_node = n
write_node.end()
if self.presets.get("use_range_limit"):
w_node["use_limit"].setValue(True)
w_node["first"].setValue(nuke.root()["first_frame"].value())
w_node["last"].setValue(nuke.root()["last_frame"].value())
return write_node
```
#### File: plugins/publish/collect_batch_data.py
```python
import os
import pyblish.api
from openpype.lib.plugin_tools import (
parse_json,
get_batch_asset_task_info
)
from openpype.pipeline import legacy_io
class CollectBatchData(pyblish.api.ContextPlugin):
"""Collect batch data from json stored in 'OPENPYPE_PUBLISH_DATA' env dir.
The directory must contain 'manifest.json' file where batch data should be
stored.
"""
# must be really early, context values are only in json file
order = pyblish.api.CollectorOrder - 0.495
label = "Collect batch data"
hosts = ["photoshop"]
targets = ["remotepublish"]
def process(self, context):
self.log.info("CollectBatchData")
batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA")
assert batch_dir, (
"Missing `OPENPYPE_PUBLISH_DATA`")
assert os.path.exists(batch_dir), \
"Folder {} doesn't exist".format(batch_dir)
project_name = os.environ.get("AVALON_PROJECT")
if project_name is None:
raise AssertionError(
"Environment `AVALON_PROJECT` was not found."
"Could not set project `root` which may cause issues."
)
batch_data = parse_json(os.path.join(batch_dir, "manifest.json"))
context.data["batchDir"] = batch_dir
context.data["batchData"] = batch_data
asset_name, task_name, task_type = get_batch_asset_task_info(
batch_data["context"]
)
os.environ["AVALON_ASSET"] = asset_name
os.environ["AVALON_TASK"] = task_name
legacy_io.Session["AVALON_ASSET"] = asset_name
legacy_io.Session["AVALON_TASK"] = task_name
context.data["asset"] = asset_name
context.data["task"] = task_name
context.data["taskType"] = task_type
context.data["project_name"] = project_name
context.data["variant"] = batch_data["variant"]
```
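The collector above only assumes two keys in `manifest.json`: a `context` entry passed to `get_batch_asset_task_info` and a `variant` string. The dictionary below is a hypothetical example of such a manifest; the exact context fields depend on OpenPype's webpublisher and are not taken from this file.
```python
# Hypothetical manifest.json contents consistent with the keys read above.
example_manifest = {
    "context": {
        "asset": "sh010",       # assumed field names
        "task": "compositing",
        "type": "task"
    },
    "variant": "Main"
}
```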
#### File: plugins/publish/extract_sequence.py
```python
import os
import copy
import tempfile
from PIL import Image
import pyblish.api
from openpype.hosts.tvpaint.api import lib
from openpype.hosts.tvpaint.lib import (
calculate_layers_extraction_data,
get_frame_filename_template,
fill_reference_frames,
composite_rendered_layers,
rename_filepaths_by_frame_start,
)
class ExtractSequence(pyblish.api.Extractor):
label = "Extract Sequence"
hosts = ["tvpaint"]
families = ["review", "renderPass", "renderLayer", "renderScene"]
# Modifiable with settings
review_bg = [255, 255, 255, 255]
def process(self, instance):
self.log.info(
"* Processing instance \"{}\"".format(instance.data["label"])
)
# Get all layers and filter out not visible
layers = instance.data["layers"]
filtered_layers = [
layer
for layer in layers
if layer["visible"]
]
layer_names = [str(layer["name"]) for layer in filtered_layers]
if not layer_names:
self.log.info(
"None of the layers from the instance"
" are visible. Extraction skipped."
)
return
joined_layer_names = ", ".join(
["\"{}\"".format(name) for name in layer_names]
)
self.log.debug(
"Instance has {} layers with names: {}".format(
len(layer_names), joined_layer_names
)
)
family_lowered = instance.data["family"].lower()
mark_in = instance.context.data["sceneMarkIn"]
mark_out = instance.context.data["sceneMarkOut"]
# Change scene Start Frame to 0 to prevent frame index issues
# - issue is that TVPaint versions deal with frame indexes in a
# different way when Start Frame is not `0`
# NOTE It will be set back after rendering
scene_start_frame = instance.context.data["sceneStartFrame"]
lib.execute_george("tv_startframe 0")
# Frame start/end may be stored as float
frame_start = int(instance.data["frameStart"])
frame_end = int(instance.data["frameEnd"])
# Handles are not stored per instance but on Context
handle_start = instance.context.data["handleStart"]
handle_end = instance.context.data["handleEnd"]
scene_bg_color = instance.context.data["sceneBgColor"]
# --- Fallbacks ----------------------------------------------------
# This is required if validations of ranges are ignored.
# - all of this code won't change processing if the range to render
# matches the range of the expected output
# Prepare output frames
output_frame_start = frame_start - handle_start
output_frame_end = frame_end + handle_end
# Change output frame start to 0 if handles make it negative
if output_frame_start < 0:
self.log.warning((
"Frame start with handles has negative value."
" Changed to \"0\". Frames start: {}, Handle Start: {}"
).format(frame_start, handle_start))
output_frame_start = 0
# Check Marks range and output range
output_range = output_frame_end - output_frame_start
marks_range = mark_out - mark_in
# Lower Mark Out if mark range is bigger than output
# - do not render unused frames
if output_range < marks_range:
new_mark_out = mark_out - (marks_range - output_range)
self.log.warning((
"Lowering render range to {} frames. Changed Mark Out {} -> {}"
).format(marks_range + 1, mark_out, new_mark_out))
# Assign new mark out to variable
mark_out = new_mark_out
# Lower output frame end so representation has right `frameEnd` value
elif output_range > marks_range:
new_output_frame_end = (
output_frame_end - (output_range - marks_range)
)
self.log.warning((
"Lowering representation range to {} frames."
" Changed frame end {} -> {}"
).format(output_range + 1, mark_out, new_output_frame_end))
output_frame_end = new_output_frame_end
# -------------------------------------------------------------------
# Save to staging dir
output_dir = instance.data.get("stagingDir")
if not output_dir:
# Create temp folder if staging dir is not set
output_dir = (
tempfile.mkdtemp(prefix="tvpaint_render_")
).replace("\\", "/")
instance.data["stagingDir"] = output_dir
self.log.debug(
"Files will be rendered to folder: {}".format(output_dir)
)
if instance.data["family"] == "review":
result = self.render_review(
output_dir, mark_in, mark_out, scene_bg_color
)
else:
# Render output
result = self.render(
output_dir, mark_in, mark_out, filtered_layers
)
output_filepaths_by_frame_idx, thumbnail_fullpath = result
# Change scene frame Start back to previous value
lib.execute_george("tv_startframe {}".format(scene_start_frame))
# Nothing was rendered
if not output_filepaths_by_frame_idx:
self.log.warning("Extractor did not create any output.")
return
repre_files = self._rename_output_files(
output_filepaths_by_frame_idx,
mark_in,
mark_out,
output_frame_start
)
# Fill tags and new families
tags = []
if family_lowered in ("review", "renderlayer", "renderscene"):
tags.append("review")
# Sequence of one frame
single_file = len(repre_files) == 1
if single_file:
repre_files = repre_files[0]
# Extension is hardcoded
# - changing the extension would require changing the code
new_repre = {
"name": "png",
"ext": "png",
"files": repre_files,
"stagingDir": output_dir,
"tags": tags
}
if not single_file:
new_repre["frameStart"] = output_frame_start
new_repre["frameEnd"] = output_frame_end
self.log.debug("Creating new representation: {}".format(new_repre))
instance.data["representations"].append(new_repre)
if family_lowered in ("renderpass", "renderlayer", "renderscene"):
# Change family to render
instance.data["family"] = "render"
if not thumbnail_fullpath:
return
thumbnail_ext = os.path.splitext(
thumbnail_fullpath
)[1].replace(".", "")
# Create thumbnail representation
thumbnail_repre = {
"name": "thumbnail",
"ext": thumbnail_ext,
"outputName": "thumb",
"files": os.path.basename(thumbnail_fullpath),
"stagingDir": output_dir,
"tags": ["thumbnail"]
}
instance.data["representations"].append(thumbnail_repre)
def _rename_output_files(
self, filepaths_by_frame, mark_in, mark_out, output_frame_start
):
new_filepaths_by_frame = rename_filepaths_by_frame_start(
filepaths_by_frame, mark_in, mark_out, output_frame_start
)
repre_filenames = []
for filepath in new_filepaths_by_frame.values():
repre_filenames.append(os.path.basename(filepath))
if mark_in < output_frame_start:
repre_filenames = list(reversed(repre_filenames))
return repre_filenames
def render_review(
self, output_dir, mark_in, mark_out, scene_bg_color
):
""" Export images from TVPaint using `tv_savesequence` command.
Args:
output_dir (str): Directory where files will be stored.
mark_in (int): Starting frame index from which export will begin.
mark_out (int): On which frame index export will end.
scene_bg_color (list): Bg color set in scene. Result of george
script command `tv_background`.
Returns:
tuple: With 2 items first is list of filenames second is path to
thumbnail.
"""
filename_template = get_frame_filename_template(mark_out)
self.log.debug("Preparing data for rendering.")
first_frame_filepath = os.path.join(
output_dir,
filename_template.format(frame=mark_in)
)
bg_color = self._get_review_bg_color()
george_script_lines = [
# Change bg color to color from settings
"tv_background \"color\" {} {} {}".format(*bg_color),
"tv_SaveMode \"PNG\"",
"export_path = \"{}\"".format(
first_frame_filepath.replace("\\", "/")
),
"tv_savesequence '\"'export_path'\"' {} {}".format(
mark_in, mark_out
)
]
if scene_bg_color:
# Change bg color back to previous scene bg color
_scene_bg_color = copy.deepcopy(scene_bg_color)
bg_type = _scene_bg_color.pop(0)
orig_color_command = [
"tv_background",
"\"{}\"".format(bg_type)
]
orig_color_command.extend(_scene_bg_color)
george_script_lines.append(" ".join(orig_color_command))
lib.execute_george_through_file("\n".join(george_script_lines))
first_frame_filepath = None
output_filepaths_by_frame_idx = {}
for frame_idx in range(mark_in, mark_out + 1):
filename = filename_template.format(frame=frame_idx)
filepath = os.path.join(output_dir, filename)
output_filepaths_by_frame_idx[frame_idx] = filepath
if not os.path.exists(filepath):
raise AssertionError(
"Output was not rendered. File was not found {}".format(
filepath
)
)
if first_frame_filepath is None:
first_frame_filepath = filepath
thumbnail_filepath = None
if first_frame_filepath and os.path.exists(first_frame_filepath):
thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg")
source_img = Image.open(first_frame_filepath)
if source_img.mode.lower() != "rgb":
source_img = source_img.convert("RGB")
source_img.save(thumbnail_filepath)
return output_filepaths_by_frame_idx, thumbnail_filepath
def render(self, output_dir, mark_in, mark_out, layers):
""" Export images from TVPaint.
Args:
output_dir (str): Directory where files will be stored.
mark_in (int): Starting frame index from which export will begin.
mark_out (int): On which frame index export will end.
layers (list): List of layers to be exported.
Returns:
tuple: With 2 items first is list of filenames second is path to
thumbnail.
"""
self.log.debug("Preparing data for rendering.")
# Map layers by position
layers_by_position = {}
layers_by_id = {}
layer_ids = []
for layer in layers:
layer_id = layer["layer_id"]
position = layer["position"]
layers_by_position[position] = layer
layers_by_id[layer_id] = layer
layer_ids.append(layer_id)
# Sort layer positions in reverse order
sorted_positions = list(reversed(sorted(layers_by_position.keys())))
if not sorted_positions:
return [], None
self.log.debug("Collecting pre/post behavior of individual layers.")
behavior_by_layer_id = lib.get_layers_pre_post_behavior(layer_ids)
exposure_frames_by_layer_id = lib.get_layers_exposure_frames(
layer_ids, layers
)
extraction_data_by_layer_id = calculate_layers_extraction_data(
layers,
exposure_frames_by_layer_id,
behavior_by_layer_id,
mark_in,
mark_out
)
# Render layers
filepaths_by_layer_id = {}
for layer_id, render_data in extraction_data_by_layer_id.items():
layer = layers_by_id[layer_id]
filepaths_by_layer_id[layer_id] = self._render_layer(
render_data, layer, output_dir
)
# Prepare final filepaths where compositing should store result
output_filepaths_by_frame = {}
thumbnail_src_filepath = None
finale_template = get_frame_filename_template(mark_out)
for frame_idx in range(mark_in, mark_out + 1):
filename = finale_template.format(frame=frame_idx)
filepath = os.path.join(output_dir, filename)
output_filepaths_by_frame[frame_idx] = filepath
if thumbnail_src_filepath is None:
thumbnail_src_filepath = filepath
self.log.info("Started compositing of layer frames.")
composite_rendered_layers(
layers, filepaths_by_layer_id,
mark_in, mark_out,
output_filepaths_by_frame
)
self.log.info("Compositing finished")
thumbnail_filepath = None
if thumbnail_src_filepath and os.path.exists(thumbnail_src_filepath):
source_img = Image.open(thumbnail_src_filepath)
thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg")
# Composite background only on rgba images
# - just making sure
if source_img.mode.lower() == "rgba":
bg_color = self._get_review_bg_color()
self.log.debug("Adding thumbnail background color {}.".format(
" ".join([str(val) for val in bg_color])
))
bg_image = Image.new("RGBA", source_img.size, bg_color)
thumbnail_obj = Image.alpha_composite(bg_image, source_img)
thumbnail_obj.convert("RGB").save(thumbnail_filepath)
else:
self.log.info((
"Source for thumbnail has mode \"{}\" (Expected: RGBA)."
" Can't use thumbnail background color."
).format(source_img.mode))
source_img.save(thumbnail_filepath)
return output_filepaths_by_frame, thumbnail_filepath
def _get_review_bg_color(self):
red = green = blue = 255
if self.review_bg:
if len(self.review_bg) == 4:
red, green, blue, _ = self.review_bg
elif len(self.review_bg) == 3:
red, green, blue = self.review_bg
return (red, green, blue)
def _render_layer(self, render_data, layer, output_dir):
frame_references = render_data["frame_references"]
filenames_by_frame_index = render_data["filenames_by_frame_index"]
layer_id = layer["layer_id"]
george_script_lines = [
"tv_layerset {}".format(layer_id),
"tv_SaveMode \"PNG\""
]
filepaths_by_frame = {}
frames_to_render = []
for frame_idx, ref_idx in frame_references.items():
# A None reference is skipped because it has no source
if ref_idx is None:
filepaths_by_frame[frame_idx] = None
continue
filename = filenames_by_frame_index[frame_idx]
dst_path = "/".join([output_dir, filename])
filepaths_by_frame[frame_idx] = dst_path
if frame_idx != ref_idx:
continue
frames_to_render.append(str(frame_idx))
# Go to frame
george_script_lines.append("tv_layerImage {}".format(frame_idx))
# Store image to output
george_script_lines.append("tv_saveimage \"{}\"".format(dst_path))
self.log.debug("Rendering Exposure frames {} of layer {} ({})".format(
",".join(frames_to_render), layer_id, layer["name"]
))
# Let TVPaint render layer's image
lib.execute_george_through_file("\n".join(george_script_lines))
# Fill frames between `frame_start_index` and `frame_end_index`
self.log.debug("Filling frames that were not rendered.")
fill_reference_frames(frame_references, filepaths_by_frame)
return filepaths_by_frame
```
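The fallback block in `process()` reconciles the scene's Mark In/Out range with the expected output range. A small worked example of that arithmetic, with assumed frame values, follows.
```python
# Worked example (illustrative values): 12 marked frames vs. 10 expected output frames.
mark_in, mark_out = 0, 11                             # scene marks: frames 0..11
output_frame_start, output_frame_end = 1001, 1010     # expected output: 1001..1010
output_range = output_frame_end - output_frame_start  # 9
marks_range = mark_out - mark_in                      # 11
if output_range < marks_range:
    # lower Mark Out so no unused frames get rendered
    mark_out = mark_out - (marks_range - output_range)  # 11 - 2 = 9
print(mark_in, mark_out)  # 0 9 -> 10 frames rendered, matching the output range
```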
#### File: plugins/load/load_layout.py
```python
import os
import json
from pathlib import Path
import unreal
from unreal import EditorAssetLibrary
from unreal import EditorLevelLibrary
from unreal import EditorLevelUtils
from unreal import AssetToolsHelpers
from unreal import FBXImportType
from unreal import MathLibrary as umath
from openpype.pipeline import (
discover_loader_plugins,
loaders_from_representation,
load_container,
get_representation_path,
AVALON_CONTAINER_ID,
legacy_io,
)
from openpype.hosts.unreal.api import plugin
from openpype.hosts.unreal.api import pipeline as unreal_pipeline
class LayoutLoader(plugin.Loader):
"""Load Layout from a JSON file"""
families = ["layout"]
representations = ["json"]
label = "Load Layout"
icon = "code-fork"
color = "orange"
ASSET_ROOT = "/Game/OpenPype"
def _get_asset_containers(self, path):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
asset_content = EditorAssetLibrary.list_assets(
path, recursive=True)
asset_containers = []
# Get all the asset containers
for a in asset_content:
obj = ar.get_asset_by_object_path(a)
if obj.get_asset().get_class().get_name() == 'AssetContainer':
asset_containers.append(obj)
return asset_containers
@staticmethod
def _get_fbx_loader(loaders, family):
name = ""
if family == 'rig':
name = "SkeletalMeshFBXLoader"
elif family == 'model':
name = "StaticMeshFBXLoader"
elif family == 'camera':
name = "CameraLoader"
if name == "":
return None
for loader in loaders:
if loader.__name__ == name:
return loader
return None
@staticmethod
def _get_abc_loader(loaders, family):
name = ""
if family == 'rig':
name = "SkeletalMeshAlembicLoader"
elif family == 'model':
name = "StaticMeshAlembicLoader"
if name == "":
return None
for loader in loaders:
if loader.__name__ == name:
return loader
return None
def _get_data(self, asset_name):
asset_doc = legacy_io.find_one({
"type": "asset",
"name": asset_name
})
return asset_doc.get("data")
def _set_sequence_hierarchy(
self, seq_i, seq_j, max_frame_i, min_frame_j, max_frame_j, map_paths
):
# Get existing sequencer tracks or create them if they don't exist
tracks = seq_i.get_master_tracks()
subscene_track = None
visibility_track = None
for t in tracks:
if t.get_class() == unreal.MovieSceneSubTrack.static_class():
subscene_track = t
if (t.get_class() ==
unreal.MovieSceneLevelVisibilityTrack.static_class()):
visibility_track = t
if not subscene_track:
subscene_track = seq_i.add_master_track(unreal.MovieSceneSubTrack)
if not visibility_track:
visibility_track = seq_i.add_master_track(
unreal.MovieSceneLevelVisibilityTrack)
# Create the sub-scene section
subscenes = subscene_track.get_sections()
subscene = None
for s in subscenes:
if s.get_editor_property('sub_sequence') == seq_j:
subscene = s
break
if not subscene:
subscene = subscene_track.add_section()
subscene.set_row_index(len(subscene_track.get_sections()))
subscene.set_editor_property('sub_sequence', seq_j)
subscene.set_range(
min_frame_j,
max_frame_j + 1)
# Create the visibility section
ar = unreal.AssetRegistryHelpers.get_asset_registry()
maps = []
for m in map_paths:
# Unreal requires the level to be loaded to get the map name
EditorLevelLibrary.save_all_dirty_levels()
EditorLevelLibrary.load_level(m)
maps.append(str(ar.get_asset_by_object_path(m).asset_name))
vis_section = visibility_track.add_section()
index = len(visibility_track.get_sections())
vis_section.set_range(
min_frame_j,
max_frame_j + 1)
vis_section.set_visibility(unreal.LevelVisibility.VISIBLE)
vis_section.set_row_index(index)
vis_section.set_level_names(maps)
if min_frame_j > 1:
hid_section = visibility_track.add_section()
hid_section.set_range(
1,
min_frame_j)
hid_section.set_visibility(unreal.LevelVisibility.HIDDEN)
hid_section.set_row_index(index)
hid_section.set_level_names(maps)
if max_frame_j < max_frame_i:
hid_section = visibility_track.add_section()
hid_section.set_range(
max_frame_j + 1,
max_frame_i + 1)
hid_section.set_visibility(unreal.LevelVisibility.HIDDEN)
hid_section.set_row_index(index)
hid_section.set_level_names(maps)
def _process_family(
self, assets, class_name, transform, sequence, inst_name=None
):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
actors = []
bindings = []
for asset in assets:
obj = ar.get_asset_by_object_path(asset).get_asset()
if obj.get_class().get_name() == class_name:
actor = EditorLevelLibrary.spawn_actor_from_object(
obj,
transform.get('translation')
)
if inst_name:
try:
# Rename method leads to crash
# actor.rename(name=inst_name)
# The label works, although it makes it slightly more
# complicated to check for the names, as we need to
# loop through all the actors in the level
actor.set_actor_label(inst_name)
except Exception as e:
print(e)
actor.set_actor_rotation(unreal.Rotator(
umath.radians_to_degrees(
transform.get('rotation').get('x')),
-umath.radians_to_degrees(
transform.get('rotation').get('y')),
umath.radians_to_degrees(
transform.get('rotation').get('z')),
), False)
actor.set_actor_scale3d(transform.get('scale'))
if class_name == 'SkeletalMesh':
skm_comp = actor.get_editor_property(
'skeletal_mesh_component')
skm_comp.set_bounds_scale(10.0)
actors.append(actor)
binding = sequence.add_possessable(actor)
bindings.append(binding)
return actors, bindings
def _import_animation(
self, asset_dir, path, instance_name, skeleton, actors_dict,
animation_file, bindings_dict, sequence
):
anim_file = Path(animation_file)
anim_file_name = anim_file.with_suffix('')
anim_path = f"{asset_dir}/animations/{anim_file_name}"
# Import animation
task = unreal.AssetImportTask()
task.options = unreal.FbxImportUI()
task.set_editor_property(
'filename', str(path.with_suffix(f".{animation_file}")))
task.set_editor_property('destination_path', anim_path)
task.set_editor_property(
'destination_name', f"{instance_name}_animation")
task.set_editor_property('replace_existing', False)
task.set_editor_property('automated', True)
task.set_editor_property('save', False)
# set import options here
task.options.set_editor_property(
'automated_import_should_detect_type', False)
task.options.set_editor_property(
'original_import_type', FBXImportType.FBXIT_SKELETAL_MESH)
task.options.set_editor_property(
'mesh_type_to_import', FBXImportType.FBXIT_ANIMATION)
task.options.set_editor_property('import_mesh', False)
task.options.set_editor_property('import_animations', True)
task.options.set_editor_property('override_full_name', True)
task.options.set_editor_property('skeleton', skeleton)
task.options.anim_sequence_import_data.set_editor_property(
'animation_length',
unreal.FBXAnimationLengthImportType.FBXALIT_EXPORTED_TIME
)
task.options.anim_sequence_import_data.set_editor_property(
'import_meshes_in_bone_hierarchy', False)
task.options.anim_sequence_import_data.set_editor_property(
'use_default_sample_rate', True)
task.options.anim_sequence_import_data.set_editor_property(
'import_custom_attribute', True)
task.options.anim_sequence_import_data.set_editor_property(
'import_bone_tracks', True)
task.options.anim_sequence_import_data.set_editor_property(
'remove_redundant_keys', True)
task.options.anim_sequence_import_data.set_editor_property(
'convert_scene', True)
AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
asset_content = unreal.EditorAssetLibrary.list_assets(
anim_path, recursive=False, include_folder=False
)
animation = None
for a in asset_content:
unreal.EditorAssetLibrary.save_asset(a)
imported_asset_data = unreal.EditorAssetLibrary.find_asset_data(a)
imported_asset = unreal.AssetRegistryHelpers.get_asset(
imported_asset_data)
if imported_asset.__class__ == unreal.AnimSequence:
animation = imported_asset
break
if animation:
actor = None
if actors_dict.get(instance_name):
for a in actors_dict.get(instance_name):
if a.get_class().get_name() == 'SkeletalMeshActor':
actor = a
break
animation.set_editor_property('enable_root_motion', True)
actor.skeletal_mesh_component.set_editor_property(
'animation_mode', unreal.AnimationMode.ANIMATION_SINGLE_NODE)
actor.skeletal_mesh_component.animation_data.set_editor_property(
'anim_to_play', animation)
# Add animation to the sequencer
bindings = bindings_dict.get(instance_name)
for binding in bindings:
binding.add_track(unreal.MovieSceneSkeletalAnimationTrack)
for track in binding.get_tracks():
section = track.add_section()
section.set_range(
sequence.get_playback_start(),
sequence.get_playback_end())
sec_params = section.get_editor_property('params')
sec_params.set_editor_property('animation', animation)
def _process(self, lib_path, asset_dir, sequence, loaded=None):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
with open(lib_path, "r") as fp:
data = json.load(fp)
all_loaders = discover_loader_plugins()
if not loaded:
loaded = []
path = Path(lib_path)
skeleton_dict = {}
actors_dict = {}
bindings_dict = {}
for element in data:
reference = None
if element.get('reference_fbx'):
reference = element.get('reference_fbx')
elif element.get('reference_abc'):
reference = element.get('reference_abc')
# If reference is None, this element is skipped, as it cannot be
# imported in Unreal
if not reference:
continue
instance_name = element.get('instance_name')
skeleton = None
if reference not in loaded:
loaded.append(reference)
family = element.get('family')
loaders = loaders_from_representation(
all_loaders, reference)
loader = None
if reference == element.get('reference_fbx'):
loader = self._get_fbx_loader(loaders, family)
elif reference == element.get('reference_abc'):
loader = self._get_abc_loader(loaders, family)
if not loader:
continue
options = {
"asset_dir": asset_dir
}
assets = load_container(
loader,
reference,
namespace=instance_name,
options=options
)
instances = [
item for item in data
if (item.get('reference_fbx') == reference or
item.get('reference_abc') == reference)]
for instance in instances:
transform = instance.get('transform')
inst = instance.get('instance_name')
actors = []
if family == 'model':
actors, _ = self._process_family(
assets, 'StaticMesh', transform, sequence, inst)
elif family == 'rig':
actors, bindings = self._process_family(
assets, 'SkeletalMesh', transform, sequence, inst)
actors_dict[inst] = actors
bindings_dict[inst] = bindings
if family == 'rig':
# Finds skeleton among the imported assets
for asset in assets:
obj = ar.get_asset_by_object_path(asset).get_asset()
if obj.get_class().get_name() == 'Skeleton':
skeleton = obj
if skeleton:
break
if skeleton:
skeleton_dict[reference] = skeleton
else:
skeleton = skeleton_dict.get(reference)
animation_file = element.get('animation')
if animation_file and skeleton:
self._import_animation(
asset_dir, path, instance_name, skeleton, actors_dict,
animation_file, bindings_dict, sequence)
@staticmethod
def _remove_family(assets, components, class_name, prop_name):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
objects = []
for a in assets:
obj = ar.get_asset_by_object_path(a)
if obj.get_asset().get_class().get_name() == class_name:
objects.append(obj)
for obj in objects:
for comp in components:
if comp.get_editor_property(prop_name) == obj.get_asset():
comp.get_owner().destroy_actor()
def _remove_actors(self, path):
asset_containers = self._get_asset_containers(path)
# Get all the static and skeletal meshes components in the level
components = EditorLevelLibrary.get_all_level_actors_components()
static_meshes_comp = [
c for c in components
if c.get_class().get_name() == 'StaticMeshComponent']
skel_meshes_comp = [
c for c in components
if c.get_class().get_name() == 'SkeletalMeshComponent']
# For all the asset containers, get the static and skeletal meshes.
# Then, check the components in the level and destroy the matching
# actors.
for asset_container in asset_containers:
package_path = asset_container.get_editor_property('package_path')
family = EditorAssetLibrary.get_metadata_tag(
asset_container.get_asset(), 'family')
assets = EditorAssetLibrary.list_assets(
str(package_path), recursive=False)
if family == 'model':
self._remove_family(
assets, static_meshes_comp, 'StaticMesh', 'static_mesh')
elif family == 'rig':
self._remove_family(
assets, skel_meshes_comp, 'SkeletalMesh', 'skeletal_mesh')
def load(self, context, name, namespace, options):
"""Load and containerise representation into Content Browser.
This is a two-step process. First, import the FBX to a temporary path and
then call `containerise()` on it - this moves all content to a new
directory, creates an AssetContainer there and imprints it with metadata.
This marks the path as a container.
Args:
context (dict): application context
name (str): subset name
namespace (str): in Unreal this is basically the path to the container.
This is not passed here, so the namespace is set
by `containerise()`, because only then do we know
the real path.
options (dict): Data that would be imprinted. This is not
used now; data are imprinted by `containerise()`.
Returns:
list(str): list of container content
"""
# Create directory for asset and avalon container
hierarchy = context.get('asset').get('data').get('parents')
root = self.ASSET_ROOT
hierarchy_dir = root
hierarchy_list = []
for h in hierarchy:
hierarchy_dir = f"{hierarchy_dir}/{h}"
hierarchy_list.append(hierarchy_dir)
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
asset_name = "{}_{}".format(asset, name)
else:
asset_name = "{}".format(name)
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
"{}/{}/{}".format(hierarchy_dir, asset, name), suffix="")
container_name += suffix
EditorAssetLibrary.make_directory(asset_dir)
# Create the map for the shot, and create the hierarchy of maps. If the
# maps already exist, we will use them.
maps = []
for h in hierarchy_list:
a = h.split('/')[-1]
map = f"{h}/{a}_map.{a}_map"
new = False
if not EditorAssetLibrary.does_asset_exist(map):
EditorLevelLibrary.new_level(f"{h}/{a}_map")
new = True
maps.append({"map": map, "new": new})
EditorLevelLibrary.new_level(f"{asset_dir}/{asset}_map")
maps.append(
{"map": f"{asset_dir}/{asset}_map.{asset}_map", "new": True})
for i in range(0, len(maps) - 1):
for j in range(i + 1, len(maps)):
if maps[j].get('new'):
EditorLevelLibrary.load_level(maps[i].get('map'))
EditorLevelUtils.add_level_to_world(
EditorLevelLibrary.get_editor_world(),
maps[j].get('map'),
unreal.LevelStreamingDynamic
)
EditorLevelLibrary.save_all_dirty_levels()
EditorLevelLibrary.load_level(maps[-1].get('map'))
# Get all the sequences in the hierarchy. They will be created if
# they don't exist.
sequences = []
frame_ranges = []
i = 0
for h in hierarchy_list:
root_content = EditorAssetLibrary.list_assets(
h, recursive=False, include_folder=False)
existing_sequences = [
EditorAssetLibrary.find_asset_data(asset)
for asset in root_content
if EditorAssetLibrary.find_asset_data(
asset).get_class().get_name() == 'LevelSequence'
]
if not existing_sequences:
sequence = tools.create_asset(
asset_name=hierarchy[i],
package_path=h,
asset_class=unreal.LevelSequence,
factory=unreal.LevelSequenceFactoryNew()
)
asset_data = legacy_io.find_one({
"type": "asset",
"name": h.split('/')[-1]
})
id = asset_data.get('_id')
start_frames = []
end_frames = []
elements = list(
legacy_io.find({"type": "asset", "data.visualParent": id}))
for e in elements:
start_frames.append(e.get('data').get('clipIn'))
end_frames.append(e.get('data').get('clipOut'))
elements.extend(legacy_io.find({
"type": "asset",
"data.visualParent": e.get('_id')
}))
min_frame = min(start_frames)
max_frame = max(end_frames)
sequence.set_display_rate(
unreal.FrameRate(asset_data.get('data').get("fps"), 1.0))
sequence.set_playback_start(min_frame)
sequence.set_playback_end(max_frame)
sequences.append(sequence)
frame_ranges.append((min_frame, max_frame))
tracks = sequence.get_master_tracks()
track = None
for t in tracks:
if (t.get_class() ==
unreal.MovieSceneCameraCutTrack.static_class()):
track = t
break
if not track:
track = sequence.add_master_track(
unreal.MovieSceneCameraCutTrack)
else:
for e in existing_sequences:
sequences.append(e.get_asset())
frame_ranges.append((
e.get_asset().get_playback_start(),
e.get_asset().get_playback_end()))
i += 1
shot = tools.create_asset(
asset_name=asset,
package_path=asset_dir,
asset_class=unreal.LevelSequence,
factory=unreal.LevelSequenceFactoryNew()
)
# sequences and frame_ranges have the same length
for i in range(0, len(sequences) - 1):
maps_to_add = []
for j in range(i + 1, len(maps)):
maps_to_add.append(maps[j].get('map'))
self._set_sequence_hierarchy(
sequences[i], sequences[i + 1],
frame_ranges[i][1],
frame_ranges[i + 1][0], frame_ranges[i + 1][1],
maps_to_add)
data = self._get_data(asset)
shot.set_display_rate(
unreal.FrameRate(data.get("fps"), 1.0))
shot.set_playback_start(0)
shot.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1)
self._set_sequence_hierarchy(
sequences[-1], shot,
frame_ranges[-1][1],
data.get('clipIn'), data.get('clipOut'),
[maps[-1].get('map')])
EditorLevelLibrary.load_level(maps[-1].get('map'))
self._process(self.fname, asset_dir, shot)
for s in sequences:
EditorAssetLibrary.save_asset(s.get_full_name())
EditorLevelLibrary.save_current_level()
# Create Asset Container
unreal_pipeline.create_container(
container=container_name, path=asset_dir)
data = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"asset": asset,
"namespace": asset_dir,
"container_name": container_name,
"asset_name": asset_name,
"loader": str(self.__class__.__name__),
"representation": context["representation"]["_id"],
"parent": context["representation"]["parent"],
"family": context["representation"]["context"]["family"]
}
unreal_pipeline.imprint(
"{}/{}".format(asset_dir, container_name), data)
asset_content = EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=False)
for a in asset_content:
EditorAssetLibrary.save_asset(a)
EditorLevelLibrary.load_level(maps[0].get('map'))
return asset_content
def update(self, container, representation):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
source_path = get_representation_path(representation)
destination_path = container["namespace"]
lib_path = Path(get_representation_path(representation))
self._remove_actors(destination_path)
# Delete old animations
anim_path = f"{destination_path}/animations/"
EditorAssetLibrary.delete_directory(anim_path)
with open(source_path, "r") as fp:
data = json.load(fp)
references = [e.get('reference_fbx') for e in data]
asset_containers = self._get_asset_containers(destination_path)
loaded = []
# Delete all the assets imported with the previous version of the
# layout, if they're not in the new layout.
for asset_container in asset_containers:
if asset_container.get_editor_property(
'asset_name') == container["objectName"]:
continue
ref = EditorAssetLibrary.get_metadata_tag(
asset_container.get_asset(), 'representation')
ppath = asset_container.get_editor_property('package_path')
if ref not in references:
# If the asset is not in the new layout, delete it.
# Also check if the parent directory is empty, and delete that
# as well, if it is.
EditorAssetLibrary.delete_directory(ppath)
parent = os.path.dirname(str(ppath))
parent_content = EditorAssetLibrary.list_assets(
parent, recursive=False, include_folder=True
)
if len(parent_content) == 0:
EditorAssetLibrary.delete_directory(parent)
else:
# If the asset is in the new layout, search the instances in
# the JSON file, and create actors for them.
actors_dict = {}
skeleton_dict = {}
for element in data:
reference = element.get('reference_fbx')
instance_name = element.get('instance_name')
skeleton = None
if reference == ref and ref not in loaded:
loaded.append(ref)
family = element.get('family')
assets = EditorAssetLibrary.list_assets(
ppath, recursive=True, include_folder=False)
instances = [
item for item in data
if item.get('reference_fbx') == reference]
for instance in instances:
transform = instance.get('transform')
inst = instance.get('instance_name')
actors = []
if family == 'model':
actors = self._process_family(
assets, 'StaticMesh', transform, inst)
elif family == 'rig':
actors = self._process_family(
assets, 'SkeletalMesh', transform, inst)
actors_dict[inst] = actors
if family == 'rig':
# Finds skeleton among the imported assets
for asset in assets:
obj = ar.get_asset_by_object_path(
asset).get_asset()
if obj.get_class().get_name() == 'Skeleton':
skeleton = obj
if skeleton:
break
if skeleton:
skeleton_dict[reference] = skeleton
else:
skeleton = skeleton_dict.get(reference)
animation_file = element.get('animation')
if animation_file and skeleton:
self._import_animation(
destination_path, lib_path,
instance_name, skeleton,
actors_dict, animation_file)
self._process(source_path, destination_path, loaded)
container_path = "{}/{}".format(container["namespace"],
container["objectName"])
# update metadata
unreal_pipeline.imprint(
container_path,
{
"representation": str(representation["_id"]),
"parent": str(representation["parent"])
})
asset_content = EditorAssetLibrary.list_assets(
destination_path, recursive=True, include_folder=False)
for a in asset_content:
EditorAssetLibrary.save_asset(a)
def remove(self, container):
"""
First, destroys all actors of the assets to be removed. Then, deletes
the asset's directory.
"""
path = container["namespace"]
parent_path = os.path.dirname(path)
self._remove_actors(path)
EditorAssetLibrary.delete_directory(path)
asset_content = EditorAssetLibrary.list_assets(
parent_path, recursive=False, include_folder=True
)
if len(asset_content) == 0:
EditorAssetLibrary.delete_directory(parent_path)
```
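The `_set_sequence_hierarchy` method above splits the parent sequence's timeline into a range where the child shot's levels are visible and hidden ranges before and after it. Below is a minimal pure-Python sketch of that frame-range bookkeeping; it only mirrors the arithmetic of the `set_range` calls (the helper name and the returned tuple layout are illustrative assumptions, not part of the OpenPype or Unreal API).

```python
def visibility_ranges(min_frame_j, max_frame_j, max_frame_i):
    """Return (visible, hidden_before, hidden_after) frame ranges.

    Mirrors the ranges passed to `set_range` above: the child shot is
    visible on [min_frame_j, max_frame_j + 1) and hidden on the rest of
    the parent timeline, which ends at max_frame_i.
    """
    visible = (min_frame_j, max_frame_j + 1)
    hidden_before = (1, min_frame_j) if min_frame_j > 1 else None
    hidden_after = (
        (max_frame_j + 1, max_frame_i + 1)
        if max_frame_j < max_frame_i else None
    )
    return visible, hidden_before, hidden_after


# A child shot covering frames 101-150 inside a parent ending at frame 200
# is visible on [101, 151) and hidden on [1, 101) and [151, 201).
print(visibility_ranges(101, 150, 200))
```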
#### File: openpype/lib/applications.py
```python
import os
import sys
import copy
import json
import tempfile
import platform
import collections
import inspect
import subprocess
from abc import ABCMeta, abstractmethod
import six
from openpype.settings import (
get_system_settings,
get_project_settings,
get_local_settings
)
from openpype.settings.constants import (
METADATA_KEYS,
M_DYNAMIC_KEY_LABEL
)
from . import (
PypeLogger,
Anatomy
)
from .profiles_filtering import filter_profiles
from .local_settings import get_openpype_username
from .avalon_context import (
get_workdir_data,
get_workdir_with_workdir_data,
get_workfile_template_key,
get_last_workfile
)
from .python_module_tools import (
modules_from_path,
classes_from_module
)
from .execute import (
find_executable,
get_linux_launcher_args
)
_logger = None
PLATFORM_NAMES = {"windows", "linux", "darwin"}
DEFAULT_ENV_SUBGROUP = "standard"
CUSTOM_LAUNCH_APP_GROUPS = {
"djvview"
}
def parse_environments(env_data, env_group=None, platform_name=None):
"""Parse environment values from settings byt group and platform.
Data may contain up to 2 hierarchical levels of dictionaries. At the end
of the last level must be string or list. List is joined using platform
specific joiner (';' for windows and ':' for linux and mac).
Hierarchical levels can contain keys for subgroups and platform name.
Platform specific values must be always last level of dictionary. Platform
names are "windows" (MS Windows), "linux" (any linux distribution) and
"darwin" (any MacOS distribution).
Subgroups are helpers added mainly for standard and on farm usage. Farm
may require different environments for e.g. licence related values or
plugins. Default subgroup is "standard".
Examples:
```
{
# Unchanged value
"ENV_KEY1": "value",
# Empty values are kept (unset environment variable)
"ENV_KEY2": "",
# Join list values with ':' or ';'
"ENV_KEY3": ["value1", "value2"],
# Environment groups
"ENV_KEY4": {
"standard": "DEMO_SERVER_URL",
"farm": "LICENCE_SERVER_URL"
},
# Platform specific (and only for windows and mac)
"ENV_KEY5": {
"windows": "windows value",
"darwin": ["value 1", "value 2"]
},
# Environment groups and platform combination
"ENV_KEY6": {
"farm": "FARM_VALUE",
"standard": {
"windows": ["value1", "value2"],
"linux": "value1",
"darwin": ""
}
}
}
```
"""
output = {}
if not env_data:
return output
if not env_group:
env_group = DEFAULT_ENV_SUBGROUP
if not platform_name:
platform_name = platform.system().lower()
for key, value in env_data.items():
if isinstance(value, dict):
# Look if any key is platform key
# - expect that represents environment group if does not contain
# platform keys
if not PLATFORM_NAMES.intersection(set(value.keys())):
# Skip the key if group is not available
if env_group not in value:
continue
value = value[env_group]
# Check again if value is dictionary
# - this time there should be only platform keys
if isinstance(value, dict):
value = value.get(platform_name)
# Check if value is list and join it's values
# QUESTION Should empty values be skipped?
if isinstance(value, (list, tuple)):
value = os.pathsep.join(value)
# Set key to output if value is string
if isinstance(value, six.string_types):
output[key] = value
return output
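# A minimal usage sketch (the keys and values below are illustrative
# assumptions, not part of any settings schema). On Linux with the default
# "standard" group:
#   parse_environments(
#       {
#           "ENV_KEY3": ["value1", "value2"],
#           "ENV_KEY4": {"standard": "demo_value", "farm": "farm_value"},
#       },
#       env_group="standard",
#       platform_name="linux",
#   )
# returns {"ENV_KEY3": "value1:value2", "ENV_KEY4": "demo_value"}.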
def get_logger():
"""Global lib.applications logger getter."""
global _logger
if _logger is None:
_logger = PypeLogger.get_logger(__name__)
return _logger
class ApplicationNotFound(Exception):
"""Application was not found in ApplicationManager by name."""
def __init__(self, app_name):
self.app_name = app_name
super(ApplicationNotFound, self).__init__(
"Application \"{}\" was not found.".format(app_name)
)
class ApplictionExecutableNotFound(Exception):
"""Defined executable paths are not available on the machine."""
def __init__(self, application):
self.application = application
details = None
if not application.executables:
msg = (
"Executable paths for application \"{}\"({}) are not set."
)
else:
msg = (
"Defined executable paths for application \"{}\"({})"
" are not available on this machine."
)
details = "Defined paths:"
for executable in application.executables:
details += "\n- " + executable.executable_path
self.msg = msg.format(application.full_label, application.full_name)
self.details = details
exc_mgs = str(self.msg)
if details:
# Is it a good idea to pass a newline symbol in the exception message?
exc_mgs += "\n" + details
self.exc_msg = exc_mgs
super(ApplictionExecutableNotFound, self).__init__(exc_mgs)
class ApplicationLaunchFailed(Exception):
"""Application launch failed due to known reason.
Message should be self explanatory as traceback won't be shown.
"""
pass
class ApplicationGroup:
"""Hold information about application group.
An application group wraps different versions (variants) of an application,
e.g. "maya" is the group and "maya_2020" is a variant.
The group holds `host_name`, which is the implementation name used in pype.
It also holds `enabled`, telling whether the whole app group is enabled, and
`icon` with the application icon path in resources.
The group also has `environment`, which holds environments shared by all
variants.
Args:
name (str): Groups' name.
data (dict): Group defining data loaded from settings.
manager (ApplicationManager): Manager that created the group.
"""
def __init__(self, name, data, manager):
self.name = name
self.manager = manager
self._data = data
self.enabled = data.get("enabled", True)
self.label = data.get("label") or None
self.icon = data.get("icon") or None
self._environment = data.get("environment") or {}
host_name = data.get("host_name", None)
self.is_host = host_name is not None
self.host_name = host_name
variants = data.get("variants") or {}
key_label_mapping = variants.pop(M_DYNAMIC_KEY_LABEL, {})
for variant_name, variant_data in variants.items():
if variant_name in METADATA_KEYS:
continue
if "variant_label" not in variant_data:
variant_label = key_label_mapping.get(variant_name)
if variant_label:
variant_data["variant_label"] = variant_label
variants[variant_name] = Application(
variant_name, variant_data, self
)
self.variants = variants
def __repr__(self):
return "<{}> - {}".format(self.__class__.__name__, self.name)
def __iter__(self):
for variant in self.variants.values():
yield variant
@property
def environment(self):
return copy.deepcopy(self._environment)
class Application:
"""Hold information about application.
Object by itself does nothing special.
Args:
name (str): Specific version (or variant) of application.
e.g. "maya2020", "nuke11.3", etc.
data (dict): Data for the version containing information about
executables, variant label or if is enabled.
Only required key is `executables`.
group (ApplicationGroup): App group object that created the application
and under which application belongs.
"""
def __init__(self, name, data, group):
self.name = name
self.group = group
self._data = data
enabled = False
if group.enabled:
enabled = data.get("enabled", True)
self.enabled = enabled
self.use_python_2 = data.get("use_python_2", False)
self.label = data.get("variant_label") or name
self.full_name = "/".join((group.name, name))
if group.label:
full_label = " ".join((group.label, self.label))
else:
full_label = self.label
self.full_label = full_label
self._environment = data.get("environment") or {}
arguments = data.get("arguments")
if isinstance(arguments, dict):
arguments = arguments.get(platform.system().lower())
if not arguments:
arguments = []
self.arguments = arguments
if "executables" not in data:
self.executables = [
UndefinedApplicationExecutable()
]
return
_executables = data["executables"]
if isinstance(_executables, dict):
_executables = _executables.get(platform.system().lower())
if not _executables:
_executables = []
executables = []
for executable in _executables:
executables.append(ApplicationExecutable(executable))
self.executables = executables
def __repr__(self):
return "<{}> - {}".format(self.__class__.__name__, self.full_name)
@property
def environment(self):
return copy.deepcopy(self._environment)
@property
def manager(self):
return self.group.manager
@property
def host_name(self):
return self.group.host_name
@property
def icon(self):
return self.group.icon
@property
def is_host(self):
return self.group.is_host
def find_executable(self):
"""Try to find existing executable for application.
Returns (str): Path to an existing executable from `executables`, or None
if none exists.
"""
for executable in self.executables:
if executable.exists():
return executable
return None
def launch(self, *args, **kwargs):
"""Launch the application.
The manager's launch method is used so the logic is kept in one place.
Arguments must match the manager's launch method, which is why *args and
**kwargs are used.
Returns:
subprocess.Popen: Return executed process as Popen object.
"""
return self.manager.launch(self.full_name, *args, **kwargs)
class ApplicationManager:
"""Load applications and tools and store them by their full name.
Args:
system_settings (dict): Preloaded system settings. When passed manager
will always use these values. Gives ability to create manager
using different settings.
"""
def __init__(self, system_settings=None):
self.log = PypeLogger.get_logger(self.__class__.__name__)
self.app_groups = {}
self.applications = {}
self.tool_groups = {}
self.tools = {}
self._system_settings = system_settings
self.refresh()
def set_system_settings(self, system_settings):
"""Ability to change init system settings.
This will trigger refresh of manager.
"""
self._system_settings = system_settings
self.refresh()
def refresh(self):
"""Refresh applications from settings."""
self.app_groups.clear()
self.applications.clear()
self.tool_groups.clear()
self.tools.clear()
if self._system_settings is not None:
settings = copy.deepcopy(self._system_settings)
else:
settings = get_system_settings(
clear_metadata=False, exclude_locals=False
)
all_app_defs = {}
# Prepare known applications
app_defs = settings["applications"]
additional_apps = {}
for group_name, variant_defs in app_defs.items():
if group_name in METADATA_KEYS:
continue
if group_name == "additional_apps":
additional_apps = variant_defs
else:
all_app_defs[group_name] = variant_defs
# Prepare additional applications
# - First find dynamic keys that can be used as labels of group
dynamic_keys = {}
for group_name, variant_defs in additional_apps.items():
if group_name == M_DYNAMIC_KEY_LABEL:
dynamic_keys = variant_defs
break
# Add additional apps to known applications
for group_name, variant_defs in additional_apps.items():
if group_name in METADATA_KEYS:
continue
# Determine group label
label = variant_defs.get("label")
if not label:
# Look for label set in dynamic labels
label = dynamic_keys.get(group_name)
if not label:
label = group_name
variant_defs["label"] = label
all_app_defs[group_name] = variant_defs
for group_name, variant_defs in all_app_defs.items():
if group_name in METADATA_KEYS:
continue
group = ApplicationGroup(group_name, variant_defs, self)
self.app_groups[group_name] = group
for app in group:
self.applications[app.full_name] = app
tools_definitions = settings["tools"]["tool_groups"]
tool_label_mapping = tools_definitions.pop(M_DYNAMIC_KEY_LABEL, {})
for tool_group_name, tool_group_data in tools_definitions.items():
if not tool_group_name or tool_group_name in METADATA_KEYS:
continue
tool_group_label = (
tool_label_mapping.get(tool_group_name) or tool_group_name
)
group = EnvironmentToolGroup(
tool_group_name, tool_group_label, tool_group_data, self
)
self.tool_groups[tool_group_name] = group
for tool in group:
self.tools[tool.full_name] = tool
def launch(self, app_name, **data):
"""Launch procedure.
For host application it's expected to contain "project_name",
"asset_name" and "task_name".
Args:
app_name (str): Name of application that should be launched.
**data (dict): Any additional data. Data may be used during
preparation to store objects usable in multiple places.
Raises:
ApplicationNotFound: Application was not found by entered
argument `app_name`.
ApplictionExecutableNotFound: Executables in application definition
were not found on this machine.
ApplicationLaunchFailed: Something important for application launch
failed. Exception should contain explanation message,
traceback should not be needed.
"""
app = self.applications.get(app_name)
if not app:
raise ApplicationNotFound(app_name)
executable = app.find_executable()
if not executable:
raise ApplictionExecutableNotFound(app)
context = ApplicationLaunchContext(
app, executable, **data
)
return context.launch()
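# Usage sketch (the application name and context values are hypothetical):
#   manager = ApplicationManager()
#   manager.launch(
#       "maya/2020",
#       project_name="demo_project",
#       asset_name="sh010",
#       task_name="animation",
#   )
# Raises ApplicationNotFound or ApplictionExecutableNotFound when the name
# or its executables cannot be resolved, as described in the docstring above.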
class EnvironmentToolGroup:
"""Hold information about environment tool group.
An environment tool group may hold different variants of the same tool and
set environments that are the same for all of them,
e.g. "mtoa" may have different versions but all environments except one
are the same.
Args:
name (str): Name of the tool group.
data (dict): Group's information with its variants.
manager (ApplicationManager): Manager that creates the group.
"""
def __init__(self, name, label, data, manager):
self.name = name
self.label = label
self._data = data
self.manager = manager
self._environment = data["environment"]
variants = data.get("variants") or {}
label_by_key = variants.pop(M_DYNAMIC_KEY_LABEL, {})
variants_by_name = {}
for variant_name, variant_data in variants.items():
if variant_name in METADATA_KEYS:
continue
variant_label = label_by_key.get(variant_name) or variant_name
tool = EnvironmentTool(
variant_name, variant_label, variant_data, self
)
variants_by_name[variant_name] = tool
self.variants = variants_by_name
def __repr__(self):
return "<{}> - {}".format(self.__class__.__name__, self.name)
def __iter__(self):
for variant in self.variants.values():
yield variant
@property
def environment(self):
return copy.deepcopy(self._environment)
class EnvironmentTool:
"""Hold information about application tool.
Structure of tool information.
Args:
name (str): Name of the tool.
variant_data (dict): Variant data with environments and
host and app variant filters.
group (EnvironmentToolGroup): Group which wraps the tool.
"""
def __init__(self, name, label, variant_data, group):
# Backwards compatibility 3.9.1 - 3.9.2
# - 'variant_data' used to contain only environments but now also contains
# host and application variant filters
host_names = variant_data.get("host_names", [])
app_variants = variant_data.get("app_variants", [])
if "environment" in variant_data:
environment = variant_data["environment"]
else:
environment = variant_data
self.host_names = host_names
self.app_variants = app_variants
self.name = name
self.variant_label = label
self.label = " ".join((group.label, label))
self.group = group
self._environment = environment
self.full_name = "/".join((group.name, name))
def __repr__(self):
return "<{}> - {}".format(self.__class__.__name__, self.full_name)
@property
def environment(self):
return copy.deepcopy(self._environment)
def is_valid_for_app(self, app):
"""Is tool valid for application.
Args:
app (Application): Application for which environments are prepared.
"""
if self.app_variants and app.full_name not in self.app_variants:
return False
if self.host_names and app.host_name not in self.host_names:
return False
return True
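# Example: a tool variant with app_variants=["maya/2020"] and
# host_names=["maya"] is valid only for that application (names are
# hypothetical); empty filters mean the tool is valid for all applications.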
class ApplicationExecutable:
"""Representation of executable loaded from settings."""
def __init__(self, executable):
# Try to format executable with environments
try:
executable = executable.format(**os.environ)
except Exception:
pass
# On MacOS check if exists path to executable when ends with `.app`
# - it is common that path will lead to "/Applications/Blender" but
# real path is "/Applications/Blender.app"
if platform.system().lower() == "darwin":
executable = self.macos_executable_prep(executable)
self.executable_path = executable
def __str__(self):
return self.executable_path
def __repr__(self):
return "<{}> {}".format(self.__class__.__name__, self.executable_path)
@staticmethod
def macos_executable_prep(executable):
"""Try to find full path to executable file.
The real executable is stored in '*.app/Contents/MacOS/<executable>'.
Having the path to '*.app' gives the ability to read its plist info and
use the "CFBundleExecutable" key from the plist to know what the
executable is. The plist is stored in '*.app/Contents/Info.plist'.
This is needed because some '*.app' directories don't have the same
permissions as the real executable.
"""
# Try to find if there is `.app` file
if not os.path.exists(executable):
_executable = executable + ".app"
if os.path.exists(_executable):
executable = _executable
# Try to find real executable if executable has `Contents` subfolder
contents_dir = os.path.join(executable, "Contents")
if os.path.exists(contents_dir):
executable_filename = None
# Load plist file and check for bundle executable
plist_filepath = os.path.join(contents_dir, "Info.plist")
if os.path.exists(plist_filepath):
import plistlib
# Read the bundle's Info.plist to find the executable name
# (plistlib.readPlist was removed in Python 3.9).
with open(plist_filepath, "rb") as plist_stream:
    parsed_plist = plistlib.load(plist_stream)
executable_filename = parsed_plist.get("CFBundleExecutable")
if executable_filename:
executable = os.path.join(
contents_dir, "MacOS", executable_filename
)
return executable
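# Example resolution (hypothetical path): "/Applications/Blender" is first
# expanded to "/Applications/Blender.app" if that exists and, when the
# bundle's Info.plist defines "CFBundleExecutable", resolved to
# "/Applications/Blender.app/Contents/MacOS/Blender".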
def as_args(self):
return [self.executable_path]
def _realpath(self):
"""Check if path is valid executable path."""
# Check for executable in PATH
result = find_executable(self.executable_path)
if result is not None:
return result
# This is not 100% validation but it is better than removing the ability
# to launch .bat, .sh or extensionless files
if os.path.exists(self.executable_path):
return self.executable_path
return None
def exists(self):
if not self.executable_path:
return False
return bool(self._realpath())
class UndefinedApplicationExecutable(ApplicationExecutable):
"""Some applications do not require executable path from settings.
In that case this class is used to "fake" existing executable.
"""
def __init__(self):
pass
def __str__(self):
return self.__class__.__name__
def __repr__(self):
return "<{}>".format(self.__class__.__name__)
def as_args(self):
return []
def exists(self):
return True
@six.add_metaclass(ABCMeta)
class LaunchHook:
"""Abstract base class of launch hook."""
# Order of prelaunch hook, will be executed as last if set to None.
order = None
# List of host implementations, skipped if empty.
hosts = []
# List of application groups
app_groups = []
# List of specific application names
app_names = []
# List of platform availability, skipped if empty.
platforms = []
def __init__(self, launch_context):
"""Constructor of launch hook.
Should always be called.
"""
self.log = PypeLogger().get_logger(self.__class__.__name__)
self.launch_context = launch_context
is_valid = self.class_validation(launch_context)
if is_valid:
is_valid = self.validate()
self.is_valid = is_valid
@classmethod
def class_validation(cls, launch_context):
"""Validation of class attributes by launch context.
Args:
launch_context (ApplicationLaunchContext): Context of launching
application.
Returns:
bool: Is launch hook valid for the context by class attributes.
"""
if cls.platforms:
low_platforms = tuple(
_platform.lower()
for _platform in cls.platforms
)
if platform.system().lower() not in low_platforms:
return False
if cls.hosts:
if launch_context.host_name not in cls.hosts:
return False
if cls.app_groups:
if launch_context.app_group.name not in cls.app_groups:
return False
if cls.app_names:
if launch_context.app_name not in cls.app_names:
return False
return True
@property
def data(self):
return self.launch_context.data
@property
def application(self):
return getattr(self.launch_context, "application", None)
@property
def manager(self):
return getattr(self.application, "manager", None)
@property
def host_name(self):
return getattr(self.application, "host_name", None)
@property
def app_group(self):
return getattr(self.application, "group", None)
@property
def app_name(self):
return getattr(self.application, "full_name", None)
@property
def modules_manager(self):
return getattr(self.launch_context, "modules_manager", None)
def validate(self):
"""Optional validation of launch hook on initialization.
Returns:
bool: Hook is valid (True) or invalid (False).
"""
# QUESTION Not sure if this method has any usable potential.
# - maybe result can be based on settings
return True
@abstractmethod
def execute(self, *args, **kwargs):
"""Abstract execute method where logic of hook is."""
pass
class PreLaunchHook(LaunchHook):
"""Abstract class of prelaunch hook.
This launch hook will be processed before application is launched.
If any exception happens during processing, the application won't be
launched.
"""
class PostLaunchHook(LaunchHook):
"""Abstract class of postlaunch hook.
This launch hook will be processed after application is launched.
Nothing will happen if an exception occurs during processing, and
processing of the other postlaunch hooks won't stop either.
"""
class ApplicationLaunchContext:
"""Context of launching application.
The main purpose of the context is to prepare launch arguments and keyword
arguments for the new process. The most important part of the keyword
argument preparation is the environment variables.
During the whole process it is possible to use the `data` attribute to store
objects usable in multiple places.
Launch arguments are strings in a list. It is possible to "chain" arguments
when their order matters, by adding a nested list whose order is right and
should not change.
NOTE: This is a recommendation, not a requirement.
e.g.: `["nuke.exe", "--NukeX"]` -> In this case any part of the process may
insert an argument between `nuke.exe` and `--NukeX`. To keep them together
it is better to wrap them in another list: `[["nuke.exe", "--NukeX"]]`.
Args:
application (Application): Application definition.
executable (ApplicationExecutable): Object with path to executable.
**data (dict): Any additional data. Data may be used during
preparation to store objects usable in multiple places.
"""
def __init__(self, application, executable, env_group=None, **data):
from openpype.modules import ModulesManager
# Application object
self.application = application
self.modules_manager = ModulesManager()
# Logger
logger_name = "{}-{}".format(self.__class__.__name__, self.app_name)
self.log = PypeLogger.get_logger(logger_name)
self.executable = executable
if env_group is None:
env_group = DEFAULT_ENV_SUBGROUP
self.env_group = env_group
self.data = dict(data)
# subprocess.Popen launch arguments (first argument in constructor)
self.launch_args = executable.as_args()
self.launch_args.extend(application.arguments)
if self.data.get("app_args"):
self.launch_args.extend(self.data.pop("app_args"))
# Handle launch environments
env = self.data.pop("env", None)
if env is not None and not isinstance(env, dict):
self.log.warning((
"Passed `env` kwarg has invalid type: {}. Expected: `dict`."
" Using `os.environ` instead."
).format(str(type(env))))
env = None
if env is None:
env = os.environ
# subprocess.Popen keyword arguments
self.kwargs = {
"env": {
key: str(value)
for key, value in env.items()
}
}
if platform.system().lower() == "windows":
# Detach new process from currently running process on Windows
flags = (
subprocess.CREATE_NEW_PROCESS_GROUP
| subprocess.DETACHED_PROCESS
)
self.kwargs["creationflags"] = flags
if not sys.stdout:
self.kwargs["stdout"] = subprocess.DEVNULL
self.kwargs["stderr"] = subprocess.DEVNULL
self.prelaunch_hooks = None
self.postlaunch_hooks = None
self.process = None
@property
def env(self):
if (
"env" not in self.kwargs
or self.kwargs["env"] is None
):
self.kwargs["env"] = {}
return self.kwargs["env"]
@env.setter
def env(self, value):
if not isinstance(value, dict):
raise ValueError(
"'env' attribute expect 'dict' object. Got: {}".format(
str(type(value))
)
)
self.kwargs["env"] = value
def paths_to_launch_hooks(self):
"""Directory paths where to look for launch hooks."""
# This method has potential to be part of application manager (maybe).
paths = []
# TODO load additional studio paths from settings
import openpype
pype_dir = os.path.dirname(os.path.abspath(openpype.__file__))
# --- START: Backwards compatibility ---
hooks_dir = os.path.join(pype_dir, "hooks")
subfolder_names = ["global"]
if self.host_name:
subfolder_names.append(self.host_name)
for subfolder_name in subfolder_names:
path = os.path.join(hooks_dir, subfolder_name)
if (
os.path.exists(path)
and os.path.isdir(path)
and path not in paths
):
paths.append(path)
# --- END: Backwards compatibility ---
subfolders_list = [
["hooks"]
]
if self.host_name:
subfolders_list.append(["hosts", self.host_name, "hooks"])
for subfolders in subfolders_list:
path = os.path.join(pype_dir, *subfolders)
if (
os.path.exists(path)
and os.path.isdir(path)
and path not in paths
):
paths.append(path)
# Load modules paths
paths.extend(self.modules_manager.collect_launch_hook_paths())
return paths
def discover_launch_hooks(self, force=False):
"""Load and prepare launch hooks."""
if (
self.prelaunch_hooks is not None
or self.postlaunch_hooks is not None
):
if not force:
self.log.info("Launch hooks were already discovered.")
return
self.prelaunch_hooks.clear()
self.postlaunch_hooks.clear()
self.log.debug("Discovery of launch hooks started.")
paths = self.paths_to_launch_hooks()
self.log.debug("Paths searched for launch hooks:\n{}".format(
"\n".join("- {}".format(path) for path in paths)
))
all_classes = {
"pre": [],
"post": []
}
for path in paths:
if not os.path.exists(path):
self.log.info(
"Path to launch hooks does not exist: \"{}\"".format(path)
)
continue
modules, _crashed = modules_from_path(path)
for _filepath, module in modules:
all_classes["pre"].extend(
classes_from_module(PreLaunchHook, module)
)
all_classes["post"].extend(
classes_from_module(PostLaunchHook, module)
)
for launch_type, classes in all_classes.items():
hooks_with_order = []
hooks_without_order = []
for klass in classes:
try:
hook = klass(self)
if not hook.is_valid:
self.log.debug(
"Skipped hook invalid for current launch context: "
"{}".format(klass.__name__)
)
continue
if inspect.isabstract(hook):
self.log.debug("Skipped abstract hook: {}".format(
klass.__name__
))
continue
# Separate hooks by pre/post class
if hook.order is None:
hooks_without_order.append(hook)
else:
hooks_with_order.append(hook)
except Exception:
self.log.warning(
"Initialization of hook failed: "
"{}".format(klass.__name__),
exc_info=True
)
# Sort hooks with order by order
ordered_hooks = list(sorted(
hooks_with_order, key=lambda obj: obj.order
))
# Extend ordered hooks with hooks without defined order
ordered_hooks.extend(hooks_without_order)
if launch_type == "pre":
self.prelaunch_hooks = ordered_hooks
else:
self.postlaunch_hooks = ordered_hooks
self.log.debug("Found {} prelaunch and {} postlaunch hooks.".format(
len(self.prelaunch_hooks), len(self.postlaunch_hooks)
))
@property
def app_name(self):
return self.application.name
@property
def host_name(self):
return self.application.host_name
@property
def app_group(self):
return self.application.group
@property
def manager(self):
return self.application.manager
def _run_process(self):
# Windows and MacOS have easier process start
low_platform = platform.system().lower()
if low_platform in ("windows", "darwin"):
return subprocess.Popen(self.launch_args, **self.kwargs)
# Linux uses mid process
# - it is possible that the mid process executable is not
# available for this version of OpenPype in that case use standard
# launch
launch_args = get_linux_launcher_args()
if launch_args is None:
return subprocess.Popen(self.launch_args, **self.kwargs)
# Prepare data that will be passed to midprocess
# - store arguments to a json and pass path to json as last argument
# - pass environments to set
app_env = self.kwargs.pop("env", {})
json_data = {
"args": self.launch_args,
"env": app_env
}
if app_env:
# Filter environments of subprocess
self.kwargs["env"] = {
key: value
for key, value in os.environ.items()
if key in app_env
}
# Create temp file
json_temp = tempfile.NamedTemporaryFile(
mode="w", prefix="op_app_args", suffix=".json", delete=False
)
json_temp.close()
json_temp_filpath = json_temp.name
with open(json_temp_filpath, "w") as stream:
json.dump(json_data, stream)
launch_args.append(json_temp_filpath)
# Create mid-process which will launch application
process = subprocess.Popen(launch_args, **self.kwargs)
# Wait until the process finishes
# - This is important! The process would stay in "open" state.
process.wait()
# Remove the temp file
os.remove(json_temp_filpath)
# Return process which is already terminated
return process
def launch(self):
"""Collect data for new process and then create it.
This method must not be executed more than once.
Returns:
subprocess.Popen: Created process as Popen object.
"""
if self.process is not None:
self.log.warning("Application was already launched.")
return
# Discover launch hooks
self.discover_launch_hooks()
# Execute prelaunch hooks
for prelaunch_hook in self.prelaunch_hooks:
self.log.debug("Executing prelaunch hook: {}".format(
str(prelaunch_hook.__class__.__name__)
))
prelaunch_hook.execute()
self.log.debug("All prelaunch hook executed. Starting new process.")
# Prepare subprocess args
args_len_str = ""
if isinstance(self.launch_args, str):
args = self.launch_args
else:
args = self.clear_launch_args(self.launch_args)
args_len_str = " ({})".format(len(args))
self.log.info(
"Launching \"{}\" with args{}: {}".format(
self.app_name, args_len_str, args
)
)
self.launch_args = args
# Run process
self.process = self._run_process()
# Process post launch hooks
for postlaunch_hook in self.postlaunch_hooks:
self.log.debug("Executing postlaunch hook: {}".format(
str(postlaunch_hook.__class__.__name__)
))
# TODO how to handle errors?
# - store to variable to let them accessible?
try:
postlaunch_hook.execute()
except Exception:
self.log.warning(
"After launch procedures were not successful.",
exc_info=True
)
self.log.debug("Launch of {} finished.".format(self.app_name))
return self.process
@staticmethod
def clear_launch_args(args):
"""Collect launch arguments to final order.
Launch arguments should be a list that may contain nested lists; this
function will unpack the inner lists and keep the ordering.
```
# source
[ [ arg1, [ arg2, arg3 ] ], arg4, [arg5, arg6]]
# result
[ arg1, arg2, arg3, arg4, arg5, arg6]
```
Args:
args (list): Source arguments in list may contain inner lists.
Return:
list: Unpacked arguments.
"""
if isinstance(args, str):
return args
all_cleared = False
while not all_cleared:
all_cleared = True
new_args = []
for arg in args:
if isinstance(arg, (list, tuple, set)):
all_cleared = False
for _arg in arg:
new_args.append(_arg)
else:
new_args.append(arg)
args = new_args
return args
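# Example:
#   ApplicationLaunchContext.clear_launch_args(
#       [["nuke.exe", "--NukeX"], "-x", ["a", ["b"]]]
#   )
# returns ["nuke.exe", "--NukeX", "-x", "a", "b"].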
class MissingRequiredKey(KeyError):
pass
class EnvironmentPrepData(dict):
"""Helper dictionary for storin temp data during environment prep.
Args:
data (dict): Data must contain required keys.
"""
required_keys = (
"project_doc", "asset_doc", "task_name", "app", "anatomy"
)
def __init__(self, data):
for key in self.required_keys:
if key not in data:
raise MissingRequiredKey(key)
if not data.get("log"):
data["log"] = get_logger()
if data.get("env") is None:
data["env"] = os.environ.copy()
if "system_settings" not in data:
data["system_settings"] = get_system_settings()
super(EnvironmentPrepData, self).__init__(data)
def get_app_environments_for_context(
project_name, asset_name, task_name, app_name, env_group=None, env=None
):
"""Prepare environment variables by context.
Args:
project_name (str): Name of project.
asset_name (str): Name of asset.
task_name (str): Name of task.
app_name (str): Name of application that is launched and can be found
by ApplicationManager.
env (dict): Initial environment variables. `os.environ` is used when
not passed.
Returns:
dict: Environments for passed context and application.
"""
from openpype.pipeline import AvalonMongoDB
# Avalon database connection
dbcon = AvalonMongoDB()
dbcon.Session["AVALON_PROJECT"] = project_name
dbcon.install()
# Project document
project_doc = dbcon.find_one({"type": "project"})
asset_doc = dbcon.find_one({
"type": "asset",
"name": asset_name
})
# Prepare app object which can be obtained only from ApplicationManager
app_manager = ApplicationManager()
app = app_manager.applications[app_name]
# Project's anatomy
anatomy = Anatomy(project_name)
data = EnvironmentPrepData({
"project_name": project_name,
"asset_name": asset_name,
"task_name": task_name,
"app": app,
"dbcon": dbcon,
"project_doc": project_doc,
"asset_doc": asset_doc,
"anatomy": anatomy,
"env": env
})
prepare_app_environments(data, env_group)
prepare_context_environments(data, env_group)
# Discard avalon connection
dbcon.uninstall()
return data["env"]
def _merge_env(env, current_env):
"""Modified function(merge) from acre module."""
import acre
result = current_env.copy()
for key, value in env.items():
# Keep missing keys by not filling `missing` kwarg
value = acre.lib.partial_format(value, data=current_env)
result[key] = value
return result
def _add_python_version_paths(app, env, logger):
"""Add vendor packages specific for a Python version."""
# Skip adding if host name is not set
if not app.host_name:
return
# Add Python 2/3 modules
openpype_root = os.getenv("OPENPYPE_REPOS_ROOT")
python_vendor_dir = os.path.join(
openpype_root,
"openpype",
"vendor",
"python"
)
if app.use_python_2:
pythonpath = os.path.join(python_vendor_dir, "python_2")
else:
pythonpath = os.path.join(python_vendor_dir, "python_3")
if not os.path.exists(pythonpath):
return
logger.debug("Adding Python version specific paths to PYTHONPATH")
python_paths = [pythonpath]
# Load PYTHONPATH from current launch context
python_path = env.get("PYTHONPATH")
if python_path:
python_paths.append(python_path)
# Set new PYTHONPATH to launch context environments
env["PYTHONPATH"] = os.pathsep.join(python_paths)
def prepare_app_environments(data, env_group=None, implementation_envs=True):
"""Modify launch environments based on launched app and context.
Args:
data (EnvironmentPrepData): Dictionary where result and intermediate
result will be stored.
"""
import acre
app = data["app"]
log = data["log"]
source_env = data["env"].copy()
_add_python_version_paths(app, source_env, log)
# Use environments from local settings
filtered_local_envs = {}
system_settings = data["system_settings"]
whitelist_envs = system_settings["general"].get("local_env_white_list")
if whitelist_envs:
local_settings = get_local_settings()
local_envs = local_settings.get("environments") or {}
filtered_local_envs = {
key: value
for key, value in local_envs.items()
if key in whitelist_envs
}
# Apply local environment variables for already existing values
for key, value in filtered_local_envs.items():
if key in source_env:
source_env[key] = value
# `added_env_keys` has debug purpose
added_env_keys = {app.group.name, app.name}
# Environments for application
environments = [
app.group.environment,
app.environment
]
asset_doc = data.get("asset_doc")
# Add tools environments
groups_by_name = {}
tool_by_group_name = collections.defaultdict(dict)
if asset_doc:
# Make sure each tool group can be added only once
for key in asset_doc["data"].get("tools_env") or []:
tool = app.manager.tools.get(key)
if not tool or not tool.is_valid_for_app(app):
continue
groups_by_name[tool.group.name] = tool.group
tool_by_group_name[tool.group.name][tool.name] = tool
for group_name in sorted(groups_by_name.keys()):
group = groups_by_name[group_name]
environments.append(group.environment)
added_env_keys.add(group_name)
for tool_name in sorted(tool_by_group_name[group_name].keys()):
tool = tool_by_group_name[group_name][tool_name]
environments.append(tool.environment)
added_env_keys.add(tool.name)
log.debug(
"Will add environments for apps and tools: {}".format(
", ".join(added_env_keys)
)
)
env_values = {}
for _env_values in environments:
if not _env_values:
continue
# Choose right platform
tool_env = parse_environments(_env_values, env_group)
# Apply local environment variables
# - must happen between all values because they may be used during
# merge
for key, value in filtered_local_envs.items():
if key in tool_env:
tool_env[key] = value
# Merge dictionaries
env_values = _merge_env(tool_env, env_values)
merged_env = _merge_env(env_values, source_env)
loaded_env = acre.compute(merged_env, cleanup=False)
final_env = None
# Add host specific environments
if app.host_name and implementation_envs:
module = __import__("openpype.hosts", fromlist=[app.host_name])
host_module = getattr(module, app.host_name, None)
add_implementation_envs = None
if host_module:
add_implementation_envs = getattr(
host_module, "add_implementation_envs", None
)
if add_implementation_envs:
# Function may only modify passed dict without returning value
final_env = add_implementation_envs(loaded_env, app)
if final_env is None:
final_env = loaded_env
keys_to_remove = set(source_env.keys()) - set(final_env.keys())
# Update env
data["env"].update(final_env)
for key in keys_to_remove:
data["env"].pop(key, None)
def apply_project_environments_value(
project_name, env, project_settings=None, env_group=None
):
"""Apply project specific environments on passed environments.
The environments are applied on passed `env` argument value so it is not
required to apply changes back.
Args:
project_name (str): Name of project for which environments should be
received.
env (dict): Environment values on which project specific environments
will be applied.
project_settings (dict): Project settings for passed project name.
Optional if project settings are already prepared.
Returns:
dict: Passed env values with applied project environments.
Raises:
KeyError: If project settings do not contain keys for project specific
environments.
"""
import acre
if project_settings is None:
project_settings = get_project_settings(project_name)
env_value = project_settings["global"]["project_environments"]
if env_value:
parsed_value = parse_environments(env_value, env_group)
env.update(acre.compute(
_merge_env(parsed_value, env),
cleanup=False
))
return env
def prepare_context_environments(data, env_group=None):
"""Modify launch environments with context data for launched host.
Args:
data (EnvironmentPrepData): Dictionary where result and intermediate
result will be stored.
"""
# Context environments
log = data["log"]
project_doc = data["project_doc"]
asset_doc = data["asset_doc"]
task_name = data["task_name"]
if (
not project_doc
or not asset_doc
or not task_name
):
log.info(
"Skipping context environments preparation."
" Launch context does not contain required data."
)
return
# Load project specific environments
project_name = project_doc["name"]
project_settings = get_project_settings(project_name)
data["project_settings"] = project_settings
# Apply project specific environments on current env value
apply_project_environments_value(
project_name, data["env"], project_settings, env_group
)
app = data["app"]
context_env = {
"AVALON_PROJECT": project_doc["name"],
"AVALON_ASSET": asset_doc["name"],
"AVALON_TASK": task_name,
"AVALON_APP_NAME": app.full_name
}
log.debug(
"Context environments set:\n{}".format(
json.dumps(context_env, indent=4)
)
)
data["env"].update(context_env)
if not app.is_host:
return
workdir_data = get_workdir_data(
project_doc, asset_doc, task_name, app.host_name
)
data["workdir_data"] = workdir_data
anatomy = data["anatomy"]
task_type = workdir_data["task"]["type"]
# Temp solution how to pass task type to `_prepare_last_workfile`
data["task_type"] = task_type
try:
workdir = get_workdir_with_workdir_data(workdir_data, anatomy)
except Exception as exc:
raise ApplicationLaunchFailed(
"Error in anatomy.format: {}".format(str(exc))
)
if not os.path.exists(workdir):
log.debug(
"Creating workdir folder: \"{}\"".format(workdir)
)
try:
os.makedirs(workdir)
except Exception as exc:
raise ApplicationLaunchFailed(
"Couldn't create workdir because: {}".format(str(exc))
)
data["env"]["AVALON_APP"] = app.host_name
data["env"]["AVALON_WORKDIR"] = workdir
_prepare_last_workfile(data, workdir)
def _prepare_last_workfile(data, workdir):
"""last workfile workflow preparation.
Function check if should care about last workfile workflow and tries
to find the last workfile. Both information are stored to `data` and
environments.
Last workfile is filled always (with version 1) even if any workfile
exists yet.
Args:
data (EnvironmentPrepData): Dictionary where result and intermediate
result will be stored.
workdir (str): Path to folder where workfiles should be stored.
"""
from openpype.pipeline import HOST_WORKFILE_EXTENSIONS
log = data["log"]
_workdir_data = data.get("workdir_data")
if not _workdir_data:
log.info(
"Skipping last workfile preparation."
" Key `workdir_data` not filled."
)
return
app = data["app"]
workdir_data = copy.deepcopy(_workdir_data)
project_name = data["project_name"]
task_name = data["task_name"]
task_type = data["task_type"]
start_last_workfile = data.get("start_last_workfile")
if start_last_workfile is None:
start_last_workfile = should_start_last_workfile(
project_name, app.host_name, task_name, task_type
)
else:
log.info("Opening of last workfile was disabled by user")
data["start_last_workfile"] = start_last_workfile
workfile_startup = should_workfile_tool_start(
project_name, app.host_name, task_name, task_type
)
data["workfile_startup"] = workfile_startup
# Store boolean as "0"(False) or "1"(True)
data["env"]["AVALON_OPEN_LAST_WORKFILE"] = (
str(int(bool(start_last_workfile)))
)
data["env"]["OPENPYPE_WORKFILE_TOOL_ON_START"] = (
str(int(bool(workfile_startup)))
)
_sub_msg = "" if start_last_workfile else " not"
log.debug(
"Last workfile should{} be opened on start.".format(_sub_msg)
)
# Last workfile path
last_workfile_path = data.get("last_workfile_path") or ""
if not last_workfile_path:
extensions = HOST_WORKFILE_EXTENSIONS.get(app.host_name)
if extensions:
anatomy = data["anatomy"]
project_settings = data["project_settings"]
task_type = workdir_data["task"]["type"]
template_key = get_workfile_template_key(
task_type, app.host_name, project_settings=project_settings
)
# Find last workfile
file_template = str(anatomy.templates[template_key]["file"])
workdir_data.update({
"version": 1,
"user": get_openpype_username(),
"ext": extensions[0]
})
last_workfile_path = get_last_workfile(
workdir, file_template, workdir_data, extensions, True
)
            if not os.path.exists(last_workfile_path):
                log.debug((
                    "Workfile for launch context does not exist"
                    " yet but the path will be set."
                ))
log.debug(
"Setting last workfile path: {}".format(last_workfile_path)
)
data["env"]["AVALON_LAST_WORKFILE"] = last_workfile_path
data["last_workfile_path"] = last_workfile_path
def should_start_last_workfile(
project_name, host_name, task_name, task_type, default_output=False
):
"""Define if host should start last version workfile if possible.
Default output is `False`. Can be overridden with environment variable
`AVALON_OPEN_LAST_WORKFILE`, valid values without case sensitivity are
`"0", "1", "true", "false", "yes", "no"`.
Args:
project_name (str): Name of project.
host_name (str): Name of host which is launched. In avalon's
application context it's value stored in app definition under
key `"application_dir"`. Is not case sensitive.
task_name (str): Name of task which is used for launching the host.
Task name is not case sensitive.
Returns:
bool: True if host should start workfile.
"""
project_settings = get_project_settings(project_name)
profiles = (
project_settings
["global"]
["tools"]
["Workfiles"]
["last_workfile_on_startup"]
)
if not profiles:
return default_output
filter_data = {
"tasks": task_name,
"task_types": task_type,
"hosts": host_name
}
matching_item = filter_profiles(profiles, filter_data)
output = None
if matching_item:
output = matching_item.get("enabled")
if output is None:
return default_output
return output
def should_workfile_tool_start(
project_name, host_name, task_name, task_type, default_output=False
):
"""Define if host should start workfile tool at host launch.
Default output is `False`. Can be overridden with environment variable
`OPENPYPE_WORKFILE_TOOL_ON_START`, valid values without case sensitivity are
`"0", "1", "true", "false", "yes", "no"`.
Args:
project_name (str): Name of project.
host_name (str): Name of host which is launched. In avalon's
application context it's value stored in app definition under
key `"application_dir"`. Is not case sensitive.
task_name (str): Name of task which is used for launching the host.
Task name is not case sensitive.
Returns:
bool: True if host should start workfile.
"""
project_settings = get_project_settings(project_name)
profiles = (
project_settings
["global"]
["tools"]
["Workfiles"]
["open_workfile_tool_on_startup"]
)
if not profiles:
return default_output
filter_data = {
"tasks": task_name,
"task_types": task_type,
"hosts": host_name
}
matching_item = filter_profiles(profiles, filter_data)
output = None
if matching_item:
output = matching_item.get("enabled")
if output is None:
return default_output
return output
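# Illustrative sketch (not part of the original module): the rough shape of a
# profile entry that `filter_profiles` matches against in the two helpers
# above. Keys mirror the filter_data dicts; the values are hypothetical
# project settings.
def _example_workfile_profile():
    return {
        "hosts": ["maya"],
        "task_types": [],
        "tasks": ["modeling"],
        "enabled": True
    }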
def get_non_python_host_kwargs(kwargs, allow_console=True):
"""Explicit setting of kwargs for Popen for AE/PS/Harmony.
Expected behavior
- openpype_console opens window with logs
- openpype_gui has stdout/stderr available for capturing
Args:
kwargs (dict) or None
allow_console (bool): use False for inner Popen opening app itself or
it will open additional console (at least for Harmony)
"""
if kwargs is None:
kwargs = {}
if platform.system().lower() != "windows":
return kwargs
executable_path = os.environ.get("OPENPYPE_EXECUTABLE")
executable_filename = ""
if executable_path:
executable_filename = os.path.basename(executable_path)
if "openpype_gui" in executable_filename:
kwargs.update({
"creationflags": subprocess.CREATE_NO_WINDOW,
"stdout": subprocess.DEVNULL,
"stderr": subprocess.DEVNULL
})
elif allow_console:
kwargs.update({
"creationflags": subprocess.CREATE_NEW_CONSOLE
})
return kwargs
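# Illustrative sketch: how the returned kwargs are typically consumed when
# spawning a non-python host. The executable arguments are placeholders.
def _example_launch_non_python_host(args):
    popen_kwargs = get_non_python_host_kwargs(None, allow_console=True)
    return subprocess.Popen(args, **popen_kwargs)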
```
#### File: tools/workfiles/files_widget.py
```python
import os
import logging
import shutil
import Qt
from Qt import QtWidgets, QtCore
from openpype.tools.utils import PlaceholderLineEdit
from openpype.tools.utils.delegates import PrettyTimeDelegate
from openpype.lib import (
emit_event,
Anatomy,
get_workfile_template_key,
create_workdir_extra_folders,
)
from openpype.lib.avalon_context import (
update_current_task,
compute_session_changes
)
from openpype.pipeline import (
registered_host,
legacy_io,
)
from .model import (
WorkAreaFilesModel,
PublishFilesModel,
FILEPATH_ROLE,
DATE_MODIFIED_ROLE,
)
from .save_as_dialog import SaveAsDialog
log = logging.getLogger(__name__)
class FilesView(QtWidgets.QTreeView):
doubleClickedLeft = QtCore.Signal()
doubleClickedRight = QtCore.Signal()
def mouseDoubleClickEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.doubleClickedLeft.emit()
elif event.button() == QtCore.Qt.RightButton:
self.doubleClickedRight.emit()
return super(FilesView, self).mouseDoubleClickEvent(event)
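# Illustrative sketch: the custom double-click signals allow a parent widget
# to map left double-click to "open" and right double-click to a secondary
# action. The callback arguments are placeholders.
def _example_connect_files_view(view, on_open, on_secondary):
    view.doubleClickedLeft.connect(on_open)
    view.doubleClickedRight.connect(on_secondary)
    return view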
class SelectContextOverlay(QtWidgets.QFrame):
def __init__(self, parent):
super(SelectContextOverlay, self).__init__(parent)
self.setObjectName("WorkfilesPublishedContextSelect")
label_widget = QtWidgets.QLabel(
"Please choose context on the left<br/><",
self
)
label_widget.setAlignment(QtCore.Qt.AlignCenter)
layout = QtWidgets.QHBoxLayout(self)
layout.addWidget(label_widget, 1, QtCore.Qt.AlignCenter)
label_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
parent.installEventFilter(self)
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.Resize:
self.resize(obj.size())
return super(SelectContextOverlay, self).eventFilter(obj, event)
class FilesWidget(QtWidgets.QWidget):
"""A widget displaying files that allows to save and open files."""
file_selected = QtCore.Signal(str)
file_opened = QtCore.Signal()
workfile_created = QtCore.Signal(str)
published_visible_changed = QtCore.Signal(bool)
def __init__(self, parent):
super(FilesWidget, self).__init__(parent)
# Setup
self._asset_id = None
self._asset_doc = None
self._task_name = None
self._task_type = None
# Pype's anatomy object for current project
self.anatomy = Anatomy(legacy_io.Session["AVALON_PROJECT"])
# Template key used to get work template from anatomy templates
self.template_key = "work"
# This is not root but workfile directory
self._workfiles_root = None
self._workdir_path = None
self.host = registered_host()
# Whether to automatically select the latest modified
# file on a refresh of the files model.
self.auto_select_latest_modified = True
# Avoid crash in Blender and store the message box
# (setting parent doesn't work as it hides the message box)
self._messagebox = None
# Filtering input
filter_widget = QtWidgets.QWidget(self)
published_checkbox = QtWidgets.QCheckBox("Published", filter_widget)
filter_input = PlaceholderLineEdit(filter_widget)
filter_input.setPlaceholderText("Filter files..")
filter_layout = QtWidgets.QHBoxLayout(filter_widget)
filter_layout.setContentsMargins(0, 0, 0, 0)
filter_layout.addWidget(filter_input, 1)
filter_layout.addWidget(published_checkbox, 0)
# Create the Files models
extensions = set(self.host.file_extensions())
views_widget = QtWidgets.QWidget(self)
# --- Workarea view ---
workarea_files_model = WorkAreaFilesModel(extensions)
# Create proxy model for files to be able sort and filter
workarea_proxy_model = QtCore.QSortFilterProxyModel()
workarea_proxy_model.setSourceModel(workarea_files_model)
workarea_proxy_model.setDynamicSortFilter(True)
workarea_proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)
# Set up the file list tree view
workarea_files_view = FilesView(views_widget)
workarea_files_view.setModel(workarea_proxy_model)
workarea_files_view.setSortingEnabled(True)
workarea_files_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Date modified delegate
workarea_time_delegate = PrettyTimeDelegate()
workarea_files_view.setItemDelegateForColumn(1, workarea_time_delegate)
# smaller indentation
workarea_files_view.setIndentation(3)
        # Default to a wider first filename column as it is what we mostly
        # care about; the date modified column is relatively small anyway.
workarea_files_view.setColumnWidth(0, 330)
# --- Publish files view ---
publish_files_model = PublishFilesModel(
extensions, legacy_io, self.anatomy
)
publish_proxy_model = QtCore.QSortFilterProxyModel()
publish_proxy_model.setSourceModel(publish_files_model)
publish_proxy_model.setDynamicSortFilter(True)
publish_proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)
publish_files_view = FilesView(views_widget)
publish_files_view.setModel(publish_proxy_model)
publish_files_view.setSortingEnabled(True)
publish_files_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Date modified delegate
publish_time_delegate = PrettyTimeDelegate()
publish_files_view.setItemDelegateForColumn(1, publish_time_delegate)
# smaller indentation
publish_files_view.setIndentation(3)
        # Default to a wider first filename column as it is what we mostly
        # care about; the date modified column is relatively small anyway.
publish_files_view.setColumnWidth(0, 330)
publish_context_overlay = SelectContextOverlay(views_widget)
publish_context_overlay.setVisible(False)
views_layout = QtWidgets.QHBoxLayout(views_widget)
views_layout.setContentsMargins(0, 0, 0, 0)
views_layout.addWidget(workarea_files_view, 1)
views_layout.addWidget(publish_files_view, 1)
# Home Page
# Build buttons widget for files widget
btns_widget = QtWidgets.QWidget(self)
workarea_btns_widget = QtWidgets.QWidget(btns_widget)
btn_save = QtWidgets.QPushButton("Save As", workarea_btns_widget)
btn_browse = QtWidgets.QPushButton("Browse", workarea_btns_widget)
btn_open = QtWidgets.QPushButton("Open", workarea_btns_widget)
workarea_btns_layout = QtWidgets.QHBoxLayout(workarea_btns_widget)
workarea_btns_layout.setContentsMargins(0, 0, 0, 0)
workarea_btns_layout.addWidget(btn_open, 1)
workarea_btns_layout.addWidget(btn_browse, 1)
workarea_btns_layout.addWidget(btn_save, 1)
publish_btns_widget = QtWidgets.QWidget(btns_widget)
btn_save_as_published = QtWidgets.QPushButton(
"Copy && Open", publish_btns_widget
)
btn_change_context = QtWidgets.QPushButton(
"Choose different context", publish_btns_widget
)
btn_select_context_published = QtWidgets.QPushButton(
"Copy && Open", publish_btns_widget
)
btn_cancel_published = QtWidgets.QPushButton(
"Cancel", publish_btns_widget
)
publish_btns_layout = QtWidgets.QHBoxLayout(publish_btns_widget)
publish_btns_layout.setContentsMargins(0, 0, 0, 0)
publish_btns_layout.addWidget(btn_save_as_published, 1)
publish_btns_layout.addWidget(btn_change_context, 1)
publish_btns_layout.addWidget(btn_select_context_published, 1)
publish_btns_layout.addWidget(btn_cancel_published, 1)
btns_layout = QtWidgets.QHBoxLayout(btns_widget)
btns_layout.setContentsMargins(0, 0, 0, 0)
btns_layout.addWidget(workarea_btns_widget, 1)
btns_layout.addWidget(publish_btns_widget, 1)
# Build files widgets for home page
main_layout = QtWidgets.QVBoxLayout(self)
main_layout.setContentsMargins(0, 0, 0, 0)
main_layout.addWidget(filter_widget, 0)
main_layout.addWidget(views_widget, 1)
main_layout.addWidget(btns_widget, 0)
# Register signal callbacks
published_checkbox.stateChanged.connect(self._on_published_change)
filter_input.textChanged.connect(self._on_filter_text_change)
workarea_files_view.doubleClickedLeft.connect(
self._on_workarea_open_pressed
)
workarea_files_view.customContextMenuRequested.connect(
self._on_workarea_context_menu
)
workarea_files_view.selectionModel().selectionChanged.connect(
self.on_file_select
)
btn_open.pressed.connect(self._on_workarea_open_pressed)
btn_browse.pressed.connect(self.on_browse_pressed)
btn_save.pressed.connect(self._on_save_as_pressed)
btn_save_as_published.pressed.connect(
self._on_published_save_as_pressed
)
btn_change_context.pressed.connect(
self._on_publish_change_context_pressed
)
btn_select_context_published.pressed.connect(
self._on_publish_select_context_pressed
)
btn_cancel_published.pressed.connect(
self._on_publish_cancel_pressed
)
# Store attributes
self._published_checkbox = published_checkbox
self._filter_input = filter_input
self._workarea_time_delegate = workarea_time_delegate
self._workarea_files_view = workarea_files_view
self._workarea_files_model = workarea_files_model
self._workarea_proxy_model = workarea_proxy_model
self._publish_time_delegate = publish_time_delegate
self._publish_files_view = publish_files_view
self._publish_files_model = publish_files_model
self._publish_proxy_model = publish_proxy_model
self._publish_context_overlay = publish_context_overlay
self._workarea_btns_widget = workarea_btns_widget
self._publish_btns_widget = publish_btns_widget
self._btn_open = btn_open
self._btn_browse = btn_browse
self._btn_save = btn_save
self._btn_save_as_published = btn_save_as_published
self._btn_change_context = btn_change_context
self._btn_select_context_published = btn_select_context_published
self._btn_cancel_published = btn_cancel_published
# Create a proxy widget for files widget
self.setFocusProxy(btn_open)
# Hide publish files widgets
publish_files_view.setVisible(False)
publish_btns_widget.setVisible(False)
btn_select_context_published.setVisible(False)
btn_cancel_published.setVisible(False)
self._publish_context_select_mode = False
@property
def published_enabled(self):
return self._published_checkbox.isChecked()
def _on_published_change(self):
published_enabled = self.published_enabled
self._workarea_files_view.setVisible(not published_enabled)
self._workarea_btns_widget.setVisible(not published_enabled)
self._publish_files_view.setVisible(published_enabled)
self._publish_btns_widget.setVisible(published_enabled)
self._update_filtering()
self._update_asset_task()
self.published_visible_changed.emit(published_enabled)
self._select_last_modified_file()
def _on_filter_text_change(self):
self._update_filtering()
def _update_filtering(self):
text = self._filter_input.text()
if self.published_enabled:
self._publish_proxy_model.setFilterFixedString(text)
else:
self._workarea_proxy_model.setFilterFixedString(text)
def set_save_enabled(self, enabled):
self._btn_save.setEnabled(enabled)
if not enabled and self._published_checkbox.isChecked():
self._published_checkbox.setChecked(False)
self._published_checkbox.setVisible(enabled)
def set_asset_task(self, asset_id, task_name, task_type):
if asset_id != self._asset_id:
self._asset_doc = None
self._asset_id = asset_id
self._task_name = task_name
self._task_type = task_type
self._update_asset_task()
def _update_asset_task(self):
if self.published_enabled and not self._publish_context_select_mode:
self._publish_files_model.set_context(
self._asset_id, self._task_name
)
has_valid_items = self._publish_files_model.has_valid_items()
self._btn_save_as_published.setEnabled(has_valid_items)
self._btn_change_context.setEnabled(has_valid_items)
else:
# Define a custom session so we can query the work root
# for a "Work area" that is not our current Session.
# This way we can browse it even before we enter it.
if self._asset_id and self._task_name and self._task_type:
session = self._get_session()
self._workdir_path = session["AVALON_WORKDIR"]
self._workfiles_root = self.host.work_root(session)
self._workarea_files_model.set_root(self._workfiles_root)
else:
self._workarea_files_model.set_root(None)
# Disable/Enable buttons based on available files in model
has_valid_items = self._workarea_files_model.has_valid_items()
self._btn_browse.setEnabled(has_valid_items)
self._btn_open.setEnabled(has_valid_items)
if self._publish_context_select_mode:
self._btn_select_context_published.setEnabled(
bool(self._asset_id) and bool(self._task_name)
)
return
# Manually trigger file selection
if not has_valid_items:
self.on_file_select()
def _get_asset_doc(self):
if self._asset_id is None:
return None
if self._asset_doc is None:
self._asset_doc = legacy_io.find_one({"_id": self._asset_id})
return self._asset_doc
def _get_session(self):
"""Return a modified session for the current asset and task"""
session = legacy_io.Session.copy()
self.template_key = get_workfile_template_key(
self._task_type,
session["AVALON_APP"],
project_name=session["AVALON_PROJECT"]
)
changes = compute_session_changes(
session,
asset=self._get_asset_doc(),
task=self._task_name,
template_key=self.template_key
)
session.update(changes)
return session
def _enter_session(self):
"""Enter the asset and task session currently selected"""
session = legacy_io.Session.copy()
changes = compute_session_changes(
session,
asset=self._get_asset_doc(),
task=self._task_name,
template_key=self.template_key
)
if not changes:
# Return early if we're already in the right Session context
# to avoid any unwanted Task Changed callbacks to be triggered.
return
update_current_task(
asset=self._get_asset_doc(),
task=self._task_name,
template_key=self.template_key
)
def open_file(self, filepath):
host = self.host
if host.has_unsaved_changes():
result = self.save_changes_prompt()
if result is None:
# Cancel operation
return False
# Save first if has changes
if result:
current_file = host.current_file()
if not current_file:
# If the user requested to save the current scene
# we can't actually automatically do so if the current
# file has not been saved with a name yet. So we'll have
# to opt out.
log.error("Can't save scene with no filename. Please "
"first save your work file using 'Save As'.")
return
# Save current scene, continue to open file
host.save_file(current_file)
self._enter_session()
host.open_file(filepath)
self.file_opened.emit()
def save_changes_prompt(self):
self._messagebox = messagebox = QtWidgets.QMessageBox(parent=self)
messagebox.setWindowFlags(messagebox.windowFlags() |
QtCore.Qt.FramelessWindowHint)
messagebox.setIcon(messagebox.Warning)
messagebox.setWindowTitle("Unsaved Changes!")
messagebox.setText(
"There are unsaved changes to the current file."
"\nDo you want to save the changes?"
)
messagebox.setStandardButtons(
messagebox.Yes | messagebox.No | messagebox.Cancel
)
result = messagebox.exec_()
if result == messagebox.Yes:
return True
if result == messagebox.No:
return False
return None
def get_filename(self):
"""Show save dialog to define filename for save or duplicate
Returns:
str: The filename to create.
"""
session = self._get_session()
if self.published_enabled:
filepath = self._get_selected_filepath()
extensions = [os.path.splitext(filepath)[1]]
else:
extensions = self.host.file_extensions()
window = SaveAsDialog(
parent=self,
root=self._workfiles_root,
anatomy=self.anatomy,
template_key=self.template_key,
extensions=extensions,
session=session
)
window.exec_()
return window.get_result()
def on_duplicate_pressed(self):
work_file = self.get_filename()
if not work_file:
return
src = self._get_selected_filepath()
dst = os.path.join(self._workfiles_root, work_file)
shutil.copy(src, dst)
self.workfile_created.emit(dst)
self.refresh()
def _get_selected_filepath(self):
"""Return current filepath selected in view"""
if self.published_enabled:
source_view = self._publish_files_view
else:
source_view = self._workarea_files_view
selection = source_view.selectionModel()
index = selection.currentIndex()
if not index.isValid():
return
return index.data(FILEPATH_ROLE)
def _on_workarea_open_pressed(self):
path = self._get_selected_filepath()
if not path:
print("No file selected to open..")
return
self.open_file(path)
def on_browse_pressed(self):
ext_filter = "Work File (*{0})".format(
" *".join(self.host.file_extensions())
)
kwargs = {
"caption": "Work Files",
"filter": ext_filter
}
if Qt.__binding__ in ("PySide", "PySide2"):
kwargs["dir"] = self._workfiles_root
else:
kwargs["directory"] = self._workfiles_root
work_file = QtWidgets.QFileDialog.getOpenFileName(**kwargs)[0]
if work_file:
self.open_file(work_file)
def _on_save_as_pressed(self):
self._save_as_with_dialog()
def _save_as_with_dialog(self):
work_filename = self.get_filename()
if not work_filename:
return None
src_path = self._get_selected_filepath()
# Trigger before save event
emit_event(
"workfile.save.before",
{"filename": work_filename, "workdir_path": self._workdir_path},
source="workfiles.tool"
)
# Make sure workfiles root is updated
# - this triggers 'workio.work_root(...)' which may change value of
# '_workfiles_root'
self.set_asset_task(
self._asset_id, self._task_name, self._task_type
)
# Create workfiles root folder
if not os.path.exists(self._workfiles_root):
log.debug("Initializing Work Directory: %s", self._workfiles_root)
os.makedirs(self._workfiles_root)
# Prepare full path to workfile and save it
filepath = os.path.join(
os.path.normpath(self._workfiles_root), work_filename
)
# Update session if context has changed
self._enter_session()
if not self.published_enabled:
self.host.save_file(filepath)
else:
shutil.copy(src_path, filepath)
self.host.open_file(filepath)
# Create extra folders
create_workdir_extra_folders(
self._workdir_path,
legacy_io.Session["AVALON_APP"],
self._task_type,
self._task_name,
legacy_io.Session["AVALON_PROJECT"]
)
# Trigger after save events
emit_event(
"workfile.save.after",
{"filename": work_filename, "workdir_path": self._workdir_path},
source="workfiles.tool"
)
self.workfile_created.emit(filepath)
# Refresh files model
if self.published_enabled:
self._published_checkbox.setChecked(False)
else:
self.refresh()
return filepath
def _on_published_save_as_pressed(self):
self._save_as_with_dialog()
def _set_publish_context_select_mode(self, enabled):
self._publish_context_select_mode = enabled
# Show buttons related to context selection
self._publish_context_overlay.setVisible(enabled)
self._btn_cancel_published.setVisible(enabled)
self._btn_select_context_published.setVisible(enabled)
# Change enabled state based on select context
self._btn_select_context_published.setEnabled(
bool(self._asset_id) and bool(self._task_name)
)
self._btn_save_as_published.setVisible(not enabled)
self._btn_change_context.setVisible(not enabled)
# Change views and disable workarea view if enabled
self._workarea_files_view.setEnabled(not enabled)
if self.published_enabled:
self._workarea_files_view.setVisible(enabled)
self._publish_files_view.setVisible(not enabled)
else:
self._workarea_files_view.setVisible(True)
self._publish_files_view.setVisible(False)
# Disable filter widgets
self._published_checkbox.setEnabled(not enabled)
self._filter_input.setEnabled(not enabled)
def _on_publish_change_context_pressed(self):
self._set_publish_context_select_mode(True)
def _on_publish_select_context_pressed(self):
result = self._save_as_with_dialog()
if result is not None:
self._set_publish_context_select_mode(False)
self._update_asset_task()
def _on_publish_cancel_pressed(self):
self._set_publish_context_select_mode(False)
self._update_asset_task()
def on_file_select(self):
self.file_selected.emit(self._get_selected_filepath())
def refresh(self):
"""Refresh listed files for current selection in the interface"""
if self.published_enabled:
self._publish_files_model.refresh()
else:
self._workarea_files_model.refresh()
if self.auto_select_latest_modified:
self._select_last_modified_file()
def _on_workarea_context_menu(self, point):
index = self._workarea_files_view.indexAt(point)
if not index.isValid():
return
if not index.flags() & QtCore.Qt.ItemIsEnabled:
return
menu = QtWidgets.QMenu(self)
# Duplicate
action = QtWidgets.QAction("Duplicate", menu)
tip = "Duplicate selected file."
action.setToolTip(tip)
action.setStatusTip(tip)
action.triggered.connect(self.on_duplicate_pressed)
menu.addAction(action)
# Show the context action menu
global_point = self._workarea_files_view.mapToGlobal(point)
action = menu.exec_(global_point)
if not action:
return
def _select_last_modified_file(self):
"""Utility function to select the file with latest date modified"""
if self.published_enabled:
source_view = self._publish_files_view
else:
source_view = self._workarea_files_view
model = source_view.model()
highest_index = None
highest = 0
for row in range(model.rowCount()):
index = model.index(row, 0, parent=QtCore.QModelIndex())
if not index.isValid():
continue
modified = index.data(DATE_MODIFIED_ROLE)
if modified is not None and modified > highest:
highest_index = index
highest = modified
if highest_index:
source_view.setCurrentIndex(highest_index)
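# Illustrative sketch (not executed on import): the typical driving sequence
# for the widget from a parent tool, assuming a valid asset id, task name and
# task type are already known.
def _example_drive_files_widget(widget, asset_id, task_name, task_type):
    widget.set_asset_task(asset_id, task_name, task_type)
    widget.refresh()
    return widget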
```
|
{
"source": "jcu-eresearch/Edgar",
"score": 2
}
|
#### File: importing/edgar_importing/birdlife_shapefile_import.py
```python
import csv
import argparse
import json
import logging
import textwrap
from edgar_importing import db
from edgar_importing import ala
from geoalchemy import WKTSpatialElement
import geoalchemy.functions
import sqlalchemy
import shapely.wkt
from shapely.geometry import Polygon, MultiPolygon
# map of BLA categories to db classification enum values
CLASSIFICATIONS_BY_BLA_CATEGORIES = {
1: 'core',
2: 'invalid', # aka suspect
3: 'vagrant',
4: 'historic',
5: 'irruptive',
6: 'introduced',
7: 'core', # only applies to one species, not 100% sure what classification 7 is
8: 'vagrant' # aka escaped
}
# global logger for this module
_log = logging.getLogger(__name__)
class Taxon(object):
def __init__(self, common=None, sci=None):
self.common_name = common
self.sci_name = sci
self.db_id = None
self.polys_by_classification = {}
def __repr__(self):
return '<Taxon db_id="{dbid}" sci="{sci}" common="{common}" />'.format(
dbid=self.db_id,
sci=self.sci_name,
common=self.common_name)
def _get_sci_name_part(self, idx):
if self.sci_name is None:
return None
parts = self.sci_name.split()
if len(parts) < 2 or len(parts) > 3:
raise RuntimeError("Can't split sciname: " + repr(self))
assert idx < len(parts)
return parts[idx]
@property
def genus(self):
return self._get_sci_name_part(0)
@property
def species(self):
return self._get_sci_name_part(-1)
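# Illustrative sketch (not used by the importer): how Taxon splits a
# scientific name into its genus and species parts. The name below is
# hypothetical.
def _example_taxon_name_parts():
    taxon = Taxon(common='Example bird', sci='Genusname speciesname')
    assert taxon.genus == 'Genusname'
    assert taxon.species == 'speciesname'
    return taxon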
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''
Loads Birdlife Australia shapefiles into the database as
vettings.
The shapefile first needs to be loaded into the database using the
`shp2pgsql` command provided by PostGIS. Assuming the database is named
"edgar", and the shapefile is named "TaxonPolys1.shp", the following
command will load the shapefile into the database:
shp2pgsql TaxonPolys1.shp birdlife_import | sudo -u postgres psql edgar'''
))
parser.add_argument('config', type=str, help='''The path to the JSON config
file.''')
    parser.add_argument('csv', type=str, help='''The path to the CSV file,
which was converted from the TaxonList_May11.xlsx file supplied by Birdlife
Australia''')
    parser.add_argument('user_id', type=int, help='''The id of the Birdlife
Australia user. This user will own the vettings that are added to the
database.''')
parser.add_argument('--species_translations', type=str, help='''A CSV that
translates Birdlife Australia (BLA) species names into names that ALA
recognises. First column is the exact BLA species name (from the SpSciName
column of the other CSV). The second column is the ALA species name. The
first row of this file will be ignored, so use it for column
headings.''')
return parser.parse_args()
def load_taxons_by_spno(csv_path, translations):
taxons = {}
with open(csv_path, 'rb') as f:
reader = csv.DictReader(f)
for row in reader:
is_species = (row['TaxonLevel'] == 'sp' and
len(row['PopCode']) == 0 and
len(row['SpSciName']) > 0)
if not is_species:
continue
taxon_spno = int(row['SpNo'])
if taxon_spno in taxons:
raise RuntimeError('Duplicate SpNo: ' + str(taxon_spno))
species_name = row['SpSciName']
if species_name in translations:
_log.info('Translating species "%s" into "%s"', species_name,
translations[species_name])
species_name = translations[species_name]
species = ala.species_for_scientific_name(species_name, convert_subspecies=True)
if species is None:
_log.warning("Can't find species '%s' at ALA", species_name)
continue
taxons[taxon_spno] = Taxon(common=species.common_name,
sci=species.scientific_name)
return taxons
def load_db_species_ids_by_genus_and_species():
ids = {}
for result in db.species.select().execute():
parts = result['scientific_name'].split()
assert 2 <= len(parts) <= 3
genus = parts[0].encode('utf-8').upper()
species = parts[-1].encode('utf-8').upper()
if genus not in ids:
_log.debug('DB genus: %s', genus)
ids[genus] = {}
_log.debug('DB species: %s', species)
ids[genus][species] = result['id']
return ids
def set_db_id_for_taxons(taxons):
db_ids = load_db_species_ids_by_genus_and_species()
for t in taxons:
genus = t.genus.upper()
species = t.species.upper()
if genus in db_ids:
if species in db_ids[genus]:
t.db_id = db_ids[genus][species]
def classification_for_bla_row(row):
category = row['rnge']
assert category in CLASSIFICATIONS_BY_BLA_CATEGORIES
return CLASSIFICATIONS_BY_BLA_CATEGORIES[category]
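# Illustrative sketch: BLA category 3 maps to the 'vagrant' classification.
# Only the 'rnge' key matters here; the dict is a stand-in for a row of the
# birdlife_import table.
def _example_classification():
    return classification_for_bla_row({'rnge': 3})  # -> 'vagrant'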
def polys_for_spno(spno):
q = sqlalchemy.select([
'rnge',
'brrnge',
'ST_AsText(the_geom) as the_geom'])\
.select_from(db.birdlife_import)\
.where(db.birdlife_import.c.spno == spno)\
.execute()
for row in q:
if row['the_geom'] is None:
_log.warning('Found row with no geometry: %s', str(dict(row)))
continue
poly = shapely.wkt.loads(row['the_geom'])
if not poly.is_valid:
poly = poly.buffer(0) # can turn invalid polygons into valid ones
if not poly.is_valid:
_log.warning('Found invalid polygon on row: %s', str(dict(row)))
continue
# db only accepts multipolygons, but shape can contain both
# polygons and multipolygons
if isinstance(poly, Polygon):
poly = MultiPolygon([poly])
yield classification_for_bla_row(row), poly
def insert_vettings_for_taxon(taxon, spno, user_id):
if taxon.db_id is None:
_log.warning('Skipping species with no db_id: %s', taxon.sci_name)
return
num_inserted = 0
for classification, poly in polys_for_spno(spno):
q = db.vettings.insert().values(
user_id=user_id,
species_id=taxon.db_id,
comment='Polygons imported from Birdlife Australia',
classification=classification,
area=WKTSpatialElement(poly.wkt, 4326)
).execute()
num_inserted += 1
_log.info('Inserted %d vettings for %s', num_inserted, taxon.sci_name)
def load_translations(csv_path):
translations = {}
with open(csv_path, 'rb') as f:
reader = csv.reader(f)
reader.next() # skip row of headings
for row in reader:
assert len(row) == 2
translations[row[0].strip()] = row[1].strip()
return translations
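# Illustrative sketch of the --species_translations CSV handled above: the
# heading row is skipped, then each row pairs a Birdlife Australia name with
# the ALA name (the names below are hypothetical):
#
#   BLA name,ALA name
#   Genusname blaensis,Genusname alaensis
#
# load_translations() would return {'Genusname blaensis': 'Genusname alaensis'}.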
def main():
logging.basicConfig()
logging.root.setLevel(logging.INFO)
args = parse_args()
# connect to db
with open(args.config, 'rb') as f:
db.connect(json.load(f))
# load translations if present
translations = {}
if args.species_translations is not None:
translations = load_translations(args.species_translations)
_log.info('Loaded %d species name translations', len(translations))
# lookup taxons (species) in BLA and local db
taxons = load_taxons_by_spno(args.csv, translations)
set_db_id_for_taxons(taxons.itervalues())
# wipe existing vettings
db.vettings.delete().where(db.vettings.c.user_id == args.user_id).execute()
# create new vettings
for spno, taxon in taxons.iteritems():
insert_vettings_for_taxon(taxon, spno, args.user_id)
```
#### File: importing/edgar_importing/costa_rica_db_wipe_and_import.py
```python
from edgar_importing import db
import json
import sys
import csv
from datetime import datetime
import logging.handlers
import re
def main():
# make sure this isn't run accidentally
if '--go' not in sys.argv:
print
print "Wipes the database clean and fills database with Costa Rica data."
print
print "Assumes input csv is called costa_rica_import.csv, and is in the"
print "same folder as config.json. The folder you're in now.."
print
print "Usage:"
print "\t{0} --go".format(sys.argv[0])
print
sys.exit()
import_file_path = 'costa_rica_import.csv'
import_threshold_file_path = 'costa_rica_import_threshold.csv'
log = logging.getLogger()
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler())
species_count = 0
occurrences_count = 0
# take note of import start time
import_d = datetime.utcnow()
# connect
with open('config.json', 'rb') as f:
db.connect(json.load(f))
# wipe
db.species.delete().execute()
db.sources.delete().execute()
db.occurrences.delete().execute()
# insert COSTA_RICA_CSV with last_import_time.
result = db.sources.insert().execute(
name='COSTA_RICA_CSV',
last_import_time=import_d)
db_source_id = result.lastrowid
# open threshold csv..
with open(import_threshold_file_path, 'rb') as tf:
# open the costa_rica csv..
with open(import_file_path, 'rb') as f:
reader = csv.reader(f)
# skip the header
header = reader.next()
# iterate over the csv rows
for csv_row_array in reader:
in_collection_code = csv_row_array.pop(0)
in_catalog_number = csv_row_array.pop(0)
in_occurrence_remarks = csv_row_array.pop(0)
in_record_number = csv_row_array.pop(0)
in_event_date = csv_row_array.pop(0)
in_location_id = csv_row_array.pop(0)
in_state_province = csv_row_array.pop(0)
in_county = csv_row_array.pop(0)
in_municipality = csv_row_array.pop(0)
in_locality = csv_row_array.pop(0)
in_decimal_latitude = csv_row_array.pop(0)
in_decimal_longitude = csv_row_array.pop(0)
in_scientific_name = csv_row_array.pop(0)
in_kingdom = csv_row_array.pop(0)
in_phylum = csv_row_array.pop(0)
in_class = csv_row_array.pop(0)
in_order = csv_row_array.pop(0)
in_family = csv_row_array.pop(0)
in_genus = csv_row_array.pop(0)
in_specific_epithet = csv_row_array.pop(0)
in_infraspecific_epithet = csv_row_array.pop(0)
in_taxon_rank = csv_row_array.pop(0)
# Add species if necessary..
# Look up species by scientific_name
row = db.species.select('id')\
.where(db.species.c.scientific_name == in_scientific_name)\
.execute().fetchone()
db_species_id = None
if row is None:
# If we couldn't find it..
# so add the species
tf.seek(0)
threshold_reader = csv.reader(tf)
in_threshold = 1 # The max (will wipe out all values)
for threshold_csv_row_array in threshold_reader:
in_species_name = threshold_csv_row_array[0]
in_threshold = threshold_csv_row_array[1]
# compare species sci_names
conv_in_scientific_name = in_scientific_name.strip()
conv_in_scientific_name = conv_in_scientific_name.replace('.', '')
conv_in_scientific_name = conv_in_scientific_name.replace(' ', '_')
#print conv_in_scientific_name
#print in_species_name
#print '...........'
if conv_in_scientific_name == in_species_name:
print '************'
print in_species_name
if in_threshold == 'na':
in_threshold = '1'
print in_threshold
break
sys.stdout.flush()
result = db.species.insert().execute(
scientific_name=in_scientific_name,
distribution_threshold=in_threshold,
)
species_count = species_count + 1
db_species_id = result.lastrowid
else:
# We found it, grab the species id
db_species_id = row['id']
# insert the occurrence into the db.
# NOTE: Some records have empty in_record_numbers.
# The sql db validates source_id vs source_record_id
# data, so if we have an empty source_record_id, leave it as unspecified
#
occurrences_count = occurrences_count + 1
if in_record_number.strip() != '':
result = db.occurrences.insert().execute(
species_id=db_species_id,
latitude=in_decimal_latitude,
longitude=in_decimal_longitude,
source_id=db_source_id,
source_record_id=in_record_number,
classification='irruptive'
)
else:
result = db.occurrences.insert().execute(
species_id=db_species_id,
latitude=in_decimal_latitude,
longitude=in_decimal_longitude,
source_id=db_source_id,
# source_record_id=in_record_number,
classification='irruptive'
)
log.debug("Species: %i", species_count)
log.debug("Occurrences: %i", occurrences_count)
```
#### File: importing/edgar_importing/costa_rica_threshold_processor.py
```python
import sys
import csv
import os
import glob
from datetime import datetime
import logging.handlers
def main():
models_base_path = os.path.join('/', 'scratch', 'jc155857', 'CostaRica', 'models')
species_listing = os.listdir(models_base_path)
for infile in species_listing:
# print infile
model_csv_file = os.path.join(models_base_path, infile, 'outputs', 'maxentResults.csv')
# print model_csv_file
reader = csv.reader(open(model_csv_file, "rb"))
header = reader.next()
content = reader.next()
        col_pos = 0
for column in header:
if column == 'Equate entropy of thresholded and original distributions logistic threshold':
#print column
#print col_pos
                break
col_pos += 1
threshold = content[col_pos]
print infile + "," + threshold
```
#### File: importing/edgar_importing/vettingd.py
```python
from edgar_importing import db
import sqlalchemy
import argparse
import logging
import json
import time
import datetime
import shapely
import shapely.prepared
import shapely.wkt
import shapely.geometry
from pprint import pprint
def parse_args():
parser = argparse.ArgumentParser(description='''Recalculates occurrence
record classifications based on vettings''')
parser.add_argument('config', type=str, help='''The JSON config file''')
return parser.parse_args()
def main():
args = parse_args()
logging.basicConfig()
logging.root.setLevel(logging.INFO)
with open(args.config, 'rb') as f:
db.connect(json.load(f))
while True:
next_species = next_species_row_to_vet()
if next_species is None:
log_info('=========== No species need vetting. Sleeping for a while.')
time.sleep(60)
else:
vet_species(next_species)
db.engine.dispose()
def next_species_row_to_vet():
return db.engine.execute('''
SELECT * FROM species
WHERE needs_vetting_since IS NOT NULL
ORDER BY needs_vetting_since ASC
LIMIT 1
''').fetchone()
def vet_species(species):
log_info('>>>>>>>>>>> Started vetting species %d: %s',
species['id'], species['scientific_name'])
connection = db.engine.connect()
transaction = connection.begin()
try:
vet_species_inner(species, connection)
log_info('Committing transaction')
transaction.commit()
except:
logging.warning('Performing rollback due to exception',)
transaction.rollback()
raise
finally:
connection.close()
log_info('<<<<<<<<<<< Finished')
def vet_species_inner(species, connection):
vettings = ordered_vettings_for_species_id(species['id'], connection)
query = occurrences_for_species_id(species['id'], connection)
log_info('Vetting %d occurrences through %d vettings', query.rowcount,
len(vettings))
rows_remaining = query.rowcount
rows_changed = 0
rows_contentious = 0
for occrow in query:
changed, contentious = update_occurrence(occrow, vettings, connection)
if changed: rows_changed += 1
if contentious: rows_contentious += 1
rows_remaining -= 1
if rows_remaining % 10000 == 0 and rows_remaining > 0:
log_info('%d occurrences remaining', rows_remaining)
log_info('Dirtied %d occurrences in total', rows_changed)
connection.execute('''
UPDATE species
SET num_dirty_occurrences = num_dirty_occurrences + {changed},
needs_vetting_since = NULL,
num_contentious_occurrences = {cont}
WHERE id = {sid};
'''.format(
changed=rows_changed,
cont=rows_contentious,
sid=species['id']
))
def update_occurrence(occrow, ordered_vettings, connection):
contentious = False
classification = None
location = shapely.wkt.loads(occrow['location'])
# for each vetting, ordered most-authoritive first
for vetting in ordered_vettings:
# check if the vetting applies to this occurrences' location
if vetting.area.intersects(location):
# first, look for classification (if not found previously)
if classification is None:
classification = vetting.classification
# if this is an admin vetting, don't check the other vettings.
                # this will keep `contentious=False` for all occurrences
                # within vettings by admins
if vetting.is_by_admin:
break
# second, look for contention (if not found previously)
elif classification != vetting.classification:
contentious = True
# if both classification and contention are found, no need
# to check the rest of the polygons
break
# if no vettings affect this occurrence, use source_classification
if classification is None:
classification = occrow['source_classification']
# only update db if something changed
if classification != occrow['classification'] or contentious != occrow['contentious']:
connection.execute('''
UPDATE occurrences
SET contentious = {cont},
classification = '{classi}'
WHERE id = {occid}
'''.format(
cont=('TRUE' if contentious else 'FALSE'),
classi=classification,
occid=occrow['id']
))
return True, contentious
else:
return False, contentious
def ordered_vettings_for_species_id(species_id, connection):
vettings = []
query = connection.execute('''
SELECT
vettings.classification AS classi,
ST_AsText(ST_SimplifyPreserveTopology(vettings.area, 0.001)) AS area,
users.is_admin as is_admin
FROM
vettings INNER JOIN users ON vettings.user_id = users.id
WHERE
vettings.species_id = {sid}
AND vettings.ignored IS NULL
AND users.can_vet
ORDER BY
users.is_admin DESC,
users.authority DESC,
vettings.modified DESC
'''.format(sid=int(species_id)))
for row in query:
vettings.append(Vetting(row['classi'], row['area'], row['is_admin']))
return vettings
def occurrences_for_species_id(species_id, connection):
return connection.execute('''
SELECT
id,
classification,
source_classification,
contentious,
ST_AsText(location) AS location
FROM occurrences
WHERE species_id = {sid}
'''.format(sid=species_id))
class Vetting(object):
def __init__(self, classi, wkt_area, is_by_admin):
self.classification = classi
self.area = shapely.prepared.prep(shapely.wkt.loads(wkt_area))
self.is_by_admin = is_by_admin
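# Illustrative sketch: a prepared vetting polygon is tested against an
# occurrence location exactly as update_occurrence() does. The WKT below is
# hypothetical.
def _example_vetting_intersection():
    vetting = Vetting('core', 'POLYGON((0 0, 0 10, 10 10, 10 0, 0 0))', False)
    location = shapely.wkt.loads('POINT(5 5)')
    return vetting.area.intersects(location)  # True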
def log_info(msg, *args, **kwargs):
logging.info(datetime.datetime.today().strftime('%H:%M:%S: ') + msg, *args,
**kwargs)
```
#### File: importing/edgar_importing/vetting_syncd.py
```python
from edgar_importing import db
import socket
import datetime
import time
import sqlalchemy
import argparse
import logging
import json
import urllib2
def parse_args():
parser = argparse.ArgumentParser(description='''Sends vettings to ALA
when they are added, modified, or deleted.''')
parser.add_argument('config', type=str, help='''The JSON config file''')
return parser.parse_args()
def main():
args = parse_args()
logging.basicConfig()
logging.root.setLevel(logging.INFO)
with open(args.config, 'rb') as f:
config = json.load(f)
db.connect(config)
if 'alaVettingSyncUrl' not in config or len(config['alaVettingSyncUrl']) == 0:
logging.critical('"alaVettingSyncUrl" must be present in the config')
return
if 'alaApiKey' not in config or len(config['alaApiKey']) == 0:
logging.critical('"alaApiKey" must be present in the config')
return
while True:
next_vetting = next_vetting_to_sync()
if next_vetting is None:
log_info('=========== No vettings to send. Sleeping for a while.')
time.sleep(60)
else:
send_vetting(next_vetting, config['alaVettingSyncUrl'], config['alaApiKey'])
db.engine.dispose()
time.sleep(5)
def next_vetting_to_sync():
return db.engine.execute('''
SELECT
vettings.id as id,
vettings.created as created,
vettings.modified as modified,
vettings.deleted as deleted,
vettings.ignored as ignored,
vettings.last_ala_sync as last_ala_sync,
vettings.comment as comment,
vettings.classification as classification,
ST_AsText(vettings.area) as area,
species.scientific_name as scientific_name,
users.email as email,
users.authority as authority,
users.is_admin as is_admin
FROM vettings
INNER JOIN users ON vettings.user_id = users.id
INNER JOIN species ON vettings.species_id = species.id
WHERE
users.email IS NOT NULL AND -- ignores non-ALA users
(
vettings.last_ala_sync IS NULL -- new vettings
OR vettings.modified > vettings.last_ala_sync -- modified vettings
OR vettings.deleted IS NOT NULL -- deleted vettings
)
LIMIT 1
''').fetchone()
def send_vetting(vetting, ala_url, ala_api_key):
log_info('>>>>>>>>>>> Sending vetting %d, by "%s" for species "%s"',
vetting['id'],
vetting['email'],
vetting['scientific_name'])
# new vettings
if vetting['last_ala_sync'] is None:
if send_existing_vetting(vetting, 'new', ala_url, ala_api_key):
update_sync_date_on_vetting(vetting['id'])
# deleted vettings
elif vetting['deleted'] is not None:
if send_deleted_vetting(vetting, ala_url, ala_api_key):
delete_vetting(vetting['id'])
# modified vettings
else:
if send_existing_vetting(vetting, 'modified', ala_url, ala_api_key):
update_sync_date_on_vetting(vetting['id'])
log_info('<<<<<<<<<<< Finished')
def send_existing_vetting(vetting, status, ala_url, ala_api_key):
log_info('Sending status="%s" message', status)
lastModified = vetting['modified']
if lastModified is None:
lastModified = vetting['created']
assert lastModified is not None
# remove the microseconds (whole seconds only)
if lastModified.microsecond > 0:
lastModified -= datetime.timedelta(microseconds=lastModified.microsecond)
return send_json(ala_url, {
'id': vetting['id'],
'status': status,
'lastModified': lastModified.isoformat(),
'apiKey': ala_api_key,
'ignored': (False if vetting['ignored'] is None else True),
'species': vetting['scientific_name'],
'classification': vetting['classification'],
'comment': vetting['comment'],
'user':{
'email': vetting['email'],
'authority': vetting['authority'],
'isAdmin': vetting['is_admin']
},
'area': vetting['area']
})
def send_deleted_vetting(vetting, ala_url, ala_api_key):
log_info('Sending status="deleted" message')
if vetting['last_ala_sync'] is None:
# never sent to ALA in the first place, so don't need to send deletion
# message
return True
return send_json(ala_url, {
'id': vetting['id'],
'status': 'deleted',
'lastModified': vetting['deleted'].isoformat(),
'apiKey': ala_api_key
})
def send_json(ala_url, json_object):
assert ala_url is not None
assert 'id' in json_object
assert 'status' in json_object
assert 'lastModified' in json_object
assert 'apiKey' in json_object
request = urllib2.Request(ala_url, json.dumps(json_object), {
'Content-Type': 'application/json',
'User-Agent': 'Edgar/Python-urllib2'
})
try:
response = urllib2.urlopen(request, timeout=20.0)
return response.getcode() == 200
except urllib2.HTTPError as e:
logging.warning('Failed to send vetting. HTTP response code = %d', e.code)
except Exception as e:
logging.warning('Failed to send vetting due to exception: %s', str(e))
return False
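# Illustrative sketch: the minimal payload accepted by send_json(), matching
# the asserts above. The id and timestamp are hypothetical.
def _example_deleted_payload(ala_api_key):
    return {
        'id': 123,
        'status': 'deleted',
        'lastModified': '2013-01-01T00:00:00',
        'apiKey': ala_api_key
    }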
def delete_vetting(vetting_id):
log_info('Deleting vetting from local database')
db.vettings.delete().where(db.vettings.c.id == vetting_id).execute()
def update_sync_date_on_vetting(vetting_id):
log_info('Updating last_ala_sync for vetting')
db.engine.execute('''
UPDATE vettings
SET last_ala_sync = NOW()
WHERE id = {vid};
'''.format(vid=int(vetting_id)))
def log_info(msg, *args, **kwargs):
logging.info(datetime.datetime.today().strftime('%H:%M:%S: ') + msg, *args,
**kwargs)
```
#### File: modelling/src/hpc.py
```python
import os
import os.path
from subprocess import Popen, PIPE
import time
from datetime import datetime
import socket
import httplib, urllib
import urllib2
import logging
import logging.handlers
import db
import csv
import json
import tempfile
import ala
import paramiko
import ssh
import re
from hpc_config import HPCConfig
import sqlalchemy
from sqlalchemy import distinct
from sqlalchemy import or_
log = logging.getLogger()
# Set a default timeout for all socket requests
socketTimeout = 10
socket.setdefaulttimeout(socketTimeout)
# A container for our HPC Job Statuses
# Any job status not defined here is a qstat status
class HPCJobStatus:
queued = "QUEUED"
finishedSuccess = "FINISHED_SUCCESS"
finishedFailure = "FINISHED_FAILURE"
running = "R"
class Job:
# How long until a job should be considered failed
# Note: Needs to take into consideration HPC may be full
# and I may have to wait in the queue.
expireJobAfterXSeconds = ( 24 * 60 * 60 ) # 24 hours
@staticmethod
def getNextSpeciesId():
log.debug("Determining the next species Id, %s", HPCConfig.nextSpeciesURL)
try:
values = {}
data = urllib.urlencode(values)
req = urllib2.Request(HPCConfig.nextSpeciesURL, data)
connection = urllib2.urlopen(req)
responseCode = connection.getcode()
log.debug("Response code: %s", responseCode)
if responseCode == 200:
speciesToModel = connection.read()
log.debug("Determined next species to model is: %s", speciesToModel)
return speciesToModel
elif responseCode == 204:
log.debug("No species to model")
return None
else:
log.warn("Unexpected response code. Response code should have been 200 or 204")
return None
except (urllib2.URLError, urllib2.HTTPError, socket.timeout) as e:
log.warn("Error reading next species URL: %s", e)
return None
def __init__(self, speciesId):
self.speciesId = speciesId
self.jobId = None
self.jobStatus = None
self.jobStatusMsg = ""
self.speciesCommonName = ""
self.speciesSciName = ""
self.jobQueuedTime = None
self.metaDataTempfile = None
self.privateTempfile = None
self.publicTempfile = None
self._writeCSVSpeciesJobFile()
def getSafeSpeciesName(self):
speciesName = self.speciesCommonName + " (" + self.speciesSciName + ")"
cleanName = re.sub(r"[^A-Za-z0-9'_., ()-]", '_', speciesName)
strippedCleanName = cleanName.strip()
return strippedCleanName
def getSpeciesNameForMetaData(self):
speciesName = self.speciesCommonName + " (" + self.speciesSciName + ")"
strippedCleanName = speciesName.strip()
return strippedCleanName
def getMetaDataSourceDict(self, sourceName, sourceHomePage):
resultURL = None
# Special handle the case where the source is the ALA
# We can build a species specific URL for these guys
if (sourceName == 'ALA') :
speciesSciName = self.speciesSciName
processedName = re.sub(r"[ ]", '+', speciesSciName)
processedName = processedName.strip()
resultURL = "http://bie.ala.org.au/species/" + processedName
resultNotes = "ALA - Species page for " + self.getSpeciesNameForMetaData()
# Else..
# Just use the defined source home page URL
else :
resultURL = sourceHomePage
resultNotes = "" + sourceName + " - home page"
return { "identifier" : { "type": "uri", "value": resultURL }, "notes" : resultNotes }
def _setJobId(self, jobId):
self.jobId = jobId
return self.jobId
def _setSpeciesCommonName(self, speciesCommonName):
self.speciesCommonName = speciesCommonName or ""
return self.speciesCommonName
def _setSpeciesSciName(self, speciesSciName):
self.speciesSciName = speciesSciName or ""
return self.speciesSciName
def _setDirtyOccurrences(self, dirtyOccurrences):
self.dirtyOccurrences = dirtyOccurrences
return self.dirtyOccurrences
def _setJobQueuedTimeToNow(self):
self.jobQueuedTime = time.time()
return self.jobQueuedTime
def _setJobStatus(self, status):
self.jobStatus = status
self._lastUpdatedJobStatus = time.time()
return self.jobStatus
def _setJobExpired(self):
self._setJobStatus(HPCJobStatus.finishedFailure)
self.jobStatusMsg = "Job took too long to complete (expired)"
return None
def _recordQueuedJob(self, jobId):
self._setJobQueuedTimeToNow()
self._setJobId(jobId)
self._setJobStatus(HPCJobStatus.queued)
return True
def _setPublicTempfile(self, f):
        if self.publicTempfile is None:
self.publicTempfile = f
else:
raise Exception("Can't set publicTempfile for a job more than once")
return self.publicTempfile
def _setPrivateTempfile(self, f):
        if self.privateTempfile is None:
self.privateTempfile = f
else:
raise Exception("Can't set privateTempfile for a job more than once")
return self.privateTempfile
def _setMetaDataTempfile(self, f):
        if self.metaDataTempfile is None:
self.metaDataTempfile = f
else:
raise Exception("Can't set metaDataTempfile for a job more than once")
return self.metaDataTempfile
def _writeCSVSpeciesJobFile(self):
try:
# Connect the DB
HPCConfig.connectDB()
try:
# Select the species row that matches this job's species
species_row = db.species.select()\
.where(db.species.c.id == self.speciesId)\
.execute().fetchone()
                if species_row is None:
# We didn't find the species in the table..
# this shouldn't happen...
raise Exception("Couldn't find species with id " + self.speciesId + " in table. This shouldn't happen.")
else:
# We found it
# Now record the no. of dirtyOccurrences
dirtyOccurrences = species_row['num_dirty_occurrences']
self._setDirtyOccurrences(dirtyOccurrences)
self._setSpeciesCommonName(species_row['common_name'])
self._setSpeciesSciName(species_row['scientific_name'])
log.debug("Found %s dirtyOccurrences for species %s", dirtyOccurrences, self.speciesId)
# Create tempfiles to write our csv content to
priv_f = tempfile.NamedTemporaryFile(delete=False)
pub_f = tempfile.NamedTemporaryFile(delete=False)
metaData_f = tempfile.NamedTemporaryFile(delete=False)
try:
# Remember the path to the csv file
self._setPrivateTempfile(priv_f.name)
self._setPublicTempfile(pub_f.name)
self._setMetaDataTempfile(metaData_f.name)
log.debug("Writing public csv to: %s", pub_f.name)
log.debug("Writing private csv to: %s", priv_f.name)
log.debug("Writing meta data json to: %s", metaData_f.name)
# Write the metadata
# Get access to the sources for this species
# SELECT DISTINCT url, name, source_id FROM occurrences, sources WHERE occurrences.source_id=sources.id AND species_id=1;
source_rows = sqlalchemy.select(['url', 'name', 'source_id']).\
select_from(db.occurrences.join(db.sources)).\
where(db.occurrences.c.species_id == self.speciesId).\
distinct().\
execute()
meta_data_source_array = []
# Append to our meta data source array each source we found
for source_row in source_rows :
source_url = source_row['url']
source_name = source_row['name']
meta_data_source_array.append(
self.getMetaDataSourceDict(source_name, source_url)
)
# Dump the source metadata
metaDataString = json.dumps({
"harvester": {
"type": "directory",
"metadata": {
"occurrences": [{
"species_name" : self.getSpeciesNameForMetaData(),
"data_source_website" : meta_data_source_array
}],
"suitability": [{
"species_name" : self.getSpeciesNameForMetaData(),
"data_source_website" : meta_data_source_array
}]
}
}
})
metaData_f.write(metaDataString)
pub_writer = csv.writer(pub_f)
priv_writer = csv.writer(priv_f)
# Write the header
pub_writer.writerow(["LATDEC", "LONGDEC", "DATE", "BASIS", "CLASSIFICATION", "SOURCE"])
priv_writer.writerow(["SPPCODE", "LATDEC", "LONGDEC"])
# Select the occurrences for this species
occurrence_rows = sqlalchemy.select([
'ST_X(location) as longitude',
'ST_Y(location) as latitude',
'ST_X(sensitive_location) as sensitive_longitude',
'ST_Y(sensitive_location) as sensitive_latitude',
'date',
'basis',
'classification',
'sources.name as source']).\
select_from(db.occurrences.outerjoin(db.sensitive_occurrences).outerjoin(db.sources)).\
where(db.occurrences.c.species_id == self.speciesId).\
where(or_(db.occurrences.c.classification == 'unknown', db.occurrences.c.classification >= 'core')).\
execute()
# Iterate over the occurrences, and write them to the csv
for occurrence_row in occurrence_rows:
pub_lat = occurrence_row['latitude']
pub_lng = occurrence_row['longitude']
pub_date = ('' if occurrence_row['date'] is None else occurrence_row['date'].isoformat())
pub_basis = ('' if occurrence_row['basis'] is None else occurrence_row['basis'])
pub_classi = ('' if occurrence_row['classification'] is None else occurrence_row['classification'])
pub_source = ('' if occurrence_row['source'] is None else occurrence_row['source'])
pub_writer.writerow([pub_lat, pub_lng, pub_date, pub_basis, pub_classi, pub_source])
if occurrence_row['sensitive_longitude'] is None:
priv_lat = occurrence_row['latitude']
priv_lon = occurrence_row['longitude']
else:
priv_lat = occurrence_row['sensitive_latitude']
priv_lon = occurrence_row['sensitive_longitude']
priv_writer.writerow([self.speciesId, priv_lat, priv_lon])
finally:
# Be a good file citizen, and close the file handle
pub_f.close()
priv_f.close()
metaData_f.close()
finally:
# Dispose the DB
            HPCConfig.disposeDB()
except Exception as e:
log.warn("Exception while trying to write CSV file species. Exception: %s", e)
raise
# Allow someone to use this class with the *with* syntax
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
# If we had tempfile/s, delete it
def cleanup(self):
if self.publicTempfile:
try:
os.unlink(self.publicTempfile)
os.path.exists(self.publicTempfile)
except Exception as e:
log.warn("Exception while deleting public tmpfile (%s) for job. Exception: %s", self.publicTempfile, e)
if self.privateTempfile:
try:
os.unlink(self.privateTempfile)
os.path.exists(self.privateTempfile)
except Exception as e:
log.warn("Exception while deleting private tmpfile (%s) for job. Exception: %s", self.privateTempfile, e)
if self.metaDataTempfile:
try:
os.unlink(self.metaDataTempfile)
os.path.exists(self.metaDataTempfile)
except Exception as e:
log.warn("Exception while deleting meta data tmpfile (%s) for job. Exception: %s", self.metaDataTempfile, e)
# Has this job expired?
def isExpired(self):
return ( ( time.time() - self.jobQueuedTime ) > HPCJob.expireJobAfterXSeconds )
# Is this job done?
def isDone(self):
return ( self.jobStatus == HPCJobStatus.finishedSuccess or
self.jobStatus == HPCJobStatus.finishedFailure )
# Send the job's status to the cake app.
# Returns true if we sent the status update correctly.
# Returns false if we failed to send the status update.
def reportStatusToCakeApp(self):
try:
url = HPCConfig.getSpeciesReportURL(self.speciesId)
log.debug("url: %s", url)
values = {
'job_status': self.jobStatus,
'job_status_message': self.jobStatusMsg,
'dirty_occurrences': self.dirtyOccurrences
}
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
connection = urllib2.urlopen(req)
responseContent = connection.read()
responseCode = connection.getcode()
if responseCode == 200:
log.debug("Reported job status (%s), response: %s", self.jobStatus, responseContent)
return True
else:
log.warn("Failed to report job status, response: %s", responseContent)
return False
except (urllib2.URLError, urllib2.HTTPError, socket.timeout) as e:
log.warn("Error reporting job status: %s", e)
return False
def queue(self):
raise NotImplementedError("queue not implemented")
def checkStatus(self):
raise NotImplementedError("checkStatus not implemented")
class LocalHPCJob(Job):
def __init__(self, speciesId):
Job.__init__(self, speciesId)
self.popen = None
# Queue this job on the HPC
# Returns true if we queued the job
# Returns false if we failed to queue the job
def queue(self):
log.debug("Queueing job for %s", self.speciesId)
try:
self.popen = Popen([HPCConfig.localQueueJobScriptPath, self.speciesId, self.getSafeSpeciesName(), HPCConfig.workingDir, self.privateTempfile, self.publicTempfile, self.metaDataTempfile], stdout=PIPE, stderr=PIPE)
self._recordQueuedJob(self.popen.pid)
log.debug("Succesfully queued job (job_id: %s)", self.jobId)
return True
except Exception as e:
log.error("Failed to queue job. Exception: %s", e)
return False
# Check the status of this job.
# Returns true if we updated the status
# Returns false otherwise
def checkStatus(self):
log.debug("Checking status of job %s (%s)", self.jobId, self.speciesId)
if self.isDone():
# The job is done, no need to check status
return True
elif self.isExpired():
# The job is too old, expire it
log.warn("Current job took too long to complete, expiring job.")
self._setJobExpired()
return True
else:
self.popen.poll()
returnCode = self.popen.returncode
if returnCode == None:
self._setJobStatus(HPCJobStatus.running)
elif returnCode == 0:
self._setJobStatus(HPCJobStatus.finishedSuccess)
else:
log.error(
(
"Job failed.\n\t" +
"exit_code: %s\n\t" +
"stdout: %s\n\t" +
"stderr: %s"
), returnCode, self.popen.stdout.readlines(), self.popen.stderr.readlines()
)
self._setJobStatus(HPCJobStatus.finishedFailure)
return True
class HPCJob(Job):
# Queue this job on the HPC
# Returns true if we queued the job
# Returns false if we failed to queue the job
def queue(self):
log.debug("Queueing job for %s", self.speciesId)
client_scp = ssh.Connection(HPCConfig.sshHPCDestination, username=HPCConfig.sshUser)
client_scp.put(self.privateTempfile, self.privateTempfile)
client_scp.put(self.publicTempfile, self.publicTempfile)
client_scp.put(self.metaDataTempfile, self.metaDataTempfile)
client_scp.close()
# Connect to the HPC
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
client.connect(HPCConfig.sshHPCDestination, username=HPCConfig.sshUser)
# Run the hpc queue script
sshCmd = HPCConfig.queueJobScriptPath + ' "' + self.speciesId + '" "' + self.getSafeSpeciesName() + '" "' + HPCConfig.workingDir + '" "' + self.privateTempfile + '" "' + self.publicTempfile + '" "' + self.metaDataTempfile + '"'
log.debug("ssh command: %s", sshCmd)
chan = client.get_transport().open_session()
chan.exec_command(sshCmd)
returnCode = chan.recv_exit_status()
# TODO remove magic numbers..
stdout = chan.recv(1024)
stderr = chan.recv_stderr(1024)
log.debug("Queue Return Code: %s", returnCode)
log.debug("Queue Output: %s", stdout)
client.close()
if returnCode == 0:
self._recordQueuedJob(stdout)
log.debug("Succesfully queued job (job_id: %s)", self.jobId)
return True;
else:
log.error(
(
"Failed to queue job.\n\t" +
"exit_code: %s\n\t" +
"stdout: %s\n\t" +
"stderr: %s"
), returnCode, stdout, stderr
)
return False
# Check the status of this job.
# Returns true if we updated the status
# Returns false otherwise
def checkStatus(self):
log.debug("Checking status of job %s (%s)", self.jobId, self.speciesId)
if self.isDone():
# The job is done, no need to check status
return True
elif self.isExpired():
# The job is too old, expire it
log.warn("Current job took too long to complete, expiring job.")
self._setJobExpired()
return True
else:
# Run the hpc check status script
sshCmd = HPCConfig.checkJobStatusScriptPath + " '" + self.jobId + "' '" + HPCConfig.workingDir + "'"
# Connect to the HPC
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
client.connect(HPCConfig.sshHPCDestination, username=HPCConfig.sshUser)
log.debug("ssh command: %s", sshCmd)
chan = client.get_transport().open_session()
chan.exec_command(sshCmd)
returnCode = chan.recv_exit_status()
# TODO remove magic numbers..
stdout = chan.recv(1024)
stderr = chan.recv_stderr(1024)
log.debug("Check Status Return Code: %s", returnCode)
log.debug("Check Status Output: %s", stdout)
client.close()
if returnCode == 0:
self._setJobStatus(stdout)
log.debug("Succesfully checked job status %s (job_id: %s)", self.jobStatus, self.jobId)
return True;
else:
log.error(
(
"Failed to check job status.\n\t" +
"exit_code: %s\n\t" +
"stdout: %s\n\t" +
"stderr: %s"
), returnCode, stdout, stderr
)
return False
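# Illustrative lifecycle sketch (not part of the original module; the worker
# loop that drives these jobs is defined elsewhere, so treat the names below
# as assumptions about intended usage):
#
#   job = LocalHPCJob(species_id)            # or HPCJob(species_id)
#   if job.queue():                          # False means queueing failed
#       while not job.isDone() and not job.isExpired():
#           job.checkStatus()                # updates jobStatus / jobStatusMsg
#           job.reportStatusToCakeApp()      # best-effort HTTP status report
#           time.sleep(poll_interval)        # poll_interval is hypothetical
#   job.cleanup()                            # removes the temporary CSV files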
```
|
{
"source": "jcu-eresearch/jcu.dc24.ingesterapi",
"score": 3
}
|
#### File: jcudc24ingesterapi/models/tests.py
```python
import unittest
from jcudc24ingesterapi.models.data_entry import DataEntry
from jcudc24ingesterapi.models.data_sources import PullDataSource
from jcudc24ingesterapi.models.dataset import Dataset
class TestIngesterModels(unittest.TestCase):
"""Test the validation of the domain objects"""
def test_metadata(self):
pass
def test_data_entry(self):
pass
def test_data_sources(self):
pass
def test_dataset(self):
# Basic instantiation
ds = Dataset()
ds = Dataset(data_source=PullDataSource())
self.assertRaises(TypeError, Dataset, data_source=1)
def test_sampling(self):
pass
def test_locations(self):
pass
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jcu-eresearch/TDH-dc24-ingester-platform",
"score": 3
}
|
#### File: dc24_ingester_platform/ingester/processor.py
```python
import os
import sys
import sandbox
# FIXME These imports seem to be what end up in the script
import datetime
import time
from jcudc24ingesterapi.models.data_entry import DataEntry, FileObject
def create_sandbox(cwd):
sb = sandbox.Sandbox()
sb.config.allowPath(cwd)
sb.config.allowModule("jcudc24ingesterapi", "jcudc24ingesterapi.models", "jcudc24ingesterapi.models.data_entry")
sb.config.enable("exit")
return sb
def run_script(script, cwd, data_entry):
"""Runs the script provided (source code as string) and returns
an array of additional data entries, including the original
data_entry that may have been altered.
"""
#sb = create_sandbox(cwd)
#return sb.call(_run_script, script, cwd, data_entry)
return run_script_local(script, cwd, data_entry)
def _run_script(script, cwd, data_entry):
code = compile(script, "<string>", "exec")
exec(code)
return process(cwd, data_entry)
def run_script_local(script, cwd, data_entry):
code = compile(script, "<string>", "exec")
local = {}
exec(code, globals(), local)
return local["process"](cwd, data_entry)
```
#### File: dc24_ingester_platform/ingester/sampling.py
```python
import logging
import time
from jcudc24ingesterapi.ingester_platform_api import get_properties
logger = logging.getLogger("dc24_ingester_platform.ingester.sampling")
class Sampler(object):
"""A Sampler is an object that takes a configuration and state
and uses this to determine whether a dataset is due for a new sample"""
state = None # Holds the state of the Sampler. This is persisted by the ingester.
def __init__(self, config=None, state=None):
self.state = state if state is not None else {}  # restore persisted state when provided
if config != None:
for param in get_properties(config):
setattr(self, param, getattr(config, param))
def sample(self, sample_time, dataset):
"""Returns True or False depending on whether a sample should be made"""
raise NotImplementedError("sample is not implemented for "+str(type(self)))
class NoSuchSampler(Exception):
"""An exception that occurs when there is no sampler available."""
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class PeriodicSampler(Sampler):
rate = None # The sampling rate in seconds
def sample(self, sampler_time, dataset):
"""Run only if the rate worth of seconds has passed since the last run
>>> import datetime
>>> s = PeriodicSampler()
>>> s.rate = 10
>>> dt = datetime.datetime.now()
>>> s.sample(dt, None)
True
>>> s.sample(dt, None)
False
>>> dt = dt + datetime.timedelta(seconds=11)
>>> s.sample(dt, None)
True
"""
run = False
now = time.mktime(sampler_time.utctimetuple())
if "last_run" not in self.state or (float(self.state["last_run"]) + self.rate) < now:
run = True
self.state["last_run"] = now
return run
samplers = {"periodic_sampling":PeriodicSampler}
def create_sampler(sampler_config, state):
"""Create the correct configured sampler from the provided dict"""
if sampler_config.__xmlrpc_class__ not in samplers:
raise NoSuchSampler("Sampler '%s' not found"%(sampler_config.__xmlrpc_class__))
return samplers[sampler_config.__xmlrpc_class__](sampler_config, state)
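# Illustrative usage sketch (assumption about intended wiring): the ingester
# passes a sampling config whose __xmlrpc_class__ names an entry in the
# samplers dict above (e.g. PeriodicSampling from jcudc24ingesterapi, which
# maps to "periodic_sampling"), together with the state persisted from the
# previous run:
#
#   sampler = create_sampler(dataset.data_source.sampling, persisted_state)
#   if sampler.sample(sample_time, dataset):     # sample_time is a datetime
#       ...schedule an ingest for this dataset...
#   new_state = sampler.state                    # persisted again by the caller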
```
#### File: TDH-dc24-ingester-platform/dc24_ingester_platform/__init__.py
```python
class IngesterError(Exception):
"""Generic persistence exception"""
def __init__(self, msg):
Exception.__init__(self, msg)
```
#### File: TDH-dc24-ingester-platform/dc24_ingester_platform/mock.py
```python
import logging
import time
import datetime
from twisted.web import xmlrpc
from dc24_ingester_platform.ingester.data_sources import DataSource
from dc24_ingester_platform.service import IIngesterService
from jcudc24ingesterapi.models.dataset import Dataset
from jcudc24ingesterapi.models.data_sources import _DataSource
from jcudc24ingesterapi.models.sampling import PeriodicSampling
from jcudc24ingesterapi.models.data_entry import DataEntry
logger = logging.getLogger(__name__)
class MockDataSource(DataSource):
def fetch(self, cwd, service=None):
time.sleep(20)
return [DataEntry(timestamp=datetime.datetime.now())]
def data_source_factory(data_source_config, state, parameters):
return MockDataSource(state, parameters, data_source_config)
class MockService(IIngesterService):
def __init__(self):
self.obs_listeners = []
self.data_source_state = {}
self.sampler_state = {}
self.datasets = []
self.setup_datasets()
def setup_datasets(self):
ds = Dataset()
ds.data_source = _DataSource()
ds.id = 1
ds.data_source.__xmlrpc_class__ = "mock_data_source"
ds.data_source.sampling = PeriodicSampling(10000)
self.datasets.append(ds)
def register_observation_listener(self, listener):
self.obs_listeners.append(listener)
def unregister_observation_listener(self, listener):
self.obs_listeners.remove(listener)
def persist_sampler_state(self, ds_id, state):
self.sampler_state[ds_id] = state
def get_sampler_state(self, ds_id):
return self.sampler_state[ds_id] if ds_id in self.sampler_state else {}
def persist_data_source_state(self, ds_id, state):
self.data_source_state[ds_id] = state
def get_data_source_state(self, ds_id):
return self.data_source_state[ds_id] if ds_id in self.data_source_state else {}
def get_active_datasets(self):
return self.datasets
def log_ingester_event(self, dataset_id, timestamp, level, message):
logger.info("[%s] %s"%(level, message))
return
def persist(self, entry, cwd):
logger.info("Got entry: "+str(entry))
def create_ingest_task(self, ds_id, params, cwd):
self.datasets[0].running = True
return 0
def mark_ingress_complete(self, task_id):
self.datasets[0].running = False
def mark_ingest_complete(self, task_id):
pass
class MockServer(xmlrpc.XMLRPC):
def __init__(self, service):
"""Initialise the management service.
:param service: Service Facade instance being exposed by this XMLRPC service
"""
xmlrpc.XMLRPC.__init__(self, allowNone=True)
self.service = service
def makeMockService():
return MockService()
def makeMockServer(service):
return MockServer(service)
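# Illustrative wiring sketch (assumption about intended use): the mock service
# and XML-RPC server stand in for the real platform during development, e.g.
#
#   service = makeMockService()
#   server = makeMockServer(service)             # twisted XML-RPC resource
#   for dataset in service.get_active_datasets():
#       print(dataset.id, dataset.data_source.__xmlrpc_class__)
#
# data_source_factory() above is presumably registered with the ingester so
# that datasets using "mock_data_source" resolve to MockDataSource.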
```
#### File: dc24_ingester_platform/service/repodam.py
```python
from dc24_ingester_platform.service import BaseRepositoryService, method,\
ingesterdb
import decimal
import logging
from jcudc24ingesterapi.models.dataset import Dataset
import os
import shutil
import dam
import time
from jcudc24ingesterapi.models.data_entry import FileObject, DataEntry
from dc24_ingester_platform.utils import parse_timestamp
from jcudc24ingesterapi.ingester_exceptions import UnknownObjectError, PersistenceError
from jcudc24ingesterapi.search import SearchResults
from jcudc24ingesterapi.models.metadata import DatasetMetadataEntry, DataEntryMetadataEntry
logger = logging.getLogger(__name__)
def obj_to_dict(obj, klass=None):
"""Maps an object of base class BaseManagementObject to a dict.
"""
ret = {}
for attr in dir(obj):
if attr.startswith("_") or attr == "metadata": continue
if type(getattr(obj, attr)) in (str, int, float, unicode, dict):
ret[attr] = getattr(obj, attr)
elif type(getattr(obj, attr)) == decimal.Decimal:
ret[attr] = float(getattr(obj, attr))
if klass != None: ret["class"] = klass
elif hasattr(obj, "__xmlrpc_class__"): ret["class"] = obj.__xmlrpc_class__
return ret
def dict_to_object(dic, obj):
for attr in dir(obj):
if attr.startswith("_"): continue
if dic.has_key(attr): setattr(obj, attr, dic[attr])
def merge_parameters(col_orig, col_new, klass, name_attr="name", value_attr="value"):
"""This method updates col_orig removing any that aren't in col_new, updating those that are, and adding new ones
using klass as the constructor
col_new is a dict
col_orig is a list
klass is a type
"""
working = col_new.copy()
to_del = []
for obj in col_orig:
if getattr(obj,name_attr) in working:
# Update
setattr(obj, value_attr, working[obj.name])
del working[obj.name]
else:
# Delete pending
to_del.append(obj)
# Delete
for obj in to_del:
col_orig.remove(obj)
# Add
for k in working:
obj = klass()
setattr(obj, name_attr, k)
setattr(obj, value_attr, working[k])
col_orig.append(obj)
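# Worked example (illustration only) of the merge semantics above, where Attr
# stands for any class with writable ``name``/``value`` attributes:
#   col_orig = [Attr("a", 1), Attr("b", 2)] and col_new = {"a": 10, "c": 30}
#   => "a" is updated to 10, "b" is removed, and a new Attr for "c" is added,
#   leaving col_orig equivalent to [Attr("a", 10), Attr("c", 30)].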
class RepositoryDAM(BaseRepositoryService):
"""This service provides DAO operations for the ingester service.
All objects/DTOs passed in and out of this service are dicts. This service protects the storage layer.
"""
def __init__(self, url, resettable=False):
self._url = url
# A list of new obj ids that will be deleted on reset
self.new_objs = []
self.resettable = resettable
def connection(self):
return dam.DAM(self._url, version="1.2")
def reset(self):
if not self.resettable: return
logger.info("Deleting items from the DAM")
self.new_objs.reverse()
with self.connection() as repo:
repo.delete(self.new_objs)
self.new_objs = []
def mark_for_reset(self, oid):
if not self.resettable: return
if oid not in self.new_objs:
self.new_objs.append(oid)
@method("persist", "schema")
def persist_schema(self, schema):
try:
if hasattr(schema, "repository_id") and schema.repository_id != None:
logger.error("Can't update schemas")
return schema.repository_id
attrs = [{"name":attr.name, "identifier":attr.name, "type":attr.kind} for attr in schema.attributes]
parents = [p.repository_id for p in schema.extends]
for attr in attrs:
if attr["type"] in ("integer", "double"):
attr["type"] = "numerical"
dam_schema = {"dam_type":"Schema",
"type":"Dataset",
"name":schema.name if schema.name != None else "tdh_%d"%schema.id,
"identifier":"tdh_%d"%schema.id,
"attributes":attrs,
"extends": parents}
with self.connection() as repo:
dam_schema = repo.ingest(dam_schema)
self.mark_for_reset(dam_schema["id"])
return dam_schema["id"]
except dam.DuplicateEntityException as e:
logger.exception("Exception while persisting schema")
raise PersistenceError("Error persisting schema: DuplicateEntityException %s"%(str(e)))
except dam.DAMException as e:
logger.exception("Exception while persisting schema")
raise PersistenceError("Error persisting schema: %s"%(str(e)))
@method("persist", "location")
def persist_location(self, location):
try:
dam_location = {"dam_type":"Location",
"name":location.name,
"latitude":location.latitude,
"longitude":location.longitude,
"zones":[]}
if hasattr(location, "repository_id") and location.repository_id != None:
dam_location["id"] = location.repository_id
with self.connection() as repo:
dam_location = repo.ingest(dam_location)
self.mark_for_reset(dam_location["id"])
return dam_location["id"]
except dam.DuplicateEntityException as e:
logger.exception("Exception while persisting location")
raise PersistenceError("Error persisting location: DuplicateEntityException %s"%(str(e)))
except dam.DAMException as e:
logger.exception("Exception while persisting location")
raise PersistenceError("Error persisting location: %s"%(str(e)))
@method("persist", "dataset")
def persist_dataset(self, dataset, schema, location):
try:
dam_dataset = {"dam_type":"Dataset",
"location":location.repository_id,
"zone":"",
"schema":schema.repository_id}
if hasattr(dataset, "repository_id") and dataset.repository_id != None:
dam_dataset["id"] = dataset.repository_id
with self.connection() as repo:
dam_dataset = repo.ingest(dam_dataset)
self.mark_for_reset(dam_dataset["id"])
return dam_dataset["id"]
except dam.DuplicateEntityException as e:
logger.exception("Exception while persisting dataset")
raise PersistenceError("Error persisting dataset: DuplicateEntityException %s"%(str(e)))
except dam.DAMException as e:
logger.exception("Exception while persisting dataset")
raise PersistenceError("Error persisting dataset: %s"%(str(e)))
def _persist_attributes(self, obs, attributes, cwd):
with self.connection() as repo:
for attr_name in attributes:
attr = {"name":attr_name} # DAM Attribute
if isinstance(attributes[attr_name], FileObject):
attr["originalFileName"] = attributes[attr_name].file_name
with open(os.path.join(cwd, attributes[attr_name].f_path), "rb") as f:
repo.ingest_attribute(obs["id"], attr, f)
else:
attr["value"] = attributes[attr_name]
repo.ingest_attribute(obs["id"], attr)
def persist_data_entry(self, dataset, schema, data_entry, cwd):
# Check the attributes are actually in the schema
self.validate_schema(data_entry.data, schema.attrs)
try:
with self.connection() as repo:
dam_obs = {"dam_type":"Observation",
"dataset":dataset.repository_id,
"time":dam.format_time(data_entry.timestamp)}
dam_obs = repo.ingest(dam_obs, lock=True)
self._persist_attributes(dam_obs, data_entry.data, cwd)
repo.unlock(dam_obs["id"])
self.mark_for_reset(dam_obs["id"])
return self.get_data_entry(dataset.id, dam_obs["id"])
except dam.DuplicateEntityException as e:
logger.exception("Exception while persisting data entry")
raise PersistenceError("Error persisting data entry: DuplicateEntityException %s"%(str(e)))
except dam.DAMException as e:
logger.exception("Exception while persisting data entry")
raise PersistenceError("Error persisting data entry: %s"%(str(e)))
def get_data_entry(self, dataset_id, data_entry_id):
try:
with self.connection() as repo:
dam_obj = repo.getTuples(data_entry_id)
except dam.DAMException as e:
logger.exception("Exception while getting data entry")
raise PersistenceError("Error getting data entry: %s"%(str(e)))
if dam_obj is None or len(dam_obj) != 1: return None
dam_obj = dam_obj[0]
data_entry = DataEntry()
data_entry.id = data_entry_id
data_entry.dataset = dataset_id
data_entry.timestamp = parse_timestamp(dam_obj["metadata"]["time"])
self._copy_attrs(dam_obj["data"], data_entry)
return data_entry
def _copy_attrs(self, attrs, dst):
for attr in attrs:
if "size" in attr:
fo = FileObject()
fo.f_name = attr["name"]
fo.mime_type = attr["mimeType"]
fo.file_name = attr["originalFileName"]
dst.data[attr["name"]] = fo
else:
dst.data[attr["name"]] = attr["value"]
def get_data_entry_stream(self, dataset_id, data_entry_id, attr):
repo = self.connection()
return repo.retrieve_attribute(data_entry_id, attr, close_connection=True)
def find_data_entries(self, dataset, offset, limit, start_time=None, end_time=None):
try:
with self.connection() as repo:
start_time = dam.format_time(start_time) if start_time is not None else None
end_time = dam.format_time(end_time) if end_time is not None else None
dam_objs = repo.retrieve_tuples("data", dataset=dataset.repository_id,
offset=offset, limit=limit, startTime=start_time, endTime=end_time)
except dam.DAMException as e:
logger.exception("Exception while getting data entries")
raise PersistenceError("Error getting data entries: %s"%(str(e)))
ret = []
for dam_obj in dam_objs["results"]:
data_entry = DataEntry()
data_entry.id = dam_obj["metadata"]["id"]
data_entry.dataset = dataset.id
data_entry.timestamp = parse_timestamp(dam_obj["metadata"]["time"])
self._copy_attrs(dam_obj["data"], data_entry)
ret.append(data_entry)
return SearchResults(ret, dam_objs["offset"], dam_objs["limit"], dam_objs["count"])
def find_dataset_metadata(self, dataset, offset, limit):
return self._find_object_metadata(dataset, offset, limit, DatasetMetadataEntry, self.service.get_dataset)
def find_data_entry_metadata(self, data_entry, offset, limit):
return self._find_object_metadata(data_entry, offset, limit, DataEntryMetadataEntry)
def _find_object_metadata(self, obj, offset, limit, factory, lookup=None):
try:
with self.connection() as repo:
dam_objs = repo.retrieve_tuples("object_metadata", subject=obj.id,
offset=offset, limit=limit)
except dam.DAMException as e:
logger.exception("Exception while getting data entries")
raise PersistenceError("Error getting data entries: %s"%(str(e)))
ret = []
for dam_obj in dam_objs["results"]:
subject_id = dam_obj["metadata"]["subject"] if lookup == None else lookup(dam_obj["metadata"]["subject"])
schema_id = self.service.find_schemas(repository_id = dam_obj["metadata"]["schema"])[0].id
md = factory(object_id=subject_id, metadata_schema_id=schema_id, id=dam_obj["metadata"]["id"])
self._copy_attrs(dam_obj["data"], md)
ret.append(md)
return SearchResults(ret, dam_objs["offset"], dam_objs["limit"], dam_objs["count"])
def persist_data_entry_metadata(self, dataset, schema, attrs, cwd):
return self._persist_metadata(dataset, schema, attrs, cwd)
def persist_dataset_metadata(self, dataset, schema, attrs, cwd):
return self._persist_metadata(dataset, schema, attrs, cwd)
def _persist_metadata(self, dataset, schema, attrs, cwd):
"""Persist object metadata returning the ID of the new object"""
# Check the attributes are actually in the schema
self.validate_schema(attrs, schema.attrs)
try:
with self.connection() as repo:
obj_md = {"dam_type":"ObjectMetaData",
"schema":schema.repository_id}
if isinstance(dataset, Dataset):
obj_md["subject"] = dataset.repository_id
elif isinstance(dataset, DataEntry):
obj_md['subject'] = dataset.id
obj_md = repo.ingest(obj_md, lock=True)
self._persist_attributes(obj_md, attrs, cwd)
repo.unlock(obj_md["id"])
self.mark_for_reset(obj_md["id"])
except dam.DuplicateEntityException as e:
logger.exception("Exception while persisting object metadata")
raise PersistenceError("Error persisting object metadata: DuplicateEntityException %s"%(str(e)))
except dam.DAMException as e:
logger.exception("Exception while persisting object metadata")
raise PersistenceError("Error persisting object metadata: %s"%(str(e)))
return self._retrieve_object_metadata(obj_md["id"])
def _retrieve_object_metadata(self, md_id):
try:
with self.connection() as repo:
obj_md = repo.getTuples(md_id)[0]
subject = repo.get(obj_md["metadata"]["subject"])
schemas = self.service.find_schemas(repository_id = obj_md["metadata"]["schema"])
if len(schemas) != 1:
for schema in schemas:
logger.error("Schema to repo mapping %d -> %s"%(schema.id, schema.repository_id))
raise UnknownObjectError("Expected 1 schema for repository ID %s, got %d"%(obj_md["metadata"]["schema"], len(schemas)))
schema = schemas[0]
if subject["dam_type"] == "Observation":
entry = DataEntryMetadataEntry(object_id=obj_md["metadata"]["subject"], metadata_schema_id=schema.id, id=md_id)
elif subject["dam_type"] == "Dataset":
entry = DatasetMetadataEntry(object_id=obj_md["metadata"]["subject"], metadata_schema_id=schema.id, id=md_id)
else:
raise UnknownObjectError("Not a valid metadata object to retrieve")
self._copy_attrs(obj_md["data"], entry)
return entry
except dam.DAMException as e:
logger.exception("Exception while getting object metadata")
raise PersistenceError("Error getting object metadata: %s"%(str(e)))
```
#### File: dc24_ingester_platform/service/repodb.py
```python
from dc24_ingester_platform.utils import format_timestamp, parse_timestamp
from dc24_ingester_platform.service import BaseRepositoryService
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, DECIMAL, Boolean, ForeignKey, DateTime
import sqlalchemy.orm as orm
from sqlalchemy import create_engine
from sqlalchemy.orm.exc import NoResultFound
import decimal
import logging
import os
import shutil
from jcudc24ingesterapi.schemas.data_types import FileDataType
from jcudc24ingesterapi.models.data_entry import DataEntry, FileObject
from jcudc24ingesterapi.models.metadata import DatasetMetadataEntry, DataEntryMetadataEntry
from jcudc24ingesterapi.schemas import ConcreteSchema
from jcudc24ingesterapi.search import SearchResults
logger = logging.getLogger(__name__)
Base = declarative_base()
def obj_to_dict(obj, klass=None):
"""Maps an object of base class BaseManagementObject to a dict.
"""
ret = {}
for attr in dir(obj):
if attr.startswith("_") or attr == "metadata": continue
if type(getattr(obj, attr)) in (str, int, float, unicode, dict):
ret[attr] = getattr(obj, attr)
elif type(getattr(obj, attr)) == decimal.Decimal:
ret[attr] = float(getattr(obj, attr))
if klass != None: ret["class"] = klass
elif hasattr(obj, "__xmlrpc_class__"): ret["class"] = obj.__xmlrpc_class__
return ret
def dict_to_object(dic, obj):
for attr in dir(obj):
if attr.startswith("_"): continue
if dic.has_key(attr): setattr(obj, attr, dic[attr])
class DatasetMetadata(Base):
__tablename__ = "DATASET_METADATA"
id = Column(Integer, primary_key=True)
dataset = Column(Integer)
schema = Column(Integer)
attrs = orm.relationship("DatasetMetadataAttr")
class DatasetMetadataAttr(Base):
__tablename__ = "DATASET_METADATA_ATTRS"
id = Column(Integer, primary_key=True)
metadata_entry = Column(Integer, ForeignKey('DATASET_METADATA.id'))
name = Column(String(255))
value = Column(String(255))
class Observation(Base):
__tablename__ = "OBSERVATIONS"
id = Column(Integer, primary_key=True)
timestamp = Column(DateTime)
dataset = Column(Integer)
attrs = orm.relationship("ObservationAttr")
class ObservationAttr(Base):
__tablename__ = "OBSERVATION_ATTRS"
id = Column(Integer, primary_key=True)
observation = Column(Integer, ForeignKey('OBSERVATIONS.id'))
name = Column(String(255))
value = Column(String(255))
class DataEntryMetadata(Base):
__tablename__ = "DATA_ENTRY_METADATA"
id = Column(Integer, primary_key=True)
data_entry = Column(Integer, ForeignKey('OBSERVATIONS.id'))
schema = Column(Integer)
attrs = orm.relationship("DataEntryMetadataAttr")
class DataEntryMetadataAttr(Base):
__tablename__ = "DATA_ENTRY_METADATA_ATTRS"
id = Column(Integer, primary_key=True)
metadata_entry = Column(Integer, ForeignKey('DATA_ENTRY_METADATA.id'))
name = Column(String(255))
value = Column(String(255))
def merge_parameters(col_orig, col_new, klass, name_attr="name", value_attr="value"):
"""This method updates col_orig removing any that aren't in col_new, updating those that are, and adding new ones
using klass as the constructor
col_new is a dict
col_orig is a list
klass is a type
"""
working = col_new.copy()
to_del = []
for obj in col_orig:
if getattr(obj,name_attr) in working:
# Update
setattr(obj, value_attr, working[obj.name].f_path if isinstance(working[obj.name], FileObject) else working[obj.name])
del working[obj.name]
else:
# Delete pending
to_del.append(obj)
# Delete
for obj in to_del:
col_orig.remove(obj)
# Add
for k in working:
obj = klass()
setattr(obj, name_attr, k)
setattr(obj, value_attr, working[obj.name].f_path if isinstance(working[obj.name], FileObject) else working[obj.name])
col_orig.append(obj)
class RepositoryDB(BaseRepositoryService):
"""This service provides DAO operations for the ingester service.
All objects/DTOs passed in and out of this service are dicts. This service protects the storage layer.
"""
def __init__(self, config):
self.engine = create_engine(config["db"])
self.repo = config["files"]
if not os.path.exists(self.repo):
os.makedirs(self.repo)
Observation.metadata.create_all(self.engine, checkfirst=True)
def reset(self):
Observation.metadata.drop_all(self.engine, checkfirst=True)
Observation.metadata.create_all(self.engine, checkfirst=True)
def copy_files(self, attrs, schema, cwd, obj, obj_type):
"""Copy file attributes into place and update the File Objects
to point to the destination path."""
obj_path = os.path.join(self.repo, obj_type)
if not os.path.exists(obj_path): os.makedirs(obj_path)
for k in attrs:
if isinstance(schema[k], FileDataType):
dest_file_name = os.path.join(obj_path, "%d-%s"%(obj.id, k))
shutil.copyfile(os.path.join(cwd, attrs[k].f_path), dest_file_name)
attrs[k].f_path = dest_file_name
def find_data_entries(self, dataset, offset, limit, start_time=None, end_time=None):
"""Find all observations within this dataset that match the given criteria"""
s = orm.sessionmaker(bind=self.engine)()
try:
dataset = self.service.get_dataset(dataset.id)
schema = ConcreteSchema(self.service.get_schema_tree(dataset.schema))
objs = s.query(Observation).filter(Observation.dataset == dataset.id)
if start_time != None:
objs = objs.filter(Observation.timestamp >= start_time)
if end_time != None:
objs = objs.filter(Observation.timestamp <= end_time)
count = objs.count()
objs = objs.limit(limit).offset(offset)
return SearchResults([self._create_data_entry(obs, schema) for obs in objs.all()], offset, limit, count)
finally:
s.close()
def find_dataset_metadata(self, dataset, offset, limit):
s = orm.sessionmaker(bind=self.engine)()
try:
objs = s.query(DatasetMetadata).filter(DatasetMetadata.dataset == dataset.id)
count = objs.count()
return SearchResults([self._create_dataset_metadata(s, obj) for obj in objs.offset(offset).limit(limit).all()], \
offset, limit, count)
finally:
s.close()
def find_data_entry_metadata(self, data_entry, offset, limit):
s = orm.sessionmaker(bind=self.engine)()
try:
objs = s.query(DataEntryMetadata).filter(DataEntryMetadata.data_entry == data_entry.id)
count = objs.count()
return SearchResults([self._create_data_entry_metadata(s, obj) for obj in objs.offset(offset).limit(limit).all()], \
offset, limit, count)
finally:
s.close()
def _create_dataset_metadata(self, session, obj):
"""Internal method for creating the DataEntry domain object from a database
observation
"""
schema = ConcreteSchema(self.service.get_schema_tree(obj.schema))
entry = DatasetMetadataEntry()
entry.metadata_schema = obj.schema
entry.id = obj.id
entry.object_id = obj.dataset
for attr in obj.attrs:
if isinstance(schema.attrs[attr.name], FileDataType):
entry[attr.name] = FileObject(f_path=attr.value)
else:
entry[attr.name] = attr.value
return entry
def _create_data_entry_metadata(self, session, obj):
"""Internal method for creating the DataEntry domain object from a database
observation
"""
schema = ConcreteSchema(self.service.get_schema_tree(obj.schema))
entry = DataEntryMetadataEntry()
entry.metadata_schema = obj.schema
entry.id = obj.id
entry.object_id = obj.data_entry
for attr in obj.attrs:
if isinstance(schema.attrs[attr.name], FileDataType):
entry[attr.name] = FileObject(f_path=attr.value)
else:
entry[attr.name] = attr.value
return entry
def persist_data_entry(self, dataset, schema, data_entry, cwd):
# Check the attributes are actually in the schema
self.validate_schema(data_entry.data, schema.attrs)
session = orm.sessionmaker(bind=self.engine)()
try:
obs = Observation()
obs.timestamp = data_entry.timestamp
obs.dataset = dataset.id
session.add(obs)
session.flush()
# Copy all files into place
self.copy_files(data_entry.data, schema.attrs, cwd, obs, "data_entry")
merge_parameters(obs.attrs, data_entry.data, ObservationAttr)
session.merge(obs)
session.flush()
session.commit()
return self._get_data_entry(obs.dataset, obs.id, session)
finally:
session.close()
def get_data_entry(self, dataset_id, data_entry_id):
session = orm.sessionmaker(bind=self.engine)()
try:
return self._get_data_entry(dataset_id, data_entry_id, session)
except NoResultFound:
return None
finally:
session.close()
def get_data_entry_stream(self, dataset_id, data_entry_id, attr):
"""Get a file stream for the data entry"""
data_entry = self.get_data_entry(dataset_id, data_entry_id)
if data_entry == None: return None
return open(data_entry[attr].f_path, "rb")
def _get_data_entry(self, dataset_id, data_entry_id, session):
obs = session.query(Observation).filter(Observation.dataset == dataset_id,
Observation.id == data_entry_id).one()
dataset = self.service.get_dataset(obs.dataset)
schema = ConcreteSchema(self.service.get_schema_tree(dataset.schema))
return self._create_data_entry(obs, schema)
def _create_data_entry(self, obs, schema):
"""Internal method for creating the DataEntry domain object from a database
observation
"""
entry = DataEntry()
entry.dataset = obs.dataset
entry.id = obs.id
entry.timestamp = obs.timestamp
for attr in obs.attrs:
if isinstance(schema.attrs[attr.name], FileDataType):
entry[attr.name] = FileObject(f_path=attr.value)
else:
entry[attr.name] = attr.value
return entry
def persist_dataset_metadata(self, dataset, schema, attrs, cwd):
# Check the attributes are actually in the schema
self.validate_schema(attrs, schema.attrs)
s = orm.sessionmaker(bind=self.engine)()
try:
md = DatasetMetadata()
md.dataset = dataset.id
md.schema = schema.id
s.add(md)
s.flush()
# Copy all files into place
self.copy_files(attrs, schema.attrs, cwd, md, "dataset_metadata")
merge_parameters(md.attrs, attrs, DatasetMetadataAttr)
s.merge(md)
s.flush()
s.commit()
entry = DatasetMetadataEntry(object_id=md.dataset, metadata_schema_id=md.schema, id=md.id)
for attr in md.attrs:
if isinstance(schema.attrs[attr.name], FileDataType):
entry[attr.name] = FileObject(f_path=attr.value)
else:
entry[attr.name] = attr.value
return entry
finally:
s.close()
def persist_data_entry_metadata(self, data_entry, schema, attrs, cwd):
# Check the attributes are actually in the schema
self.validate_schema(attrs, schema.attrs)
s = orm.sessionmaker(bind=self.engine)()
try:
md = DataEntryMetadata()
md.data_entry = data_entry.id
md.schema = schema.id
s.add(md)
s.flush()
# Copy all files into place
self.copy_files(attrs, schema.attrs, cwd, md, "data_entry_metadata")
merge_parameters(md.attrs, attrs, DataEntryMetadataAttr)
s.merge(md)
s.flush()
s.commit()
entry = DataEntryMetadataEntry(object_id=md.data_entry, metadata_schema_id=md.schema, id=md.id)
for attr in md.attrs:
if isinstance(schema.attrs[attr.name], FileDataType):
entry[attr.name] = FileObject(f_path=attr.value)
else:
entry[attr.name] = attr.value
return entry
finally:
s.close()
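# Construction sketch (illustrative; the config keys match __init__ above):
#
#   repo = RepositoryDB({"db": "sqlite:///repo.db", "files": "/tmp/repo_files"})
#   repo.reset()                                 # drop and recreate the tables
#
# The persistence methods also rely on ``self.service`` (the ingester service
# facade) being attached elsewhere, which is not shown in this file.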
```
|
{
"source": "jcu-eresearch/TDH-rich-data-capture",
"score": 2
}
|
#### File: TDH-rich-data-capture/jcudc24provisioning/__init__.py
```python
from paste.deploy.converters import asint
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.security import NO_PERMISSION_REQUIRED
from controllers.sftp_filesend import SFTPFileSend
from jcudc24provisioning.controllers.authentication import RootFactory, DefaultPermissions
from jcudc24provisioning.controllers.authentication import ShibbolethAuthenticationPolicy, get_user
from jcudc24provisioning.scripts.create_redbox_config import create_json_config
from pyramid_mailer.mailer import Mailer
import os
global global_settings
import logging
logger = logging.getLogger(__name__)
# This line is only required for activating the virtualenv within the IntelliJ IDE
try:
# execfile("D:/Repositories/JCU-DC24/venv/Scripts/activate_this.py", dict(__file__="D:/Repositories/JCU-DC24/venv/Scripts/activate_this.py"))
execfile("../../venv/Scripts/activate_this.py", dict(__file__="../../venv/Scripts/activate_this.py"))
except Exception as e:
logger.exception("Virtual env activation failed, please update the activate_this.py address in the base __init__ if developing on a windows machine.")
import jcudc24provisioning
from deform.form import Form
from pyramid.config import Configurator
from pkg_resources import resource_filename
from pyramid_beaker import session_factory_from_settings, set_cache_regions_from_settings
from pkg_resources import declare_namespace
import sys
import scripts.initializedb
declare_namespace('jcudc24provisioning')
__author__ = '<NAME>'
def set_global_settings(settings):
"""
Responsible for validating settings and, if there are no errors,
setting the global settings variable
"""
errors = []
warns = []
# Data portal
if "dataportal.home_url" not in settings:
warns.append("dataportal.home_url not set - linkages to the data portal home page will be disabled")
if "dataportal.dataset_url" not in settings:
warns.append("dataportal.dataset_url not set - linkages to the data portal datasets will be disabled")
elif "{0}" not in settings["dataportal.dataset_url"]:
warns.append("dataportal.dataset_url doesn't take a dataset id parameter - linkages to the data portal datasets will be disabled")
del settings["dataportal.dataset_url"]
#
for warn in warns:
logger.warn(warn)
if len(errors) > 0:
for error in errors:
logger.error(error)
raise Exception("There were configuration errors, see logging")
jcudc24provisioning.global_settings = settings
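# Illustrative example of the settings this validator inspects (values are
# hypothetical; real deployments supply them via the Pyramid .ini file):
#
#   set_global_settings({
#       "dataportal.home_url": "http://example.org/portal",
#       "dataportal.dataset_url": "http://example.org/portal/dataset/{0}",
#   })
#
# Missing keys only log warnings; a dataset_url without the "{0}" placeholder
# is dropped so portal links are disabled rather than broken.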
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
set_global_settings(settings)
logging.basicConfig()
logging.getLogger('sqlalchemy.engine').setLevel(logging.WARN)
logging.captureWarnings(True)
# execfile("D:/Repositories/JCU-DC24/venv/Scripts/activate_this.py", dict(__file__="D:/Repositories/JCU-DC24/venv/Scripts/activate_this.py"))
#def main():
scripts.initializedb.initialise_all_db(settings)
deform_templates = resource_filename('deform', 'templates')
search_path = (resource_filename('jcudc24provisioning', 'templates/widgets'),resource_filename('jcudc24provisioning', 'templates/widgets/readonly'), resource_filename('jcudc24provisioning', 'templates/custom_widgets'), resource_filename('jcudc24provisioning', 'templates/custom_widgets/readonly'), deform_templates)
Form.set_zpt_renderer(search_path, encoding="latin-1")
set_cache_regions_from_settings(settings)
my_session_factory = session_factory_from_settings(settings)
config = Configurator(settings=settings, session_factory = my_session_factory, root_factory=RootFactory)
# ---------------Project/Workflow pages------------------------
config.add_route('create', '/project/create') # Project creation wizard - templates, pre-fill etc.
config.add_route('general', '/project/{project_id}/general') # Project creation wizard - templates, pre-fill etc.
config.add_route('description', '/project/{project_id}/description') # descriptions
config.add_route('information', '/project/{project_id}/information') # metadata or associated information
config.add_route('methods', '/project/{project_id}/methods') # Data collection methods
config.add_route('datasets', '/project/{project_id}/datasets') # Datasets or collections of data
config.add_route('submit', '/project/{project_id}/submit') # Submit, review and approval
# Project action pages
config.add_route('dataset_log', '/project/{project_id}/logs/dataset_{dataset_id}_logs.txt')
config.add_route('logs', '/project/{project_id}/logs')
config.add_route('dataset_calibration', '/project/{project_id}/dataset/{dataset_id}/calibration/*calibration_id')
config.add_route('dataset', '/project/{project_id}/dataset/*dataset_id') # Datasets or collections of data
config.add_route('ingester_dataset', '/project/{project_id}/dataset/{dataset_id}') # View current settings for exported dataset.
config.add_route('data_calibration', '/project/{project_id}/datasets/{dataset_id}/data/{id_list}/calibration/*calibration_id')
config.add_route('data', '/project/{project_id}/datasets/{dataset_id}/data/*data_id')
config.add_route('permissions', '/project/{project_id}/permissions')
config.add_route('notifications', '/project/{project_id}/notifications')
config.add_route('duplicate', '/project/{project_id}/duplicate')
config.add_route('create_template', '/project/{project_id}/create_template')
config.add_route('search', '/search*search_info')
config.add_route('dataset_record', '/project/{project_id}/datasets/{dataset_id}/record') # Datasets or collections of data
config.add_route('delete_record', '/project/{project_id}/datasets/{dataset_id}/delete_record') # Datasets or collections of data
# config.add_route('browse', '/browse')
# config.add_route('browse_projects', '/browse/projects/*search_info')
# config.add_route('browse', '/browse/datasets')
# config.add_route('browse', '/browse/data')
# config.add_route('browse', '/browse/data/calibrations')
# Exception handler
config.add_route('workflow_exception', '/project/{route:.*}')
# --------------JSON Search views--------------------------------
config.add_route('get_model', '/get_model/{object_type}/{id}', xhr=True)
config.add_route('get_ingester_logs', '/get_ingester_logs/{dam_id}/{filtering:.*}', xhr=True)
config.add_route('add_method_from_template', '/add/{project_id}/{method_id}', xhr=True)
config.add_route('get_activities', '/mint/activities/{search_terms}', xhr=True)
config.add_route('get_parties', '/mint/parties/{search_terms}', xhr=True)
config.add_route('get_from_identifier', '/mint/{identifier:.*}', xhr=True)
config.add_route('dashboard', '/') # Home page/user dashboard
config.add_route('user', '/user')
config.add_route('login', '/login') # Login page
config.add_route('login_shibboleth', '/login/shibboleth') # Login page
config.add_route('logout', '/logout') # logout and redirect page
config.add_route('admin', '/admin') # administer user permissions + view admin required items
config.add_route('help', '/help') # help page
config.add_route('lock_page', '/lock_page/{user_id}/*url')
config.add_route('unlock_page', '/unlock_page/{lock_id}')
config.add_route('record_data', '/{metadata_id}')
config.add_route('record_data_portal', '/{metadata_id}/data') # Redirect to the data portal
# --------------Static resources--------------------------------
config.add_static_view('deform_static', 'deform:static', cache_max_age=0)
config.add_static_view('static', 'jcudc24provisioning:static')
config.add_static_view('project_uploads', "jcudc24provisioning:project_uploads", permission=DefaultPermissions.VIEW_PROJECT)
authn_policy = ShibbolethAuthenticationPolicy(settings)
authz_policy = ACLAuthorizationPolicy()
config.set_authentication_policy(authn_policy)
config.set_authorization_policy(authz_policy)
config.set_default_permission(NO_PERMISSION_REQUIRED)
config.add_request_method(get_user, 'user', reify=True)
config.scan()
json_config = create_json_config()
f = open(settings.get("redbox.local_config_file"), 'w')
f.write(json_config)
# Upload the json configuration to ReDBox
# hostname = global_settings.get("redbox.ssh_address") # remote hostname where SSH server is running
# port = asint(global_settings.get("redbox.ssh_port"))
# rsa_private_key = global_settings.get("redbox.rsa_private_key")
# username = global_settings.get("redbox.ssh_username")
# password = global_settings.get("redbox.ssh_password")
# file_send = SFTPFileSend(hostname, port, username, password=password, rsa_private_key=rsa_private_key)
# file_send.upload_file(settings.get("redbox.local_config_file"), settings.get("redbox.ssh_config_file"))
# file_send.close()
# try:
# InitialiseDatabase()
# except Exception as e:
# logger.exception("Error initialising database: %s", e)
# sys.exit()
# Create the temporary folders if they don't already exist
if not os.path.exists(settings.get("tmpdir")):
os.mkdir(settings.get("tmpdir"))
if not os.path.exists(settings.get("pyramid_deform.tempdir")):
os.mkdir(settings.get("pyramid_deform.tempdir"))
if not os.path.exists(settings.get("mint.tmpdir")):
os.mkdir(settings.get("mint.tmpdir"))
if not os.path.exists(settings.get("redbox.tmpdir")):
os.mkdir(settings.get("redbox.tmpdir"))
if not os.path.exists(settings.get("workflows.files")):
os.mkdir(settings.get("workflows.files"))
# if not os.path.exists(settings.get("mail.queue_path")):
# os.mkdir(settings.get("mail.queue_path"))
return config.make_wsgi_app()
#if __name__ == '__main__':
# app = main("serve ../development.ini")
# server = make_server('0.0.0.0', 8080, app)
# server.serve_forever()
```
#### File: TDH-rich-data-capture/jcudc24provisioning/resources.py
```python
from fanstatic import Library
from fanstatic import Resource
from fanstatic import Group
from js import jqueryui
from js.jquery_form import jquery_form
from js.jquery import jquery
import js.deform
def open_layers_lookup(url):
return """<script type="text/javascript" src="http://maps.google.com/maps/api/js?v=3&sensor=false"></script>"""
library = Library('jcudc24provisioning', 'static')
#Third-party resources
open_layers = Resource(
library,
'libraries/openlayers/OpenLayers.js',
bottom=False,
)
enmasse_widgets = Resource(
library,
'scripts/widgets.js',
bottom=False,
depends=(jquery, open_layers, js.deform.deform_js)
)
# Restore $.browser, which was removed in newer versions of jQuery (the datepicker relies on it).
jquery_mb_browser = Resource(
library,
'libraries/jquery.mb.browser-master/jquery.mb.browser.js',
bottom=False,
minified="libraries/jquery.mb.browser-master/jquery.mb.browser.min.js",
depends=(jquery, open_layers, js.deform.deform_js)
)
regex_mask = Resource(
library,
'libraries/jquery-regex-mask-plugin-master/regex-mask-plugin.js',
bottom=False,
depends=()
)
deform_css = Resource(
library,
'css/deform.css',
depends=(js.deform.deform_css,)
)
project_css = Resource(
library,
'css/project.css',
)
website_css = Resource(
library,
'css/website.css',
)
template_css = Resource(
library,
'css/template.css',
)
#Local resources
open_layers_js = Resource(library, 'libraries/open_layers.js', renderer=open_layers_lookup)
enmasse_css = Group([
deform_css,
project_css,
website_css,
template_css,
])
enmasse_requirements = Group([
enmasse_css,
jquery_mb_browser,
jquery,
jqueryui.jqueryui,
jqueryui.ui_datepicker,
jqueryui.ui_datepicker_en_AU,
jqueryui.base,
jqueryui.ui_lightness,
js.deform.deform,
jquery_form,
enmasse_widgets
])
enmasse_forms = Group([
enmasse_requirements,
regex_mask,
])
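# Usage sketch (illustrative): a view that renders an EnMasse form can pull in
# all of the scripts and stylesheets above with a single fanstatic call, e.g.
#
#   from jcudc24provisioning.resources import enmasse_forms
#   def edit_view(request):                      # hypothetical Pyramid view
#       enmasse_forms.need()                     # injected into the rendered page
#       ...
#
# need() only works inside a request wrapped by fanstatic's WSGI middleware.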
```
#### File: jcudc24provisioning/scripts/initializedb.py
```python
from jcudc24ingesterapi.schemas.metadata_schemas import DataEntryMetadataSchema, DatasetMetadataSchema
from jcudc24ingesterapi.schemas.data_entry_schemas import DataEntrySchema
import os
import sys
import transaction
import random
from jcudc24provisioning.controllers.authentication import DefaultPermissions, DefaultRoles
from jcudc24provisioning.models import DBSession, Base
from jcudc24provisioning.models.project import Location, ProjectTemplate, Project, Dataset, MethodSchema, MethodSchemaField, MethodTemplate, Method, PullDataSource, DatasetDataSource
from jcudc24ingesterapi.schemas.data_types import Double
from jcudc24provisioning.models import website
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from jcudc24provisioning.models.website import User, Role, Permission
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri>\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) != 2:
usage(argv)
config_uri = argv[1]
setup_logging(config_uri)
settings = get_appsettings(config_uri)
initialise_all_db(settings)
def initialise_all_db(settings):
"""
Initialise the database connection, create all tables if they don't exist, then initialise default data if it hasn't
already been added.
:param settings:
:return:
"""
# Initialise the database connection.
engine = engine_from_config(settings, 'sqlalchemy.', pool_recycle=3600)
DBSession.configure(bind=engine)
# Test if the database has already been initialised with default data (is this the first time it has been run?)
initialised = engine.dialect.has_table(engine.connect(), "project")
# Create all database tables if they don't exist (on first run)
Base.metadata.create_all(engine)
# If this is the first run, initialise all default database data.
if not initialised:
with transaction.manager:
session = DBSession()
initialise_default_schemas(session)
initialise_project_templates(session)
initialise_method_templates(session)
initialise_security(session)
transaction.commit()
def initialise_security(session):
"""
Initialise the default permissions, roles and users.
:param session: Database session to add new data to.
:return: None
"""
# Loop through all permissions in the DefaultPermissions class and add them to the database
defaults = DefaultPermissions()
for name in dir(defaults):
if name.startswith("_"):
continue
name, description = getattr(defaults, name)
permission = Permission(name, description)
session.add(permission)
# Loop through all roles in the DefaultRoles class and add them to the database
defaults = DefaultRoles()
for name in dir(defaults):
if name.startswith("_") or name == 'name':
continue
name, description, permissions = getattr(defaults, name)
permission_objects = session.query(Permission).filter(Permission.name.in_([permission_name for (permission_name, permission_description) in permissions])).all()
role = Role(name, description, permission_objects)
session.add(role)
# Add the default/testing users.
session.flush()
user = User("A User", "user", "user", "<EMAIL>")
session.add(user)
admin = User("Administrator", "admin", "admin", "<EMAIL>", roles=session.query(Role).filter(Role.name==DefaultRoles().ADMIN[0]).all())
session.add(admin)
def initialise_default_schemas(session):
"""
Initialise all default MethodSchema standardised fields (parent schemas).
:param session: Session to add the created schemas to the database with.
:return: None
"""
#-----------------Location Offset Schema----------------------
location_offsets_schema = MethodSchema()
location_offsets_schema.schema_type = DataEntrySchema.__xmlrpc_class__
location_offsets_schema.name = "XYZ Location Offsets"
location_offsets_schema.template_schema = True
x_offset_field = MethodSchemaField()
x_offset_field.type = Double.__xmlrpc_class__
x_offset_field.units = "meters"
x_offset_field.name = "X Offset"
x_offset_field.placeholder = "eg. 23.4"
x_offset_field.default = 0
location_offsets_schema.custom_fields.append(x_offset_field)
y_offset_field = MethodSchemaField()
y_offset_field.type = Double.__xmlrpc_class__
y_offset_field.units = "meters"
y_offset_field.name = "Z Offset"
y_offset_field.placeholder = "eg. 23.4"
y_offset_field.default = 0
location_offsets_schema.custom_fields.append(y_offset_field)
z_offset_field = MethodSchemaField()
z_offset_field.type = Double.__xmlrpc_class__
z_offset_field.units = "meters"
z_offset_field.name = "Z Offset"
z_offset_field.placeholder = "eg. 23.4"
z_offset_field.default = 0
location_offsets_schema.custom_fields.append(z_offset_field)
session.add(location_offsets_schema)
session.flush()
#----------Temperature schema--------------
temp_schema = MethodSchema()
temp_schema.name = "Temperature"
temp_schema.template_schema = True
temp_schema.schema_type = DataEntrySchema.__xmlrpc_class__
temp_field = MethodSchemaField()
temp_field.type = "decimal"
temp_field.units = "Celcius"
temp_field.name = "Temperature"
temp_schema.custom_fields.append(temp_field)
session.add(temp_schema)
#----------Humidity schema--------------
humidity_schema = MethodSchema()
humidity_schema.name = "Humidity"
humidity_schema.template_schema = True
humidity_schema.schema_type = DataEntrySchema.__xmlrpc_class__
humidity_field = MethodSchemaField()
humidity_field.type = "decimal"
humidity_field.units = "%"
humidity_field.name = "Humidity"
humidity_schema.custom_fields.append(humidity_field)
session.add(humidity_schema)
#----------Moisture schema--------------
moisture_schema = MethodSchema()
moisture_schema.name = "Moisture"
moisture_schema.template_schema = True
moisture_schema.schema_type = DataEntrySchema.__xmlrpc_class__
moisture_field = MethodSchemaField()
moisture_field.type = "decimal"
moisture_field.units = "%"
moisture_field.name = "Moisture"
moisture_schema.custom_fields.append(moisture_field)
session.add(moisture_schema)
#----------Altitude schema--------------
altitude_schema = MethodSchema()
altitude_schema.name = "Altitude"
altitude_schema.template_schema = True
altitude_schema.schema_type = DataEntrySchema.__xmlrpc_class__
altitude_field = MethodSchemaField()
altitude_field.type = "decimal"
altitude_field.units = "Meters above Mean Sea Level (MSL)"
altitude_field.name = "Altitude"
altitude_schema.custom_fields.append(altitude_field)
session.add(altitude_schema)
#----------Distance schema--------------
distance_schema = MethodSchema()
distance_schema.name = "Distance"
distance_schema.template_schema = True
distance_schema.schema_type = DataEntrySchema.__xmlrpc_class__
distance_field = MethodSchemaField()
distance_field.type = "decimal"
distance_field.units = "Meters"
distance_field.name = "Distance"
distance_schema.custom_fields.append(distance_field)
session.add(distance_schema)
#----------Light Intensity schema--------------
luminosity_schema = MethodSchema()
luminosity_schema.name = "Luminosity"
luminosity_schema.template_schema = True
luminosity_schema.schema_type = DataEntrySchema.__xmlrpc_class__
luminosity_field = MethodSchemaField()
luminosity_field.type = "decimal"
luminosity_field.units = "candela (cd)"
luminosity_field.name = "Luminosity"
luminosity_schema.custom_fields.append(luminosity_field)
session.add(luminosity_schema)
#----------Weight schema--------------
weight_schema = MethodSchema()
weight_schema.name = "Weight"
weight_schema.template_schema = True
weight_schema.schema_type = DataEntrySchema.__xmlrpc_class__
weight_field = MethodSchemaField()
weight_field.type = "decimal"
weight_field.units = "kg"
weight_field.name = "Weight"
weight_schema.custom_fields.append(weight_field)
session.add(weight_schema)
#----------Density schema--------------
density_schema = MethodSchema()
density_schema.name = "Density"
density_schema.template_schema = True
density_schema.schema_type = DataEntrySchema.__xmlrpc_class__
density_field = MethodSchemaField()
density_field.type = "decimal"
density_field.units = "kg/m^3"
density_field.name = "Density"
density_schema.custom_fields.append(density_field)
session.add(density_schema)
#------------Data Quality Assurance Schema----------
data_quality = MethodSchema()
data_quality.name = "Data Quality"
data_quality.template_schema = False
data_quality.schema_type = DataEntryMetadataSchema.__xmlrpc_class__
quality_field = MethodSchemaField()
quality_field.type = "decimal"
quality_field.name = "Value"
data_quality.custom_fields.append(quality_field)
description_field = MethodSchemaField()
description_field.type = "text_area"
description_field.name = "Description"
data_quality.custom_fields.append(description_field)
session.add(data_quality)
#------------Dataset calibration/changes schema-------------------
dataset_calibration = MethodSchema()
dataset_calibration.name = "Dataset Calibration"
dataset_calibration.template_schema = False
dataset_calibration.schema_type = DatasetMetadataSchema.__xmlrpc_class__
date = MethodSchemaField()
date.type = "date"
date.name = "Date"
dataset_calibration.custom_fields.append(date)
# Textual representation of an array of changes.
changes = MethodSchemaField()
changes.type = "text_area"
changes.name = "Description"
dataset_calibration.custom_fields.append(changes)
session.add(dataset_calibration)
session.flush()
def initialise_project_templates(session):
"""
Initialise the default project templates; this could be updated to be organisation specific.
:param session: Database connection to add the created templates to.
:return: None
"""
blank_project = Project()
blank_project.template_only = True
session.add(blank_project) # Add an empty project as a blank template
session.flush()
blank_template = ProjectTemplate()
blank_template.template_id = blank_project.id
blank_template.category = "Blank (No auto-fill)"
blank_template.name = "Blank Template"
blank_template.description = "An empty template that allows you to start from scratch (only for advanced "\
"users or if no other template is relevent)."
session.add(blank_template) # Add an empty project as a blank template
# add blank templates for testing, delete when production ready
placeholder_template_names = [
"DRO",
"Australian Wet Tropics",
"TERN Supersite",
"The Wallace Initiative",
"Tropical Futures",
]
count = 0
for name in placeholder_template_names:
for i in range(random.randint(2, 5)):
template = ProjectTemplate()
template.template_id = blank_project.id
template.category = name
template.name = name + " Placeholder Template " + str(count) + " (Testing Only)"
count += 1
template.description = "An empty template that allows you to start from scratch (only for advanced "\
"users or if no other template is relevent)."
            session.add(template)  # Add a placeholder project template for testing
def initialise_method_templates(session):
"""
Initialise the default method templates.
:param session: Database connection to add the created templates to
:return: None
"""
blank_template = session.query(MethodTemplate).filter_by(name="Blank Template").first()
if not blank_template:
blank_method = Method()
# blank_method.method_description = "Test description"
        session.add(blank_method)  # Add an empty method backing the blank template
blank_dataset = Dataset()
blank_dataset.name = "Test Title"
        session.add(blank_dataset)  # Add a placeholder dataset backing the blank template
session.flush()
blank_template = MethodTemplate()
blank_template.template_id = blank_method.id
blank_template.dataset_id = blank_dataset.id
blank_template.category = "Blank (No pre-fill)"
blank_template.name = "Blank Template"
blank_template.description = "An empty template that allows you to start from scratch (only for advanced "\
"users or if no other template is relevent)."
session.add(blank_template) # Add an empty project as a blank template
tree_template = session.query(MethodTemplate).filter_by(name="Artificial Tree").first()
if not tree_template:
tree_method = Method()
tree_method.method_name = "Artificial Sensor Tree"
tree_method.method_description = "Collection method for ingesting aggregated tree sensor data from an external file server."
tree_method.data_source = PullDataSource.__tablename__
tree_schema = MethodSchema()
tree_schema.name = "ArtificialTree"
tree_data_field = MethodSchemaField()
tree_data_field.type = "file"
tree_data_field.units = "text"
tree_data_field.name = "TreeData"
tree_data_field.description = "Aggregated data of all sensors for an artificial tree."
tree_schema.custom_fields = [tree_data_field]
tree_method.data_type = tree_schema
# blank_method.method_description = "Test description"
        session.add(tree_method)  # Register the artificial tree collection method
tree_dataset = Dataset()
tree_dataset.name = "Raw Artificial Tree Data"
tree_datasource = PullDataSource()
tree_datasource.uri = "http://emu.hpc.jcu.edu.au/tree/split/"
tree_datasource.filename_pattern = ""
tree_datasource.selected_sampling = PullDataSource.periodic_sampling.key
tree_datasource.periodic_sampling = "1"
tree_dataset.pull_data_source = tree_datasource
        session.add(tree_dataset)  # Register the raw artificial tree dataset
session.flush()
tree_template = MethodTemplate()
tree_template.template_id = tree_method.id
tree_template.dataset_id = tree_dataset.id
tree_template.category = "Artificial Tree"
tree_template.name = "Artificial Tree"
tree_template.description = "Template for setting up ingestion from an artificial tree."
        session.add(tree_template)  # Register the artificial tree method template
sensor_template = session.query(MethodTemplate).filter_by(name="Artificial Tree Sensor").first()
if not sensor_template:
sensor_method = Method()
sensor_method.method_name = "Artificial Tree Sensor"
sensor_method.method_description = "Filter and index one sensor station from the aggregated artificial tree data."
sensor_method.data_source = DatasetDataSource.__tablename__
sensor_method.data_type = session.query(MethodSchema).filter_by(name="Temperature").first()
# blank_method.method_description = "Test description"
        session.add(sensor_method)  # Register the artificial tree sensor method
sensor_dataset = Dataset()
sensor_dataset.name = "Artificial Tree Sensor"
sensor_datasource = DatasetDataSource()
sensor_datasource.custom_processing_parameters = "file_field=TreeData, temp_field=Temperature, sensor_id=28180E08030000BE"
sensor_dataset.dataset_data_source = sensor_datasource
        session.add(sensor_dataset)  # Register the artificial tree sensor dataset
session.flush()
sensor_template = MethodTemplate()
sensor_template.template_id = sensor_method.id
sensor_template.dataset_id = sensor_dataset.id
sensor_template.category = "Artificial Tree"
sensor_template.name = "Artificial Tree Sensor"
sensor_template.description = "Templates setting up post-processing and indexing of one artificial tree sensor from the aggregated artificial tree data."
        session.add(sensor_template)  # Register the artificial tree sensor template
placeholder_template_names = [
"DRO",
"Australian Wet Tropics",
"TERN Supersite",
"The Wallace Initiative",
"Tropical Futures",
]
templates = session.query(MethodTemplate).all()
    print(len(templates))
if len(templates) <= 1:
count = 0
for name in placeholder_template_names:
for i in range(random.randint(2, 5)):
template = MethodTemplate()
template.template_id = blank_template.id
template.dataset_id = blank_template.dataset_id
template.category = name
template.name = name + " Placeholder Template " + str(count) + " (Testing Only)"
count += 1
template.description = "An empty template that allows you to start from scratch (only for advanced "\
"users or if no other template is relevent)."
                session.add(template)  # Add a placeholder method template for testing
```
|
{
"source": "jcu-eresearch/vecnet",
"score": 3
}
|
#### File: vecnet/script/filter-repository-dump.py
```python
import string
import sys
import argparse
class Stat:
def __init__(self, label, parent=None):
self.count = 0
self.size = 0
        self.label = label
self.parent = parent
def update(self, size):
self.count += 1
self.size += size
if self.parent:
self.parent.update(size)
everything = Stat('total')
months = {}
parser = argparse.ArgumentParser(description='Summarize ingest counts from gf dump file. Output written to stdout.')
parser.add_argument('infile',
type=argparse.FileType('r'),
nargs='?',
default=sys.stdin,
help='The input dump file. defaults to stdin.')
args = parser.parse_args()
with args.infile as f:
for line in f.readlines():
items = line.split(',')
        if len(items) < 7:  # need all seven fields for the unpacking below
continue
noid,cd,md,mime,size,user,label = items[:7]
if user in ['<EMAIL>','<EMAIL>','<EMAIL>']:
continue
try:
size = int(size)
except ValueError:
continue
month = cd[:7]
try:
months[month].update(size)
        except KeyError:
months[month] = Stat(month,everything)
months[month].update(size)
m = sorted(months.keys())
print("month,ingest_count,ingest_bytes")
for mm in m:
    print("%s,%d,%d" % (mm, months[mm].count, months[mm].size))
```
|
{
"source": "jcugat/httpx",
"score": 2
}
|
#### File: tests/client/test_headers.py
```python
import typing
import httpcore
import pytest
from httpx import AsyncClient, Headers, __version__
from httpx._content_streams import ContentStream, JSONStream
class MockTransport(httpcore.AsyncHTTPTransport):
async def request(
self,
method: bytes,
url: typing.Tuple[bytes, bytes, int, bytes],
headers: typing.List[typing.Tuple[bytes, bytes]],
stream: ContentStream,
timeout: typing.Dict[str, typing.Optional[float]] = None,
) -> typing.Tuple[
bytes, int, bytes, typing.List[typing.Tuple[bytes, bytes]], ContentStream
]:
headers_dict = {
key.decode("ascii"): value.decode("ascii") for key, value in headers
}
body = JSONStream({"headers": headers_dict})
return b"HTTP/1.1", 200, b"OK", [], body
@pytest.mark.asyncio
async def test_client_header():
"""
Set a header in the Client.
"""
url = "http://example.org/echo_headers"
headers = {"Example-Header": "example-value"}
client = AsyncClient(transport=MockTransport(), headers=headers)
response = await client.get(url)
assert response.status_code == 200
assert response.json() == {
"headers": {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"connection": "keep-alive",
"example-header": "example-value",
"host": "example.org",
"user-agent": f"python-httpx/{__version__}",
}
}
@pytest.mark.asyncio
async def test_header_merge():
url = "http://example.org/echo_headers"
client_headers = {"User-Agent": "python-myclient/0.2.1"}
request_headers = {"X-Auth-Token": "FooBarBazToken"}
client = AsyncClient(transport=MockTransport(), headers=client_headers)
response = await client.get(url, headers=request_headers)
assert response.status_code == 200
assert response.json() == {
"headers": {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"connection": "keep-alive",
"host": "example.org",
"user-agent": "python-myclient/0.2.1",
"x-auth-token": "<PASSWORD>",
}
}
@pytest.mark.asyncio
async def test_header_merge_conflicting_headers():
url = "http://example.org/echo_headers"
client_headers = {"X-Auth-Token": "FooBar"}
request_headers = {"X-Auth-Token": "BazToken"}
client = AsyncClient(transport=MockTransport(), headers=client_headers)
response = await client.get(url, headers=request_headers)
assert response.status_code == 200
assert response.json() == {
"headers": {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"connection": "keep-alive",
"host": "example.org",
"user-agent": f"python-httpx/{__version__}",
"x-auth-token": "BazToken",
}
}
@pytest.mark.asyncio
async def test_header_update():
url = "http://example.org/echo_headers"
client = AsyncClient(transport=MockTransport())
first_response = await client.get(url)
client.headers.update(
{"User-Agent": "python-myclient/0.2.1", "Another-Header": "AThing"}
)
second_response = await client.get(url)
assert first_response.status_code == 200
assert first_response.json() == {
"headers": {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"connection": "keep-alive",
"host": "example.org",
"user-agent": f"python-httpx/{__version__}",
}
}
assert second_response.status_code == 200
assert second_response.json() == {
"headers": {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"another-header": "AThing",
"connection": "keep-alive",
"host": "example.org",
"user-agent": "python-myclient/0.2.1",
}
}
def test_header_does_not_exist():
headers = Headers({"foo": "bar"})
with pytest.raises(KeyError):
del headers["baz"]
@pytest.mark.asyncio
async def test_host_with_auth_and_port_in_url():
"""
The Host header should only include the hostname, or hostname:port
(for non-default ports only). Any userinfo or default port should not
be present.
"""
url = "http://username:[email protected]:80/echo_headers"
client = AsyncClient(transport=MockTransport())
response = await client.get(url)
assert response.status_code == 200
assert response.json() == {
"headers": {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"connection": "keep-alive",
"host": "example.org",
"user-agent": f"python-httpx/{__version__}",
"authorization": "Basic dXNlcm5hbWU6cGFzc3dvcmQ=",
}
}
@pytest.mark.asyncio
async def test_host_with_non_default_port_in_url():
"""
If the URL includes a non-default port, then it should be included in
the Host header.
"""
url = "http://username:[email protected]:123/echo_headers"
client = AsyncClient(transport=MockTransport())
response = await client.get(url)
assert response.status_code == 200
assert response.json() == {
"headers": {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"connection": "keep-alive",
"host": "example.org:123",
"user-agent": f"python-httpx/{__version__}",
"authorization": "Basic dXNlcm5hbWU6cGFzc3dvcmQ=",
}
}
```
|
{
"source": "jcuic5/CUDA-PointPillars",
"score": 2
}
|
#### File: models/backbones_2d/base_bev_backbone.py
```python
import numpy as np
import torch
import torch.nn as nn
import sys
class BaseBEVBackbone(nn.Module):
def __init__(self, model_cfg, input_channels):
super().__init__()
self.model_cfg = model_cfg
if self.model_cfg.get('LAYER_NUMS', None) is not None:
assert len(self.model_cfg.LAYER_NUMS) == len(self.model_cfg.LAYER_STRIDES) == len(self.model_cfg.NUM_FILTERS)
layer_nums = self.model_cfg.LAYER_NUMS
layer_strides = self.model_cfg.LAYER_STRIDES
num_filters = self.model_cfg.NUM_FILTERS
else:
layer_nums = layer_strides = num_filters = []
if self.model_cfg.get('UPSAMPLE_STRIDES', None) is not None:
assert len(self.model_cfg.UPSAMPLE_STRIDES) == len(self.model_cfg.NUM_UPSAMPLE_FILTERS)
num_upsample_filters = self.model_cfg.NUM_UPSAMPLE_FILTERS
upsample_strides = self.model_cfg.UPSAMPLE_STRIDES
else:
upsample_strides = num_upsample_filters = []
num_levels = len(layer_nums)
c_in_list = [input_channels, *num_filters[:-1]]
self.blocks = nn.ModuleList()
self.deblocks = nn.ModuleList()
for idx in range(num_levels):
cur_layers = [
nn.ZeroPad2d(1),
nn.Conv2d(
c_in_list[idx], num_filters[idx], kernel_size=3,
stride=layer_strides[idx], padding=0, bias=False
),
nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01),
nn.ReLU()
]
for k in range(layer_nums[idx]):
cur_layers.extend([
nn.Conv2d(num_filters[idx], num_filters[idx], kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01),
nn.ReLU()
])
self.blocks.append(nn.Sequential(*cur_layers))
if len(upsample_strides) > 0:
stride = upsample_strides[idx]
if stride >= 1:
self.deblocks.append(nn.Sequential(
nn.ConvTranspose2d(
num_filters[idx], num_upsample_filters[idx],
upsample_strides[idx],
stride=upsample_strides[idx], bias=False
),
nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01),
nn.ReLU()
))
else:
                    stride = int(np.round(1 / stride))  # plain int; np.int has been removed from NumPy
self.deblocks.append(nn.Sequential(
nn.Conv2d(
num_filters[idx], num_upsample_filters[idx],
stride,
stride=stride, bias=False
),
nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01),
nn.ReLU()
))
c_in = sum(num_upsample_filters)
if len(upsample_strides) > num_levels:
self.deblocks.append(nn.Sequential(
nn.ConvTranspose2d(c_in, c_in, upsample_strides[-1], stride=upsample_strides[-1], bias=False),
nn.BatchNorm2d(c_in, eps=1e-3, momentum=0.01),
nn.ReLU(),
))
self.num_bev_features = c_in
def forward(self, spatial_features):
ups = []
ret_dict = {}
x = spatial_features
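        # Each block downsamples by its configured stride; the matching deblock
        # (if any) upsamples the result so the per-level feature maps can be
        # concatenated along the channel dimension below.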
for i in range(len(self.blocks)):
x = self.blocks[i](x)
stride = int(spatial_features.shape[2] / x.shape[2])
ret_dict['spatial_features_%dx' % stride] = x
if len(self.deblocks) > 0:
ups.append(self.deblocks[i](x))
else:
ups.append(x)
if len(ups) > 1:
x = torch.cat(ups, dim=1)
elif len(ups) == 1:
x = ups[0]
if len(self.deblocks) > len(self.blocks):
x = self.deblocks[-1](x)
return x
```
|
{
"source": "jcuker/pyMFA",
"score": 2
}
|
#### File: jcuker/pyMFA/mfa.py
```python
import time
import hashlib
import base64
import hmac
import math
import csv
import sys
import os
from multiprocessing.dummy import Pool as ThreadPool
class Source:
def __init__(self, secret, name):
self.secret = secret
self.name = name
def authCode(source):
key = get_key(source.secret)
message = math.floor(time.time() / 30)
    # RFC 4226/6238 pack the moving factor as an 8-byte big-endian counter
    message_bytes = message.to_bytes(8, byteorder='big')
hasher = hmac.new(key, message_bytes, hashlib.sha1)
hashed = hasher.digest()
offset = get_last_nibble(hashed)
truncated_hash = hashed[offset:offset+4]
code = calculate_code_from_truncated_hash(truncated_hash)
padded_code = pad_code(code)
source.code = padded_code
print(source.name + ': ' + source.code)
# take the secret key to uppercase and then base32 decode the string
def get_key(secret):
return base64.b32decode(secret.upper())
# convert a bytes object to its decimal representation
def bytes_to_int(bytes):
result = 0
for b in bytes:
result = result * 256 + int(b)
return result
# returns the last nibble of a bitstring
def get_last_nibble(hashed):
return hashed[19] & 15
# ignore significant bits and modulo 1 million to ensure remainder is < 7 digits
def calculate_code_from_truncated_hash(truncated_hash):
return ((bytes_to_int(truncated_hash) & 0x7fffffff) % 1000000)
# pad with zeros if remainder was < 6 digits
def pad_code(code):
numAsString = str(code)
while (len(numAsString) < 6):
numAsString = "0" + numAsString
return numAsString
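# Illustrative composition of the helpers above (the secret is a made-up example,
# not a value from this project):
#   key    = get_key("JBSWY3DPEHPK3PXP")                 # base32-decode the shared secret
#   step   = math.floor(time.time() / 30)                # 30-second TOTP time step
#   digest = hmac.new(key, step.to_bytes(8, 'big'), hashlib.sha1).digest()
#   offset = get_last_nibble(digest)
#   code   = pad_code(calculate_code_from_truncated_hash(digest[offset:offset + 4]))
# pad_code itself just left-pads to six digits, e.g. pad_code(123) == "000123".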
def resource_path(relative_path):
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
def read_sources_from_file():
list_of_sources = []
data = open(resource_path('mfa-data.csv'))
print(data)
reader = csv.DictReader(data, delimiter=',')
for row in reader:
source = Source(row['secret'], row['name'])
list_of_sources.append(source)
return list_of_sources
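# read_sources_from_file expects mfa-data.csv to carry a header row with "name" and
# "secret" columns, for example (hypothetical values):
#   name,secret
#   example-account,JBSWY3DPEHPK3PXP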
def main():
list_of_sources_to_authenticate = read_sources_from_file()
try:
while True:
os.system('cls' if os.name == 'nt' else 'clear')
pool = ThreadPool(4)
pool.map(authCode, list_of_sources_to_authenticate)
pool.close()
pool.join()
            remaining_seconds = 30 - (math.floor(time.time()) % 30)
            time.sleep(remaining_seconds)  # sleep until the next 30-second code window
except KeyboardInterrupt:
sys.exit()
main()
```
|
{
"source": "jculpon/tappy-flask",
"score": 2
}
|
#### File: tappy-flask/tools/sim.py
```python
from __future__ import unicode_literals
import argparse
import requests
import json
import time
generic_update_json='''[
{ "user": "user1", "x": 1, "y": 1, "z": 1, "area": "NOC", "button": true},
{ "user": "user2", "x": 2, "y": 2, "z": 2, "area": "Radio Statler"},
{ "user": "user3", "x": 3, "y": 3, "z": 3, "area": "Keynote Bullpen"}
]'''
content_type_json_headers={'content-type': 'application/json'}
def post_update(server='http://127.0.0.1:5000', json_data=generic_update_json):
url = server + '/amd/push'
result = requests.post(
url,
data=json_data,
headers=content_type_json_headers
)
return result
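# Example: post_update() with no arguments POSTs the three sample records in
# generic_update_json to http://127.0.0.1:5000/amd/push; pass server= and/or
# json_data= to target a different Tappy Terror instance or payload.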
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(
description='Send simulated updates to Tappy Terror'
)
arg_parser.add_argument(
'-w',
'--wait-time',
help='time, in ms, to wait between requests',
default=100,
type=int
)
arg_parser.add_argument(
'-n',
'--number',
help='number of requests to send',
default=5,
type=int
)
arg_parser.add_argument(
'-j',
'--json',
help='json string to send'
)
args = arg_parser.parse_args()
if args.json is not None:
try:
parsed = json.loads(args.json)
        except ValueError:
            print('Unable to parse provided json -- is it valid?')
            raise
    for i in range(args.number):
if args.json is None:
result = post_update()
else:
result = post_update(json_data=args.json)
try:
result.raise_for_status()
            print('Status %d: %s' % (result.status_code, result.json()))
        except requests.HTTPError as e:
            print('Request failed! Status %d, underlying error %s' % (result.status_code, e))
time.sleep(float(args.wait_time)/1000)
```
|
{
"source": "jc-umana/regolith",
"score": 2
}
|
#### File: regolith/helpers/l_milestoneshelper.py
```python
import datetime as dt
import dateutil.parser as date_parser
from dateutil.relativedelta import relativedelta
import sys
from regolith.dates import get_due_date
from regolith.helpers.basehelper import SoutHelperBase
from regolith.fsclient import _id_key
from regolith.tools import (
all_docs_from_collection,
get_pi_id,
)
TARGET_COLL = "projecta"
HELPER_TARGET = "l_milestones"
ALLOWED_STATI = ["all", "proposed", "started", "finished", "back_burner",
"paused", "cancelled"]
def subparser(subpi):
subpi.add_argument("-v", "--verbose", action="store_true",
help='increase verbosity of output')
subpi.add_argument("-l", "--lead",
help="Filter milestones for this project lead"
)
subpi.add_argument("-s", "--stati", nargs="+",
help=f"Filter milestones for these stati from {ALLOWED_STATI}."
f" Default is active projecta, i.e. 'started'",
default=None
)
return subpi
class MilestonesListerHelper(SoutHelperBase):
"""Helper for listing upcoming (and past) projectum milestones.
Projecta are small bite-sized project quanta that typically will result in
one manuscript.
"""
# btype must be the same as helper target in helper.py
btype = HELPER_TARGET
needed_dbs = [f'{TARGET_COLL}']
def construct_global_ctx(self):
"""Constructs the global context"""
super().construct_global_ctx()
gtx = self.gtx
rc = self.rc
if "groups" in self.needed_dbs:
rc.pi_id = get_pi_id(rc)
rc.coll = f"{TARGET_COLL}"
try:
if not rc.database:
rc.database = rc.databases[0]["name"]
except:
pass
colls = [
sorted(
all_docs_from_collection(rc.client, collname), key=_id_key
)
for collname in self.needed_dbs
]
for db, coll in zip(self.needed_dbs, colls):
gtx[db] = coll
gtx["all_docs_from_collection"] = all_docs_from_collection
gtx["float"] = float
gtx["str"] = str
gtx["zip"] = zip
def sout(self):
rc = self.rc
all_milestones = []
if not rc.stati:
rc.stati = ['started']
for projectum in self.gtx["projecta"]:
if rc.lead and projectum.get('lead') != rc.lead:
continue
for ms in projectum["milestones"]:
if projectum["status"] in rc.stati or \
'all' in rc.stati:
if ms.get('status') not in \
["finished", "cancelled"]:
due_date = get_due_date(ms)
ms.update({
'lead': projectum.get('lead'),
'id': projectum.get('_id'),
'due_date': due_date
})
all_milestones.append(ms)
all_milestones.sort(key=lambda x: x['due_date'], reverse=True)
for ms in all_milestones:
if rc.verbose:
print(
f"{ms.get('due_date')}: lead: {ms.get('lead')}, {ms.get('id')}, status: {ms.get('status')}")
print(f" Title: {ms.get('name')}")
print(f" Purpose: {ms.get('objective')}")
print(f" Audience: {ms.get('audience')}")
else:
print(
f"{ms.get('due_date')}: lead: {ms.get('lead')}, {ms.get('id')}, {ms.get('name')}, status: {ms.get('status')}")
return
def db_updater(self):
rc = self.rc
if not rc.date:
now = dt.date.today()
else:
now = date_parser.parse(rc.date).date()
key = f"{str(now.year)[2:]}{rc.lead[:2]}_{''.join(rc.name.casefold().split()).strip()}"
coll = self.gtx[rc.coll]
pdocl = list(filter(lambda doc: doc["_id"] == key, coll))
if len(pdocl) > 0:
sys.exit("This entry appears to already exist in the collection")
else:
pdoc = {}
pdoc.update({
'begin_date': now.isoformat(),
})
pdoc.update({
'name': rc.name,
})
pdoc.update({
'pi_id': rc.pi_id,
})
pdoc.update({
'lead': rc.lead,
})
if rc.description:
pdoc.update({
'description': rc.description,
})
if rc.grants:
if isinstance(rc.grants, str):
rc.grants = [rc.grants]
pdoc.update({'grants': rc.grants})
else:
pdoc.update({'grants': ["tbd"]})
if rc.group_members:
if isinstance(rc.group_members, str):
rc.group_members = [rc.group_members]
pdoc.update({'group_members': rc.group_members})
else:
pdoc.update({'group_members': []})
if rc.collaborators:
if isinstance(rc.collaborators, str):
rc.collaborators = [rc.collaborators]
pdoc.update({
'collaborators': rc.collaborators,
})
pdoc.update({"_id": key})
firstm = {'due_date': now + relativedelta(days=7),
'name': '<NAME>',
'objective': 'roll out of project to team',
'audience': ['pi', 'lead', 'group members',
'collaborators'],
'status': 'proposed'
}
secondm = {'due_date': now + relativedelta(days=21),
'name': '<NAME>',
'objective': 'lead presents background reading and '
'initial project plan',
'audience': ['pi', 'lead', 'group members'],
'status': 'proposed'
}
thirdm = {'due_date': now + relativedelta(days=28),
'name': '<NAME>',
'objective': 'develop a detailed plan with dates',
'audience': ['pi', 'lead', 'group members'],
'status': 'proposed'
}
fourthm = {'due_date': now + relativedelta(years=1),
'name': '<NAME>',
'objective': 'submit paper, make release, whatever',
'audience': ['pi', 'lead', 'group members', 'collaborators'],
'status': 'proposed'
}
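            # Default milestone schedule: kickoff a week out, plan presentation at three
            # weeks, a dated detailed plan at four weeks, and the final deliverable one
            # year from the start date; every milestone begins in the "proposed" state.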
pdoc.update({"milestones": [firstm, secondm, thirdm, fourthm]})
rc.client.insert_one(rc.database, rc.coll, pdoc)
print(f"{key} has been added in {TARGET_COLL}")
return
```
|
{
"source": "jcumby/PIEFACE",
"score": 3
}
|
#### File: PIEFACE/pieface/calcellipsoid.py
```python
import sys
import logging
# Set up logger
logger = logging.getLogger(__name__)
def calcfromcif(CIF, centres, radius, allligtypes=[], alllignames=[], **kwargs):
""" Main routine for computing ellipsoids from CIF file. """
# kwargs should be valid arguments for polyhedron.makeellipsoid(), primarily designed for tolerance and maxcycles
from pieface import readcoords
logger.debug('Starting file %s', CIF)
logger.debug('Phase: %s', kwargs.get('phase', None))
cell, atomcoords, atomtypes, spacegp, symmops, symmid = readcoords.readcif(CIF, phaseblock = kwargs.get('phase', None))
allatoms = readcoords.makeP1cell(atomcoords, symmops, symmid)
phase = readcoords.Crystal(cell=cell, atoms=allatoms, atomtypes=atomtypes)
for cen in centres:
if cen not in allatoms.keys():
logger.info("Centre %s is not present in atom labels: skipping", cen)
continue
validligtyps = list( set(allligtypes).intersection(set(atomtypes.values())))
validlignames = list( set(alllignames).intersection(set(atomtypes.keys())))
if len(validligtyps) == 0 and len(validlignames) == 0:
raise ValueError("No ligands of type(s) or name(s) '{0}' are present in file {1}. Valid types are {2}".format(", ".join(allligtypes)+", ".join(alllignames), CIF, ", ".join([str(p) for p in set(atomtypes.values())])))
elif len(validligtyps) != len(allligtypes):
logger.info("Not all types %s are present in %s; using %s", ", ".join(allligtypes), CIF, ", ".join(validligtyps))
elif len(validlignames) != len(alllignames):
logger.info("Not all labels %s are present in %s; using %s", ", ".join(alllignames), CIF, ", ".join(validlignames))
# Calculate ligands for current centre: the coordinates returned may be in a different unit cell to those in allatoms
ligands, ligtypes = readcoords.findligands(cen, phase.atoms, phase.orthomatrix(), radius=radius, types=validligtyps, names=validlignames, atomtypes=phase.atomtypes)
phase.makepolyhedron({cen:allatoms[cen]}, ligands, atomdict=None, ligtypes=ligtypes)
polynm = cen+"_poly"
getattr(phase, polynm).makeellipsoid(phase.orthomatrix(), **kwargs)
logger.debug('Finishing file %s', CIF)
return phase
def makenesteddict(phases):
""" Return dictionary of phases into nested dict CIF labels and ellipsoid parameters.
Dict structure is:
{Central Atom Label : {Ellipsoid Parameter : Value } }
"""
data = {}
for site in set( [ j for file in phases.keys() for j in phases[file].polyhedra ] ): # Iterate through all possible atom types in phases dict
data[site] = {}
data[site]['files'] = [ f for f in phases.keys() if site in phases[f].polyhedra ] # Get list of files for which site is present
#data[site]['radii'] = [ getattr(phases[f], site+"_poly").ellipsoid.radii for f in data[site]['files'] ]
data[site]['r1'] = [ getattr(phases[f], site+"_poly").ellipsoid.radii[0] for f in data[site]['files'] ]
data[site]['r2'] = [ getattr(phases[f], site+"_poly").ellipsoid.radii[1] for f in data[site]['files'] ]
data[site]['r3'] = [ getattr(phases[f], site+"_poly").ellipsoid.radii[2] for f in data[site]['files'] ]
data[site]['rad_sig'] = [ getattr(phases[f], site+"_poly").ellipsoid.raderr() for f in data[site]['files'] ]
data[site]['meanrad'] = [ getattr(phases[f], site+"_poly").ellipsoid.meanrad() for f in data[site]['files'] ]
#data[site]['centre'] = [ getattr(phases[f], site+"_poly").ellipsoid.centre for f in data[site]['files'] ]
data[site]['cenx'] = [ getattr(phases[f], site+"_poly").ellipsoid.centre[0] for f in data[site]['files'] ]
data[site]['ceny'] = [ getattr(phases[f], site+"_poly").ellipsoid.centre[1] for f in data[site]['files'] ]
data[site]['cenz'] = [ getattr(phases[f], site+"_poly").ellipsoid.centre[2] for f in data[site]['files'] ]
data[site]['centredisp'] = [ getattr(phases[f], site+"_poly").ellipsoid.centredisp() for f in data[site]['files'] ]
data[site]['coordination'] = [ getattr(phases[f], site+"_poly").ellipsoid.numpoints() - 1 for f in data[site]['files'] ]
data[site]['shapeparam'] = [ getattr(phases[f], site+"_poly").ellipsoid.shapeparam() for f in data[site]['files'] ]
data[site]['sphererad'] = [ getattr(phases[f], site+"_poly").ellipsoid.sphererad() for f in data[site]['files'] ]
data[site]['ellipsvol'] = [ getattr(phases[f], site+"_poly").ellipsoid.ellipsvol() for f in data[site]['files'] ]
data[site]['strainen'] = [ getattr(phases[f], site+"_poly").ellipsoid.strainenergy() for f in data[site]['files'] ]
data[site]['cenr1'] = [ getattr(phases[f], site+"_poly").ellipsoid.centreaxes()[0] for f in data[site]['files'] ]
data[site]['cenr2'] = [ getattr(phases[f], site+"_poly").ellipsoid.centreaxes()[1] for f in data[site]['files'] ]
data[site]['cenr3'] = [ getattr(phases[f], site+"_poly").ellipsoid.centreaxes()[2] for f in data[site]['files'] ]
data[site]['rotation'] = [ getattr(phases[f], site+"_poly").ellipsoid.rotation for f in data[site]['files'] ]
data[site]['meanbond'] = [ getattr(phases[f], site+"_poly").averagebondlen(getattr(phases[f], 'mtensor')()) for f in data[site]['files'] ]
data[site]['bondsig'] = [ getattr(phases[f], site+"_poly").bondlensig(getattr(phases[f], 'mtensor')()) for f in data[site]['files'] ]
return data
def makeDataFrame(phases):
""" Return Pandas DataFrame object, with CIF files as index and ellipsoid parameters as columns (hierarchical by centre atom)"""
import pandas as pd
from pieface.readcoords import Crystal
if isinstance(phases, dict):
        if isinstance(phases[next(iter(phases))], Crystal):    # We are reading a dict of Crystals: convert to nested dict first
alldata = makenesteddict(phases)
        elif isinstance(phases[next(iter(phases))], dict):      # Looking at a dict of dicts: assume correct for pandas...
alldata = phases
d = dict([ (i, pd.DataFrame(alldata[i]).set_index('files')) for i in alldata.keys() ]) # Make dict of DataFrames
frame = pd.concat(d, axis=1)
if len(frame.index) == 1: # We're looking at a single cif file - unstack DataFrame with atoms as index
            return frame.loc[frame.index[0]].unstack().apply(pd.to_numeric, errors='ignore') # Need to convert back to float/int when unstacking
else:
return frame
else:
raise TypeError("Unknown data format for conversion to DataFrame (expected dict)")
if __name__ == "__main__":
raise EnvironmentError("Please use CIFellipsoid.py script when running on the command line.")
```
|
{
"source": "jcuna/green-crn",
"score": 2
}
|
#### File: api/config/routes.py
```python
def register():
"""
declare menuItems as follows: 'module.PluralNameClass@endpoint': 'route'
returns: dict
"""
return {
'users.Users@users_url': '/user',
'users.UsersManager@users_manager_url': '/users|/users/<int:user_id>',
'users.Session@login_url': '/login',
'users.Roles@roles_url': '/roles',
'users.Permissions@permissions_url': '/permissions',
'users.UserTokens@user_tokens_url': '/user-tokens|/user-tokens/<user_token>',
'users.Activate@user_activate_url': '/account/activate-pass',
'users.Audit@audit_url': '/audit|/audit/<int:user_id>',
'users.UserPasswords@user_passwords_url': '/users/reset-password',
'users.UserGroups@user_groups_url': '/user/groups|/user/groups/<int:group_id>',
'users.Messages@messages_url': '/messages|/messages/<int:message_id>',
'company.Company@company_url': '/company',
'meta.Countries@country_url': '/meta/countries',
'meta.SourceProjects@source_projects_url': '/meta/source-projects',
'meta.ProjectTypes@project_types_url': '/meta/project-types',
'meta.Distributors@distributors_url': '/meta/distributors',
'meta.Rates@rates_url': '/meta/rates',
'meta.Transformers@transformers_url': '/meta/transformers',
'meta.TrCapacities@tr_capacities_url': '/meta/tr-capacities',
'meta.Phases@phases_url': '/meta/phases',
'meta.Tensions@tensions_url': '/meta/tensions',
'meta.PanelModels@panel_models_url': '/meta/panel-models',
'meta.InverterModels@inverter_models_url': '/meta/inverter-models',
'meta.DocumentCategories@document_categories_url': '/meta/document-categories',
'meta.SaleTypes@sale_types_url': '/meta/sale-types',
'meta.FinancialEntities@financial_entities_url': '/meta/financial-entities',
'meta.FinancialStates@financial_states_url': '/meta/financial-states',
'customers.Customers@customers_url': '/customers|/customers/<int:customer_id>',
'customers.CustomerProjects@customer_projects_url': '/customers/projects|/customers/projects/<int:project_id>',
'customers.CustomerInstallations@customer_installations_url': '/customers/installations|/customers/installations/<int:installation_id>',
'customers.InstallationFinances@customer_installations_financing_url': '/customers/installations/financing|/customers/installations/financing/<int:installation_id>',
'customers.InstallationProgressStatus@customer_installations_status_url': '/customers/installations/status/<int:installation_id>',
'customers.InstallationFollowUps@installation_follow_up_url': '/customers/installations/follow-up|/customers/installations/follow-up/<int:installation_follow_up_id>',
'customers.CustomerDocuments@customer_documents_url': '/customers/documents|/customers/documents/<int:installation_id>',
'customers.EGauge@egauge_url': '/egauge/<int:installation_id>',
'shared.Email@emails_url': '/email|/email/<string:action>',
'shared.HtmlToPdf@html_to_pdf_url': '/to-pdf',
'shared.Widgets@widgets_url': '/widgets',
'shared.RunWidget@run_widgets_url': '/widgets/<string:widget_name>',
}
no_permissions = [
'views.users.Session',
'views.users.Users',
'views.users.UserTokens',
'views.users.Permissions',
'views.users.Activate',
]
default_access = {
'views.company.Company': ['read'],
'views.meta.Countries': ['read'],
'views.meta.SourceProjects': ['read'],
'views.meta.ProjectTypes': ['read'],
'views.meta.Distributors': ['read'],
'views.meta.Rates': ['read'],
'views.meta.Transformers': ['read'],
'views.meta.TrCapacities': ['read'],
'views.meta.Tensions': ['read'],
'views.meta.PanelModels': ['read'],
'views.meta.InverterModels': ['read'],
'views.meta.DocumentCategories': ['read'],
'views.meta.DocumentTypes': ['read'],
'views.meta.SaleTypes': ['read'],
'views.meta.FinancialEntities': ['read'],
'views.meta.FinancialStates': ['read'],
}
```
#### File: api/dal/shared.py
```python
from base64 import b64encode
from datetime import datetime
from decimal import Decimal
from math import ceil
from sqlalchemy import orm
from flask_sqlalchemy import SQLAlchemy, BaseQuery
from sqlalchemy.orm import joinedload
from functools import wraps
import jwt
from sqlalchemy.orm.state import InstanceState
from views import Result
db = SQLAlchemy()
def get_fillable(model: db.Model, get_attr_object=False, **kwargs):
if len(kwargs) == 0:
raise Exception('Model keywords are missing. Try ** or spread key values')
if not hasattr(model, 'fillable') and any(kwargs):
raise Exception('Must declare a fillable on class ' + model.__name__)
fillable = {}
for attribute_name in model.fillable:
if attribute_name in kwargs:
if get_attr_object:
key = getattr(model, attribute_name)
else:
key = attribute_name
            fillable[key] = kwargs[attribute_name]
return fillable
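# Sketch of intended use, assuming a model whose fillable list includes 'email' and
# 'first_name' (as User does):
#   get_fillable(User, **{'email': 'a@b.c', 'first_name': 'Ann', 'role': 'x'})
#   -> {'email': 'a@b.c', 'first_name': 'Ann'}   # keys outside `fillable` are dropped
# With get_attr_object=True the returned keys are the mapped column attributes
# instead of plain strings.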
def token_required(f):
from dal.user import User
from flask import current_app, request
@wraps(f)
def decorated(*args, **kwargs):
token = None
if 'X-Access-Token' in request.headers:
token = request.headers['X-ACCESS-TOKEN']
if not token:
return Result.error('Token is missing!', 401)
try:
data = jwt.decode(token, current_app.config['SECRET_KEY'], algorithms=['HS256'])
current_user = User.query.options(joinedload('roles')).filter_by(email=data['email']).first()
except Exception:
return Result.error('Token is invalid!', 401)
request.user = current_user
return f(*args, **kwargs)
return decorated
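# token_required wraps Flask view callables: it reads the X-Access-Token header,
# decodes it with the app's SECRET_KEY, loads the matching User (roles eager-loaded)
# onto request.user, and short-circuits with a 401 Result when the token is missing
# or invalid.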
def system_call(f):
"""
meant to be called from within server instance, this is a temporary solution until an API key system is created
:param f:
:return:
"""
from flask import current_app, request
@wraps(f)
def decorated(*args, **kwargs):
token = None
if 'X-System-Token' in request.headers:
token = request.headers.get('X-SYSTEM-TOKEN')
if not token or token != current_app.config['SECRET_KEY']:
return Result.error('Token is missing!', 401)
return f(*args, **kwargs)
return decorated
def access_required(f):
from flask import request
from core.router import permissions
@wraps(f)
def access_decorator(*args, **kwargs):
if not request.user:
return Result.error('Invalid user', 401)
has_access = False
for role in request.user.roles:
for name, grant in role.get_permissions.items():
if name == permissions[request.endpoint]:
for access in grant:
if access == access_map[request.method]:
has_access = True
break
if not has_access:
return Result.error('Access denied', 403)
return f(*args, **kwargs)
return access_decorator
access_map = {
'GET': 'read',
'PUT': 'write',
'POST': 'write',
'DELETE': 'delete'
}
class Paginator:
per_page = 20
def __init__(self, query: BaseQuery, page: int = 1, order_by: str = None, order_dir: str = None):
self.total = query.count()
self.offset = (page * self.per_page) - self.per_page
self.total_pages = ceil(self.total / self.per_page)
self.query = query
self.page = page
if order_by:
order_by = getattr(self.query.column_descriptions[0]['type'], order_by)
order_dir = getattr(order_by, order_dir if order_dir else 'asc')
self.query = self.query.order_by(order_dir())
def get_items(self) -> list:
items = self.get_result()
return list(map(lambda row: dict(row), items))
def get_result(self):
return self.query.offset(self.offset).limit(self.per_page)
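# Paginator arithmetic: with per_page = 20, page 1 reads rows 0-19 (offset 0) and page 3
# reads rows 40-59 (offset 40); total_pages = ceil(total / per_page), and get_items()
# serialises each row to a dict via the ModelIter protocol below.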
class ModelIter(object):
allowed_widget = False
hidden_props = [
'hidden_props',
'allowed_widget',
'query',
'query_class',
'metadata',
'fillable',
]
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
def __iter__(self):
if isinstance(self, db.Model):
relationships = [rel.key for rel in self.__mapper__.relationships]
for column in dir(self):
if column.startswith('_') or column in relationships or column in self.hidden_props:
continue
attr = getattr(self, column)
if isinstance(attr, InstanceState) or \
hasattr(self.__mapper__.attrs, column) and \
hasattr(getattr(self.__mapper__.attrs, column), 'deferred') and \
getattr(self.__mapper__.attrs, column).deferred:
continue
if isinstance(attr, bool) or isinstance(attr, int) or isinstance(attr, float) or isinstance(attr, dict) \
or isinstance(attr, list) or attr is None:
yield column, attr
elif isinstance(attr, Decimal):
yield column, '{0:.2f}'.format(attr)
elif isinstance(attr, datetime):
yield column, str(attr.isoformat())
elif isinstance(attr, bytes):
yield column, b64encode(attr).decode()
elif isinstance(attr, Point):
yield column, attr.get_dict()
elif not isinstance(attr, str):
yield column, str(attr)
else:
yield column, attr
if hasattr(self, '__mapper__'):
# models that have not been loaded
unloaded = orm.attributes.instance_state(self).unloaded
for relationship in relationships:
if relationship not in unloaded and hasattr(self, relationship):
value = getattr(self, relationship)
if isinstance(value, list):
yield relationship, list(map(dict, value))
else:
yield relationship, dict(value) if value else value
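# Because __iter__ yields (column, value) pairs, any model mixing in ModelIter can be
# serialised with a plain dict(instance); Paginator.get_items() and the relationship
# branches above rely on exactly that.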
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __composite_values__(self):
return self.x, self.y
def __repr__(self):
return "Point(x=%r, y=%r)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, Point) and \
other.x == self.x and \
other.y == self.y
def __ne__(self, other):
return not self.__eq__(other)
def get_tuple(self):
return self.x, self.y
def get_list(self):
return [self.x, self.y]
def get_dict(self):
return {'long': self.x, 'lat': self.y}
```
#### File: api/dal/user.py
```python
import json
from datetime import datetime, timedelta
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.orm import relationship, deferred
from werkzeug.security import generate_password_hash, check_password_hash
import jwt
from sqlalchemy.dialects import sqlite
from dal import db
from dal.shared import ModelIter
from config import random_token, configs
from config.routes import default_access
# sqlite is used for testing and it does not auto increment Big Int since there's no support
BigInteger = db.BigInteger().with_variant(sqlite.INTEGER(), 'sqlite')
admin_access = {
'company': '*'
}
admin_preferences = {}
user_roles = db.Table(
'user_roles',
db.Column('id', BigInteger, primary_key=True),
db.Column('user_id', BigInteger, db.ForeignKey('users.id'), index=True),
db.Column('role_id', BigInteger, db.ForeignKey('roles.id'), index=True)
)
user_user_groups = db.Table(
'user_user_groups',
db.Column('id', BigInteger, primary_key=True),
db.Column('user_id', BigInteger, db.ForeignKey('users.id'), index=True),
db.Column('group_id', BigInteger, db.ForeignKey('user_groups.id'), index=True)
)
class User(db.Model, ModelIter):
__tablename__ = 'users'
allowed_widget = True
fillable = ['password', 'email', 'first_name', 'last_name', 'deleted']
id = db.Column(BigInteger, primary_key=True)
email = db.Column(db.String(50, collation=configs.DB_COLLATION), nullable=False, unique=True)
password = db.Column(db.String(80, collation=configs.DB_COLLATION), nullable=True)
first_name = db.Column(db.String(50, collation=configs.DB_COLLATION), nullable=False, index=True)
last_name = db.Column(db.String(50, collation=configs.DB_COLLATION), nullable=False, index=True)
created_at = db.Column(db.DateTime(), nullable=False, default=datetime.utcnow)
deleted = db.Column(db.Boolean, nullable=False, server_default='0', index=True)
roles = relationship('Role', secondary=user_roles, lazy='joined', backref=db.backref('users', lazy='dynamic'))
tokens = relationship('UserToken', back_populates='user')
attributes = relationship('UserAttributes', back_populates='user', lazy='joined', uselist=False)
audit = relationship('Audit')
@hybrid_property
def name(self):
return '{} {}'.format(self.first_name, self.last_name)
def hash_password(self):
self.password = generate_password_hash(str(self.password).encode('ascii'), method='sha256')
def password_correct(self, plain_password):
return check_password_hash(self.password, plain_password)
def get_token(self):
exp = datetime.utcnow() + timedelta(minutes=30)
return {
'value': jwt.encode(
{'email': self.email, 'exp': exp},
configs.SECRET_KEY,
algorithm='HS256').decode('utf-8'),
'expires': round(exp.timestamp())
}
class UserAttributes(db.Model, ModelIter):
__tablename__ = 'user_attributes'
ua_id = db.Column(BigInteger, primary_key=True)
user_id = db.Column(BigInteger, db.ForeignKey('users.id'), index=True)
user_access = db.Column(
db.Text(collation=configs.DB_COLLATION),
comment='A JSON schema of table/rows access',
nullable=False,
default='{}'
)
user_preferences = db.Column(
db.Text(collation=configs.DB_COLLATION),
comment='A JSON schema user preferences',
nullable=False,
default='{}'
)
user = relationship(User, back_populates='attributes', uselist=False)
@property
def preferences(self):
return json.loads(self.user_preferences)
@property
def access(self):
return json.loads(self.user_access)
class UserToken(db.Model, ModelIter):
__tablename__ = 'user_tokens'
id = db.Column(BigInteger, primary_key=True)
user_id = db.Column(BigInteger, db.ForeignKey('users.id'), index=True)
token = db.Column(db.String(64, collation=configs.DB_COLLATION, ), unique=True, nullable=False)
expires = db.Column(db.DateTime(), nullable=False)
target = db.Column(
db.String(250, collation=configs.DB_COLLATION),
comment='Target api url where token will be validated',
nullable=False
)
user = relationship(User, back_populates='tokens')
def new_token(self, email: str, expires: datetime = None):
while not self.token:
temp_token = random_token(email)
so = self.query.filter_by(token=temp_token).count()
if not so:
self.token = temp_token
self.expires = expires if expires else datetime.utcnow() + timedelta(hours=4)
class Role(db.Model, ModelIter):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30, collation=configs.DB_COLLATION), index=True)
permissions = db.Column(db.Text(collation=configs.DB_COLLATION))
@property
def get_permissions(self):
combined_permissions = default_access.copy()
if self.permissions:
for key, userGrants in json.loads(self.permissions).items():
for defaultKey, defaultGrants in default_access.items():
if key == defaultKey:
for grant in defaultGrants:
if grant not in userGrants:
userGrants.append(grant)
combined_permissions.update({key: userGrants})
return combined_permissions
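# get_permissions merges the role's stored JSON grants into config.routes.default_access,
# so a role that only stores {"views.company.Company": ["write"]} still ends up with
# ["write", "read"] for that view because "read" is among the defaults.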
class Audit(db.Model, ModelIter):
__tablename__ = 'audits'
id = db.Column(BigInteger, primary_key=True)
date = db.Column(db.DateTime(), nullable=False, index=True, default=datetime.utcnow)
user_id = db.Column(BigInteger, db.ForeignKey('users.id'), index=True, nullable=True)
ip = db.Column(db.String(15), nullable=False)
endpoint = db.Column(db.String(255, collation=configs.DB_COLLATION), nullable=False)
method = db.Column(db.String(7, collation=configs.DB_COLLATION), nullable=False)
headers = db.Column(db.Text(collation=configs.DB_COLLATION))
payload = db.Column(db.Text(collation=configs.DB_COLLATION))
response = db.Column(db.Text(collation=configs.DB_COLLATION))
user = relationship(User, uselist=False)
class CompanyProfile(db.Model, ModelIter):
__tablename__ = 'company_profile'
allowed_widget = True
fillable = ['name', 'address', 'contact', 'logo']
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30, collation=configs.DB_COLLATION), unique=True, nullable=False)
address = db.Column(db.Text(collation=configs.DB_COLLATION), nullable=False)
contact = db.Column(db.String(10, collation=configs.DB_COLLATION), nullable=False)
logo = db.Column(db.LargeBinary)
settings = db.Column(
MutableDict.as_mutable(db.JSON),
comment='A JSON schema for global settings',
nullable=False,
server_default='{}')
class UserMessage(db.Model, ModelIter):
__tablename__ = 'user_messages'
allowed_widget = True
id = db.Column(BigInteger, primary_key=True)
user = relationship(User, uselist=False)
date = db.Column(db.DateTime(), nullable=False, default=datetime.utcnow)
read = db.Column(db.Boolean, nullable=False, index=True, server_default='0')
subject = db.Column(db.String(255, collation=configs.DB_COLLATION), nullable=False)
message = db.Column(db.Text(collation=configs.DB_COLLATION))
user_id = db.Column(BigInteger, db.ForeignKey('users.id'), index=True, nullable=True)
class UserGroup(db.Model, ModelIter):
__tablename__ = 'user_groups'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
users = relationship(
User, secondary=user_user_groups, backref=db.backref('user_groups')
)
class Commentable(db.Model, ModelIter):
__tablename__ = 'comments'
allowed_widget = True
id = db.Column(db.Integer, primary_key=True)
user = relationship(User, backref='notes')
comment = db.Column(db.String)
date = db.Column(db.DateTime(), default=datetime.utcnow)
commentable_id = deferred(db.Column(db.Integer, index=True, nullable=False))
commentable_name = db.Column(db.String(96, collation=configs.DB_COLLATION), nullable=False)
user_id = deferred(db.Column(BigInteger, db.ForeignKey('users.id'), index=True, nullable=True))
#__table_args__ = (db.Index('note_model_name_id_idx', commentable_name, commentable_id), )
__mapper_args__ = {
'polymorphic_on' : commentable_name,
'polymorphic_identity' : 'comment'
}
```
#### File: api/tests/__init__.py
```python
import os
from cryptography.fernet import Fernet
from datetime import datetime, time
secret_key = Fernet.generate_key().decode()
config = """
TESTING = True
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///%s'
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ENGINE_OPTIONS = {
'pool_recycle': True
}
SOCKET_ADDRESS = '/var/run/mem_queue-test.sock'
DB_COLLATION = 'BINARY'
APP_ENV = 'testing'
SECRET_KEY = '%s'
CACHE_CONFIG = {
'CACHE_TYPE': 'simple',
'CACHE_KEY_PREFIX': 'local_dev'
}
TIME_ZONE = 'America/New_York'
AWS_ACCESS_KEY_ID = 'testid'
AWS_SECRET_ACCESS_KEY = 'testsecretkey'
UPLOAD_FILE_BUCKET = 'mytestbucket'
""" % (os.path.dirname(os.environ['APP_SETTINGS_PATH']) + '/testdb', secret_key)
def init():
tear_files() ## just in case
settings_fd = open(os.environ['APP_SETTINGS_PATH'], 'w+')
settings_fd.write(config)
settings_fd.close()
def tear_files():
try:
os.unlink(os.path.dirname(os.environ['APP_SETTINGS_PATH']) + '/testdb')
except OSError:
if os.path.exists(os.path.dirname(os.environ['APP_SETTINGS_PATH']) + '/testdb'):
raise
try:
os.unlink(os.environ['APP_SETTINGS_PATH'])
except OSError:
if os.path.exists(os.environ['APP_SETTINGS_PATH']):
raise
def endpoint(uri):
return '/api/v1.0' + uri
def front_end_date(date: datetime = datetime.utcnow(), _time: str = str(time.min)):
return ' '.join([str(date.date()), _time])
class Mock(object):
pass
```
#### File: api/tests/test_encryptor.py
```python
def test_encryption():
from core import encryptor
encrypted = encryptor.encrypt('hello')
assert isinstance(encrypted, bytes)
assert b'hello' not in encrypted
assert encryptor.decrypt(encrypted) == 'hello'
```
#### File: api/tests/test_meta_records.py
```python
from flask.testing import FlaskClient
from tests import endpoint
def test_countries(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/countries'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 2
for country in resp.json:
if country['name'] == '<NAME>':
assert len(country['provinces']) == 32
else:
assert len(country['provinces']) == 59
from dal.customer import Country
countries = Country.query.all()
assert len(countries) == 2
for country in countries:
assert len(country.provinces) > 30
def test_source_projects(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/source-projects'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 3
assert 'ENESTAR' == resp.json[0]['label']
def test_project_types(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/project-types'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 2
assert 'COMERCIAL' == resp.json[0]['label']
def test_distributors(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/distributors'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 4
assert 'EDENORTE' == resp.json[0]['label']
def test_rates(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/rates'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 4
assert 'BTS1' == resp.json[0]['label']
def test_transformers(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/transformers'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 2
assert 'PROPIO' == resp.json[0]['label']
def test_tr_capacities(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/tr-capacities'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 4
assert '37.50' == resp.json[0]['label']
def test_phases(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/phases'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 2
assert 'MONOFASICO' == resp.json[0]['label']
def test_tensions(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/tensions'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 4
assert 120 == resp.json[0]['label']
def test_panel_models(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/panel-models'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 2
assert 'Q.PEAK L-G5.0.G 375' == resp.json[0]['label']
def test_inverter_models(client: FlaskClient, admin_login):
resp = client.get(endpoint('/meta/inverter-models'), headers=admin_login)
assert resp.status_code == 200
assert len(resp.json) == 9
assert 'SUNNY BOY 3.0-US-40 - 7.7-US-40' == resp.json[0]['label']
```
#### File: api/tests/test_user_actions.py
```python
from base64 import b64encode
from flask.testing import FlaskClient
from tests import endpoint, secret_key
from tests.injector import resources
class LocalUser(object):
id = None
token = None
pass
user = LocalUser()
def test_admin_create_user(client: FlaskClient, admin_login: dict):
resp = client.post(
endpoint('/users'),
json={
'first_name': 'John',
'last_name': 'Smith',
'email': '<EMAIL>',
'roles': [1], # admin
'attributes': {}
},
headers=admin_login
)
assert 'id' in resp.json
assert resp.status_code == 200
assert len(resources.mails) == 1
user.id = resp.json['id']
def test_user_verifies_account(client: FlaskClient):
from dal.user import UserToken
verification_token = UserToken.query.first()
assert verification_token is not None
verification = client.get(endpoint('/user-tokens/badtoken'))
assert verification.status_code == 400
assert 'isValid' in verification.json
assert verification.json['isValid'] == False
verification = client.get(endpoint('/user-tokens/%s' % verification_token.token))
assert verification.status_code == 200
assert 'isValid' in verification.json
assert verification.json['isValid'] == True
user.token = verification_token.token
def test_user_activates_account(client: FlaskClient):
resp = client.post(
endpoint('/account/activate-pass'),
json={
'token': 'bad-token',
'pw': '<PASSWORD>',
'pw2': '<PASSWORD>',
}
)
assert resp.status_code == 400
assert 'Invalid token' in resp.json['error']
resp = client.post(
endpoint('/account/activate-pass'),
json={
'token': user.token,
'pw': '<PASSWORD>',
'pw2': '<PASSWORD>',
}
)
assert resp.status_code == 400
assert 'Invalid password' in resp.json['error']
resp = client.post(
endpoint('/account/activate-pass'),
json={
'token': user.token,
'pw': '<PASSWORD>',
'pw2': '<PASSWORD>',
}
)
assert resp.status_code == 200
def test_token_cannot_be_reused(client: FlaskClient):
verification = client.get(endpoint('/user-tokens/%s' % user.token))
assert verification.status_code == 400
assert 'isValid' in verification.json
assert verification.json['isValid'] == False
def test_new_user_can_login(client: FlaskClient):
auth = {
'Authorization': 'Basic ' + b64encode(b'<EMAIL>:<PASSWORD>').decode()
}
login_resp = client.post(endpoint('/login'), headers=auth)
assert 'token' in login_resp.json, 'token expected'
assert login_resp.status_code == 200
def test_user_changes_password(client: FlaskClient):
from dal.user import UserToken
resp = client.put(endpoint('/users/reset-password'))
assert resp.status_code == 400
assert 'error' in resp.json
assert 'Missing email' in resp.json['error']
resp = client.put(endpoint('/users/reset-password'), json={'email': '<EMAIL>'})
assert resp.status_code == 200
assert len(resources.mails) == 1, 'no email has been sent because user does not exist'
resp = client.put(endpoint('/users/reset-password'), json={'email': '<EMAIL>'})
assert resp.status_code == 200
assert UserToken.query.count() == 2
assert len(resources.mails) == 2, 'an email should have been sent'
token = UserToken.query.offset(1).first()
assert token is not None
resp = client.post(
endpoint('/account/activate-pass'),
json={
'token': token.token,
            'pw': '<PASSWORD>',
            'pw2': '<PASSWORD>',
}
)
assert resp.status_code == 200
auth = {
'Authorization': 'Basic ' + b64encode(b'<EMAIL>:@<PASSWORD>@<PASSWORD>').decode()
}
login_resp = client.post(endpoint('/login'), headers=auth)
assert 'token' in login_resp.json, 'token expected'
assert login_resp.status_code == 200
def test_sending_messages(client: FlaskClient):
from dal.user import UserMessage
resp = client.post(
endpoint('/messages'),
json={
'user_id': user.id,
'subject': 'testing a subject',
'body': '<h1>Hello test</h1><p>This is the body</p>'
},
headers={'X-System-Token': 'secret_key'}
)
assert resp.status_code == 401
resp = client.post(
endpoint('/messages'),
json={
'user_id': user.id,
'subject': 'testing a subject',
'body': '<h1>Hello test</h1><p>This is the body</p>'
},
headers={'X-System-Token': secret_key}
)
assert resp.status_code == 200
messages = UserMessage.query.all()
assert len(messages) == 1
assert messages[0].user_id == user.id
assert messages[0].read == False
assert messages[0].subject == 'testing a subject'
assert messages[0].message == '<h1>Hello test</h1><p>This is the body</p>'
def test_get_user_messages(client: FlaskClient, admin_login):
from core.messages import send_message
from dal.user import User
admin = User.query.filter_by(email='<EMAIL>').first()
send_message(admin.id, 'testing a subject', '<h1>Hello test</h1><p>This is the body</p>')
resp = client.get(endpoint('/messages'), headers=admin_login)
assert resp.status_code == 200
assert 'list' in resp.json
assert len(resp.json['list']) == 1
assert 'subject' in resp.json['list'][0]
assert 'id' in resp.json['list'][0]
assert 'message' in resp.json['list'][0]
assert 'read' in resp.json['list'][0]
assert 'date' in resp.json['list'][0]
assert resp.json['list'][0]['read'] == False
def test_mark_notification_read(client: FlaskClient, admin_login):
resp = client.get(endpoint('/messages'), headers=admin_login)
_id = resp.json['list'][0]['id']
resp = client.put(endpoint('/messages/%s' % _id))
assert resp.status_code == 200
resp = client.get(endpoint('/messages'), headers=admin_login)
assert 'read' in resp.json['list'][0]
assert resp.json['list'][0]['read'] == True
```
|
{
"source": "jcuna/room-mgt",
"score": 2
}
|
#### File: api/config/routes.py
```python
def register():
"""
declare menuItems as follows: 'module.PluralNameClass@endpoint': 'route'
returns: dict
"""
return {
'users.Users@users_url': '/user',
'users.UsersManager@users_manager_url': '/users|/users/<int:user_id>',
'users.UserPasswords@user_passwords_url': '/users/reset-password',
'users.Session@login_url': '/login',
'users.Roles@roles_url': '/roles',
'users.Permissions@permissions_url': '/permissions',
'users.UserTokens@user_tokens_url': '/user-tokens|/user-tokens/<user_token>',
'users.Activate@user_activate_url': '/account/activate-pass',
'users.Audit@audit_url': '/audit|/audit/<int:user_id>',
'users.Messages@messages_url': '/messages|/messages/<int:message_id>',
'company.Company@company_url': '/company',
'projects.Projects@projects_url': '/projects|/projects/<int:project_id>',
'projects.Rooms@rooms_url': '/rooms|/rooms/<int:room_id>',
'projects.RoomsHistory@rooms_history_url': '/rooms-history/|/rooms-history/<int:room_id>',
'projects.TimeIntervals@time_intervals_url': '/time-intervals',
'projects.PaymentTypes@payment_types_url': '/payment-types',
'projects.Reports@reports_url': '/reports|/reports/<string:report_uid>',
'agreements.Agreements@agreements_url': '/agreements|/agreements/<int:agreement_id>',
'agreements.Policies@policies_url': '/policies|/policies/<int:policy_id>',
'agreements.BalancePayments@balance_payments_url': '/pay-balance|/pay-balance/<int:agreement_id>',
'agreements.Receipts@receipts_url': '/receipts',
'tenants.Tenants@tenants_url': '/tenants|/tenants/<int:tenant_id>',
'expenses.Expenses@expenses_url': '/expenses|/expenses/<int:expense_id>',
'expenses.ExpenseScans@expense_scans_url': '/expense-scans|/expense-scans/<string:token>/<int:expense_id>',
'shared.Email@emails_url': '/email|/email/<string:action>',
'shared.HtmlToPdf@html_to_pdf_url': '/to-pdf',
'shared.Widgets@widgets_url': '/widgets',
'shared.RunWidget@run_widgets_url': '/widgets/<string:widget_name>',
}
no_permissions = [
'views.users.Session',
'views.users.Users',
'views.users.UserTokens',
'views.users.Permissions',
'views.users.Activate',
'views.users.UserPasswords',
'views.users.Messages',
'views.projects.TimeIntervals',
'views.projects.PaymentTypes',
'views.expenses.ExpenseScans',
'views.shared.Email',
'views.shared.HtmlToPdf',
]
default_access = {
'views.agreements.Receipts': ['read'],
'views.company.Company': ['read'],
}
```
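The `register()` mapping pairs `'module.ClassName@endpoint'` keys with URL rules, using `|` to separate alternative rules for the same view. Below is a minimal sketch of how such a mapping could be consumed, assuming Flask `MethodView`-style classes living under a `views` package; the `register_routes` helper and the bootstrap call are illustrative, not the repository's actual wiring:

```python
from importlib import import_module

from flask import Flask


def register_routes(app: Flask, routes: dict) -> None:
    """Wire a {'module.ClassName@endpoint': 'rule1|rule2'} mapping into a Flask app."""
    for target, rules in routes.items():
        dotted, endpoint_name = target.split('@')       # e.g. 'users.Users', 'users_url'
        module_name, class_name = dotted.rsplit('.', 1)
        view_cls = getattr(import_module(f'views.{module_name}'), class_name)
        view = view_cls.as_view(endpoint_name)           # assumes MethodView-style classes
        for rule in rules.split('|'):                    # '|' separates alternative URL rules
            app.add_url_rule(rule, view_func=view)


app = Flask(__name__)
# register_routes(app, register())  # hypothetical call site
```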
#### File: core/AWS/base.py
```python
from datetime import datetime, timedelta
import boto3
from config import configs
class Base(object):
clients = {}
"""Clients dictionary"""
session = {}
"""Session dictionary"""
resources = {}
"""resources dictionary"""
def __init__(self):
if 'session' not in self.session or self.session['expire'] < datetime.utcnow():
self.session.update({'session':
boto3.Session(
aws_access_key_id=configs.AWS_ACCESS_KEY_ID,
aws_secret_access_key=configs.AWS_SECRET_ACCESS_KEY,
region_name=configs.AWS_REGION
), 'expire': datetime.utcnow() + timedelta(hours=1)})
def get_resource(self, rsrc):
if rsrc not in self.resources:
self.resources[rsrc] = self.session['session'].resource(rsrc)
return self.resources[rsrc]
def get_client(self, rsrc='s3'):
if rsrc not in self.clients:
self.clients[rsrc] = self.session['session'].client(rsrc)
return self.clients[rsrc]
```
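Because `clients`, `session` and `resources` are class-level dictionaries, every subclass of `Base` shares a single cached boto3 session that is rebuilt an hour after it was created. A short usage sketch, assuming valid AWS credentials are available through `configs`:

```python
# Illustrative usage; requires valid AWS credentials in `configs`.
base = Base()
s3_client = base.get_client('s3')         # built once, then cached in Base.clients
dynamodb = base.get_resource('dynamodb')  # cached in Base.resources

another = Base()                          # shares the same class-level caches
assert another.get_client('s3') is s3_client
```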
#### File: core/AWS/resource.py
```python
from boto3.dynamodb.conditions import Key, Attr
from core.AWS.base import Base
from config import configs
class Resource(Base):
def __init__(self):
super().__init__()
self.monthly_report_table = configs.AWS_MONTHLY_REPORT_TABLE
def get_monthly_reports_table(self):
return self.get_resource('dynamodb').Table(self.monthly_report_table)
@staticmethod
def query(table, key1, value1, key2=None, value2=None):
exp = Key(key1).eq(value1)
if key2 is not None and value2 is not None:
exp = exp & Key(key2).eq(value2)
return table.query(
KeyConditionExpression=exp
)
@staticmethod
def scan(table, index, value, limit=20, start_key=None):
args = {
'IndexName': index,
'FilterExpression': Attr(index).eq(value),
'Limit': limit
}
if start_key is not None:
args['ExclusiveStartKey'] = start_key
return table.scan(**args)
def select_monthly_report(self, value):
return self.query(self.get_monthly_reports_table(), 'date', value)
def insert_monthly_report(self, value: dict):
client = self.get_client('dynamodb')
return client.put_item(
TableName=self.monthly_report_table,
Item=value
)
```
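`Resource.scan` forwards an optional `start_key` as DynamoDB's `ExclusiveStartKey`, so paginating a large table means feeding each response's `LastEvaluatedKey` back in until it disappears. A sketch of that loop (the helper name and page size are illustrative):

```python
def scan_all(resource: Resource, index: str, value, page_size: int = 20) -> list:
    """Collect every matching item by following DynamoDB's pagination keys."""
    table = resource.get_monthly_reports_table()
    items, start_key = [], None
    while True:
        page = Resource.scan(table, index, value, limit=page_size, start_key=start_key)
        items.extend(page.get('Items', []))
        start_key = page.get('LastEvaluatedKey')  # absent once the final page is reached
        if start_key is None:
            break
    return items
```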
#### File: core/AWS/storage.py
```python
from botocore.exceptions import ClientError
from core.AWS.base import Base
from core.utils import get_logger
class Storage(Base):
def __init__(self, bucket, resource='s3'):
super().__init__()
self.bucket = bucket
self.app_logger = get_logger('app')
if resource not in self.clients:
self.clients.update({resource: self.session['session'].client(resource)})
def get_file(self, object_name):
return self.get_client().get_object(Bucket=self.bucket, Key=object_name)
def put_new(self, body, object_name, content_type=None):
if hasattr(body, 'content_type'):
content_type = body.content_type
elif not content_type:
content_type = 'binary/octet-stream'
return self.get_client().put_object(Body=body, Bucket=self.bucket, Key=object_name, ContentType=content_type)
def remove(self, name):
s3 = self.session['session'].resource('s3')
s3.Object(self.bucket, name).delete()
def get_all_objects(self, prefix=''):
resources = self.session['session'].client('s3').list_objects_v2(Bucket=self.bucket, Prefix=prefix)
return resources['Contents']
def get_bucket(self, bucket):
s3 = self.session['session'].resource('s3')
return s3.Bucket(bucket)
def sign_url(self, object_name, expiration=14400):
"""Generate a presigned URL to share an S3 object
:param object_name: string
:param expiration: Time in seconds for the presigned URL to remain valid
:return: Presigned URL as string. If error, returns None.
"""
try:
response = self.get_client().generate_presigned_url(
'get_object',
Params={'Bucket': self.bucket, 'Key': object_name},
ExpiresIn=expiration
)
except ClientError as e:
self.app_logger.error(e)
return None
# The response contains the presigned URL
return response
def upload_file(self, file_name, object_name):
"""Upload a file to an S3 bucket
:param file_name: File to upload
:param object_name: S3 object name. If not specified then file_name is used
:return: True if file was uploaded, else False
"""
try:
response = self.get_client().upload_file(file_name, self.bucket, object_name)
except ClientError as e:
self.app_logger.error(e)
return False
return response
```
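A typical round trip with `Storage` is to upload an object and hand out a time-limited link; note that `sign_url` and `upload_file` swallow `ClientError` and return `None`/`False` after logging. The bucket and object names below are placeholders:

```python
# Illustrative usage; bucket and object names are placeholders.
storage = Storage('example-bucket')
with open('receipt.pdf', 'rb') as fh:
    storage.put_new(fh, 'receipts/2021/receipt.pdf', content_type='application/pdf')

url = storage.sign_url('receipts/2021/receipt.pdf', expiration=3600)  # valid for one hour
if url is None:
    print('presigned URL could not be generated')  # the error was logged by sign_url
```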
#### File: api/tests/test_generate_monthly_report.py
```python
from datetime import datetime, time as d_time, timedelta
import pytz
from flask.testing import FlaskClient
from tests import front_end_date, endpoint, Mock
from tests.injectors import resources
from tests.seeders import seed_project, seed_room, seed_tenant, seed_new_agreement
project = Mock()
project.id = None
project.id2 = None
def test_report_format(client: FlaskClient, aws, admin_login):
from core.crons.monthly_report import generate_report
from_date = datetime.utcnow().astimezone(pytz.timezone('America/New_York')).date().replace(day=1)
resp = seed_project(client, admin_login, {'name': 'ProjectName', 'address': '123 Micksburg St'})
assert resp.status_code == 200
project_id = resp.json['id']
generate_report(from_date, project_id)
assert 'monthly_report' in aws.dynamo
assert len(aws.dynamo['monthly_report']) == 1
assert str(from_date) in aws.dynamo['monthly_report'][0]['from_date']['S']
assert 'project' in aws.dynamo['monthly_report'][0]
assert 'project_id' in aws.dynamo['monthly_report'][0]
assert 'address' in aws.dynamo['monthly_report'][0]
assert aws.dynamo['monthly_report'][0]['project']['S'] == 'ProjectName'
assert aws.dynamo['monthly_report'][0]['address']['S'] == '123 Micksburg St'
assert 'report_day' in aws.dynamo['monthly_report'][0]
assert 'expenses' in aws.dynamo['monthly_report'][0]
assert 'income' in aws.dynamo['monthly_report'][0]
assert 'from_date' in aws.dynamo['monthly_report'][0]
assert 'to_date' in aws.dynamo['monthly_report'][0]
assert 'total_income' in aws.dynamo['monthly_report'][0]
assert 'total_expenses' in aws.dynamo['monthly_report'][0]
assert 'revenue' in aws.dynamo['monthly_report'][0]
def test_seed_monthly_data(client: FlaskClient, admin_login):
from dal.models import RentalAgreement
resp = seed_project(client, admin_login, {'name': 'ProjectName1', 'address': '321 Cheeksburg Ave'})
project.id = project_id = resp.json['id']
resp_room = seed_room(client, admin_login, {'project_id': project_id, 'name': 'RM-111'}).json['id']
resp_room2 = seed_room(client, admin_login, {'project_id': project_id, 'name': 'RM-112'}).json['id']
tenant_override1 = {
'email': '<EMAIL>',
'first_name': 'Sample5',
'last_name': 'Tenant5',
'identification_number': '555-1234567-9',
'phone': '5555555546'
}
tenant_override2 = {
'email': '<EMAIL>',
'first_name': 'Sample6',
'last_name': 'Tenant6',
'identification_number': '666-1234567-8',
'phone': '775555556'
}
tenant_id = seed_tenant(client, admin_login, tenant_override1).json['id']
tenant_id2 = seed_tenant(client, admin_login, tenant_override2).json['id']
registration_override1 = {
'date': front_end_date(), # defaults to today
'deposit': '3000.00',
        'interval': '100',  # 100 = weekly, 200 = every two weeks, 400 = monthly
'rate': '1500.00',
'reference1': '1234561324',
'reference2': '',
'reference3': '',
'room_id': resp_room,
'tenant_id': tenant_id
}
registration_override2 = {
'date': front_end_date(), # defaults to today
'deposit': '3000.00',
        'interval': '200',  # 100 = weekly, 200 = every two weeks, 400 = monthly
'rate': '2000.00',
'reference1': '8293421324',
'reference2': '8095643214',
'reference3': '8095020212',
'room_id': resp_room2,
'tenant_id': tenant_id2
}
r1 = seed_new_agreement(client, admin_login, registration_override1)
r2 = seed_new_agreement(client, admin_login, registration_override2)
balance1 = RentalAgreement.query.filter(RentalAgreement.id == r1.json['id']).first().balances[0].id
balance2 = RentalAgreement.query.filter(RentalAgreement.id == r2.json['id']).first().balances[0].id
payment_a1_1 = client.post(
endpoint('/pay-balance'),
json={'balance_id': balance1, 'payment_type_id': 1, 'amount': 500},
headers=admin_login
)
assert payment_a1_1.status_code == 200
payment_a1_2 = client.post(
endpoint('/pay-balance'),
json={'balance_id': balance1, 'payment_type_id': 1, 'amount': 2500},
headers=admin_login
)
assert payment_a1_2.status_code == 200
payment_a2_1 = client.post(
endpoint('/pay-balance'),
json={'balance_id': balance2, 'payment_type_id': 1, 'amount': 800},
headers=admin_login
)
assert payment_a2_1.status_code == 200
payment_a2_2 = client.post(
endpoint('/pay-balance'),
json={'balance_id': balance2, 'payment_type_id': 1, 'amount': 5000},
headers=admin_login
)
assert payment_a2_2.status_code == 200
payment_a2_4 = client.post(
endpoint('/pay-balance'),
json={'balance_id': balance2, 'payment_type_id': 1, 'amount': 225.25},
headers=admin_login
)
assert payment_a2_4.status_code == 200
assert 'id' in payment_a2_4.json
payment_a2_3 = client.post(
endpoint('/pay-balance'),
json={'balance_id': balance2, 'payment_type_id': 1, 'amount': -1000},
headers=admin_login
)
assert payment_a2_3.status_code == 200
assert 'id' in payment_a2_3.json
expense1 = client.post(endpoint('/expenses'), json={
'nonce': "randome1234",
'amount': "600.00",
'description': "Something",
'date': front_end_date(),
}, headers=admin_login)
assert expense1.status_code == 200
expense2 = client.post(endpoint('/expenses'), json={
'nonce': "random54321",
'amount': "1500.00",
'description': "Another expense",
'date': front_end_date(),
}, headers=admin_login)
assert expense2.status_code == 200
expense3 = client.post(endpoint('/expenses'), json={
'nonce': "random94932",
'amount': "620.00",
'description': "A thrid expense",
'date': front_end_date(),
}, headers=admin_login)
assert expense3.status_code == 200
assert 'token' in expense3.json
assert 'domain' in expense3.json
assert 'id' in expense3.json
def test_report_generated(aws):
from core.crons.monthly_report import generate_report
from_date = datetime.utcnow().astimezone(pytz.timezone('America/New_York')).date().replace(day=1)
generate_report(from_date, project.id)
# if from_date is the same, then report gets overwritten
assert len(aws.dynamo['monthly_report']) == 2
key = str(datetime.combine(from_date, d_time.min).astimezone(pytz.utc).date())
assert 'expenses' in aws.dynamo['monthly_report'][1]
assert 'income' in aws.dynamo['monthly_report'][1]
assert 'total_income' in aws.dynamo['monthly_report'][1]
assert 'total_expenses' in aws.dynamo['monthly_report'][1]
assert 'revenue' in aws.dynamo['monthly_report'][1]
assert aws.dynamo['monthly_report'][1]['from_date']['S'] == key
assert len(aws.dynamo['monthly_report'][1]['expenses']['L']) == 3
assert aws.dynamo['monthly_report'][1]['total_expenses']['S'] == '2720.00'
assert len(aws.dynamo['monthly_report'][1]['income']['L']) == 6
assert aws.dynamo['monthly_report'][1]['total_income']['S'] == '8025.25'
assert aws.dynamo['monthly_report'][1]['revenue']['S'] == '5305.25'
def test_seed_second_project(client: FlaskClient, admin_login):
from dal.models import RentalAgreement
resp2 = seed_project(client, admin_login, {'name': 'ProjectName2', 'address': '555 Beverly Bottoms St'})
assert resp2.status_code == 200
project.id2 = resp2.json['id']
change_proj = client.put(endpoint('/projects/%s' % project.id2), json={
'id': project.id2,
'active': True,
}, headers=admin_login)
assert change_proj.status_code == 200
tenant_override3 = {
'email': '<EMAIL>',
'first_name': 'Sample8',
'last_name': 'Tenant8',
'identification_number': '888-1234567-8',
'phone': '775555888'
}
tenant_id3 = seed_tenant(client, admin_login, tenant_override3).json['id']
resp_room3 = seed_room(client, admin_login, {'project_id': project.id2, 'name': 'RM-404'}).json['id']
registration_override3 = {
'date': front_end_date(), # defaults to today
'deposit': '3000.00',
        'interval': '400',  # 100 = weekly, 200 = every two weeks, 400 = monthly
'rate': '4000.00',
'reference1': '8293429565',
'reference2': '8095645542',
'reference3': '8095023124',
'room_id': resp_room3,
'tenant_id': tenant_id3
}
r3 = seed_new_agreement(client, admin_login, registration_override3)
balance3 = RentalAgreement.query.filter(RentalAgreement.id == r3.json['id']).first().balances[0].id
expense4 = client.post(endpoint('/expenses'), json={
'nonce': "randome9656",
'amount': "600.00",
'description': "Something for project 2",
'date': front_end_date(),
}, headers=admin_login)
assert expense4.status_code == 200
payment_a3_1 = client.post(
endpoint('/pay-balance'),
json={'balance_id': balance3, 'payment_type_id': 1, 'amount': 4000},
headers=admin_login
)
assert payment_a3_1.status_code == 200
assert 'id' in payment_a3_1.json
def test_project_2_monthly_report(aws):
from core.crons.monthly_report import generate_report
from_date = datetime.utcnow().astimezone(pytz.timezone('America/New_York')).date().replace(day=1)
generate_report(from_date, project.id2)
# if from_date is the same, then report gets overwritten
assert len(aws.dynamo['monthly_report']) == 3
key2 = str(datetime.combine(from_date, d_time.min).astimezone(pytz.utc).date())
assert aws.dynamo['monthly_report'][2]['from_date']['S'] == key2
assert 'expenses' in aws.dynamo['monthly_report'][2]
assert 'income' in aws.dynamo['monthly_report'][2]
assert 'total_income' in aws.dynamo['monthly_report'][2]
assert 'total_expenses' in aws.dynamo['monthly_report'][2]
assert 'revenue' in aws.dynamo['monthly_report'][2]
assert len(aws.dynamo['monthly_report'][2]['expenses']['L']) == 1
assert aws.dynamo['monthly_report'][2]['total_expenses']['S'] == '600.00'
assert len(aws.dynamo['monthly_report'][2]['income']['L']) == 1
assert aws.dynamo['monthly_report'][2]['total_income']['S'] == '4000.00'
assert aws.dynamo['monthly_report'][2]['revenue']['S'] == '3400.00'
assert 'balance' in aws.dynamo['monthly_report'][2]['income']['L'][0]['M']
assert 'agreement' in aws.dynamo['monthly_report'][2]['income']['L'][0]['M']['balance']['M']
def test_generate_all_from_last_month(aws):
from core.crons.monthly_report import generate_all_reports
generate_all_reports()
this_month_first = datetime.utcnow().date().replace(day=1)
last_month_first = (this_month_first - timedelta(days=1)).replace(day=1)
assert len(aws.dynamo['monthly_report']) == 6
key = str(datetime.combine(last_month_first, d_time.min).astimezone(pytz.utc).date())
assert aws.dynamo['monthly_report'][4]['from_date']['S'] == key
key2 = str(datetime.combine(last_month_first, d_time.min).astimezone(pytz.utc).date())
assert aws.dynamo['monthly_report'][5]['from_date']['S'] == key2
def test_api_get_project_report(client: FlaskClient, aws, admin_login: dict):
this_month_first = datetime.utcnow().date().replace(day=1)
future_month = (this_month_first + timedelta(days=31)).replace(day=1)
last_month_first = (this_month_first - timedelta(days=1)).replace(day=1)
prior_to_last_month_first = (last_month_first - timedelta(days=1)).replace(day=1)
three_months_ago = (last_month_first - timedelta(days=1)).replace(day=1)
resp = client.get(endpoint('/reports/%s-%s' % (project.id, prior_to_last_month_first)), headers=admin_login)
assert resp.status_code == 404
resp = client.get(endpoint('/reports/%s-%s' % (project.id, three_months_ago)), headers=admin_login)
assert resp.status_code == 404
resp = client.get(endpoint('/reports/%s-%s' % (project.id, last_month_first)), headers=admin_login)
assert resp.status_code == 200
resp = client.get(endpoint('/reports/%s-%s' % (project.id, this_month_first)), headers=admin_login)
assert resp.status_code == 200
resp = client.get(endpoint('/reports/%s-%s' % (project.id, future_month)), headers=admin_login)
assert resp.status_code == 404
def test_user_email_sent():
assert len(resources.mails) == 3, 'it should have sent every user with access an email for each report generated'
assert len(resources.requests) == 3
def test_get_reports_by_projects(client: FlaskClient, admin_login):
resp = client.get(endpoint('/reports?project_id=%s' % project.id), headers=admin_login)
assert resp.status_code == 200
assert 'items' in resp.json
assert len(resp.json['items']) == 2
```
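These assertions read the report straight out of a mocked DynamoDB store, so every field appears in DynamoDB's low-level attribute-value encoding: `'S'` for strings, `'L'` for lists, `'M'` for maps. A trimmed illustration of one stored item, using the figures asserted for the second project; any nesting beyond what the tests check (and the date value) is guessed for illustration only:

```python
# Shape of a stored monthly report in DynamoDB attribute-value form.
# Values come from the project-2 assertions above; the inner structure of the
# 'expenses' entries and the concrete date are illustrative only.
report_item = {
    'project': {'S': 'ProjectName2'},
    'address': {'S': '555 Beverly Bottoms St'},
    'from_date': {'S': '2021-06-01'},   # first day of the reporting month (placeholder date)
    'total_expenses': {'S': '600.00'},
    'total_income': {'S': '4000.00'},
    'revenue': {'S': '3400.00'},
    'expenses': {'L': [{'M': {}}]},     # one expense recorded for this project
    'income': {'L': [{'M': {'balance': {'M': {'agreement': {'M': {}}}}}}]},
}
```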
#### File: api/tests/test_html_to_pdf.py
```python
from base64 import b64encode
from flask.testing import FlaskClient
from tests import endpoint
def test_api_get_project_report(client: FlaskClient, admin_login: dict):
from urllib.parse import quote
resp = client.post(endpoint('/to-pdf'), json={})
assert resp.status_code == 401
assert resp.json['error'] == 'Token is missing!'
resp = client.post(endpoint('/to-pdf'), json={}, headers=admin_login)
assert resp.status_code == 400
assert resp.json['error'] == 'Missing necessary arguments'
data = {
'html': b64encode(quote('<div><h1>Hello</h1></div>').encode()).decode(),
'styles': b64encode(quote('<style>h1 {color: red;}</style>').encode()).decode(),
'extra_css': [b64encode(
quote('https://stackpath.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css').encode()
).decode()],
'filename': 'filename'
}
resp = client.post(endpoint('/to-pdf'), json=data, headers=admin_login)
assert resp.status_code == 200
assert isinstance(resp.data, bytes)
assert resp.content_type == 'application/pdf'
assert resp.headers['Content-Disposition'] == "attachment; filename=filename.pdf"
```
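The payload here is built by URL-quoting each fragment and then base64-encoding the result, so a client helper for `/to-pdf` could look like the sketch below (the helper name and the extra stylesheet URL are assumptions):

```python
from base64 import b64encode
from urllib.parse import quote


def encode_fragment(fragment: str) -> str:
    """URL-quote then base64-encode a snippet, mirroring the payload in the test above."""
    return b64encode(quote(fragment).encode()).decode()


payload = {
    'html': encode_fragment('<div><h1>Hello</h1></div>'),
    'styles': encode_fragment('<style>h1 {color: red;}</style>'),
    'extra_css': [encode_fragment('https://example.com/extra.css')],  # placeholder URL
    'filename': 'report',
}
# resp = client.post(endpoint('/to-pdf'), json=payload, headers=admin_login)  # as in the test
```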
#### File: api/tests/test_sessions.py
```python
import io
from base64 import b64encode, b64decode
from sqlalchemy.exc import OperationalError
import pytest
from tests import endpoint
def test_install(no_db_client):
from dal.models import User
from dal.models import CompanyProfile
rv = no_db_client.get(endpoint('/user'))
assert rv.json['error'] == 'install'
assert rv.status_code == 501
with pytest.raises(OperationalError) as ex:
User.query.count()
    assert 'no such table' in str(ex.value)
no_db_client.get('/install')
post = {
'email': '<EMAIL>',
'password': '<PASSWORD>',
'first_name': 'John',
'last_name': 'Smith',
'company_name': 'Green CRN',
'address': '1500 Sample St. Sunnyside CA 98996',
'contact': '5555555555',
'logo': (io.BytesIO(b'12345asdfg'), 'test.png'),
}
rv = no_db_client.post('/install', data=post, content_type='multipart/form-data')
assert b'Redirecting' in rv.data
u = User.query.all()
assert len(u) == 1
assert u[0].email == '<EMAIL>'
c = CompanyProfile.query.all()
assert len(c) == 1
assert c[0].name == 'Green CRN'
assert isinstance(c[0].logo, bytes)
def test_fetch_company(no_db_client):
resp = no_db_client.get(endpoint('/company'))
assert 'name' in resp.json
assert 'logo' in resp.json
assert resp.json['logo'] is not None
assert isinstance(resp.json['logo'], str)
assert isinstance(b64decode(resp.json['logo']), bytes)
def test_no_session(no_db_client):
rv = no_db_client.get(endpoint('/user'))
assert rv.json['error'] == 'no session'
assert rv.status_code == 403
def test_login(no_db_client):
auth = {
'Authorization': 'Basic ' + b64encode(b'<EMAIL>' + b':' + b'master').decode()
}
rv = no_db_client.post(endpoint('/login'), headers=auth)
assert rv.json['user']['email'] == '<EMAIL>'
assert 'value' in rv.json['token']
assert rv.status_code == 200
```
#### File: api/tests/test_widgets.py
```python
from flask.testing import FlaskClient
from tests import endpoint
def test_widgets_return_models(client: FlaskClient, admin_login: dict):
resp = client.get(endpoint('/widgets'))
assert resp.status_code == 401
assert resp.json['error'] == 'Token is missing!'
resp = client.get(endpoint('/widgets'), headers=admin_login)
assert resp.status_code == 200
assert isinstance(resp.json, list)
assert len(resp.json) == 9
assert resp.json[0]['class'] == 'dal.models.Project'
assert len(resp.json[0]['fields']) == 5
assert len(resp.json[0]['relationships']) == 1
assert resp.json[0]['relationships'][0]['class'] == 'dal.models.Room'
assert resp.json[0]['relationships'][0]['name'] == 'rooms'
def test_widget_create_widget(client: FlaskClient, admin_login: dict):
from dal.models import CompanyProfile
from dal.models import User
resp = client.post(endpoint('/widgets'), json={}, headers=admin_login)
assert resp.status_code == 400
assert 'description' in resp.json['error']
assert 'name' in resp.json['error']
assert 'schema' in resp.json['error']
schema = {
'name': 'Something else',
'description': 'A description to show',
'schema': []
}
resp = client.post(endpoint('/widgets'), json=schema, headers=admin_login)
assert resp.status_code == 400
assert resp.json['error']['name'] == 'Name may consists of letters, dashes and underscores'
schema = {
'name': 'schema_name',
'description': 'A description to show',
'schema': {
'model': 'dal.models.Balance',
'conditions': [{'AND': [{'column': 'dal.models.Balance.due_date', 'value': 'today', 'comparator': 'lt'}]}],
'limit': 1,
'order_dir': 'desc',
'order_by': 'dal.models.Balance.due_date',
'fields': [
'dal.models.Balance.balance',
'dal.models.Balance.due_date',
'dal.models.Tenant.last_name',
'dal.models.RentalAgreement.id',
'dal.models.Room.name'
],
'relationships': [
'dal.models.RentalAgreement', 'dal.models.TenantHistory', 'dal.models.Tenant',
'dal.models.Room'
]
}
}
resp = client.post(endpoint('/widgets'), json=schema, headers=admin_login)
assert resp.status_code == 200
c = CompanyProfile.query.first()
assert 'widgets' in c.settings
assert 'schema_name' in c.settings['widgets']
assert c.settings['widgets']['schema_name']['name'] == 'schema_name'
assert c.settings['widgets']['schema_name']['description'] == 'A description to show'
assert c.settings['widgets']['schema_name']['schema']['model'] == 'dal.models.Balance'
schema2 = {
'name': 'new_users',
'private': True,
'description': 'A 2ns description',
'schema': {
'model': 'dal.models.User',
'conditions': [{'AND': [{'column': 'dal.models.User.created_at', 'value': 'today', 'comparator': 'le'}]}],
'fields': ['dal.models.User.id', 'dal.models.User.first_name', 'dal.models.User.last_name']
}
}
resp = client.post(endpoint('/widgets'), json=schema2, headers=admin_login)
assert resp.status_code == 200
admin = User.query.filter_by(email='<EMAIL>').first()
assert 'widgets' in admin.attributes.preferences
assert 'new_users' in admin.attributes.preferences['widgets']
assert admin.attributes.preferences['widgets']['new_users']['name'] == 'new_users'
assert admin.attributes.preferences['widgets']['new_users']['description'] == 'A 2ns description'
assert admin.attributes.preferences['widgets']['new_users']['schema']['model'] == 'dal.models.User'
def test_run_widget(client: FlaskClient, admin_login: dict):
resp = client.get(endpoint('/widgets/dont-exist'), headers=admin_login)
assert resp.status_code == 404
resp = client.get(endpoint('/widgets/schema_name'), headers=admin_login)
assert resp.status_code == 200
assert type(resp.json) == list
resp = client.get(endpoint('/widgets/new_users?type=private'), headers=admin_login)
assert resp.status_code == 200
assert type(resp.json) == list
assert len(resp.json) == 1
```
#### File: api/views/users.py
```python
import datetime
import re
from flask_socketio import emit
import sqlalchemy
from flask import session, json, current_app, render_template, url_for, request
from sqlalchemy.orm import joinedload
from core import Cache, API
from core.middleware import HttpException
from core.messages import send_message
from core.router import permissions
from dal.shared import get_fillable, token_required, access_required, Paginator, system_call
from dal.models import User, db, Role, UserToken, UserAttributes, UserMessage
from flask_mail import Message
from views import Result
ACTION_RESET_PW = '<PASSWORD>-password'
class Users(API):
def get(self):
if 'logged_in' in session:
try:
user = User.query.filter_by(email=session['user_email']).first()
except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):
return Result.error('install', 501)
if user:
return user_to_dict(user)
else:
try:
                # The second argument is a callable that raises an exception if the
                # table does not exist; the count is cached so the query is not
                # re-executed every time a logged-out user hits this endpoint.
Cache.remember('users.count', User.query.count, 24 * 60 * 60)
except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):
return Result.error('install', 501)
return Result.error('no session', 403)
@token_required
def put(self):
user = request.user
raw_data = request.get_json()
if 'first_name' in raw_data:
user.first_name = raw_data['first_name']
if 'last_name' in raw_data:
user.last_name = raw_data['last_name']
access = user.attributes.access
preferences = user.attributes.preferences
if raw_data['attributes']:
if 'access' in raw_data['attributes']:
user.attributes.user_access = json.dumps({**access, **raw_data['attributes']['access']})
if 'preferences' in raw_data['attributes']:
user.attributes.user_preferences = json.dumps({**preferences, **raw_data['attributes']['preferences']})
db.session.commit()
emit('USER_WS_CHANGED', {'data': user.id}, namespace='/' + str(user.id), broadcast=True)
return Result.success()
class UsersManager(API):
@token_required
@access_required
def get(self):
page = request.args.get('page', 1)
total_pages = 1
q = request.args.get('query')
if q:
users = User.query.filter(
(User.first_name.like('%' + q + '%')) |
(User.last_name.like('%' + q + '%')) |
(User.email.like('%' + q + '%'))
).all()
else:
paginator = Paginator(User.query, int(page), request.args.get('orderBy'), request.args.get('orderDir'))
total_pages = paginator.total_pages
users = paginator.get_result()
user_list = list(map(lambda user: {
'first_name': user.first_name,
'last_name': user.last_name,
'name': user.first_name + ' ' + user.last_name,
'id': user.id,
'email': user.email,
'attributes': get_user_attr(user),
'roles': list(map(lambda r: {
'name': r.name,
'id': r.id
}, user.roles))
}, users))
return Result.paginate(user_list, page, total_pages)
@token_required
@access_required
def post(self):
raw_data = request.get_json()
user_data = get_fillable(User, **raw_data)
if 'email' in user_data:
user_data['email'] = user_data['email'].lower()
user = User(**user_data)
if 'password' in raw_data:
user.hash_password()
if raw_data['roles']:
for role in Role.query.filter(Role.id.in_(raw_data['roles'])):
user.roles.append(role)
user.attributes = UserAttributes()
if raw_data['attributes'] and 'access' in raw_data['attributes']:
user.attributes.user_access = json.dumps(raw_data['attributes']['access'])
if raw_data['attributes'] and 'preferences' in raw_data['attributes']:
user.attributes.user_preferences = json.dumps(raw_data['attributes']['preferences'])
db.session.add(user)
db.session.commit()
if not user.password:
send_user_token_email(user, 'Verifica Tu Cuenta', 'email/account_activate.html')
return dict(id=user.id)
@token_required
@access_required
def put(self, user_id):
raw_data = request.get_json()
user = User.query.options(joinedload('roles')).filter_by(id=user_id).first()
if not user:
return Result.error('User does not exist')
user.first_name = raw_data['first_name']
user.last_name = raw_data['last_name']
user.roles = []
if raw_data['attributes']:
if raw_data['attributes'] and 'access' in raw_data['attributes']:
user.attributes.user_access = json.dumps(raw_data['attributes']['access'])
if raw_data['attributes'] and 'preferences' in raw_data['attributes']:
user.attributes.user_preferences = json.dumps(raw_data['attributes']['preferences'])
if raw_data['roles']:
for role in Role.query.filter(Role.id.in_(
list(map(
lambda r: r['id'], raw_data['roles'])
))):
user.roles.append(role)
db.session.commit()
emit('USER_WS_CHANGED', {'data': user.id}, namespace='/' + str(user.id), broadcast=True)
return Result.success()
@token_required
@access_required
def delete(self, user_id):
user = User.query.options(joinedload('roles')).filter_by(id=user_id).first()
user.roles = []
db.session.delete(user)
db.session.commit()
return Result.success()
class Session(API):
def post(self):
auth = request.authorization
if not auth or not auth.username or not auth.password:
return Result.error('Could not verify')
user = User.query.filter_by(email=auth.username.lower()).first()
if not user:
return Result.error('Could not verify')
if user.password_correct(auth.password):
session['logged_in'] = True
session['user_email'] = user.email
return user_to_dict(user)
return Result.error('Could not verify')
def delete(self):
if 'logged_in' in session:
session.pop('logged_in')
session.pop('user_email')
return {}
return Result.error('no session', 401)
class Roles(API):
@token_required
@access_required
def post(self):
role = request.get_json()
if not role:
return Result.error('name is required')
current = Role.query.filter_by(name=role).count()
if current > 0:
return Result.error('name already in used')
role = Role(name=role.title())
db.session.add(role)
db.session.commit()
return {
'id': role.id,
'name': role.name,
'permissions': role.permissions
}
@token_required
@access_required
def get(self):
roles = Role.query.all()
data = []
for role in roles:
data.append({
'id': role.id,
'name': role.name,
'permissions': role.get_permissions
})
return data
@token_required
@access_required
def put(self):
data = request.get_json()
role = Role.query.filter_by(id=data['id']).first()
role.permissions = json.dumps(data['permissions'])
db.session.commit()
emit('ROLE_WS_CHANGED', {'data': role.name}, namespace='/' + role.name, broadcast=True)
return Result.success()
@token_required
@access_required
def delete(self):
role_id = request.get_json()
try:
Role.query.filter_by(id=role_id).delete()
db.session.commit()
except sqlalchemy.exc.IntegrityError as e:
return Result.error('integrity constraint', 409)
return Result.success()
class Permissions(API):
@token_required
def get(self):
return permissions
class UserTokens(API):
def get(self, user_token):
jwt = UserToken.query.filter_by(token=user_token).first()
time = datetime.datetime.utcnow()
if jwt and jwt.expires > time:
return Result.custom({'isValid': True}, 200)
return Result.custom({'isValid': False}, 400)
class Activate(API):
def post(self):
data = request.get_json()
ut = UserToken.query.filter_by(token=data['token']).first()
if not ut or ut.expires <= datetime.datetime.utcnow():
raise HttpException('Invalid token')
if ut.target != request.base_url:
raise HttpException('Invalid target')
parsed = r'^(?=.*\d)(?=.*[a-zA-Z])(?=.*[!@#$%^&*(),.?":{}|<>])'
if len(data['pw']) < 6 or not re.match(parsed, data['pw']) or data['pw'] != data['pw2']:
raise HttpException('Invalid password')
user = ut.user
user.password = data['pw']
user.hash_password()
ut.expires = datetime.datetime.utcnow()
db.session.commit()
return Result.success()
class UserPasswords(API):
def put(self):
data = request.get_json()
if data is None or 'email' not in data:
raise HttpException('Missing email')
user = User.query.filter_by(email=data['email']).first()
if user is not None:
send_user_token_email(user, 'Actualiza tu contraseña', 'email/change_password.html')
# for security reasons, even if user does not exist, we return a success call
return Result.success()
def send_user_token_email(user: User, mail_subject, template):
ut = UserToken(target=request.host_url.rstrip('/') + url_for('user_activate_url'))
ut.new_token(user.email)
user.tokens.append(ut)
db.session.commit()
msg = Message(mail_subject, recipients=[user.email])
msg.html = render_template(
template,
name=user.first_name,
url=request.host_url,
token='account/activate/' + ut.token
)
current_app.mail(msg)
class Messages(API):
@token_required
def get(self):
total_unread = UserMessage.query.filter_by(user_id=request.user.id, read=False).count()
page = request.args.get('page', 1)
paginator = Paginator(
UserMessage.query.filter_by(user_id=request.user.id),
int(page),
request.args.get('orderBy', 'date'),
request.args.get('orderDir', 'desc')
)
total_pages = paginator.total_pages
return Result.custom(
{'list': paginator.get_items(), 'page': page, 'total_pages': total_pages, 'total_unread': total_unread}
)
@system_call
def post(self):
send_message(**request.json)
return Result.success()
def put(self, message_id):
message = UserMessage.query.filter_by(id=message_id).first()
message.read = True
db.session.commit()
return Result.success()
class Audit(API):
@token_required
@access_required
def get(self):
pass
def get_user_attr(user: User):
return {
'preferences': json.loads(
user.attributes.user_preferences
) if hasattr(user.attributes, 'user_preferences') else {},
'access': json.loads(
user.attributes.user_access
) if hasattr(user.attributes, 'user_access') else {}
}
def user_to_dict(user: User) -> dict:
return {
'user': {
'email': user.email,
'id': user.id,
'first_name': user.first_name,
'last_name': user.last_name,
'roles':
list(map(lambda r: {
'name': r.name,
'id': r.id,
'permissions': r.get_permissions
}, user.roles)),
'attributes': get_user_attr(user)
},
'token': user.get_token()
}
```
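`Activate.post` only accepts a password that is at least six characters long, matches its confirmation and passes the look-ahead pattern requiring a digit, a letter and a special character. A standalone check of that rule; the helper below simply mirrors the view's condition and is not part of the codebase:

```python
import re

PASSWORD_PATTERN = r'^(?=.*\d)(?=.*[a-zA-Z])(?=.*[!@#$%^&*(),.?":{}|<>])'


def password_acceptable(pw: str, pw2: str) -> bool:
    """Mirror of the checks in Activate.post: length, character classes, confirmation."""
    return len(pw) >= 6 and re.match(PASSWORD_PATTERN, pw) is not None and pw == pw2


assert password_acceptable('abc123!', 'abc123!')      # digit, letter and special character
assert not password_acceptable('abcdefg', 'abcdefg')  # no digit or special character
assert not password_acceptable('abc123!', 'abc124!')  # confirmation mismatch
```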
|
{
"source": "Jcuperus/code_assessment",
"score": 2
}
|
#### File: feedback_app/assessment/helpers.py
```python
import subprocess
from django.core.files.uploadedfile import TemporaryUploadedFile
from .wrappers import CliToolWrapper
from feedback_app.models import Assessment, SourceFile
def get_tmp_file_paths(files):
"""Returns a list of paths to temporarily stored files
Arguments:
        files {List<TemporaryUploadedFile>} -- List containing uploaded files
Returns:
List<str> -- List of paths to given files
"""
tmp_file_paths = []
for file in files:
if isinstance(file, TemporaryUploadedFile):
tmp_file_paths.append(file.temporary_file_path())
return tmp_file_paths
def assess(files):
"""Runs an assessment for all provided files for all relevant testing tools
Arguments:
files {List<SourceFile>} -- Collection of assessed source files
Returns:
Assessment -- Assessment object containing all files and their respective errors
"""
testing_tools = [CliToolWrapper.factory('phpcs'), CliToolWrapper.factory('phpmd')]
assessment = Assessment.objects.create()
for file in files:
source_file = SourceFile.objects.create(assessment=assessment, name=file)
for tool in testing_tools:
tool.assess(source_file)
return assessment
```
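In a Django view these helpers would typically be chained on the uploaded files. The sketch below is hypothetical: the form field name, URL wiring and response shape are assumptions, not the project's actual view code:

```python
# Hypothetical Django view; 'source_files' is an assumed form field name.
from django.http import JsonResponse

from feedback_app.assessment.helpers import assess


def upload_view(request):
    files = request.FILES.getlist('source_files')  # TemporaryUploadedFile instances
    assessment = assess(files)                     # runs phpcs and phpmd against each file
    return JsonResponse({'assessment_id': assessment.pk})
```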
|
{
"source": "jcupitt/argh-steroids",
"score": 3
}
|
#### File: jcupitt/argh-steroids/alien.py
```python
import os
import math
import random
import pygame
from pygame import mixer
import util
import sprite
import bullet
import ship
class Alien(sprite.Sprite):
def __init__(self, world):
super(Alien, self).__init__(world)
self.points = [[1, 0], [-1, 0], [-0.7, 0],
[-0.5, 0.2], [0.5, 0.2],
[0.7, 0],
[0.5, -0.4], [-0.5, -0.4],
[-0.7, 0]]
self.direction = random.randint(1, 2) * 2 - 3
self.position = [world.width / 2 - self.direction * world.width / 2,
random.randint(0, world.height)]
self.angle = 0
self.scale = 10
self.direction_timer = random.randint(10, 50)
self.random_velocity()
self.alien_sound = mixer.Sound(os.path.join("sounds", "alien_engine.ogg"))
self.alien_channel = pygame.mixer.Channel(3)
self.alien_channel.play(self.alien_sound, loops = -1)
def random_velocity(self):
self.velocity = [self.direction * (random.random() * 2 + 1),
random.random() * 6 - 3]
def update(self):
self.direction_timer -= 1
if self.direction_timer < 0:
self.direction_timer = random.randint(10, 50)
self.random_velocity()
if self.angle > 0:
self.angle -= 1
elif self.angle < 0:
self.angle += 1
if self.direction == 1 and self.position[0] > self.world.width - 10:
self.kill = True
elif self.direction == -1 and self.position[0] < 10:
self.kill = True
if self.kill:
self.alien_channel.fadeout(500)
super(Alien, self).update()
def impact(self, other):
self.angle = random.randint(-90, 90)
super(Alien, self).impact(other)
```
|
{
"source": "jcupitt/pillow-perf",
"score": 2
}
|
#### File: testsuite/cases/cv2.py
```python
from __future__ import print_function, unicode_literals, absolute_import
import cv2
from .base import BaseTestCase, root
try:
cv2.setNumThreads(1)
except AttributeError:
print('!!! You are using OpenCV which does not allow you to set '
'the number of threads')
class Cv2TestCase(BaseTestCase):
filter_ids = {
cv2.INTER_AREA: 'sup',
cv2.INTER_NEAREST: 'ner',
cv2.INTER_LINEAR: 'bil',
cv2.INTER_CUBIC: 'bic',
cv2.INTER_LANCZOS4: 'lzs4',
}
def create_test_data(self):
im = cv2.imread(root('resources', 'color_circle.png'),
flags=cv2.IMREAD_UNCHANGED)
if self.mode == 'RGB':
im = im[:, :, :3]
elif self.mode == 'RGBA':
pass
elif self.mode == 'L':
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
else:
raise ValueError('Unknown mode: {}'.format(self.mode))
# Fine for upscaling
im = cv2.resize(im, tuple(self.size), interpolation=cv2.INTER_CUBIC)
return [im]
```
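The `INTER_CUBIC` call in `create_test_data` is fine because the colour circle is being enlarged; for shrinking, OpenCV recommends `INTER_AREA`, which is what the `'sup'` entry in `filter_ids` refers to. A quick illustration with a placeholder image path:

```python
import cv2

im = cv2.imread('color_circle.png', flags=cv2.IMREAD_UNCHANGED)        # placeholder path
bigger = cv2.resize(im, (2048, 2048), interpolation=cv2.INTER_CUBIC)   # upscaling
smaller = cv2.resize(im, (128, 128), interpolation=cv2.INTER_AREA)     # downscaling
```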
|
{
"source": "jcuquemelle/tf-yarn",
"score": 2
}
|
#### File: tf-yarn/tests/test_env.py
```python
import pytest
from tf_yarn._env import (
gen_pyenv_from_existing_archive,
CONDA_CMD, CONDA_ENV_NAME
)
test_data = [
("/path/to/myenv.pex",
"./myenv.pex",
"myenv.pex"),
("/path/to/myenv.zip",
f"{CONDA_CMD}",
CONDA_ENV_NAME)
]
@pytest.mark.parametrize(
"path_to_archive,expected_cmd, expected_dest_path",
test_data)
def test_gen_pyenvs_from_existing_env(path_to_archive, expected_cmd,
expected_dest_path):
result = gen_pyenv_from_existing_archive(path_to_archive)
assert result.path_to_archive == path_to_archive
assert result.dispatch_task_cmd == expected_cmd
assert result.dest_path == expected_dest_path
def test_gen_pyenvs_from_unknown_format():
with pytest.raises(ValueError):
gen_pyenv_from_existing_archive("/path/to/pack.tar.bz2")
```
#### File: tf-yarn/tests/test_evaluator_task.py
```python
import os
from unittest import mock
from unittest.mock import ANY
import pytest
import tensorflow as tf
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow_estimator.python.estimator.training import EvalSpec
from tf_yarn.experiment import Experiment
from tf_yarn.tasks import evaluator_task
from tf_yarn.tasks.evaluator_task import _get_step
checkpoints = {
"/path/to/model/dir/model.ckpt-0",
"/path/to/model/dir/model.ckpt-100",
"/path/to/model/dir/model.ckpt-200",
"/path/to/model/dir/model.ckpt-300"
}
@pytest.mark.parametrize("evaluated_ckpts,ckpt_to_export", [
({"/path/to/model/dir/model.ckpt-0", "/path/to/model/dir/model.ckpt-100",
"/path/to/model/dir/model.ckpt-200"}, {"/path/to/model/dir/model.ckpt-300"}),
(set(), {"/path/to/model/dir/model.ckpt-0", "/path/to/model/dir/model.ckpt-100",
"/path/to/model/dir/model.ckpt-200", "/path/to/model/dir/model.ckpt-300"}),
({"/path/to/model/dir/model.ckpt-0", "/path/to/model/dir/model.ckpt-100",
"/path/to/model/dir/model.ckpt-200", "/path/to/model/dir/model.ckpt-300"}, {})
])
def test_evaluate(evaluated_ckpts, ckpt_to_export):
with mock.patch('tf_yarn._task_commons._get_experiment') as experiment_mock, \
mock.patch('tf_yarn.tasks.evaluator_task._get_evaluated_checkpoint') \
as _get_evaluated_checkpoint, \
mock.patch('tf_yarn.tasks.evaluator_task._get_all_checkpoints') \
as _get_checkpoints, \
mock.patch('tf_yarn.tasks.evaluator_task.tf.io.gfile.exists') as exists_mock, \
mock.patch('tf_yarn.tasks.evaluator_task.tf.io.gfile.listdir') as listdir_mock:
exists_mock.side_effect = lambda *args, **kwargs: True
listdir_mock.side_effect = lambda *args, **kwargs: evaluated_ckpts
mock_exporter = mock.Mock(spec=tf.estimator.Exporter)
mock_exporter.name = "my_best_exporter"
mock_experiment = mock.Mock(spec=Experiment)
mock_experiment.eval_spec = EvalSpec(
mock.Mock(),
exporters=mock_exporter,
start_delay_secs=0,
throttle_secs=0
)
mock_experiment.estimator.evaluate.side_effect = \
lambda *args, **kwargs: {tf.compat.v1.GraphKeys.GLOBAL_STEP: 300}
mock_experiment.estimator.model_dir = "model_dir"
mock_experiment.train_spec.max_steps = 300
experiment_mock.side_effect = lambda client: mock_experiment
_get_evaluated_checkpoint.side_effect = lambda eval_dir: set(
[_get_step(ckpt) for ckpt in evaluated_ckpts]
)
_get_checkpoints.side_effect = lambda model_dir: list(checkpoints)
evaluator_task.evaluate(mock_experiment)
assert len(mock_exporter.export.call_args_list) == len(ckpt_to_export)
assert len(mock_experiment.estimator.evaluate.call_args_list) == len(ckpt_to_export)
export_path = os.path.join(mock_experiment.estimator.model_dir, mock_exporter.name)
if len(ckpt_to_export) > 0:
for ckpt in ckpt_to_export:
mock_exporter.export.assert_any_call(ANY, export_path, ckpt, ANY, ANY)
mock_experiment.estimator.evaluate(
ANY, steps=ANY, hooks=ANY, name=ANY, checkpoint_path=ckpt
)
@pytest.mark.parametrize("checkpoint_state,checkpoints", [
(CheckpointState(all_model_checkpoint_paths=["/path/to/model/dir/model.ckpt-300"]),
["/path/to/model/dir/model.ckpt-300"]),
(None, []),
])
def test__get_all_checkpoints(checkpoint_state, checkpoints):
with mock.patch("tf_yarn.tasks.evaluator_task.tf.train.get_checkpoint_state"
) as get_checkpoint_state_mock:
get_checkpoint_state_mock.side_effect = lambda *args, **kwargs: checkpoint_state
assert evaluator_task._get_all_checkpoints("dir") == checkpoints
```
#### File: tf-yarn/tf_yarn/_env.py
```python
import os
from typing import (
Optional,
NamedTuple
)
import cluster_pack
from tf_yarn import topologies
class PythonEnvDescription(NamedTuple):
path_to_archive: str
dispatch_task_cmd: str
dest_path: str
INDEPENDENT_WORKERS_MODULE = "tf_yarn.tasks._independent_workers_task"
TENSORBOARD_MODULE = "tf_yarn.tasks._tensorboard_task"
CONDA_ENV_NAME = "pyenv"
CONDA_CMD = f"{CONDA_ENV_NAME}/bin/python"
def gen_pyenv_from_existing_archive(path_to_archive: str) -> PythonEnvDescription:
archive_filename = os.path.basename(path_to_archive)
packer = cluster_pack.detect_packer_from_file(path_to_archive)
if packer == cluster_pack.PEX_PACKER:
return PythonEnvDescription(
path_to_archive,
f"./{archive_filename}",
archive_filename)
elif packer == cluster_pack.CONDA_PACKER:
return PythonEnvDescription(
path_to_archive,
f"{CONDA_CMD}", CONDA_ENV_NAME)
else:
raise ValueError("Archive format unsupported. Must be .pex or conda .zip")
def gen_task_cmd(pyenv: PythonEnvDescription,
task_type: str,
custom_task_module: Optional[str]) -> str:
if task_type == "tensorboard":
containers_module = TENSORBOARD_MODULE
elif task_type in topologies.ALL_TASK_TYPES:
if custom_task_module:
containers_module = custom_task_module
else:
containers_module = INDEPENDENT_WORKERS_MODULE
else:
raise ValueError(f"Invalid task type: {task_type}")
return f"{pyenv.dispatch_task_cmd} -m {containers_module} "
```
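Chaining the two helpers shows how the dispatch command is built: a `.pex` archive runs itself, while a conda `.zip` resolves to the unpacked environment's interpreter, as covered by `tests/test_env.py` above. An illustration for the pex case (it assumes `cluster_pack` can resolve the archive format from the path alone):

```python
pyenv = gen_pyenv_from_existing_archive("/path/to/myenv.pex")
print(pyenv.dispatch_task_cmd)                   # "./myenv.pex"
print(gen_task_cmd(pyenv, "tensorboard", None))  # "./myenv.pex -m tf_yarn.tasks._tensorboard_task "
```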
|
{
"source": "jcurfman/Python-Practice",
"score": 4
}
|
#### File: jcurfman/Python-Practice/TurnBasedBattle.py
```python
class Turn_Battle():
#Start up
def __init__(self):
print("You've been drawn into battle!")
choice=int(input("Would you like to fight? '1':Yes, '2':No-> "))
if choice==1:
self.battlecore(30,14,3,30,14,3)
elif choice==2:
print("You ran away!")
exit()
elif choice==3:
print("Opening developer mode.")
self.stat_entry()
else:
print("What?")
self.__init__()
#Value Entry
def stat_entry(self):
print("Welcome to developer mode. Please enter stats for this battle.")
pc_HP_max=int(input("Player character's max HP: "))
cpu_HP_max=int(input("Computer character's max HP: "))
pc_MP_max=int(input("Player character's max MP: "))
cpu_MP_max=int(input("Computer character's max MP: "))
pc_attack=int(input("Player character's base attack: "))
cpu_attack=int(input("Computer character's base attack: "))
print("Starting battle.")
self.battlecore(pc_HP_max,pc_MP_max,pc_attack,cpu_HP_max,cpu_MP_max,cpu_attack)
#Core battle system
def battlecore(self,pc_HP_max,pc_MP_max,pc_attack,cpu_HP_max,cpu_MP_max,cpu_attack):
pc_HP=pc_HP_max
pc_MP=pc_MP_max
cpu_HP=cpu_HP_max
cpu_MP=cpu_MP_max
while pc_HP>0 and cpu_HP>0:
self.graphics(pc_HP,pc_MP,pc_attack,cpu_HP,cpu_MP,cpu_attack)
print("1:Attack 2:Magic 3:Item")
choice=int(input("-> "))
if choice==1:
cpu_HP-=pc_attack
print("You hit the monster!")
elif choice==2:
print("1:Fira (2MP) 2:Cura (4MP)")
m_choice=int(input("-> "))
if m_choice==1:
if pc_MP>=2:
print("You've used Fira!")
pc_MP-=2
cpu_HP-=(2*pc_attack)
else:
print("You don't have enough MP.")
elif m_choice==2:
if pc_MP>=4:
print("You've used Cura.")
pc_MP-=4
pc_HP+=10
if pc_HP>pc_HP_max:
pc_HP=pc_HP_max
else:
print("You don't have enough MP.")
else:
"What?"
elif choice==3:
print("1:Potion")
i_choice=int(input("-> "))
if i_choice==1:
print("You used a potion.")
pc_HP+=4
else:
print("What?")
else:
print("What?")
if cpu_HP>0:
                notice=int(input("It is now the enemy's turn. Please press 1."))
from random import randint
cpu_choice=randint(1,4)
print(cpu_choice)
if cpu_choice<4:
print("The monster hit you!")
pc_HP-=cpu_attack
elif cpu_choice==4:
cpu_choice_m=randint(1,3)
if cpu_choice_m<=2:
if cpu_MP>=2:
print("The monster used Fira! ")
cpu_MP-=2
pc_HP-=(2*cpu_attack)
elif cpu_choice_m==3:
if cpu_MP>=4:
print("The monster used Cura.")
cpu_MP-=4
cpu_HP+=10
if cpu_HP>cpu_HP_max:
cpu_HP=cpu_HP_max
if cpu_HP==0:
print("You've defeated the monster!")
elif pc_HP==0:
print("You have been defeated.")
#Core graphic system
def graphics(self,pc_HP,pc_MP,pc_attack,cpu_HP,cpu_MP,cpu_attack):
print("Monster:")
cpu_healthbar="".join(["|"]*cpu_HP)
print("HP: "+cpu_healthbar+" "+str(cpu_HP))
cpu_manabar="".join(["|"]*cpu_MP)
#print("MP: "+cpu_manabar+" "+str(cpu_MP))
#print("Base Attack: "+str(cpu_attack)) Don't print these- player doesn't need to know monster stats
print("\n /-/--\ ")
print(" (@~@) )/\ ")
print(" ___/-- \ |")
print(" (oo)__ _ )_/")
print(" ^^___/ \ ")
print(" \ |/-\ ")
print(" ( ) |")
print(" | \_/ ")
print("\n\nPlayer Character Stats:")
pc_healthbar="".join(["|"]*pc_HP)
pc_manabar="".join(["|"]*pc_MP)
print("HP: "+pc_healthbar+" "+str(pc_HP))
print("MP: "+pc_manabar+" "+str(pc_MP))
print("Base attack: "+str(pc_attack))
game = Turn_Battle()
```
|
{
"source": "J-Curwell/golf-tracker",
"score": 3
}
|
#### File: src/golf_tracker/main.py
```python
import argparse
import os
from golf_tracker.tracker import Shot, Hole, Round
class EndRound(Exception):
pass
# TODO:
# - Dont ask about distance for driver/tee shots?
# - Change connection for putting to be putter-specific?
def validated_input(input_name: str):
# Used instead of the built in input() function
user_input = input(input_name)
if user_input not in ('exit', 'quit', 'q'):
return user_input
else:
raise EndRound()
def play_golf(save_path: str = None):
if not save_path:
# Default path is in the root of the repo, in a 'saved_rounds/' directory
current_dir = os.path.abspath(os.path.dirname(__file__))
repo_root = current_dir.split('/src/golf_tracker')[0]
save_path = f'{repo_root}/saved_rounds/'
print("Lets play golf!\n\n"
"Type 'exit', 'quit' or 'q' at any time to end the round.\n\n"
"What golf course are you playing?\n")
try:
course_name = validated_input('Course: ')
except EndRound:
print('\n0 holes played.')
print(f"\nSaving round in {save_path}...\n")
round_to_save = Round(holes=[])
round_to_save.save_round(save_path)
print(f"Round saved.")
return round_to_save
holes = []
hole_number = 1
shots = []
shot_number = 1
try:
# Record each hole until the user finishes their round
while True:
print(f"\nHole {hole_number}:\n"
f"-------\n"
f"\nShot {shot_number}:\n")
shot = record_shot(shot_number)
shots.append(shot)
shot_number += 1
if shot.holed is True:
hole = Hole(hole_number=hole_number, shots=shots)
holes.append(hole)
shots = []
shot_number = 1
if hole_number % 9 == 0:
print(f'\n----------------------------'
f'\n{hole_number} holes played, continue?:'
f'\n----------------------------\n')
user_input = {'y': True, 'n': False}[validated_input('Continue? y/n: ')]
if user_input is False:
raise EndRound
if hole_number == 18:
raise EndRound
hole_number += 1
except EndRound:
number_of_holes = len(holes)
print(f'\n{number_of_holes} holes played.')
print(f'\nSaving round in {save_path}...\n')
round_to_save = Round(holes=holes, course=course_name)
round_to_save.save_round(save_path)
print('Round saved.')
return round_to_save
def record_shot(shot_number):
print("What club did you use?")
club = validated_input('Club: ')
print("\nDid it go in the hole?!")
holed = {'y': True, 'n': False}[validated_input('Holed? y/n: ')]
if holed is False:
print("\nHow was the distance?")
distance = validated_input('Long, Short or Correct?: ')
print("\nHow was the direction?")
direction = validated_input('Left, Right or Centre?: ')
else:
if shot_number == 1:
print('\nHole in One!')
print('\nNailed it.')
distance = 'correct'
direction = 'correct'
print('\nHow was the connection?')
connection = validated_input('Pure, Fat, Thin, Toe or Heel?: ')
flight = None
if club.lower() != 'putter':
print('\nHow was the flight?')
flight = validated_input('Hook, Draw, Slice, Fade or Straight?: ')
return Shot(club=club,
distance=distance,
direction=direction,
connection=connection,
flight=flight,
holed=holed)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--save-dir')
args = parser.parse_args()
_round = play_golf(save_path=args.save_dir)
```
|
{
"source": "J-Curwell/portfolio-manager",
"score": 3
}
|
#### File: portfolio-manager/tests/test_return_calculators.py
```python
import unittest
from datetime import datetime
from unittest.mock import MagicMock
from numpy_financial import irr
from portfolio_manager.exceptions import InsufficientData
from portfolio_manager.portfolio import InvestmentPortfolio
from portfolio_manager.return_calculators import (TimeWeightedReturnCalculator,
ReturnCalculator,
SimpleReturnCalculator,
MoneyWeightedReturnCalculator)
class ReturnCalculatorsTests(unittest.TestCase):
def setUp(self) -> None:
self.test_portfolio = InvestmentPortfolio(name="test_portfolio")
def test_calculate_annualised_return(self):
# Test a portfolio age of < 1
mock_rc = MagicMock()
mock_rc._get_portfolio_age.return_value = 0.9
actual = ReturnCalculator.calculate_annualised_return(mock_rc, 'test', 13)
mock_rc._get_portfolio_age.assert_called_once_with('test')
expected = 14.54
self.assertEqual(expected, actual)
# Test a portfolio age of 1
mock_rc = MagicMock()
mock_rc._get_portfolio_age.return_value = 1
actual = ReturnCalculator.calculate_annualised_return(mock_rc, 'test', -11)
mock_rc._get_portfolio_age.assert_called_once_with('test')
expected = -11
self.assertEqual(expected, actual)
# Test a portfolio age of > 1
mock_rc = MagicMock()
mock_rc._get_portfolio_age.return_value = 2.5
actual = ReturnCalculator.calculate_annualised_return(mock_rc, 'test', 120)
mock_rc._get_portfolio_age.assert_called_once_with('test')
expected = 37.08
self.assertEqual(expected, actual)
def test_get_portfolio_age_shorter(self):
# Test a portfolio of age < 1 year
test_data = [
{
'date': datetime(2020, 7, 1)
},
{
'date': datetime(2021, 1, 1),
}
]
self.test_portfolio.portfolio_history = test_data
actual = ReturnCalculator._get_portfolio_age(self.test_portfolio)
self.assertEqual(0.5, round(actual, 2))
def test_get_portfolio_age_medium(self):
# Test a portfolio of age 1 year
test_data = [
{
'date': datetime(2020, 7, 1)
},
{
'date': datetime(2021, 1, 1),
},
{
'date': datetime(2021, 7, 1)
}
]
self.test_portfolio.portfolio_history = test_data
actual = ReturnCalculator._get_portfolio_age(self.test_portfolio)
self.assertEqual(1, round(actual, 2))
def test_get_portfolio_age_longer(self):
# Test a portfolio of age > 1 year
test_data = [
{
'date': datetime(2020, 7, 1)
},
{
'date': datetime(2021, 1, 1),
},
{
'date': datetime(2021, 1, 1),
},
{
'date': datetime(2022, 9, 1)
}
]
self.test_portfolio.portfolio_history = test_data
actual = ReturnCalculator._get_portfolio_age(self.test_portfolio)
self.assertEqual(2.17, round(actual, 2))
class SimpleReturnCalculatorTests(unittest.TestCase):
def setUp(self) -> None:
self.test_portfolio = InvestmentPortfolio(name="test_portfolio")
self.simple_return_calculator = SimpleReturnCalculator()
def test_calculate_return_no_portfolio_data(self):
# Test that one or less transaction results in an error
with self.assertRaises(InsufficientData):
self.simple_return_calculator.calculate_return(self.test_portfolio,
annualised=False)
def test_calculate_return_negative_deposited(self):
# This is an example of how a real portfolio could have a negative total
# deposited value
test_data = [
{
# Deposit £100
'date': datetime(2021, 1, 1),
'total_deposited': 100,
'current_portfolio_value': 100,
'transaction_type': 'deposit'
},
{
# Value increases to £110
'date': datetime(2021, 1, 2),
'total_deposited': 100,
'current_portfolio_value': 110,
'transaction_type': 'update_portfolio_value'
},
{
# Withdraw £105, leaving £5 in the portfolio with total deposited = -£5
'date': datetime(2021, 1, 2),
'total_deposited': -5,
'current_portfolio_value': 5,
'transaction_type': 'update_portfolio_value'
}
]
# Test that negative total deposited raises an error
self.test_portfolio.portfolio_history = test_data
self.test_portfolio.total_deposited = -5
with self.assertRaises(ValueError):
self.simple_return_calculator.calculate_return(self.test_portfolio,
annualised=False)
def test_calculate_return_zero_deposited(self):
# This is an example of how a real portfolio could have a total deposited value
# of zero
test_data = [
{
# Deposit £100
'date': datetime(2021, 1, 1),
'total_deposited': 100,
'current_portfolio_value': 100,
'transaction_type': 'deposit'
},
{
# Value increases to £110
'date': datetime(2021, 1, 2),
'total_deposited': 100,
'current_portfolio_value': 110,
'transaction_type': 'update_portfolio_value'
},
{
# Withdraw £100, leaving £10 in the portfolio with total deposited = £0
'date': datetime(2021, 1, 2),
'total_deposited': -5,
'current_portfolio_value': 5,
'transaction_type': 'update_portfolio_value'
}
]
# Test that zero total deposited raises an error
self.test_portfolio.portfolio_history = test_data
self.test_portfolio.total_deposited = 0
with self.assertRaises(ValueError):
self.simple_return_calculator.calculate_return(self.test_portfolio,
annualised=False)
def test_calculate_return_positive_return(self):
# Ensure the portfolio has non-empty history to prevent an error
test_data = ["I'm", "not", "empty"]
self.test_portfolio.portfolio_history = test_data
# Test 10% return
self.test_portfolio.total_deposited = 100
self.test_portfolio.current_portfolio_value = 110
actual = self.simple_return_calculator.calculate_return(self.test_portfolio,
annualised=False)
self.assertEqual(10, actual)
# Test 110% return
self.test_portfolio.total_deposited = 40
self.test_portfolio.current_portfolio_value = 84
actual = self.simple_return_calculator.calculate_return(self.test_portfolio,
annualised=False)
self.assertEqual(110, actual)
def test_calculate_return_negative_return(self):
# Ensure the portfolio has non-empty history to prevent an error
test_data = ["I'm", "not", "empty"]
self.test_portfolio.portfolio_history = test_data
# Test -10% return
self.test_portfolio.total_deposited = 100
self.test_portfolio.current_portfolio_value = 90
actual = self.simple_return_calculator.calculate_return(self.test_portfolio,
annualised=False)
self.assertEqual(-10, actual)
# Test -35% return
self.test_portfolio.total_deposited = 40
self.test_portfolio.current_portfolio_value = 26
actual = self.simple_return_calculator.calculate_return(self.test_portfolio,
annualised=False)
self.assertEqual(-35, actual)
# Test -100% return
self.test_portfolio.total_deposited = 40
self.test_portfolio.current_portfolio_value = 0
actual = self.simple_return_calculator.calculate_return(self.test_portfolio,
annualised=False)
self.assertEqual(-100, actual)
class TimeWeightedReturnCalculatorTests(unittest.TestCase):
def setUp(self) -> None:
self.test_portfolio = InvestmentPortfolio(name="test_portfolio")
self.twr_calculator = TimeWeightedReturnCalculator()
def test_calculate_return_no_data(self):
# Test that one or less transaction results in an error
with self.assertRaises(InsufficientData):
self.twr_calculator.calculate_return(self.test_portfolio, annualised=False)
def test_calculate_return_single_period(self):
# Test that a single sub-period reduces to the simple rate of return
test_data = [
{
'date': datetime(2021, 1, 1),
'total_deposited': 100,
'current_portfolio_value': 100,
'transaction_type': 'deposit'
},
{
'date': datetime(2021, 1, 2),
'total_deposited': 100,
'current_portfolio_value': 110,
'transaction_type': 'update_portfolio_value'
}
]
self.test_portfolio.portfolio_history = test_data
actual_output = self.twr_calculator.calculate_return(self.test_portfolio,
annualised=False)
expected_output = 10
self.assertEqual(actual_output, expected_output)
def test_calculate_return_multi_deposits(self):
# Test a multi-period with deposits only
test_data = [
{
'date': datetime(2021, 1, 1),
'total_deposited': 100,
'current_portfolio_value': 100,
'transaction_type': 'deposit'
},
{
'date': datetime(2021, 1, 2),
'total_deposited': 100,
'current_portfolio_value': 110,
'transaction_type': 'update_portfolio_value'
},
{
'date': datetime(2021, 1, 3),
'total_deposited': 200,
'current_portfolio_value': 210,
'transaction_type': 'deposit'
},
{
'date': datetime(2021, 1, 4),
'total_deposited': 200,
'current_portfolio_value': 215,
'transaction_type': 'update_portfolio_value'
},
{
'date': datetime(2021, 1, 5),
'total_deposited': 250,
'current_portfolio_value': 265,
'transaction_type': 'deposit'
},
{
'date': datetime(2021, 1, 6),
'total_deposited': 250,
'current_portfolio_value': 280,
'transaction_type': 'update_portfolio_value'
}
]
self.test_portfolio.portfolio_history = test_data
actual_output = self.twr_calculator.calculate_return(self.test_portfolio,
annualised=False)
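        # Expected value derivation (assuming the calculator chains the growth
        # factor of each sub-period between external cash flows, i.e. the
        # standard time-weighted return):
        # (110/100) * (215/210) * (280/265) - 1 ≈ 0.1899, i.e. 18.99%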
expected_output = 18.99
self.assertEqual(actual_output, expected_output)
def test_calculate_return_mixed_periods(self):
# Test a multi-period including deposits and withdrawals
test_data = [
{
'date': datetime(2021, 1, 1),
'total_deposited': 100,
'current_portfolio_value': 100,
'transaction_type': 'deposit'
},
{
'date': datetime(2021, 1, 2),
'total_deposited': 100,
'current_portfolio_value': 110,
'transaction_type': 'update_portfolio_value'
},
{
'date': datetime(2021, 1, 3),
'total_deposited': 50,
'current_portfolio_value': 60,
'transaction_type': 'withdrawal'
},
{
'date': datetime(2021, 1, 4),
'total_deposited': 50,
'current_portfolio_value': 54,
'transaction_type': 'update_portfolio_value'
},
{
'date': datetime(2021, 1, 5),
'total_deposited': 100,
'current_portfolio_value': 104,
'transaction_type': 'deposit'
},
{
'date': datetime(2021, 1, 6),
'total_deposited': 100,
'current_portfolio_value': 119.6,
'transaction_type': 'update_portfolio_value'
}
]
self.test_portfolio.portfolio_history = test_data
actual_output = self.twr_calculator.calculate_return(self.test_portfolio,
annualised=False)
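        # Expected value derivation: sub-period growth factors are
        # (110/100) * (54/60) * (119.6/104) = 1.10 * 0.90 * 1.15 = 1.1385,
        # i.e. a 13.85% time-weighted return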
expected_output = 13.85
self.assertEqual(actual_output, expected_output)
def test_calculate_return_negative_return(self):
# Test a multi-period including deposits and negative return
test_data = [
{
'date': datetime(2021, 1, 1),
'total_deposited': 100,
'current_portfolio_value': 100,
'transaction_type': 'deposit'
},
{
'date': datetime(2021, 1, 2),
'total_deposited': 100,
'current_portfolio_value': 80,
'transaction_type': 'update_portfolio_value'
},
{
'date': datetime(2021, 1, 3),
'total_deposited': 50,
'current_portfolio_value': 30,
'transaction_type': 'withdrawal'
},
{
'date': datetime(2021, 1, 4),
'total_deposited': 50,
'current_portfolio_value': 27,
'transaction_type': 'update_portfolio_value'
},
{
'date': datetime(2021, 1, 5),
'total_deposited': 100,
'current_portfolio_value': 77,
'transaction_type': 'deposit'
},
{
'date': datetime(2021, 1, 6),
'total_deposited': 100,
'current_portfolio_value': 78.54,
'transaction_type': 'update_portfolio_value'
}
]
self.test_portfolio.portfolio_history = test_data
actual_output = self.twr_calculator.calculate_return(self.test_portfolio,
annualised=False)
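        # Expected value derivation: (80/100) * (27/30) * (78.54/77)
        # = 0.80 * 0.90 * 1.02 = 0.7344, i.e. a -26.56% time-weighted return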
expected_output = -26.56
self.assertEqual(actual_output, expected_output)
class MoneyWeightedReturnCalculatorTests(unittest.TestCase):
def setUp(self) -> None:
self.test_portfolio = InvestmentPortfolio(name="test_portfolio")
self.mwr_calculator = MoneyWeightedReturnCalculator()
def test_calculate_return_no_data(self):
        # Test that a history with one or fewer transactions raises an error
with self.assertRaises(ValueError):
self.mwr_calculator.calculate_return(self.test_portfolio, annualised=False)
def test_calculate_return_single_period(self):
# Test that a single sub-period reduces to the simple rate of return
test_data = [
{
'date': datetime(2021, 1, 1),
'total_deposited': 100,
'current_portfolio_value': 100,
'transaction_type': 'deposit'
},
{
'date': datetime(2021, 1, 2),
'total_deposited': 100,
'current_portfolio_value': 110,
'transaction_type': 'update_portfolio_value'
}
]
self.test_portfolio.portfolio_history = test_data
actual_output = self.mwr_calculator.calculate_return(self.test_portfolio,
annualised=False)
expected_output = 10
self.assertEqual(actual_output, expected_output)
def test_calculate_return_deposits_and_withdrawals(self):
# Test a multi-period with deposits and withdrawals
test_data = [
{
'date': datetime(2019, 1, 1),
'total_deposited': 50,
'current_portfolio_value': 50,
'transaction_type': 'deposit'
},
{
'date': datetime(2020, 1, 1),
'total_deposited': 50,
'current_portfolio_value': 52,
'transaction_type': 'update_portfolio_value'
},
{
'date': datetime(2020, 1, 1),
'total_deposited': 48,
'current_portfolio_value': 50,
'transaction_type': 'withdrawal'
},
{
'date': datetime(2020, 1, 3),
'total_deposited': 48,
'current_portfolio_value': 67,
'transaction_type': 'update_portfolio_value'
},
{
'date': datetime(2021, 1, 4),
'total_deposited': 46,
'current_portfolio_value': 65,
'transaction_type': 'withdrawal'
}
]
self.test_portfolio.portfolio_history = test_data
actual_output = self.mwr_calculator.calculate_return(self.test_portfolio,
annualised=False)
# Example from: https://www.investopedia.com/terms/m/money-weighted-return.asp
expected_output = 11.73
self.assertEqual(actual_output, expected_output)
def test_calculate_return_multi_deposits(self):
# Test a multi-period portfolio with deposits only
test_data = [
{
'date': datetime(2020, 1, 1),
'total_deposited': 100,
'current_portfolio_value': 100,
'transaction_type': 'deposit'
},
{
'date': datetime(2020, 3, 1),
'total_deposited': 100,
'current_portfolio_value': 110,
'transaction_type': 'update_portfolio_value'
},
{
'date': datetime(2020, 6, 1),
'total_deposited': 100,
'current_portfolio_value': 150,
'transaction_type': 'update_portfolio_value'
},
{
'date': datetime(2020, 1, 3),
'total_deposited': 200,
'current_portfolio_value': 250,
'transaction_type': 'deposit'
},
{
'date': datetime(2021, 1, 4),
'total_deposited': 200,
'current_portfolio_value': 400,
'transaction_type': 'update_portfolio_value'
},
{
'date': datetime(2021, 1, 5),
'total_deposited': 300,
'current_portfolio_value': 500,
'transaction_type': 'deposit'
}
]
self.test_portfolio.portfolio_history = test_data
actual_output = self.mwr_calculator.calculate_return(self.test_portfolio,
annualised=False)
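        # The money-weighted return is the internal rate of return of the
        # investor's cash flows: the three deposits of 100 are outflows (-100)
        # and the final portfolio value of 500 is the terminal inflow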
expected_output = round(irr([-100, -100, -100, 500]) * 100, 2)
self.assertEqual(actual_output, expected_output)
def test_calculate_return_negative_return(self):
# Test a multi-period with negative return
test_data = [
{
'date': datetime(2020, 1, 1),
'total_deposited': 50,
'current_portfolio_value': 50,
'transaction_type': 'deposit'
},
{
'date': datetime(2020, 3, 1),
'total_deposited': 50,
'current_portfolio_value': 30,
'transaction_type': 'update_portfolio_value'
},
{
'date': datetime(2020, 6, 1),
'total_deposited': 100,
'current_portfolio_value': 80,
'transaction_type': 'deposit'
},
{
'date': datetime(2020, 1, 3),
'total_deposited': 100,
'current_portfolio_value': 30,
'transaction_type': 'update_portfolio_value'
},
{
'date': datetime(2021, 1, 4),
'total_deposited': 150,
'current_portfolio_value': 80,
'transaction_type': 'deposit'
}
]
self.test_portfolio.portfolio_history = test_data
actual_output = self.mwr_calculator.calculate_return(self.test_portfolio,
annualised=False)
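        # Cash flows for the IRR: three deposits of 50 as outflows and the
        # final portfolio value of 80 as the terminal inflow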
expected_output = round(irr([-50, -50, -50, 80]) * 100, 2)
self.assertEqual(actual_output, expected_output)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jcushman/capstone",
"score": 2
}
|
#### File: capapi/views/doc_views.py
```python
from django.conf import settings
from django.shortcuts import render
from capapi import serializers
from capdb import models
def home(request):
try:
case = models.CaseMetadata.objects.get(id=settings.API_DOCS_CASE_ID)
except models.CaseMetadata.DoesNotExist:
case = models.CaseMetadata.objects.filter(duplicative=False).first()
reporter = case.reporter
reporter_metadata = serializers.ReporterSerializer(reporter, context={'request': request}).data
case_metadata = serializers.CaseSerializer(case, context={'request': request}).data
whitelisted_jurisdictions = models.Jurisdiction.objects.filter(whitelisted=True).values('name_long', 'name')
return render(request, 'home.html', {
"hide_footer": True,
"case_metadata": case_metadata,
"case_id": case_metadata['id'],
"case_jurisdiction": case_metadata['jurisdiction'],
"reporter_id": reporter_metadata['id'],
"reporter_metadata": reporter_metadata,
"whitelisted_jurisdictions": whitelisted_jurisdictions,
})
```
#### File: capstone/scripts/fts_temp.py
```python
from django.contrib.postgres.search import SearchQuery, SearchQueryField
from django.db.models.expressions import Value
# This file pulls forward the SearchQuery implementation from Django 2.2.
# Once we upgrade to Django 2.2, this can be deleted.
class SearchQueryTemp(SearchQuery, Value):
output_field = SearchQueryField()
SEARCH_TYPES = {
'plain': 'plainto_tsquery',
'phrase': 'phraseto_tsquery',
'raw': 'to_tsquery',
}
def __init__(self, value, output_field=None, *, config=None, invert=False, search_type='plain'):
self.config = config
self.invert = invert
if search_type not in self.SEARCH_TYPES:
raise ValueError("Unknown search_type argument '%s'." % search_type)
self.search_type = search_type
super().__init__(value, output_field=output_field)
def as_sql(self, compiler, connection):
params = [self.value]
function = self.SEARCH_TYPES[self.search_type]
if self.config:
config_sql, config_params = compiler.compile(self.config)
template = '{}({}::regconfig, %s)'.format(function, config_sql)
params = config_params + [self.value]
else:
template = '{}(%s)'.format(function)
if self.invert:
template = '!!({})'.format(template)
return template, params
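# Hypothetical usage sketch (model and field names are placeholders; assumes a
# column backed by a SearchVectorField):
#   CaseText.objects.filter(tsv=SearchQueryTemp('habeas corpus', config='english', search_type='phrase'))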
```
#### File: scripts/tests/test_helpers.py
```python
import re
import pytest
from pyquery import PyQuery
from scripts.helpers import serialize_xml, parse_xml
from scripts.generate_case_html import generate_html, tag_map
from scripts.merge_alto_style import generate_styled_case_xml
from scripts.compare_alto_case import validate
from capdb.models import CaseXML, CaseMetadata
def test_serialize_xml_should_not_modify_input_xml(unaltered_alto_xml):
parsed = parse_xml(unaltered_alto_xml)
# make a change
parsed('[ID="b17-15"]').attr('ID', 'replace_me')
# serialize parsed xml
new_xml = serialize_xml(parsed)
# undo the change for comparison
assert b'replace_me' in new_xml # make sure modification worked
new_xml = new_xml.replace(b'replace_me', b'b17-15')
# serialized xml should be identical
assert unaltered_alto_xml == new_xml
@pytest.mark.django_db
def test_generate_html_tags(ingest_case_xml):
for case in CaseMetadata.objects.in_scope():
parsed_case_xml = case.case_xml.get_parsed_xml()
casebody_tree = parsed_case_xml("casebody|casebody")[0]
casebody_html = generate_html(case.case_xml.extract_casebody()).replace('\n', '').replace('\r', '').replace('\t', ' ')
for element in casebody_tree.iter():
old_tag = element.tag.split("}")[1]
new_tag = 'p' if old_tag == 'p' else tag_map[old_tag]
if 'id' in element.attrib:
id_search = r'<' + re.escape(new_tag) + r'[^>]*id="' + re.escape(element.attrib['id'])
assert re.search(id_search, casebody_html, re.IGNORECASE) is not None
else:
class_search = r'<' + re.escape(new_tag) + r'[^>]*class="' + re.escape(old_tag)
assert re.search(class_search, casebody_html, re.IGNORECASE) is not None
parsed_html = PyQuery(casebody_html)
for footnote in parsed_case_xml("casebody|footnote"):
footnote_id = "footnote_" + footnote.attrib['label']
assert parsed_html('aside[id="%s"]' % footnote_id).length == 1
assert parsed_html('a[href="#%s"]' % footnote_id).length == 1
@pytest.mark.django_db
def test_html_pagebreak(ingest_case_xml):
for case in CaseMetadata.objects.in_scope():
styled_xml = generate_styled_case_xml(case.case_xml, strict = False)
styled_html = generate_html(parse_xml(styled_xml)('casebody|casebody'))
pb_list = re.findall(r'(.{3})<pagebreak/>(.{3})', styled_xml)
br_list = re.findall(r'(.{3})<br class="pagebreak" style="page-break-before: always"/>(.{3})', styled_html)
assert set(pb_list) == set(br_list)
@pytest.mark.django_db
def test_between_paragraph_pagebreak(ingest_case_xml):
for case in CaseMetadata.objects.in_scope():
        # this test logic is too stupid to handle pagebreaks where multiple pages of footnotes
        # appear at the end of the opinion. The actual logic does work.
if case.case_id.startswith("WnApp"):
continue
#generate the styled and page broken XML
styled_xml = generate_styled_case_xml(case.case_xml, strict = False)
# Highlight the page breaks at the beginning of the casebody elements...
stripped_xml = re.sub(r'"\>\<page-number[a-zA-Z0-9= #\-"]*\>\*\d+\<\/page-number\>', '">__PAGE_BREAK__', styled_xml)
# get rid of all tags that will interfere with the xml parsing, and the inline pagebreak tags
strip_tags = r'\<em\>|\<\/em\>|\<strong\>|\<\/strong\>|\<footnotemark\>|\<\/footnotemark\>|\<bracketnum\>|\<\/bracketnum\>|\<page-number[a-zA-Z0-9= #\-"]*\>\*\d+\<\/page-number\>'
stripped_xml = re.sub(strip_tags, '', stripped_xml)
stripped_xml = re.sub(r'\xad', ' ', stripped_xml)
parsed_xml = parse_xml(stripped_xml)
previous_page = None
for casebody_element in parsed_xml("casebody|casebody").children():
if 'pgmap' not in casebody_element.attrib:
continue
current_page = casebody_element.attrib['pgmap'].split(' ')[-1].split('(')[0]
if previous_page != current_page and previous_page is not None and ' ' not in casebody_element.attrib['pgmap']:
assert casebody_element.text.startswith("__PAGE_BREAK__")
previous_page = current_page
@pytest.mark.django_db
def test_generate_inline_pagebreak(ingest_case_xml):
page_break_element_search = re.compile(r'\d+\((\d+)\)')
for case in CaseMetadata.objects.in_scope():
        # this test logic is too stupid to handle pagebreaks where multiple pages of footnotes
        # appear at the end of the opinion. The actual logic does work.
if case.case_id.startswith("WnApp"):
continue
#generate the styled and page broken XML
styled_xml = generate_styled_case_xml(case.case_xml, strict = False)
# dump the page breaks that come at the beginning of the casebody elements...
stripped_xml = re.sub(r'"\>\<page-number[a-zA-Z0-9= #\-"]*\>\*\d+\<\/page-number\>', '">', styled_xml)
# get rid of all tags that will interfere with the xml parsing, and the beginning of the pagebreak tags
strip_tags = r'\<em\>|\<\/em\>|\<strong\>|\<\/strong\>|\<footnotemark\>|\<\/footnotemark\>|\<bracketnum\>|\<\/bracketnum\>|\<page-number[a-zA-Z0-9= #\-"]*\>\*\d+'
stripped_xml = re.sub(strip_tags, '', stripped_xml)
# so we can keep track of the pagebreak tags without breaking xml rendering
stripped_xml = re.sub(r'\<\/page-number\>', '__PAGE_BREAK__ ', stripped_xml)
stripped_xml = re.sub(r'\xad', ' ', stripped_xml)
parsed_xml = parse_xml(stripped_xml)
for p in parsed_xml("casebody|p"):
if ') ' in p.get('pgmap'):
page_breaks = page_break_element_search.findall(p.get('pgmap'))
page_break_element_counts = []
for i, value in enumerate(page_breaks):
if i >= len(page_breaks) - 1:
break
value = int(value) + int(page_breaks[i - 1]) if i > 0 else int(value)
page_break_element_counts.append(value)
element_split = p.text.split(" ")
actual_locations = []
while '__PAGE_BREAK__' in element_split:
loc = element_split.index("__PAGE_BREAK__")
del element_split[loc]
actual_locations.append(loc)
assert set(actual_locations) == set(page_break_element_counts)
@pytest.mark.django_db
def test_merge_alto_case(ingest_case_xml):
# testing strict, totally compliant case
case_xml = CaseXML.objects.get(metadata_id__case_id="32044057891608_0001")
styled_case = parse_xml(generate_styled_case_xml(case_xml))
assert len(styled_case("casebody|em")) == 23
assert len(styled_case("casebody|strong")) == 11
@pytest.mark.django_db
def test_merge_alto_extra_char_exception(ingest_case_xml):
# testing processing with a case that has some character mismatches.
case_xml = CaseXML.objects.get(metadata_id__case_id="32044057892259_0001")
case_xml.orig_xml = case_xml.orig_xml.replace("</p>", "y</p>")
alto_xml = case_xml.pages.first()
alto_xml.orig_xml = alto_xml.orig_xml.replace('CONTENT="', 'CONTENT="x')
alto_xml.save()
# fails with strict
with pytest.raises(Exception, match=r'Case text and alto text do not match'):
generate_styled_case_xml(case_xml)
# passes without strict
styled_case = parse_xml(generate_styled_case_xml(case_xml, False))
assert len(styled_case("casebody|em")) == 8
assert len(styled_case("casebody|strong")) == 11
@pytest.mark.django_db
def test_merge_dup_exception(ingest_case_xml):
case_xml = CaseXML.objects.get(metadata_id__case_id="32044061407086_0001")
with pytest.raises(Exception, match=r'Duplicative case: no casebody data to merge'):
generate_styled_case_xml(case_xml)
@pytest.mark.django_db
def test_validate_alto_casemets_dup(ingest_case_xml):
results = validate(CaseXML.objects.get(metadata_id__case_id="32044061407086_0001"))
assert results['status'] == 'ok'
assert results['results'] == 'duplicative'
@pytest.mark.django_db
def test_validate_alto_casemets_clean(ingest_case_xml):
results = validate(CaseXML.objects.get(metadata_id__case_id="32044057891608_0001"))
assert results['status'] == 'ok'
assert results['results'] == 'clean'
@pytest.mark.django_db
def test_validate_alto_casemets_dirty(ingest_case_xml):
results = validate(CaseXML.objects.get(metadata_id__case_id="32044057892259_0001"))
assert results['status'] == 'warning'
assert results['results'] == 'encountered 2 problems'
problem_1 = {'alto': {'current': {'ST_17.1.8.1': 'matter'},
'current_character': {'ST_17.1.8.1': 'm'},
'next': {'ST_17.1.8.3': 'in'},
'prev': None},
'casemets': {'current': '\xadmatte',
'current_character': '\xad',
'snippet': 'tion of the subject-\xadmatter in controver'},
'description': 'extra char in case_mets? match found in current alto'}
problem_2 = {'alto': {'current': {'ST_19.1.11.7': '113\xad'},
'current_character': {'ST_19.1.11.7': '\xad'},
'next': {'ST_19.1.11.9': ';'},
'prev': {'ST_19.1.11.5': 'Ill.'}},
'casemets': {'current': '; Ca',
'current_character': ';',
'snippet': 'Strobel, 24 Ill. 113; Carpenter v. Wells'},
'description': 'extra char in alto? match found subsequent alto element'}
assert problem_1 in results['problems']
assert problem_2 in results['problems']
@pytest.mark.django_db
def test_validate_alto_casemets_error(ingest_case_xml):
case_xml = CaseXML.objects.get(metadata_id__case_id="32044057891608_0001")
parsed_case_xml = parse_xml(case_xml.orig_xml)
case_parent_tag = parsed_case_xml('casebody|parties')
case_parent_tag.text("<NAME>, Propellant, v. <NAME>, Applebees.")
case_xml.orig_xml = serialize_xml(parsed_case_xml)
case_xml.save(update_related=False)
results = validate(case_xml)
problem_1 = {'alto': {'current': {'ST_17.2.1.5': 'Appellant,'},
'current_character': {'ST_17.2.1.5': 'A'},
'next': {'ST_17.2.1.7': 'v.'},
'prev': {'ST_17.2.1.3': 'Taylor,'}},
'casemets': {'current': 'Propellant',
'current_character': 'P',
'snippet': '<NAME>, Propellant, v. Macha'},
'description': 'Unspecified Mismatch.'}
problem_2 = {'alto': {'current': {'ST_17.2.1.7': 'v.'},
'current_character': {'ST_17.2.1.7': 'v'},
'next': {'ST_17.2.1.9': 'Michael'},
'prev': {'ST_17.2.1.5': 'Appellant,'}},
'casemets': {'current': 'Pr',
'current_character': 'P',
'snippet': '<NAME>, Propellant, v. Macha'},
'description': 'Unspecified Mismatch.'}
assert results['status'] == 'error'
assert results['results'] == 'gave up after 2 consecutive bad words'
assert problem_1 in results['problems']
assert problem_2 in results['problems']
```
|
{
"source": "jcushman/python-hyperscan",
"score": 2
}
|
#### File: jcushman/python-hyperscan/build.py
```python
import os
import subprocess
import sys
from distutils.core import Extension
from distutils.errors import (
CCompilerError,
DistutilsExecError,
DistutilsPlatformError,
)
from distutils.command.build_ext import build_ext
# http://code.activestate.com/recipes/502261-python-distutils-pkg-config/
def pkgconfig(libs, optional=''):
flag_map = {
'include_dirs': (['--cflags-only-I'], 2),
'library_dirs': (['--libs-only-L'], 2),
'libraries': (['--libs-only-l'], 2),
'extra_compile_args': (['--cflags-only-other'], 0),
'extra_link_args': (['--libs-only-other'], 0),
}
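    # The trim offset strips the option prefix from each whitespace-separated
    # flag that pkg-config prints, e.g. '-I/usr/include/hs' -> '/usr/include/hs'
    # and '-lhs' -> 'hs'; an offset of 0 keeps the flag untouched.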
ext_kwargs = {
'extra_compile_args': ['-std=c99'],
}
for lib in libs:
for distutils_kwarg, (pkg_options, trim_offset) in flag_map.items():
try:
options = (
subprocess.check_output(
['pkg-config', optional, *pkg_options, lib]
)
.decode()
.split()
)
except subprocess.CalledProcessError:
continue
ext_kwargs.setdefault(distutils_kwarg, []).extend(
[opt[trim_offset:] for opt in options]
)
return ext_kwargs
def build(setup_kwargs):
setup_kwargs.update(
{
'ext_modules': [
Extension(
'hyperscan._hyperscan',
['hyperscan/hyperscanmodule.c'],
**pkgconfig(['libhs'])
)
],
'cmdclass': {'build_ext': build_ext},
}
)
```
|
{
"source": "jcushman/xport",
"score": 4
}
|
#### File: xport/xport/xport.py
```python
from datetime import datetime
import struct
def parse_date(datestr):
""" Given a date in xport format, return Python date. """
return datetime.strptime(datestr, "%d%b%y:%H:%M:%S") # e.g. "16FEB11:10:07:55"
def _split_line(s, parts):
"""
s: fixed-length string to split
parts: list of (name, length) pairs used to break up string
name '_' will be filtered from output.
result: dict of name:contents of string at given location.
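    e.g. _split_line("abcdefgh", [['a', 3], ['_', 2], ['b', 3]]) -> {'a': 'abc', 'b': 'fgh'}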
"""
out = {}
start = 0
for name, length in parts:
out[name] = s[start:start+length].strip()
start += length
del out['_']
return out
def parse_float(bitstring):
"""
Given IBM-style float stored as string, return Python float.
This is adapted from the following C code in the spec. The adaptation may not be correct, or optimal.
/* Get the first half of the ibm number without the exponent */
/* into the ieee number */
ieee1 = xport1 & 0x00ffffff;
/* get the second half of the ibm number into the second half */
/* of the ieee number . If both halves were 0. then just */
/* return since the ieee number is zero. */
if ((!(ieee2 = xport2)) && !xport1)
return;
/* The fraction bit to the left of the binary point in the */
/* ieee format was set and the number was shifted 0, 1, 2, or */
/* 3 places. This will tell us how to adjust the ibm exponent */
/* to be a power of 2 ieee exponent and how to shift the */
/* fraction bits to restore the correct magnitude. */
if ((nib = (int)xport1) & 0x00800000)
shift = 3;
else
if (nib & 0x00400000)
shift = 2;
else
if (nib & 0x00200000)
shift = 1;
else
shift = 0;
if (shift)
{
/* shift the ieee number down the correct number of places*/
/* then set the second half of the ieee number to be the */
/* second half of the ibm number shifted appropriately, */
/* ored with the bits from the first half that would have */
/* been shifted in if we could shift a double. All we are */
/* worried about are the low order 3 bits of the first */
/* half since we're only shifting by 1, 2, or 3. */
ieee1 >>= shift;
ieee2 = (xport2 >> shift) |
((xport1 & 0x00000007) << (29 + (3 - shift)));
}
/* clear the 1 bit to the left of the binary point */
ieee1 &= 0xffefffff;
/* set the exponent of the ieee number to be the actual */
/* exponent plus the shift count + 1023. Or this into the */
/* first half of the ieee number. The ibm exponent is excess */
/* 64 but is adjusted by 65 since during conversion to ibm */
/* format the exponent is incremented by 1 and the fraction */
/* bits left 4 positions to the right of the radix point. */
ieee1 |=
(((((long)(*temp & 0x7f) - 65) << 2) + shift + 1023) << 20) |
(xport1 & 0x80000000);
"""
xport1, xport2 = struct.unpack('>II', bitstring)
# Start by setting first half of ieee number to first half of IBM number sans exponent
ieee1 = xport1 & 0x00ffffff
# get the second half of the ibm number into the second half of the ieee number
ieee2 = xport2
# If both halves were 0. then just return since the ieee number is zero.
if not ieee1 and not ieee2:
return 0.0
# The fraction bit to the left of the binary point in the ieee format was set and the number was shifted 0, 1, 2, or
# 3 places. This will tell us how to adjust the ibm exponent to be a power of 2 ieee exponent and how to shift
# the fraction bits to restore the correct magnitude.
if xport1 & 0x00800000:
shift = 3
elif xport1 & 0x00400000:
shift = 2
elif xport1 & 0x00200000:
shift = 1
else:
shift = 0
if shift:
# shift the ieee number down the correct number of places then set the second half of the ieee number to be the
# second half of the ibm number shifted appropriately, ored with the bits from the first half that would have
# been shifted in if we could shift a double. All we are worried about are the low order 3 bits of the first
# half since we're only shifting by 1, 2, or 3.
ieee1 >>= shift
ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift)))
# clear the 1 bit to the left of the binary point
ieee1 &= 0xffefffff
# set the exponent of the ieee number to be the actual exponent plus the shift count + 1023. Or this into the
# first half of the ieee number. The ibm exponent is excess 64 but is adjusted by 65 since during conversion to ibm
# format the exponent is incremented by 1 and the fraction bits left 4 positions to the right of the radix point.
# (had to add >> 24 because C treats & 0x7f as 0x7f000000 and Python doesn't)
ieee1 |= ((((((xport1 >> 24) & 0x7f) - 65) << 2) + shift + 1023) << 20) | (xport1 & 0x80000000)
# Python doesn't limit to 4 bytes like we need it to ...
ieee1 &= 0xffffffff
ieee2 &= 0xffffffff
return struct.unpack(">d", struct.pack(">II", ieee1, ieee2))[0]
class XportReader(object):
def __init__(self, file, encoding='ISO-8859-1'):
        self.encoding = encoding # is this ever anything else than ISO-8859-1 ??
self.loadfile(file)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.opened_file:
try:
self.file.close()
except:
pass
def _get_row(self):
return self.file.read(80)
def loadfile(self, file):
"""
Open file, seek to start, verify that it is an xport file, set:
self.file = file
self.file_info
self.member_info
self.fields
From spec, format of fields is as follows (items with stars renamed for clarity):
struct NAMESTR {
short ntype; /* VARIABLE TYPE: 1=NUMERIC, 2=CHAR */
short nhfun; /* HASH OF NNAME (always 0) */
* short field_length; /* LENGTH OF VARIABLE IN OBSERVATION */
short nvar0; /* VARNUM */
* char8 name; /* NAME OF VARIABLE */
* char40 label; /* LABEL OF VARIABLE */
char8 nform; /* NAME OF FORMAT */
short nfl; /* FORMAT FIELD LENGTH OR 0 */
* short num_decimals; /* FORMAT NUMBER OF DECIMALS */
short nfj; /* 0=LEFT JUSTIFICATION, 1=RIGHT JUST */
char nfill[2]; /* (UNUSED, FOR ALIGNMENT AND FUTURE) */
char8 niform; /* NAME OF INPUT FORMAT */
short nifl; /* INFORMAT LENGTH ATTRIBUTE */
short nifd; /* INFORMAT NUMBER OF DECIMALS */
long npos; /* POSITION OF VALUE IN OBSERVATION */
char rest[52]; /* remaining fields are irrelevant */
};
"""
self.opened_file = False
try:
file = open(file, 'rb')
self.opened_file = True
except TypeError:
try:
file.seek(0)
except AttributeError:
raise TypeError("File should be a string-like or file-like object.")
self.file = file
# read file header
line1 = self._get_row()
if not line1 == "HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!000000000000000000000000000000 ":
raise Exception("Header record is not an XPORT file.")
line2 = self._get_row()
file_info = _split_line(line2, [ ['prefix',24], ['version',8], ['OS',8], ['_',24], ['created',16]])
if file_info['prefix'] != "SAS SAS SASLIB":
raise Exception("Header record has invalid prefix.")
file_info['created'] = parse_date(file_info['created'])
self.file_info = file_info
line3 = self._get_row()
file_info['modified'] = parse_date(line3[:16])
# read member header
header1 = self._get_row()
header2 = self._get_row()
if not header1.startswith("HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!000000000000000001600000000")\
or not header2 == "HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!000000000000000000000000000000 ":
raise Exception("Member header not found.")
fieldnamelength = int(header1[-5:-2]) # usually 140, could be 135
# member info
member_info = _split_line(self._get_row(), [['prefix',8],['set_name',8],['sasdata',8],['version',8],['OS',8],['_',24],['created',16]])
member_info.update( _split_line(self._get_row(), [['modified',16],['_',16],['label',40],['type',8]]))
member_info['modified'] = parse_date(member_info['modified'])
member_info['created'] = parse_date(member_info['created'])
self.member_info = member_info
# read field names
types = {1:'numeric', 2:'char'}
fieldcount = int(self._get_row()[54:58])
datalength = fieldnamelength*fieldcount
if datalength%80: # round up to nearest 80
datalength += 80 - datalength%80
fielddata = file.read(datalength)
fields = []
obs_length = 0
while len(fielddata)>=fieldnamelength:
field, fielddata = (fielddata[:fieldnamelength], fielddata[fieldnamelength:]) # pull data for this field from start of fielddata
field = field.ljust(140) # rest at end gets ignored, so if field is short, pad out to match struct pattern below
fieldstruct = struct.unpack('>hhhh8s40s8shhh2s8shhl52s', field)
field = dict(zip(['ntype','nhfun','field_length','nvar0','name','label','nform','nfl','num_decimals','nfj','nfill','niform','nifl','nifd','npos','_'],fieldstruct))
del field['_']
field['ntype'] = types[field['ntype']]
if field['ntype']=='numeric' and field['field_length'] != 8:
raise TypeError("Oops -- only 8-byte floats are currently implemented. Can't read field %s." % field)
for k, v in field.items():
try:
field[k] = v.strip()
except AttributeError:
pass
obs_length += field['field_length']
fields += [field]
if not self._get_row() == "HEADER RECORD*******OBS HEADER RECORD!!!!!!!000000000000000000000000000000 ":
raise Exception("Observation header not found.")
self.fields = fields
self.record_length = obs_length
self.record_start = self.file.tell()
def record_count(self):
"""
Get number of records in file.
This is maybe suboptimal because we have to seek to the end of the file.
"""
self.file.seek(0,2)
total_records_length = self.file.tell() - self.record_start
return total_records_length / self.record_length
def __iter__(self):
return self
def next(self):
s = self.file.read(self.record_length)
if not s or len(s) < self.record_length:
raise StopIteration()
obs = {}
for field in self.fields:
bytes = field['field_length']
field_str, s = (s[:bytes], s[bytes:]) # pull data for this field from start of obs data
if field['ntype'] == 'char':
field_val = unicode(field_str.strip(), self.encoding)
else:
field_val = parse_float(field_str)
if field['num_decimals'] == 0:
field_val = int(field_val)
obs[field['name']] = field_val
return obs
if __name__ == "__main__":
import sys
with XportReader(sys.argv[1]) as reader:
for obj in reader:
try:
print obj
except IOError, e:
# except block to gracefully exit on broken pipe signal (e.g. xport.py foo.xpt | head)
import errno
if e.errno == errno.EPIPE:
sys.exit()
raise
```
|
{
"source": "jcusick13/blackburne",
"score": 3
}
|
#### File: src/data/build_retrosheet_db.py
```python
import os
import psycopg2
import requests
import subprocess
import zipfile
from dotenv import find_dotenv, load_dotenv
from pathlib import Path
class RetroEventFormatter():
"""ETL tools to work with Retrosheet event data.
working_dir : string
Directory to save data to and work out of
year : integer
Year to process data for
conn : psycopg2 connection object
Database connection for loading
"""
def __init__(self, working_dir, year, conn):
self.dir = working_dir
self.year = year
self.conn = conn
def process(self):
"""Collects all Retrosheet event files for a
single year, cleans them, and adds them to a new
database table.
"""
print('Downloading season\'s raw event files...')
self.download_event_files()
print('Reformatting files for loading into database...')
self.format_event_files()
print('Loading files into database table...')
self.load_event_files()
print('Cleaning up...')
self.cleanup()
def download_event_files(self):
"""Downloads and unzips raw Retrosheet regular
season event level summary files into 'data/raw/retro_event'
"""
loc = f'{self.dir}/retro_event'
if not os.path.exists(loc):
os.makedirs(loc, exist_ok=True)
url = f'https://www.retrosheet.org/events/{self.year}eve.zip'
r = requests.get(url)
with open(f'{loc}/{self.year}eve.zip', 'wb') as f:
f.write(r.content)
with zipfile.ZipFile(f'{loc}/{self.year}eve.zip', 'r') as zipped:
zipped.extractall(loc)
def format_event_files(self):
"""Uses Chadwick to reformat raw event level data
into a format suitable for adding to a database table.
        Raw event files are deleted afterwards by cleanup().
"""
cmd = f'cwevent -y {self.year} -f 0-96 {self.year}*.EV* > all{self.year}.csv'
process = subprocess.Popen(cmd, shell=True, cwd=f'{self.dir}/retro_event/')
process.communicate()
def load_event_files(self):
"""Creates a new table and loads a single
years worth of event data to the database.
"""
print('Creating table schema...')
with self.conn.cursor() as cursor:
cursor.execute(open('src/data/sql/build_events.sql', 'r').read())
self.conn.commit()
print('Adding to table from csv...')
with open(f'{self.dir}/retro_event/all{self.year}.csv') as f:
copy = 'COPY raw_events FROM STDIN WITH csv'
self.conn.cursor().copy_expert(sql=copy, file=f)
self.conn.commit()
def cleanup(self):
for fname in os.listdir(f'{self.dir}/retro_event'):
            if fname.endswith(('.EVA', '.EVN', '.ROS')):
os.remove(f'{self.dir}/retro_event/{fname}')
if fname.endswith(f'TEAM{self.year}'):
os.remove(f'{self.dir}/retro_event/{fname}')
def main():
# Set input/output locations
project_path = Path.cwd()
raw_path = str((project_path / 'data' / 'raw').resolve())
# Connect to Retrosheet database
load_dotenv(find_dotenv())
retro_db = os.getenv('RETRO_DB')
retro_user = os.getenv('RETRO_USER')
    retro_pass = os.getenv('RETRO_PASS')
conn = psycopg2.connect(database=retro_db, user=retro_user, password=retro_pass)
# Process 2017 event files
retro = RetroEventFormatter(raw_path, 2017, conn)
retro.process()
if __name__ == '__main__':
main()
```
|
{
"source": "jcussen/infotheory",
"score": 2
}
|
#### File: infotheory/tests/tests.py
```python
import numpy as np
import infotheory
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
TEST_HEADER = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
SUCCESS = bcolors.OKGREEN + "SUCCESS" + bcolors.ENDC
FAILED = bcolors.FAIL + "FAILED" + bcolors.ENDC
def _except(e):
print("\n" + FAILED)
print(e)
exit(1)
def do_matching(base_str, result, target, name, decimals=5):
result = np.round(result, decimals=decimals)
target = np.round(target, decimals=decimals)
if result == target:
print(base_str, name, result, target, SUCCESS)
else:
raise Exception(
"{} not equal to expected value. Expected = {}, Actual = {}".format(
name, target, result
)
)
def decomposition_equivalence_4D(dims, nreps, nbins, data_ranges, data):
try:
# creating the object and adding data
it_par = infotheory.InfoTools(dims, nreps)
it_par.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
it_par.add_data(data)
# PID-ing
total_mi = it_par.mutual_info([1, 1, 1, 0])
redundant_info = it_par.redundant_info([1, 2, 3, 0])
unique_1 = it_par.unique_info([1, 2, 3, 0])
unique_2 = it_par.unique_info([2, 1, 3, 0])
unique_3 = it_par.unique_info([2, 3, 1, 0])
synergy = it_par.synergy([1, 2, 3, 0])
targets = [total_mi, redundant_info, unique_1, unique_2, unique_3, synergy]
# Alternate PID-ing
total_mi = it_par.mutual_info([1, 1, 1, 0])
redundant_info = it_par.redundant_info([2, 1, 3, 0])
unique_1 = it_par.unique_info([1, 3, 2, 0])
unique_2 = it_par.unique_info([3, 1, 2, 0])
unique_3 = it_par.unique_info([3, 2, 1, 0])
synergy = it_par.synergy([2, 1, 3, 0])
base_str = "Decomposition equivalence | "
do_matching(base_str, total_mi, targets[0], "Total MI")
do_matching(base_str, redundant_info, targets[1], "Redundant info | ")
do_matching(base_str, unique_1, targets[2], "Unique source 1 info | ")
do_matching(base_str, unique_2, targets[3], "Unique source 2 info | ")
do_matching(base_str, unique_3, targets[4], "Unique source 3 info | ")
do_matching(base_str, synergy, targets[5], "Synergistic info | ")
except Exception as e:
_except(e)
def decomposition_test_4D(dims, nreps, nbins, data_ranges, data, targets):
""" testing if 4D PID matches expected values """
try:
# creating the object and adding data
it_par = infotheory.InfoTools(dims, nreps)
it_par.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
it_par.add_data(data)
# PID-ing
total_mi = it_par.mutual_info([1, 1, 1, 0])
redundant_info = it_par.redundant_info([1, 2, 3, 0])
unique_1 = it_par.unique_info([1, 2, 3, 0])
unique_2 = it_par.unique_info([2, 1, 3, 0])
unique_3 = it_par.unique_info([2, 3, 1, 0])
synergy = it_par.synergy([1, 2, 3, 0])
results = [total_mi, redundant_info, unique_1, unique_2, unique_3, synergy]
base_str = "Decomposition test | "
do_matching(base_str, total_mi, targets[0], "Total MI")
do_matching(base_str, redundant_info, targets[1], "Redundant info | ")
do_matching(base_str, unique_1, targets[2], "Unique source 1 info | ")
do_matching(base_str, unique_2, targets[3], "Unique source 2 info | ")
do_matching(base_str, unique_3, targets[4], "Unique source 3 info | ")
do_matching(base_str, synergy, targets[5], "Synergistic info | ")
except Exception as e:
_except(e)
def pid_test_3D(dims, nreps, nbins, data_ranges, data):
""" testing sum of pid == total_mi """
try:
# creating the object
it = infotheory.InfoTools(dims, nreps)
it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
# adding points
it.add_data(data)
# estimating mutual information
mi = it.mutual_info([1, 1, 0])
redundant_info = it.redundant_info([1, 2, 0])
unique_1 = it.unique_info([1, 2, 0])
unique_2 = it.unique_info([2, 1, 0])
synergy = it.synergy([1, 2, 0])
# total_pid
total_pid = np.sum(
np.round([redundant_info, unique_1, unique_2, synergy], decimals=6)
)
# mi
total_mi = np.round(mi, decimals=6)
if (total_pid - total_mi) < 1e-5:
print(total_pid, total_mi, SUCCESS)
else:
raise Exception(
"Total PID does not equal MI: total_mi = {}; total_pid = {}".format(
total_pid, total_mi
)
)
except Exception as e:
_except(e)
def decomposition_equivalence_3D(dims, nreps, nbins, data_ranges, data):
try:
# creating the object
it = infotheory.InfoTools(dims, nreps)
it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
# adding points
it.add_data(data)
# estimating mutual information
redundant_info_1 = it.redundant_info([1, 2, 0])
synergy_1 = it.synergy([1, 2, 0])
redundant_info_2 = it.redundant_info([2, 1, 0])
synergy_2 = it.synergy([2, 1, 0])
base_str = "Decomposition equivalence | "
do_matching(base_str, redundant_info_1, redundant_info_2, "Redundant info | ")
do_matching(base_str, synergy_1, synergy_2, "Synergy | ")
except Exception as e:
_except(e)
def decomposition_test_3D(dims, nreps, nbins, data_ranges, data, results):
try:
# creating the object
it = infotheory.InfoTools(dims, nreps)
it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
# adding points
it.add_data(data)
# estimating mutual information
redundant_info = it.redundant_info([1, 2, 0])
unique_1 = it.unique_info([1, 2, 0])
unique_2 = it.unique_info([2, 1, 0])
synergy = it.synergy([1, 2, 0])
if all(
np.round([redundant_info, unique_1, unique_2, synergy], decimals=2)
== results
):
print(synergy, SUCCESS)
else:
raise Exception("PID computation error")
except Exception as e:
_except(e)
def uniform_random_mi_test(dims, nreps, nbins, data_ranges, num_samples=1000):
print(
"Testing mutual info with uniform random variables. MI = ", end="", flush=True
)
try:
# creating the object
it = infotheory.InfoTools(dims, nreps)
it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
# adding points
it.add_data(np.random.rand(num_samples, dims))
# ...alternatively,
# for _ in range(num_samples):
# it.add_data_point(np.random.rand(dims))
# estimating mutual information
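        # normalise by the largest possible MI between the two variables, i.e.
        # the entropy of one uniformly-binned variable: (1/dims)*log2(prod(nbins)),
        # which is log2(50) ≈ 5.64 bits for two variables with 50 bins each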
mi = it.mutual_info([0, 1]) / ((1 / dims) * np.log2(np.prod(nbins)))
print(mi, SUCCESS)
except Exception as e:
print(e)
_except(e)
def identical_random_mi_test(
dims, nreps, nbins, data_ranges, add_noise=False, num_samples=1000
):
print("Testing mutual info with identical random variables", end="", flush=True)
if add_noise:
print(" with noise. MI = ", end="", flush=True)
else:
print(". MI = ", end="", flush=True)
try:
# creating the object
if dims % 2 != 0:
dims += 1
it = infotheory.InfoTools(dims, nreps)
it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
p_dims = int(dims / 2)
# adding points
for _ in range(num_samples):
point1 = np.random.rand(p_dims)
if add_noise:
point2 = point1 + (np.random.rand(p_dims) / 30)
else:
point2 = point1
it.add_data_point(np.concatenate((point1, point2)))
# computing mutual information
mi = it.mutual_info([0, 1]) / ((1 / dims) * np.log2(np.prod(nbins)))
print(mi, SUCCESS)
except Exception as e:
_except(e)
def entropy_test(dims, nreps, nbins, data_ranges, data_sampler, num_samples=1000):
try:
# creating the object
it = infotheory.InfoTools(dims, nreps)
it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
# adding points
for _ in range(num_samples):
it.add_data_point([data_sampler()])
# estimate entropy
print(it.entropy([0]), SUCCESS)
except Exception as e:
_except(e)
def test_pid_4D():
""" Testing
    4D PI-decomposition
1. sanity for each PI measure
2. known PIDs for even parity
"""
print("\n" + bcolors.TEST_HEADER + "PID-4D" + bcolors.ENDC)
## Testing PID by value
dims = 4
nreps = 0
nbins = [2] * dims
data_ranges = [[0] * dims, [1] * dims]
# Even parity check
data = [
[0, 0, 0, 0],
[0, 0, 1, 1],
[0, 1, 0, 1],
[0, 1, 1, 0],
[1, 0, 0, 1],
[1, 0, 1, 0],
[1, 1, 0, 0],
[1, 1, 1, 1],
]
targets = [1.0, 0.0, 0.0, 0.0, 0.0, 1.0]
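    # The fourth column is the even-parity (XOR) bit of the first three, so no
    # proper subset of the sources carries any information about the target:
    # the full 1 bit of mutual information is purely synergistic.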
print("Testing PID with even parity checker")
decomposition_test_4D(dims, nreps, nbins, data_ranges, data, targets)
# random data
print("Testing PID with uniform random data")
dims = 4
    nreps = 0
nbins = [50] * dims
data_ranges = [[0] * dims, [1] * dims]
data = np.random.rand(5000, dims)
decomposition_equivalence_4D(dims, nreps, nbins, data_ranges, data)
def test_pid_3D():
""" Testing
1. sum(PID) == mi
2. known PIDs for logic gates
3. synergy([0,1,2]) == synergy([0,2,1])?
"""
print("\n" + bcolors.TEST_HEADER + "PID-3D" + bcolors.ENDC)
## Testing PID by value
dims = 3
    nreps = 0
nbins = [2] * dims
data_ranges = [[0] * dims, [1] * dims]
# AND gate
data = [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]]
print("Testing total PID with total mi | AND gate = ", end="", flush=True)
pid_test_3D(dims, nreps, nbins, data_ranges, data)
# XOR gate
data = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]
print("Testing total PID with total mi | XOR gate = ", end="", flush=True)
pid_test_3D(dims, nreps, nbins, data_ranges, data)
# random data
dims = 3
    nreps = 0
nbins = [50] * 3
data_ranges = [[0] * 3, [1] * 3]
data = np.random.rand(500, dims)
print("Testing total PID with total mi | random data = ", end="", flush=True)
pid_test_3D(dims, nreps, nbins, data_ranges, data)
## Testing PI decomposition
dims = 3
    nreps = 0
nbins = [2] * 3
data_ranges = [[0] * 3, [1] * 3]
# AND gate
data = [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]]
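    # Known result for the AND gate: total MI = H(1/4) ≈ 0.81 bits, which the
    # decomposition splits into ≈0.31 bits redundant and 0.5 bits synergistic,
    # with no unique information from either input.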
print("Testing decomposition with AND gate = ", end="", flush=True)
decomposition_test_3D(dims, nreps, nbins, data_ranges, data, [0.31, 0.0, 0.0, 0.5])
# XOR gate
data = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]
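    # The XOR output is independent of each input taken alone, so the entire
    # 1 bit of mutual information is synergistic: [0, 0, 0, 1].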
print("Testing decomposition with XOR gate = ", end="", flush=True)
decomposition_test_3D(dims, nreps, nbins, data_ranges, data, [0.0, 0.0, 0.0, 1.0])
## Testing decomposition equivalence
dims = 3
    nreps = 0
nbins = [2] * 3
data_ranges = [[0] * 3, [1] * 3]
# AND gate
data = [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]]
print("Testing redundant and synergistic equivalence | AND gate")
decomposition_equivalence_3D(dims, nreps, nbins, data_ranges, data)
# XOR gate
data = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]
print("Testing redundant and synergistic equivalence | XOR gate")
decomposition_equivalence_3D(dims, nreps, nbins, data_ranges, data)
# random data
dims = 3
    nreps = 0
nbins = [50] * 3
data_ranges = [[0] * 3, [1] * 3]
data = np.random.rand(500, dims)
print("Testing redundant and synergistic equivalence | random data")
decomposition_equivalence_3D(dims, nreps, nbins, data_ranges, data)
def test_mutual_info(dims, nreps, nbins, data_ranges):
""" Testing mutual information under three conditions
1. two uniform random variables (low MI)
2. two identical random variables (high MI)
3. one ranom variable and a noisy version of the same (medium MI)
"""
print("\n" + bcolors.TEST_HEADER + "MUTUAL INFORMATION" + bcolors.ENDC)
uniform_random_mi_test(dims, nreps, nbins, data_ranges)
identical_random_mi_test(dims, nreps, nbins, data_ranges, add_noise=False)
identical_random_mi_test(dims, nreps, nbins, data_ranges, add_noise=True)
def test_entropy(dims, nreps, nbins, data_ranges):
""" Testing entropy under two conditions
1. A uniform random variable (high entropy)
2. A gaussian with low std. dev. (low entropy)
"""
print("\n" + bcolors.TEST_HEADER + "ENTROPY" + bcolors.ENDC)
print("Testing entropy with uniform distribution = ", end="", flush=True)
entropy_test(dims, nreps, nbins, data_ranges, lambda: np.random.uniform())
print("Testing entropy with normal distribution = ", end="", flush=True)
entropy_test(
dims, nreps, nbins, data_ranges, lambda: np.random.normal(loc=0.5, scale=0.01)
)
def test_binning(dims, nreps, nbins, data_ranges):
""" Test execution of both types of binning
1. Equal interval
2. Manual specification
"""
print("\n" + bcolors.TEST_HEADER + "BINNING" + bcolors.ENDC)
mi_eq = mi_mb = None
# resetting for this test
dims = 2
# generating a commong set of datapoints
datapoints = []
for _ in range(1000):
point1 = np.random.rand()
point2 = point1 + (np.random.rand() / 30)
datapoints.append([point1, point2])
# Equal interval binning
try:
print("Estimating MI using equal interval binning = ", end="", flush=True)
it = infotheory.InfoTools(dims, nreps)
# set bin boundaries
it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
# adding points
it.add_data(datapoints)
# computing mutual information
mi_eq = it.mutual_info([0, 1])
print(mi_eq, SUCCESS)
except Exception as e:
_except(e)
# Manual binning
try:
print("Estimating MI using manually specified binning = ", end="", flush=True)
it = infotheory.InfoTools(dims, nreps)
# set bin boundaries
it.set_bin_boundaries([[0.3333, 0.6666], [0.3333, 0.6666]])
# adding points
it.add_data(datapoints)
# computing mutual information
mi_mb = it.mutual_info([0, 1])
print(mi_mb, SUCCESS)
except Exception as e:
_except(e)
# mi_eq == mi_mb?
print(
"Tested both binning methods. Difference in result = {}".format(mi_eq - mi_mb),
SUCCESS,
)
def test_creation(dims, nreps, nbins, data_ranges):
print("Testing creating an object. ", end="", flush=True)
try:
# creating object
it = infotheory.InfoTools(dims, nreps)
it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1])
print(bcolors.OKGREEN + "SUCCESS" + bcolors.ENDC)
except Exception as e:
_except(e)
def run_tests(dims, nreps, nbins, data_ranges):
""" runs all tests """
print(bcolors.HEADER + "************ Starting tests ************" + bcolors.ENDC)
test_creation(dims, nreps, nbins, data_ranges)
test_binning(dims, nreps, [3, 3], data_ranges)
test_entropy(1, nreps, [50], [[0], [1]])
test_mutual_info(dims, nreps, nbins, data_ranges)
test_pid_3D()
test_pid_4D()
print(
"\n"
+ bcolors.HEADER
+ "************ Tests completed ************"
+ bcolors.ENDC
)
def manual_test(m, n):
it = infotheory.InfoTools(2, 1, [2, 2], [0, 0], [1, 1])
it.add_data([[0, 0]] * m + [[1, 1]] * n)
print("m = ", m, " n = ", n, " MI = ", it.mutual_info([0, 1]))
if __name__ == "__main__":
dims = 2
nreps = 0
nbins = [50] * dims
data_ranges = [[0] * dims, [1] * dims]
# for m,n in zip([1,2,2,3,500,499,200],[1,1,2,2,500,500,500]):
# manual_test(m,n)
run_tests(dims, nreps, nbins, data_ranges)
```
|
{
"source": "jcus/web-scraping-challenge",
"score": 3
}
|
#### File: web-scraping-challenge/Instructions/app.py
```python
from flask import Flask, render_template, redirect
#, url_for
from flask_pymongo import PyMongo
import scrape_mars
# Create an instance of Flask
app = Flask(__name__)
## Use PyMongo to establish Mongo connection
# mongo = PyMongo(app, uri="mongodb://localhost:27017/Mission_to_Mars_app")
# Use flask pymongo to set up the connection to the database
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_data_db"
mongo = PyMongo(app)
# Route to render index.html template using data from Mongo
@app.route("/")
def index():
# Find one record of data from the mongo database
mars_data = mongo.db.marsData.find_one()
# Return template and data
return render_template("index.html", data = mars_data)
##print(mars_data)
# return "Flas data loaded success"
# Route that will trigger the scrape function and run app
@app.route("/scrape")
def scrape():
    # reference the marsData collection in the mongo database
marsTable = mongo.db.marsData
    # drop the collection if it exists
mongo.db.marsData.drop()
# Update the Mongo database using update and upsert=True
#mongo.db.collection.insert_one(mars_data)
# call scrape mars all scrape
mars_data = scrape_mars.scrape_all()
# take the dictionary and load into mongoDB
marsTable.insert_one(mars_data)
# go to the index route
return redirect("/")
if __name__ == "__main__":
app.run(debug=True)
```
|
{
"source": "jcvasquezc/DisVoice",
"score": 3
}
|
#### File: DisVoice/glottal/glottal.py
```python
from scipy.io.wavfile import read
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "Times New Roman"
import pysptk
try:
from .peakdetect import peakdetect
from .GCI import SE_VQ_varF0, IAIF, get_vq_params
except:
from peakdetect import peakdetect
from GCI import SE_VQ_varF0, IAIF, get_vq_params
PATH=os.path.dirname(os.path.abspath(__file__))
sys.path.append('../')
from utils import dynamic2static, save_dict_kaldimat, get_dict
from scipy.integrate import cumtrapz
from tqdm import tqdm
import pandas as pd
import torch
from script_mananger import script_manager
class Glottal:
"""
Compute features based on the glottal source reconstruction from sustained vowels and continuous speech.
For continuous speech, the features are computed over voiced segments
Nine descriptors are computed:
1. Variability of time between consecutive glottal closure instants (GCI)
2. Average opening quotient (OQ) for consecutive glottal cycles-> rate of opening phase duration / duration of glottal cycle
3. Variability of opening quotient (OQ) for consecutive glottal cycles-> rate of opening phase duration /duration of glottal cycle
4. Average normalized amplitude quotient (NAQ) for consecutive glottal cycles-> ratio of the amplitude quotient and the duration of the glottal cycle
5. Variability of normalized amplitude quotient (NAQ) for consecutive glottal cycles-> ratio of the amplitude quotient and the duration of the glottal cycle
6. Average H1H2: Difference between the first two harmonics of the glottal flow signal
7. Variability H1H2: Difference between the first two harmonics of the glottal flow signal
8. Average of Harmonic richness factor (HRF): ratio of the sum of the harmonics amplitude and the amplitude of the fundamental frequency
9. Variability of HRF
Static or dynamic matrices can be computed:
Static matrix is formed with 36 features formed with (9 descriptors) x (4 functionals: mean, std, skewness, kurtosis)
Dynamic matrix is formed with the 9 descriptors computed for frames of 200 ms length with a time-shift of 50 ms.
Notes:
1. The fundamental frequency is computed using the RAPT algorithm.
>>> python glottal.py <file_or_folder_audio> <file_features> <dynamic_or_static> <plots (true, false)> <format (csv, txt, npy, kaldi, torch)>
Examples command line:
>>> python glottal.py "../audios/001_a1_PCGITA.wav" "glottalfeaturesAst.txt" "static" "true" "txt"
>>> python glottal.py "../audios/098_u1_PCGITA.wav" "glottalfeaturesUst.csv" "static" "true" "csv"
>>> python glottal.py "../audios/098_u1_PCGITA.wav" "glottalfeaturesUst.ark" "dynamic" "true" "kaldi"
>>> python glottal.py "../audios/098_u1_PCGITA.wav" "glottalfeaturesUst.pt" "dynamic" "true" "torch"
Examples directly in Python
>>> from disvoice.glottal import Glottal
>>> glottal=Glottal()
>>> file_audio="../audios/001_a1_PCGITA.wav"
    >>> features=glottal.extract_features_file(file_audio, static=True, plots=True, fmt="npy")
    >>> features2=glottal.extract_features_file(file_audio, static=True, plots=True, fmt="dataframe")
    >>> features3=glottal.extract_features_file(file_audio, static=False, plots=True, fmt="torch")
>>> path_audios="../audios/"
    >>> features1=glottal.extract_features_path(path_audios, static=True, plots=False, fmt="npy")
    >>> features2=glottal.extract_features_path(path_audios, static=True, plots=False, fmt="torch")
    >>> features3=glottal.extract_features_path(path_audios, static=True, plots=False, fmt="dataframe")
"""
def __init__(self):
self.size_frame=0.2
self.size_step=0.05
self.head=["var GCI", "avg NAQ", "std NAQ", "avg QOQ", "std QOQ", "avg H1H2", "std H1H2", "avg HRF", "std HRF"]
def plot_glottal(self, data_audio,fs,GCI, glottal_flow, glottal_sig):
"""Plots of the glottal features
:param data_audio: speech signal.
:param fs: sampling frequency
:param GCI: glottal closure instants
:param glottal_flow: glottal flow
:param glottal_sig: reconstructed glottal signal
:returns: plots of the glottal features.
"""
fig, ax=plt.subplots(3, sharex=True)
t=np.arange(0, float(len(data_audio))/fs, 1.0/fs)
if len(t)>len(data_audio):
t=t[:len(data_audio)]
elif len(t)<len(data_audio):
data_audio=data_audio[:len(t)]
ax[0].plot(t, data_audio, 'k')
ax[0].set_ylabel('Amplitude', fontsize=12)
ax[0].set_xlim([0, t[-1]])
ax[0].grid(True)
ax[1].plot(t, glottal_sig, color='k', linewidth=2.0, label="Glottal flow signal")
amGCI=[glottal_sig[int(k-2)] for k in GCI]
GCI=GCI/fs
ax[1].plot(GCI, amGCI, 'bo', alpha=0.5, markersize=8, label="GCI")
GCId=np.diff(GCI)
ax[1].set_ylabel("Glottal flow", fontsize=12)
ax[1].text(t[2],-0.8, "Avg. time consecutive GCI:"+str(np.round(np.mean(GCId)*1000,2))+" ms")
ax[1].text(t[2],-1.05, "Std. time consecutive GCI:"+str(np.round(np.std(GCId)*1000,2))+" ms")
ax[1].set_xlabel('Time (s)', fontsize=12)
ax[1].set_xlim([0, t[-1]])
ax[1].set_ylim([-1.1, 1.1])
ax[1].grid(True)
ax[1].legend(ncol=2, loc=2)
ax[2].plot(t, glottal_flow, color='k', linewidth=2.0)
ax[2].set_ylabel("Glotal flow derivative", fontsize=12)
ax[2].set_xlabel('Time (s)', fontsize=12)
ax[2].set_xlim([0, t[-1]])
ax[2].grid(True)
plt.show()
def extract_glottal_signal(self, x, fs):
"""Extract the glottal flow and the glottal flow derivative signals
:param x: data from the speech signal.
:param fs: sampling frequency
:returns: glottal signal
:returns: derivative of the glottal signal
:returns: glottal closure instants
>>> from scipy.io.wavfile import read
>>> glottal=Glottal()
>>> file_audio="../audios/001_a1_PCGITA.wav"
>>> fs, data_audio=read(audio)
>>> glottal, g_iaif, GCIs=glottal.extract_glottal_signal(data_audio, fs)
"""
winlen=int(0.025*fs)
winshift=int(0.005*fs)
x=x-np.mean(x)
x=x/float(np.max(np.abs(x)))
GCIs=SE_VQ_varF0(x,fs)
g_iaif=np.zeros(len(x))
glottal=np.zeros(len(x))
if GCIs is None:
print("------------- warning -------------------, not enought voiced segments were found to compute GCI")
return glottal, g_iaif, GCIs
start=0
stop=int(start+winlen)
win = np.hanning(winlen)
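        # Frame-based inverse filtering: IAIF is run on 25 ms frames every 5 ms,
        # the glottal flow is obtained by cumulative-trapezoid integration of the
        # flow derivative, and the frames are Hann-windowed and overlap-added.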
while stop <= len(x):
x_frame=x[start:stop]
pGCIt=np.where((GCIs>start) & (GCIs<stop))[0]
GCIt=GCIs[pGCIt]-start
g_iaif_f=IAIF(x_frame,fs,GCIt)
glottal_f=cumtrapz(g_iaif_f, dx=1/fs)
glottal_f=np.hstack((glottal[start], glottal_f))
g_iaif[start:stop]=g_iaif[start:stop]+g_iaif_f*win
glottal[start:stop]=glottal[start:stop]+glottal_f*win
start=start+winshift
stop=start+winlen
g_iaif=g_iaif-np.mean(g_iaif)
g_iaif=g_iaif/max(abs(g_iaif))
glottal=glottal-np.mean(glottal)
glottal=glottal/max(abs(glottal))
glottal=glottal-np.mean(glottal)
glottal=glottal/max(abs(glottal))
return glottal, g_iaif, GCIs
def extract_features_file(self, audio, static=True, plots=False, fmt="npy", kaldi_file=""):
"""Extract the glottal features from an audio file
:param audio: .wav audio file.
:param static: whether to compute and return statistic functionals over the feature matrix, or return the feature matrix computed over frames
:param plots: timeshift to extract the features
:param fmt: format to return the features (npy, dataframe, torch, kaldi)
:param kaldi_file: file to store kaldi features, only valid when fmt=="kaldi"
:returns: features computed from the audio file.
>>> glottal=Glottal()
>>> file_audio="../audios/001_a1_PCGITA.wav"
>>> features1=glottal.extract_features_file(file_audio, static=True, plots=True, fmt="npy")
>>> features2=glottal.extract_features_file(file_audio, static=True, plots=True, fmt="dataframe")
>>> features3=glottal.extract_features_file(file_audio, static=False, plots=True, fmt="torch")
>>> glottal.extract_features_file(file_audio, static=False, plots=False, fmt="kaldi", kaldi_file="./test.ark")
"""
if audio.find('.wav')==-1 and audio.find('.WAV')==-1:
raise ValueError(audio+" is not a valid wav file")
fs, data_audio=read(audio)
data_audio=data_audio-np.mean(data_audio)
data_audio=data_audio/float(np.max(np.abs(data_audio)))
size_frameS=self.size_frame*float(fs)
size_stepS=self.size_step*float(fs)
overlap=size_stepS/size_frameS
nF=int((len(data_audio)/size_frameS/overlap))-1
data_audiof=np.asarray(data_audio*(2**15), dtype=np.float32)
f0=pysptk.sptk.rapt(data_audiof, fs, int(0.01*fs), min=20, max=500, voice_bias=-0.2, otype='f0')
sizef0=int(self.size_frame/0.01)
stepf0=int(self.size_step/0.01)
startf0=0
stopf0=sizef0
glottal, g_iaif, GCI=self.extract_glottal_signal(data_audio, fs)
if plots:
self.plot_glottal(data_audio,fs,GCI, g_iaif, glottal)
avgGCIt=np.zeros(nF)
varGCIt=np.zeros(nF)
avgNAQt=np.zeros(nF)
varNAQt=np.zeros(nF)
avgQOQt=np.zeros(nF)
varQOQt=np.zeros(nF)
avgH1H2t=np.zeros(nF)
varH1H2t=np.zeros(nF)
avgHRFt=np.zeros(nF)
varHRFt=np.zeros(nF)
rmwin=[]
for l in range(nF):
init=int(l*size_stepS)
endi=int(l*size_stepS+size_frameS)
gframe=glottal[init:endi]
dgframe=glottal[init:endi]
pGCIt=np.where((GCI>init) & (GCI<endi))[0]
gci_s=GCI[pGCIt]-init
f0_frame=f0[startf0:stopf0]
pf0framez=np.where(f0_frame!=0)[0]
f0nzframe=f0_frame[pf0framez]
if len(f0nzframe)<5:
startf0=startf0+stepf0
stopf0=stopf0+stepf0
rmwin.append(l)
continue
startf0=startf0+stepf0
stopf0=stopf0+stepf0
GCId=np.diff(gci_s)
avgGCIt[l]=np.mean(GCId/fs)
varGCIt[l]=np.std(GCId/fs)
NAQ, QOQ, T1, T2, H1H2, HRF=get_vq_params(gframe, dgframe, fs, gci_s)
avgNAQt[l]=np.mean(NAQ)
varNAQt[l]=np.std(NAQ)
avgQOQt[l]=np.mean(QOQ)
varQOQt[l]=np.std(QOQ)
avgH1H2t[l]=np.mean(H1H2)
varH1H2t[l]=np.std(H1H2)
avgHRFt[l]=np.mean(HRF)
varHRFt[l]=np.std(HRF)
if static and len(rmwin)>0:
varGCIt=np.delete(varGCIt,rmwin)
avgNAQt=np.delete(avgNAQt,rmwin)
varNAQt=np.delete(varNAQt,rmwin)
avgQOQt=np.delete(avgQOQt,rmwin)
varQOQt=np.delete(varQOQt,rmwin)
avgH1H2t=np.delete(avgH1H2t,rmwin)
varH1H2t=np.delete(varH1H2t,rmwin)
avgHRFt=np.delete(avgHRFt,rmwin)
varHRFt=np.delete(varHRFt,rmwin)
feat=np.stack((varGCIt, avgNAQt, varNAQt, avgQOQt, varQOQt, avgH1H2t, varH1H2t, avgHRFt, varHRFt), axis=1)
if fmt in("npy","txt"):
if static:
return dynamic2static(feat)
return feat
if fmt in("dataframe","csv"):
if static:
feat_st=dynamic2static(feat)
head_st=[]
df={}
for k in ["global avg", "global std", "global skewness", "global kurtosis"]:
for h in self.head:
head_st.append(k+" "+h)
for e, k in enumerate(head_st):
df[k]=[feat_st[e]]
return pd.DataFrame(df)
else:
df={}
for e, k in enumerate(self.head):
df[k]=feat[:,e]
return pd.DataFrame(df)
if fmt=="torch":
if static:
feat_s=dynamic2static(feat)
feat_t=torch.from_numpy(feat_s)
return feat_t
return torch.from_numpy(feat)
if fmt=="kaldi":
if static:
raise ValueError("Kaldi is only supported for dynamic features")
name_all=audio.split('/')
dictX={name_all[-1]:feat}
save_dict_kaldimat(dictX, kaldi_file)
def extract_features_path(self, path_audio, static=True, plots=False, fmt="npy", kaldi_file=""):
"""Extract the glottal features for audios inside a path
:param path_audio: directory with (.wav) audio files inside, sampled at 16 kHz
:param static: whether to compute and return statistic functionals over the feature matrix, or return the feature matrix computed over frames
        :param plots: whether to plot the glottal signals and features
:param fmt: format to return the features (npy, dataframe, torch, kaldi)
        :param kaldi_file: file to store kaldi features, only valid when fmt=="kaldi"
:returns: features computed from the audio file.
>>> glottal=Glottal()
>>> path_audio="../audios/"
>>> features1=glottal.extract_features_path(path_audio, static=True, plots=False, fmt="npy")
>>> features2=glottal.extract_features_path(path_audio, static=True, plots=False, fmt="csv")
>>> features3=glottal.extract_features_path(path_audio, static=False, plots=True, fmt="torch")
>>> glottal.extract_features_path(path_audio, static=False, plots=False, fmt="kaldi", kaldi_file="./test.ark")
"""
hf=os.listdir(path_audio)
hf.sort()
pbar=tqdm(range(len(hf)))
ids=[]
Features=[]
for j in pbar:
pbar.set_description("Processing %s" % hf[j])
audio_file=path_audio+hf[j]
feat=self.extract_features_file(audio_file, static=static, plots=plots, fmt="npy")
Features.append(feat)
if static:
ids.append(hf[j])
else:
ids.append(np.repeat(hf[j], feat.shape[0]))
Features=np.vstack(Features)
ids=np.hstack(ids)
if fmt in("npy","txt"):
return Features
if fmt in("dataframe","csv"):
if static:
head_st=[]
df={}
for k in ["global avg", "global std", "global skewness", "global kurtosis"]:
for h in self.head:
head_st.append(k+" "+h)
for e, k in enumerate(head_st):
df[k]=Features[:,e]
else:
df={}
for e, k in enumerate(self.head):
df[k]=Features[:,e]
df["id"]=ids
return pd.DataFrame(df)
if fmt=="torch":
return torch.from_numpy(Features)
if fmt=="kaldi":
if static:
raise ValueError("Kaldi is only supported for dynamic features")
dictX=get_dict(Features, ids)
save_dict_kaldimat(dictX, kaldi_file)
if __name__=="__main__":
if len(sys.argv)!=6:
print("python glottal.py <file_or_folder_audio> <file_features> <static (true, false)> <plots (true, false)> <format (csv, txt, npy, kaldi, torch)>")
sys.exit()
glottal_o=Glottal()
script_manager(sys.argv, glottal_o)
```
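The static path above collapses the 9-column frame matrix through `dynamic2static` and then labels the result with "global avg", "global std", "global skewness" and "global kurtosis" per feature. `dynamic2static` itself is imported from elsewhere in the package, so the following is only a minimal sketch of such a functional computation, written as an assumption that matches the column labels used above rather than the packaged implementation:

```python
import numpy as np
from scipy.stats import skew, kurtosis

def dynamic2static_sketch(feat):
    # feat: (n_frames, n_features) matrix, e.g. the 9 glottal descriptors per frame.
    # Returns the 4 global functionals per feature, ordered functional-first to
    # match the "global avg ... global kurtosis" header construction above.
    return np.hstack((np.mean(feat, axis=0),
                      np.std(feat, axis=0),
                      skew(feat, axis=0),
                      kurtosis(feat, axis=0)))

frames = np.random.rand(50, 9)
print(dynamic2static_sketch(frames).shape)  # (36,)
```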
|
{
"source": "JCVenterInstitute/NSForest",
"score": 3
}
|
#### File: JCVenterInstitute/NSForest/NSForest_v3.py
```python
def NS_Forest(adata, clusterLabelcolumnHeader = "louvain", rfTrees = 1000, Median_Expression_Level = 0, Genes_to_testing = 6, betaValue = 0.5):
#adata = scanpy object
#rfTrees = Number of trees
#Median_Expression_Level = median expression level for removing negative markers
    #Genes_to_testing = How many top genes ranked by binary score will be evaluated in permutations by fbeta-score (as the number increases, the number of permutations rises exponentially!)
    #betaValue = beta for F-beta weighting: 1 is the standard F-measure, values close to zero weight toward precision, values greater than 1 weight toward recall
#libraries
import numpy as np
import pandas as pd
import numexpr
import itertools
from subprocess import call
import scanpy as sc
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import graphviz
import time
# Functions
def randomForest(adata,dataDummy,column,rfTrees,threads): #Runs Random forest on the binary dummy variables; outputs all genes ranked Gini Index
x_train = adata.X
names = adata.var_names
y_train = dataDummy[column]
rf = RandomForestClassifier(n_estimators=rfTrees, n_jobs=threads, random_state=123456)
rf.fit(x_train, y_train)
Ranked_Features = sorted(zip([round(x, 8) for x in rf.feature_importances_], names),reverse=True)
return Ranked_Features
def rankInformative(Ranked_Features,column,rankedDict,howManyInformativeGenes2test): #subsets list according to howManyInformativeGenes2test parameter
RankedList = []
midcounter = 0
for x in Ranked_Features:
midcounter +=1
RankedList.append(x[1])
if midcounter==howManyInformativeGenes2test:
break
rankedDict[column] = RankedList
return RankedList
def negativeOut(x, column,medianValues,Median_Expression_Level): # Removes genes with median expression < Median_Expression_Level parameter
Positive_RankedList_Complete = []
for i in x:
if medianValues.loc[column, i] > Median_Expression_Level:
print(i)
print(medianValues.loc[column, i])
Positive_RankedList_Complete.append(i)
else:
print(i)
print(medianValues.loc[column, i])
print("Is Right Out!")
return Positive_RankedList_Complete
def binaryScore(Positive_RankedList_Complete, InformativeGenes, medianValues, column, clusters2Loop, Ranked_Features, Genes_to_testing,Binary_store_DF): # Takes top ranked positive genes (number according to Genes_to_testing) and computes Binary score for each gene
Positive_RankedList = list(Positive_RankedList_Complete[0:InformativeGenes])
Median_RF_Subset = medianValues.loc[:, Positive_RankedList]
Rescaled_Matrix = pd.DataFrame()
for i in Positive_RankedList:
Target_value = medianValues.loc[column, i]
Rescaled_values = Median_RF_Subset[[i]].divide(Target_value)
Rescaled_Matrix = pd.concat([Rescaled_Matrix,Rescaled_values],axis=1)
difference_matrix = Rescaled_Matrix.apply(lambda x: 1-x, axis=1)
difference_matrix_clean1 = difference_matrix.where(difference_matrix >= 0,other=0)
difference_matrix_clean = difference_matrix_clean1.where(difference_matrix > 0, 0)
ColumnSums = difference_matrix_clean.sum(0)
rescaled = ColumnSums/clusters2Loop
# Double sort so that for ties, the RF ranking prevails!
Ranked_Features_df = pd.DataFrame(Ranked_Features)
Ranked_Features_df.rename(columns={1: 'Symbol'}, inplace=True)
Ranked_Features_df_indexed = Ranked_Features_df.set_index("Symbol")
rescaled_df = pd.DataFrame(rescaled)
binaryAndinformation_Ranks = rescaled_df.join(Ranked_Features_df_indexed,lsuffix='_scaled', rsuffix='_informationGain')
binaryAndinformation_Ranks.sort_values(by=['0_scaled','0_informationGain'],ascending= [False, False], inplace = True)
Binary_ranked_Genes = binaryAndinformation_Ranks.index.tolist()
Binary_RankedList = list(Binary_ranked_Genes[0:Genes_to_testing])
Binary_scores = rescaled.to_dict()
Binary_store_DF = Binary_store_DF.append(binaryAndinformation_Ranks)
return Binary_RankedList,Binary_store_DF
def DT_cutOffs(x, column, dataDummy): # For each gene in the top binary gene, function finds optimal decision tree cutoff for F-beta testing
cut_dict = {}
for i in x:
filename = str(i)
y_train = dataDummy[column]
x_train = adata[:,i].X
X = x_train[:, None]
clf = tree.DecisionTreeClassifier(max_leaf_nodes=2)
clf = clf.fit(x_train, y_train)
threshold = clf.tree_.threshold
cut_dict[i] = threshold[0]
return cut_dict
def queryGenerator(Binary_RankedList, cut_dict): # Builds dict to create queries for F-beta testing
queryList = []
for i in Binary_RankedList:
str1 = i
current_value = cut_dict.get(str1)
queryString1 = str(str1.replace("-", "_").replace(".", "_"))+'>='+ str(current_value)
queryList.append(queryString1)
return queryList
def permutor(x): # creates all combinations of queries built above
binarylist2 = x
combs = []
for i in range(1, len(x)+1):
els = [list(x) for x in itertools.combinations(binarylist2, i)]
combs.extend(els)
return combs
def fbetaTest(x, column, adata, Binary_RankedList,testArray, betaValue): # uses queries to perform F-beta testing at the betaValue set in parameters
fbeta_dict = {}
subset_adata = adata[:,Binary_RankedList]
Subset_dataframe = pd.DataFrame(data = subset_adata.X, index = subset_adata.obs_names, columns = subset_adata.var_names)
Subset_dataframe.columns = Subset_dataframe.columns.str.replace("-", "_").str.replace(".", "_")
        for query in x:
            testArray['y_pred'] = 0
            betaQuery = '&'.join(query)
Ineq1 = Subset_dataframe.query(betaQuery)
testList = Ineq1.index.tolist()
testArray.loc[testList, 'y_pred'] = 1
f1 = fbeta_score(testArray['y_true'], testArray['y_pred'], average= 'binary', beta=betaValue)
tn, fp, fn, tp = confusion_matrix(testArray['y_true'], testArray['y_pred']).ravel()
### strip betaQuery and normalize
dictName = column+"&"+betaQuery.replace("_", "-")
fbeta_dict[dictName] = f1, tn, fp, fn, tp
return fbeta_dict
def ReportReturn(max_grouped_df): # Cleaning up results to return as dataframe
for column in max_grouped_df.columns[8:14]:
max_grouped_df[column] = max_grouped_df[column].str.replace('nan', '')
max_grouped_df["NSForest_Markers"] = max_grouped_df[max_grouped_df.columns[8:14]].values.tolist()
max_grouped_df = max_grouped_df[['clusterName',"f-measure",'markerCount','NSForest_Markers','True Positive','True Negative','False Positive','False Negative',1,2,3,4,5,6,"index"]]
for i in max_grouped_df.index:
cleanList = [string for string in max_grouped_df.loc[i,'NSForest_Markers'] if string != ""]
max_grouped_df.at[i, 'NSForest_Markers'] = cleanList
Results = max_grouped_df
return Results
#Parameters of interest
#Random Forest parameters
threads = -1 #Number of threads to use, -1 is the greedy option where it will take all available CPUs/RAM
#Filtering and ranking of genes from random forest parameters
howManyInformativeGenes2test = 15 #How many genes from the GiniRanking move on for further testing...
#How many top genes from the Random Forest ranked features will be evaluated for binariness
InformativeGenes = 10
#Main function#
#Creates dummy columns for one vs all Random Forest modeling
dataDummy = pd.get_dummies(adata.obs[clusterLabelcolumnHeader], columns=[clusterLabelcolumnHeader], prefix = "", prefix_sep = "")
#Creates matrix of cluster median expression values
medianValues = pd.DataFrame(columns=adata.var_names, index=adata.obs[clusterLabelcolumnHeader].cat.categories)
ClusterList = adata.obs[clusterLabelcolumnHeader].unique()
for clust in ClusterList: #adata.obs.Clusters.cat.categories:
subset_adata = adata[adata.obs[clusterLabelcolumnHeader].isin([clust]),:]
        Subset_dataframe = pd.DataFrame(data = subset_adata.X, index = subset_adata.obs_names, columns = subset_adata.var_names)
medianValues.loc[clust] = Subset_dataframe.median()
medianValues.to_csv('NSForest3_medianValues.csv')
##Use Mean
#for clust in adata.obs.Clusters.cat.categories:
#medianValues.loc[clust] = adata[adata.obs[clusterLabelcolumnHeader].isin([clust]),:].X.mean(0)
clusters2Loop = len(dataDummy.columns)-1
print (clusters2Loop)
#gives us the top ten features from RF
rankedDict = {}
f1_store_1D = {}
Binary_score_store_DF = pd.DataFrame()
DT_cutoffs_store = {}
for column in dataDummy.columns:
print(column)
Binary_store_DF = pd.DataFrame()
#Run Random Forest and get a ranked list
Ranked_Features = randomForest(adata, dataDummy, column, rfTrees, threads)
RankedList = rankInformative(Ranked_Features,column,rankedDict,howManyInformativeGenes2test)
#Setup testArray for f-beta evaluation
testArray = dataDummy[[column]]
testArray.columns = ['y_true']
#Rerank according to expression level and binary score
Positive_RankedList_Complete = negativeOut(RankedList, column, medianValues, Median_Expression_Level)
print(Positive_RankedList_Complete)
outputlist = binaryScore(Positive_RankedList_Complete, InformativeGenes, medianValues, column, clusters2Loop, Ranked_Features, Genes_to_testing,Binary_store_DF)
Binary_RankedList = outputlist[0]
Binary_score_store_DF_extra = outputlist[1].assign(clusterName = column)
Binary_score_store_DF = Binary_score_store_DF.append(Binary_score_store_DF_extra)
#Get expression cutoffs for f-beta testing
cut_dict = DT_cutOffs(Binary_RankedList, column, dataDummy)
DT_cutoffs_store[column] = cut_dict
#Generate expression queries and run those queries using fscore() function
queryInequalities = queryGenerator(Binary_RankedList, cut_dict)
FullpermutationList = permutor(queryInequalities)
print(len(FullpermutationList))
f1_store = fbetaTest(FullpermutationList, column, adata, Binary_RankedList, testArray, betaValue)
f1_store_1D.update(f1_store)
#Report generation and cleanup for file writeouts
f1_store_1D_df = pd.DataFrame() #F1 store gives all results.
f1_store_1D_df = pd.DataFrame.from_dict(f1_store_1D)
Results_df = f1_store_1D_df.transpose()
Results_df.columns = ["f-measure", "True Negative", "False Positive", "False Negative", "True Positive"]
Results_df['markerCount'] = Results_df.index.str.count('&')
Results_df.reset_index(level=Results_df.index.names, inplace=True)
Results_df_done= Results_df['index'].apply(lambda x: pd.Series(x.split('&')))
NSForest_Results_Table=Results_df.join(Results_df_done)
NSForest_Results_Table_Fin = pd.DataFrame()
NSForest_Results_Table_Fin = NSForest_Results_Table[NSForest_Results_Table.columns[0:8]]
for i, col in enumerate(NSForest_Results_Table.columns[8:15]):
splitResults = NSForest_Results_Table[col].astype(str).apply(lambda x: pd.Series(x.split('>=')))
firstOnly = splitResults[0]
Ascolumn = firstOnly.to_frame()
Ascolumn.columns = [col]
NSForest_Results_Table_Fin = NSForest_Results_Table_Fin.join(Ascolumn)
NSForest_Results_Table_Fin.rename(columns={0:'clusterName'},inplace=True) #rename columns by position
NSForest_Results_Table_Fin.sort_values(by=['clusterName','f-measure','markerCount'],ascending= [True, False, True], inplace = True)
print (NSForest_Results_Table_Fin)
time.perf_counter()
#Write outs
Binary_score_store_DF.to_csv('NS-Forest_v3_Extended_Binary_Markers_Supplmental.csv')
NSForest_Results_Table_Fin.to_csv('NS-Forest_v3_Full_Results.csv')
#Subset of full results
max_grouped = NSForest_Results_Table_Fin[NSForest_Results_Table_Fin.groupby("clusterName")["f-measure"].transform('max') == NSForest_Results_Table_Fin['f-measure']]
max_grouped_df = pd.DataFrame(max_grouped)
##Move binary genes to Results dataframe
clusters2Genes = pd.DataFrame(columns = ['Gene', 'clusterName'])
clusters2Genes["clusterName"] = Binary_score_store_DF["clusterName"]
clusters2Genes["Gene"] = Binary_score_store_DF.index
GroupedBinarylist = clusters2Genes.groupby('clusterName').apply(lambda x: x['Gene'].unique())
BinaryFinal = pd.DataFrame(columns = ['clusterName','Binary_Genes'])
BinaryFinal['clusterName'] = GroupedBinarylist.index
BinaryFinal['Binary_Genes'] = GroupedBinarylist.values
Results = ReportReturn(max_grouped_df)
#Results["NSForest_Markers"] = Results["NSForest_Markers"].apply(clean_alt_list)
Result = pd.merge(Results, BinaryFinal, on='clusterName')
Result.to_csv('NSForest_v3_Final_Result.csv')
ResultUnique = Result.drop_duplicates(subset=["clusterName"])
time.perf_counter()
return ResultUnique
```
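The `betaValue` comment above notes that values below 1 weight the F-beta score toward precision and values above 1 toward recall. A small self-contained illustration with `sklearn.metrics.fbeta_score` on toy labels (not NS-Forest output):

```python
from sklearn.metrics import fbeta_score

y_true = [1, 1, 1, 1, 0, 0, 0, 0]
y_pred = [1, 1, 0, 0, 1, 0, 0, 0]  # precision = 2/3, recall = 1/2

for beta in (0.5, 1.0, 2.0):
    print(beta, round(fbeta_score(y_true, y_pred, beta=beta), 3))
# beta=0.5 -> ~0.625 (precision-leaning), beta=1.0 -> ~0.571, beta=2.0 -> ~0.526 (recall-leaning)
```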
|
{
"source": "jcvera8/RCC",
"score": 3
}
|
#### File: RCC/tests/test_filters.py
```python
import unittest
from zkfarmer.utils import create_filter
class TestFilters(unittest.TestCase):
def test_simple_filter(self):
"""Check if a simple equality filter works."""
filter = create_filter("enable=1")
self.assertTrue(filter(dict(enable="1")))
self.assertFalse(filter(dict(enable="0")))
self.assertFalse(filter(dict(notenabled="1")))
self.assertFalse(filter(dict(notenabled="1", something="18")))
self.assertFalse(filter(dict(something="19", enable="0")))
self.assertTrue(filter(dict(something="19", enable="1")))
self.assertTrue(filter(dict(something="19", enable="1", somethingelse="1")))
def test_two_equalities(self):
"""Check if a filter with two equalities works."""
filter = create_filter("enable=1,maintainance=0")
self.assertTrue(filter(dict(enable="1", maintainance="0")))
self.assertTrue(filter(dict(enable="1", maintainance="0", somethingelse="43")))
self.assertFalse(filter(dict(enable="0", maintainance="0", somethingelse="43")))
self.assertFalse(filter(dict(enable="1", maintainance="1", somethingelse="43")))
self.assertFalse(filter(dict(enable="1", somethingelse="43")))
def test_existence(self):
"""Check if filters on existence work."""
filter = create_filter("enable=1,working")
self.assertTrue(filter(dict(enable="1",working="0")))
self.assertTrue(filter(dict(enable="1",working="1")))
self.assertTrue(filter(dict(enable="1",working="1",notworking="1")))
self.assertFalse(filter(dict(enable="0",working="1")))
self.assertFalse(filter(dict(enable="1",notworking="1")))
def test_inexistence(self):
"""Check if filters on inexistence work."""
filter = create_filter("enable=1,!working")
self.assertFalse(filter(dict(enable="1",working="0")))
self.assertFalse(filter(dict(enable="1",working="1")))
self.assertFalse(filter(dict(enable="1",working="1",notworking="1")))
self.assertFalse(filter(dict(enable="0",working="1")))
self.assertTrue(filter(dict(enable="1",notworking="1")))
self.assertFalse(filter(dict(enable="0",notworking="1")))
def test_inequalities(self):
"""Check if filters with inequalities work."""
filter = create_filter("enable=1,weight>20")
self.assertTrue(filter(dict(enable="1",weight="21")))
self.assertTrue(filter(dict(enable="1",weight="121")))
self.assertFalse(filter(dict(enable="1",weight="1")))
self.assertFalse(filter(dict(enable="1",weight="20")))
self.assertFalse(filter(dict(enable="0",weight="21")))
self.assertFalse(filter(dict(enable="1")))
filter = create_filter("enable=1,weight>=20")
self.assertTrue(filter(dict(enable="1",weight="20")))
self.assertFalse(filter(dict(enable="1",weight="19")))
self.assertTrue(filter(dict(enable="1",weight="21")))
filter = create_filter("enable=1,weight<=20")
self.assertTrue(filter(dict(enable="1",weight="20")))
self.assertTrue(filter(dict(enable="1",weight="19")))
self.assertFalse(filter(dict(enable="1",weight="21")))
filter = create_filter("enable=1,weight<20")
self.assertFalse(filter(dict(enable="1",weight="20")))
self.assertTrue(filter(dict(enable="1",weight="19")))
self.assertFalse(filter(dict(enable="1",weight="21")))
filter = create_filter("enable=1,weight!=20")
self.assertFalse(filter(dict(enable="1",weight="20")))
self.assertTrue(filter(dict(enable="1",weight="19")))
self.assertTrue(filter(dict(enable="1",weight="121")))
self.assertFalse(filter(dict(enable="1")))
def test_empty_filter(self):
"""Check if an empty filter works."""
filter = create_filter("")
# All is true
self.assertTrue(filter({1: 2}))
self.assertTrue(filter({}))
self.assertTrue(filter({3: 4}))
def test_nested_filter(self):
"""Check if a filter on nested elements works."""
filter = create_filter("enable=1,mysql.replication_delay<20")
self.assertTrue(filter(dict(enable="1", mysql=dict(replication_delay="10"))))
self.assertFalse(filter(dict(enable="1", mysql=dict(replication_delay="30"))))
self.assertFalse(filter(dict(enable="0", mysql=dict(replication_delay="10"))))
self.assertFalse(filter(dict(enable="1")))
if __name__ == '__main__':
unittest.main()
```
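These tests pin down the full filter grammar: comma-separated clauses, numeric comparisons (`=`, `!=`, `<`, `<=`, `>`, `>=`), bare keys for existence, `!key` for non-existence, dotted paths for nested dicts, and an empty spec matching everything. The real `zkfarmer.utils.create_filter` is not included in this snippet, so the following is only a sketch that satisfies the tests above, not the project's implementation:

```python
import operator

def create_filter_sketch(spec):
    """Sketch of a predicate factory matching the test suite above.
    Clauses are comma-separated; values are compared numerically."""
    ops = {"!=": operator.ne, ">=": operator.ge, "<=": operator.le,
           ">": operator.gt, "<": operator.lt, "=": operator.eq}

    def lookup(data, dotted):
        # walk dotted keys through nested dicts; None means "absent"
        for part in dotted.split("."):
            if not isinstance(data, dict) or part not in data:
                return None
            data = data[part]
        return data

    clauses = [c for c in spec.split(",") if c]

    def predicate(data):
        for clause in clauses:
            for token, op in ops.items():
                if token in clause:
                    key, expected = clause.split(token, 1)
                    value = lookup(data, key)
                    if value is None or not op(float(value), float(expected)):
                        return False
                    break
            else:  # no operator: existence check, or "!" for inexistence
                if clause.startswith("!"):
                    if lookup(data, clause[1:]) is not None:
                        return False
                elif lookup(data, clause) is None:
                    return False
        return True

    return predicate
```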
|
{
"source": "jcvernaleo/PyGnuplot",
"score": 2
}
|
#### File: jcvernaleo/PyGnuplot/PyGnuplot.py
```python
from subprocess import Popen as _Popen, PIPE as _PIPE
default_term = 'x11' # change this if you use a different terminal
class _FigureList(object):
def __init__(self):
        proc = _Popen(['gnuplot', '-p'], shell=False, stdin=_PIPE, universal_newlines=True) # persistent -p
self.instance = {0 : [proc, default_term]} # {figure number : [process, terminal type]}
self.n = 0 # currently selected Figure
# Format:
# instance[self.n][0] = process
# instance[self.n][1] = terminal
def figure(number=None):
    '''Make Gnuplot plot in a new window or update an existing one: figure(number=None)
>>> figure(2) # would create or update figure 2
>>> figure() # simply creates a new figure
returns the new figure number
'''
if not isinstance(number, int): # create new figure if no number was given
number = max(fl.instance) + 1
if number not in fl.instance: # number is new
proc = _Popen(['gnuplot', '-p'], shell=False, stdin=_PIPE, universal_newlines=True)
fl.instance[number] = [proc, default_term]
fl.n = number
c('set term ' + str(fl.instance[fl.n][1]) + ' ' + str(fl.n))
return number
def c(command):
'''
Send command to gnuplot
>>> c('plot sin(x)')
    >>> c('plot "tmp.dat" u 1:2 w lp')
'''
proc = fl.instance[fl.n][0] # this is where the process is
proc.stdin.write(command + '\n') # \n 'send return in python 2.7'
proc.stdin.flush() # send the command in python 3.4+
def s(data, filename='tmp.dat'):
'''
    saves number arrays and text into filename (default = 'tmp.dat')
(assumes equal sizes and 2D data sets)
>>> s(data, filename='tmp.dat') # overwrites/creates tmp.dat
'''
file = open(filename, 'w')
columns = len(data)
rows = len(data[0])
for j in range(rows):
for i in range(columns):
file.write(str(data[i][j]))
file.write(' ')
file.write('\n')
if j % 1000 == 0 :
file.flush() # write once after every 1000 entries
file.close() # write the rest
def plot(data, filename='tmp.dat'):
''' Save data into filename (default = 'tmp.dat') and send plot instructions to Gnuplot'''
s(data, filename)
c('plot "' + filename + '" w lp')
def p(filename='tmp.ps', width=14, height=9, fontsize=12, term=default_term):
'''Script to make gnuplot print into a postscript file
>>> p(filename='myfigure.ps') # overwrites/creates myfigure.ps
'''
c('set term postscript size ' + str(width) + 'cm, ' + str(height) + 'cm color solid ' +
str(fontsize) + " font 'Calibri';")
c('set out "' + filename + '";')
c('replot;')
c('set term ' + str(term) + '; replot')
def pdf(filename='tmp.pdf', width=14, height=9, fontsize=12, term=default_term):
'''Script to make gnuplot print into a pdf file
>>> pdf(filename='myfigure.pdf') # overwrites/creates myfigure.pdf
'''
c('set term pdf enhanced size ' + str(width) + 'cm, ' + str(height) + 'cm color solid fsize ' +
str(fontsize) + " fname 'Helvetica';")
c('set out "' + filename + '";')
c('replot;')
c('set term ' + str(term) + '; replot')
fl = _FigureList()
```
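The docstrings above outline the intended workflow; a minimal end-to-end usage sketch, assuming a `gnuplot` binary is on the PATH (the data values are arbitrary):

```python
import numpy as np
import PyGnuplot as gp

x = np.arange(100) / 10.0
y = np.sin(x)
gp.s([x, y], filename='tmp.dat')                      # write two columns into tmp.dat
gp.c('plot "tmp.dat" u 1:2 w lp title "sin(x)"')      # plot in the current figure
gp.pdf(filename='sin.pdf')                            # re-plot into sin.pdf, then restore the terminal
```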
|
{
"source": "jcvicelli/devops",
"score": 3
}
|
#### File: jcvicelli/devops/curl-monitoring.py
```python
import requests
import sys
import os
from slack import WebClient
from slack.errors import SlackApiError
def main():
print("Starting...")
filepath = 'sites.list'
with open(filepath) as fp:
for cnt, line in enumerate(fp):
try:
response = requests.head(line.rstrip())
if (response.status_code == requests.codes.ok):
print("Check {}: {} OK!".format(cnt, line.rstrip()))
else:
print("Check {}: {} ERROR {}".format(
cnt, line.rstrip(), response.status_code))
slack_message_send(line.rstrip())
            except Exception:
print("Unexpected error:", sys.exc_info()[0], line.rstrip())
slack_message_send(line.rstrip())
def slack_message_send(url_message):
slack_token = os.environ["SLACK_API_TOKEN"]
client = WebClient(token=slack_token)
try:
slack_response = client.chat_postMessage(
channel="#monitoring",
text=url_message + " is offline! :fire:"
)
except SlackApiError as e:
# You will get a SlackApiError if "ok" is False
# str like 'invalid_auth', 'channel_not_found'
        assert e.response["error"]
if __name__ == '__main__':
main()
```
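The script reads URLs from a `sites.list` file (one per line) and takes the Slack token from the `SLACK_API_TOKEN` environment variable. A minimal setup sketch with placeholder values (the URLs and token below are not real):

```python
import os

# one URL per line; placeholders, not real monitored endpoints
with open("sites.list", "w") as fp:
    fp.write("https://example.com\n")
    fp.write("https://example.org/health\n")

os.environ["SLACK_API_TOKEN"] = "xoxb-placeholder"  # supply a real bot token at runtime, never commit it
```

Running `python curl-monitoring.py` then HEAD-requests each URL and posts any failure to the `#monitoring` channel.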
|
{
"source": "jcw780/gameParamsExtract",
"score": 3
}
|
#### File: gameParamsExtract/src/extractTurning.py
```python
import json, argparse
from gpToDict import gpToDict, makeEntities, getComponentData
from utility import writeToFile
import extractGM
from matplotlib import pyplot as plt
import numpy as np
'''
For extracting and packaging hull and engine information (turning/acceleration analysis)
'''
def getHullData(entityTypes: dict, locale={}):
shipHullData = getComponentData(entityTypes, 'hull')
dragDeceleration = []
speeds = []
lBR = []
for shipName, hulls in shipHullData.items():
for hull, hullData in hulls.items():
#print(shipName, hull)
speed = hullData['maxSpeed'] * 0.514444 * hullData['speedCoef'] #To m/s
power = hullData['enginePower'] * 735.49875 #To Watts
mass = hullData['mass']
draft = hullData['draft']
size = hullData['size']
lengthBeamRatio = size[1] * draft * size[0] ** 0.5
deceleration = power / speed
#if shipName[3] == 'C':
if shipName[0:4] == 'PASC':
#if True:
print(shipName, deceleration, speed, lengthBeamRatio)
dragDeceleration.append(deceleration)
speeds.append(speed)
lBR.append(lengthBeamRatio)
lShipName = shipName
searchName = F'IDS_{shipName.split("_")[0]}'
if searchName in locale:
lShipName = locale[searchName]
plt.annotate(lShipName, (speed, deceleration))
#print(speed, power, mass, deceleration)
plt.scatter(np.array(speeds), np.array(dragDeceleration), c=np.array(lBR))
plt.show()
return shipHullData
def getEngineData(entityTypes: dict):
shipEngineData = getComponentData(entityTypes, 'engine')
for shipName, engines in shipEngineData.items():
for engine, engineData in engines.items():
print(shipName, engine)
print('backwardForsage: Multiplier: ', engineData['backwardEngineForsag'], 'Speed: ', engineData['backwardEngineForsagMaxSpeed'])
print('forwardForsage: Multiplier: ', engineData['forwardEngineForsag'], 'Speed: ', engineData['forwardEngineForsagMaxSpeed'])
pass
return shipEngineData
def run(gpData: object, accuracy=True, locale={}):
entityTypes = makeEntities(gpData)
#hullComponents = getHullData(entityTypes, locale=locale)
engineComponents = getEngineData(entityTypes)
#print(hullComponents)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("inDirectory", type=str, help="Input directory")
parser.add_argument("outDirectory", type=str, help="Output directory")
parser.add_argument("-l", "--locale", type=str, help="Localization Directory")
parser.add_argument("-o", "--output", type=str, help="Output file name")
parser.add_argument("--readable", help="Readable Output", action="store_true")
args = parser.parse_args()
outputName = 'hull.json'
if args.output:
outputName = args.output
data, fileHash = gpToDict(F'{args.inDirectory}/GameParams.data')
lData = {}
if locale := args.locale:
lData = extractGM.run(F'{locale}/global.mo')
run(data, locale=lData)
'''
if args.readable:
writeToFile(
run(data, locale=lData),
F'{args.outDirectory}/{outputName}',
indent=4, sort_keys=True
)
else:
writeToFile(
run(data, locale=lData),
F'{args.outDirectory}/{outputName}',
sort_keys=True
)
'''
```
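The deceleration proxy in `getHullData` converts knots to m/s (factor 0.514444) and metric horsepower to watts (factor 735.49875), then divides power by top speed; since P = F·v, this is the drag force at top speed. A worked example with made-up hull values (not taken from GameParams):

```python
max_speed_knots = 30.0
speed_coef = 1.0
engine_power_hp = 100000.0

speed = max_speed_knots * 0.514444 * speed_coef  # ~15.43 m/s
power = engine_power_hp * 735.49875              # ~7.35e7 W
drag_force = power / speed                       # ~4.77e6 N of drag at top speed (P = F*v)
print(round(speed, 2), round(drag_force))
```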
#### File: gameParamsExtract/src/extractVertical.py
```python
import argparse, operator
from collections import defaultdict
from gpToDict import gpToDict, makeEntities
from utility import readFromFile
def run(target):
fileType = target.split('.')[-1]
if fileType == 'data':
entities = makeEntities(gpToDict(target)[0])
elif fileType == 'json':
entities = makeEntities(readFromFile(target))
else:
raise NotImplementedError
turretTargets = ['radiusOnDelim', 'radiusOnMax', 'radiusOnZero', 'delim', 'idealRadius', 'minRadius']
artilleryTargets = ['taperDist']
radiusShips = defaultdict(list)
for shipName, shipData in entities['Ship'].items():
componentSet = set()
upgrades = shipData['ShipUpgradeInfo']
for name, data in upgrades.items():
if type(data) == dict:
components = data['components']
if 'artillery' in components:
tgtComponents = components['artillery']
#print(name, components['artillery'])
componentSet |= set(tgtComponents)
#print(shipName, componentSet)
#data = {'delim': set(), 'max': set(), 'zero': set()}
data = defaultdict(set)
for artilleryName in componentSet:
artillery = shipData[artilleryName]
for pTurret, pTurretData in artillery.items():
if type(pTurretData) == dict and 'typeinfo' in pTurretData:
typeinfo = pTurretData['typeinfo']
if typeinfo['species'] == 'Main' and typeinfo['type'] == 'Gun':
for target in turretTargets:
data[target].add(pTurretData[target])
for target in artilleryTargets:
data[target].add(artillery[target])
#print(data)
try:
dataTuple = tuple([data[target].pop() for target in (turretTargets + artilleryTargets)])
radiusShips[dataTuple].append(shipName)
except:
pass
sortedKeys = list(radiusShips.keys())
sortedKeys.sort(key=operator.itemgetter(slice(0, -1)))
for disp in sortedKeys:
ships = radiusShips[disp]
outstr = ''
for i, items in enumerate(turretTargets):
outstr = F'{outstr}{items}: {disp[i]} '
tLen = len(turretTargets)
for i, items in enumerate(artilleryTargets):
outstr = F'{outstr}{items}: {disp[i + tLen]} '
print(outstr)
print()
temp = ''
for i, ship in enumerate(ships):
temp = F'{temp}{ship} '
if(i % 3 == 2):
print(temp)
temp = ''
if temp != '':
print(temp)
print()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("inDirectory", type=str, help="Input directory")
#parser.add_argument("outDirectory", type=str, help="Output directory")
#parser.add_argument("-o", "--output", type=str, help="Output file name")
args = parser.parse_args()
run(args.inDirectory)
```
|
{
"source": "jcw780/wows_shell",
"score": 2
}
|
#### File: Python/examples/regression.py
```python
from wows_shell import *
import numpy as np
from sklearn.linear_model import LinearRegression
ships = [
(.100, 1000, .3137, 13, 2154, 10., .010, 17, 45, 60.0, 0, "Akizuki"),
#(.130, 861, .291, 33.5, 1652, 10, .03, 22, 45, 60.0, 0, "Okhotnik"),
(.130, 870, .2857, 33.50, 1700, 10., .010, 22, 45, 60.0, 0, "Leningrad"),
(.128, 830, .3447, 28, 1640, 10., .010, 21, 45, 60.0, 0, "Maass"),
    (.203, 853, .3210, 118.0, 2846, 7.0, .033, 34, 60, 67.5, 0, "New Orleans"),
(.152, 950, .3210, 55.00, 2216, 8.5, .025, 25, 45, 60.0, 0, "Budyonny"),
(.220, 985, .2549, 176.0, 2590, 7.0, .033, 37, 45, 60.0, 0, "Moskva"),
(.150, 960, .3307, 45.50, 1862, 8.5, .025, 25, 45, 60.0, 0, "Nurnberg"),
(.283, 910, .3333, 300.0, 2282, 6.0, .010, 47, 45, 60.0, 0, "<NAME>"),
(.152, 841, .3297, 50.8, 2609, 8.5, 0.005, 12, 60, 75.0, 0, "Edinburgh"),
(.152, 1000, .3256, 50, 2142, 8.5, 0.025, 25, 45, 60.0, 0, "Duca d'Aosta"),
(.356, 792, .332, 680.4, 2604, 6, 0.033, 59, 45, 60.0, 0, "Arizona"),
(.406, 701, .352, 1225, 2598, 6, .033, 68, 45, 60.0, 0, "North Carolina"),
(.283, 890, .2827, 330, 2312, 6, 0.01, 47, 45, 60.0, 0, "Scharnhorst"),
    (.42, 800, .2994, 1220, 2415, 6, .033, 70, 45, 60.0, 0, "Grosser Kurfurst 420"),
(.381, 731.5, .3379, 879, 2190, 6, 0.033, 64, 45, 60.0, 0, "Hood"),
#(.356, 757, .3142, 721, 2295, 6, .015, 59, 45, 60.0, 0, "King George V"),
(.457, 762, .2897, 1506, 2485, 6, .033, 76, 45, 60.0, 0, "Thunderer"),
(.33, 870, .2801, 560, 2428, 6, .033, 55, 45, 60.0, 0, "Dunkerque"),
(.305, 762, .4595, 470.9, 2627, 6, 0.01, 51,
45, 60.0, 0, "Oktyabrskaya Revolutsiya"),
#(.32, 830, .4098, 525, 2600, 6, 0.033, 53, 45, 60.0, 0, "<NAME>"),
(.356, 792, .31, 635, 1603, 6, 0.033, 59, 45, 60, 0, "New York")
]
referenceDistancePenetration = {
"Akizuki": {5000: 91, 10000: 47},
"Okhotnik": {5000: 86, 10000: 46},
"Leningrad": {5000: 119, 10000: 74, 15000: 47},
"Maass": {5000: 80, 10000: 41},
"New Orleans": {5000: 321, 10000: 221, 15000: 154},
"Budyonny": {5000: 215, 10000: 138, 15000: 91},
"Moskva": {5000: 496, 10000: 392, 15000: 314},
"Nurnberg": {5000: 148, 10000: 86, 15000: 52},
"<NAME>": {5000: 405, 10000: 301, 15000: 227},
"Edinburgh": {5000: 193, 10000: 118, 15000: 73},
"Duca d'Aosta": {5000: 197, 10000: 121, 15000: 76},
"Arizona": {5000: 575, 10000: 467, 15000: 381},
"North Carolina": {5000: 662, 10000: 563, 15000: 479},
"Scharnhorst": {5000: 455, 10000: 363, 15000: 289},
"Grosser Kurfurst 420": {5000: 722, 10000: 624, 15000: 539},
"Hood": {5000: 491, 10000: 406, 15000: 337},
"King George V": {5000: 541, 10000: 385, 15000: 326},
"Thunderer": {5000: 742, 10000: 646, 15000: 564},
"Dunkerque": {5000: 594, 10000: 495, 15000: 414},
"Oktyabrskaya Revolutsiya": {5000: 458, 10000: 338, 15000: 251},
"<NAME>": {5000: 439, 10000: 416, 15000: 320},
"New York": {5000: 337, 15000: 224, 18000: 204},
# {5000: , 10000:, 15000: },
}
angles = []
velocities = []
diameter = []
mass = []
krupp = []
referenceData = []
def normalization(angle, normalization):
return max(angle - normalization, 0)
c = shellCalc()
c.setDtMin(.01)
for i, ship in enumerate(ships):
print(ship[-1])
s = shell(shellParams(*(ship[:-1])), ship[-1])
c.calcImpactForwardEuler(s)
normal = ship[5]
shipRef = referenceDistancePenetration[ship[-1]]
for dist, penetration in shipRef.items():
referenceData.append(penetration)
adjAngle = normalization(s.interpolateDistanceImpact(
dist, int(impactIndices.impactAngleHorizontalDegrees)), normal)
angles.append(adjAngle)
impactVelocity = s.interpolateDistanceImpact(
dist, int(impactIndices.impactVelocity))
velocities.append(impactVelocity)
print(impactVelocity, adjAngle)
diameter.append(ship[0])
mass.append(ship[3])
krupp.append(ship[4] / 2400)
referenceData = np.array(referenceData)
angles = np.array(angles)
velocities = np.array(velocities)
diameter = np.array(diameter)
mass = np.array(mass)
krupp = np.array(krupp)
def regressV():
Y = referenceData / np.cos(np.radians(angles)) / krupp / \
np.power(mass, 0.5506) / np.power(diameter, -0.6521)
lY = np.log(Y)
lV = np.log(velocities)
Xstacked = np.vstack((lV))
X = Xstacked
reg = LinearRegression().fit(X, lY)
print(F'R²: {reg.score(X, lY)}')
regCoeffs = reg.coef_
regIntercept = reg.intercept_
regInterceptE = np.exp(regIntercept)
print(
F'Penetration = {regInterceptE} * V^{regCoeffs[0]} * D^-0.6521 * M^0.5506')
return regInterceptE * np.cos(np.radians(angles)) * (
np.power(velocities, regCoeffs[0]) *
np.power(diameter, -0.6521) *
np.power(mass, 0.5506) *
np.power(krupp, 1)
)
def regressVDM():
Y = referenceData / np.cos(np.radians(angles)) / krupp
lY = np.log(Y)
lV = np.log(velocities)
lD = np.log(diameter)
lM = np.log(mass)
lK = np.log(krupp)
Xstacked = np.vstack((lV, lD, lM))
X = Xstacked.T
reg = LinearRegression().fit(X, lY)
print(F'R²: {reg.score(X, lY)}')
regCoeffs = reg.coef_
regIntercept = reg.intercept_
regInterceptE = np.exp(regIntercept)
print(
F'Penetration = {regInterceptE} * V^{regCoeffs[0]} * D^{regCoeffs[1]} * M^{regCoeffs[2]}')
return regInterceptE * np.cos(np.radians(angles)) * (
np.power(velocities, regCoeffs[0]) *
np.power(diameter, regCoeffs[1]) *
np.power(mass, regCoeffs[2]) *
np.power(krupp, 1)
)
def regressVDMK():
Y = referenceData / np.cos(np.radians(angles))
lY = np.log(Y)
lV = np.log(velocities)
lD = np.log(diameter)
lM = np.log(mass)
lK = np.log(krupp)
Xstacked = np.vstack((lV, lD, lM, lK))
X = Xstacked.T
reg = LinearRegression().fit(X, lY)
print(F'R²: {reg.score(X, lY)}')
regCoeffs = reg.coef_
regIntercept = reg.intercept_
regInterceptE = np.exp(regIntercept)
print(
F'Penetration = {regInterceptE} * V^{regCoeffs[0]} * D^{regCoeffs[1]} * M^{regCoeffs[2]} * (K/2400)^{regCoeffs[3]}')
return regInterceptE * np.cos(np.radians(angles)) * (
np.power(velocities, regCoeffs[0]) *
np.power(diameter, regCoeffs[1]) *
np.power(mass, regCoeffs[2]) *
np.power(krupp, regCoeffs[3])
)
predictions = regressV()
current = 0
for ship in ships:
row = F'{ship[-1]}: '
for i in range(len(referenceDistancePenetration[ship[-1]])):
pCurr = predictions[current]
row = F'{row} {pCurr} diff: {pCurr - referenceData[current]}; '
current += 1
print(row)
```
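Each regression above fits a power law of the form printed by `regressVDMK`, applied to the angle-adjusted penetration. To apply a fitted model to a new shell, the printed coefficients would be plugged into something like the sketch below; the coefficient names are placeholders for whatever the regression prints, not fixed values:

```python
import numpy as np

def predicted_penetration(C, a, b, c, d, velocity, diameter, mass, krupp, impact_deg, normal_deg):
    # P = C * cos(adjusted angle) * V^a * D^b * M^c * (K/2400)^d,
    # with the impact angle reduced by the shell's normalization as in normalization() above.
    adjusted = max(impact_deg - normal_deg, 0.0)
    return (C * np.cos(np.radians(adjusted))
            * velocity ** a * diameter ** b * mass ** c * (krupp / 2400) ** d)
```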
|
{
"source": "jcw833/Geographic_App_Stanford",
"score": 2
}
|
#### File: jcw833/Geographic_App_Stanford/app.py
```python
import dash
import dash_html_components as html
import dash_core_components as dcc
import plotly.graph_objects as go
import pandas as pd
import plotly.express as px
from numpy.polynomial.polynomial import polyfit
from dash.dependencies import Input, Output, State
import plotly.figure_factory as ff
import numpy as np
import matplotlib.pyplot as plt
import pathlib
import os
# Load data
df = pd.read_csv(
'appData.csv')
df = df[0:50]
# df2 = pd.read_csv(
# 'appdata2.csv')
# Load data
# df_lat_lon = pd.read_csv("CountyData.csv", encoding='cp1252')
# Load data
df_unemp = pd.read_csv('UnemploymentStats.csv')
df_unemp['State FIPS Code'] = df_unemp['State FIPS Code'].apply(lambda x: str(x).zfill(2))
df_unemp['County FIPS Code'] = df_unemp['County FIPS Code'].apply(lambda x: str(x).zfill(3))
df_unemp['FIPS'] = df_unemp['State FIPS Code'] + df_unemp['County FIPS Code']
# Initialize app
app = dash.Dash(
__name__,
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1.0"}
],
)
server = app.server
# server = flask.Flask(__name__)
# app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP], server=server)
colorscale = [
"#f2fffb",
"#bbffeb",
"#98ffe0",
"#79ffd6",
"#6df0c8",
"#69e7c0",
"#59dab2",
"#45d0a5",
"#31c194",
"#2bb489",
"#25a27b",
"#1e906d",
"#188463",
"#157658",
"#11684d",
"#10523e",
]
mapbox_access_token = "<KEY>"
mapbox_style = "mapbox://styles/plotlymapbox/cjvprkf3t1kns1cqjxuxmwixz"
# Creates a list of dictionaries
# map_vals = ['2015 GDP per capita','2016 GDP per capita','2017 GDP per capita','2018 GDP per capita','2019 GDP per capita', '2020 Population', '2018 Population', 'Log 2020 Population', 'Log 2018 Population', 'Number of Universites Per State', 'Number of Universites Per State (No CA)']
# def get_map_options(map_vals):
# map_options = []
# for i in map_vals:
# map_options.append({'label': i, 'value': i})
# return map_options
# x_axis = ['2019 Project Count', '2018 Project Count', '2017 Project Count', 'Outliers Removed (NY, CA, TX, WA) Project Count 2019', '2018-2019 Change in Project Count (%)', '2017-2018 Change in Project Count (%)']
x_axis = ['2019 GDP per capita', '2018 GDP per capita', '2017 GDP per capita', '2016 GDP per capita', '2015 GDP per capita']
def get_xax_options(x_axis):
x_axis_options = []
for i in x_axis:
x_axis_options.append({'label': i, 'value': i})
return x_axis_options
# y_axis = ['Outliers Removed (NY, CA, TX, WA) USA GDP 2019', '2018-2019 Change in GDP (%)', '2017-2018 Change in GDP (%)', 'Number of Universites Per State', 'Number of Universites Per State (No CA)', '2020 Population', '2018 Population', 'Log 2020 Population', 'Log 2018 Population'
y_axis = ['2020 Population', '2018 Population', '2010 Population', 'Log 2020 Population', 'Log 2018 Population', 'Log 2010 Population']
def get_yax_options(y_axis):
y_axis_options = []
for i in y_axis:
y_axis_options.append({'label': i, 'value': i})
return y_axis_options
YEARS = [2015, 2016, 2017, 2018, 2019]
#######################################################################################
# App layout
app.layout = html.Div(
id="root",
children=[
html.Div(
id="header",
children=[
html.H4(children="USA Geographic Analysis Application"),
html.P(
id="description",
children="† Graph Geographic Data Below:",
),
],
),
html.Div(
id="app-container",
children=[
html.Div(
id="left-column",
children=[
html.Div(
id="mapdropdown-container",
children=[
dcc.Dropdown(
id='Mapselect',
options=[
{'label': 'State GDP', 'value': 'GDP'},
{'label': 'County Population', 'value': 'POP'},
{'label': 'County Unemployment', 'value': 'Unemployment'},
],
value=['Unemployment'],
multi=False,
className='MapSelector',
style={'color': '#1E1E1E'}),
dcc.Checklist(
id="checkbox",
options=[
{'label': 'State GDP', 'value': 'GDP'},
{'label': 'County Population', 'value': 'POP'},
{'label': 'County Unemployment', 'value': 'Unemployment'},
],
value=['Unemployment']
)]),
html.Div(
id="slider-container",
children=[
html.P(
id="slider-text",
children="Drag the slider to adjust year:",
),
dcc.Slider(
id="years-slider",
min=min(YEARS),
max=max(YEARS),
value=min(YEARS),
marks={
str(year): {
"label": str(year),
"style": {"color": "#7fafdf"},
}
for year in YEARS
},
),
],
),
html.Div(
id="heatmap-container",
children=[
html.P(
"Data Visualization".format(
min(YEARS)
),
id="heatmap-title",
),
dcc.Graph(
id="county-choropleth",
figure=dict(
layout=dict(
mapbox=dict(
layers=[],
accesstoken=mapbox_access_token,
style=mapbox_style,
center=dict(
lat=38.72490, lon=-95.61446
),
pitch=0,
zoom=3.5,
),
autosize=True,
),
),
),
],
),
],
),
html.Div(
id="graph-container",
children=[
html.P(id="chart-selector", children="Select Plot:"),
html.P('X-Axis for Scatterplot'),
html.Div(className='X-Axis',
children=[
dcc.Dropdown(id='X-Axis Select',
options=get_xax_options(x_axis),
multi=False,
value=[x_axis[0]],
style={'backgroundColor': '#1E1E1E'},
className='ScatterSelector')
],
style={'color': '#1E1E1E'}),
html.P('Y-Axis for Scatterplot'),
dcc.Dropdown(id='Y-Axis Select',
options=get_yax_options(y_axis),
multi=False,
value=[y_axis[0]],
style={'backgroundColor': '#1E1E1E'},
className='ScatterSelector'),
dcc.Graph(
id="Scatterplot",
figure=dict(
data=[dict(x=0, y=0)],
layout=dict(
paper_bgcolor="#1E1E1E",
plot_bgcolor="#1E1E1E",
autofill=True,
margin=dict(t=75, r=50, b=100, l=50),
),
),
),
],
),
],
),
])
###################################
@app.callback(
Output("county-choropleth", "figure"),
[Input("years-slider", "value")],
[Input("checkbox", "value")],
[State("county-choropleth", "figure")],
)
def display_map(year, checkbox, figure):
if checkbox == ['Unemployment']:
print(checkbox)
print(year)
endpts = list(np.linspace(1, 12, len(colorscale) - 1))
fips = df_unemp['FIPS'].tolist()
values = df_unemp['Unemployment Rate '+str(year)+' (%)'].tolist()
fig = ff.create_choropleth(
fips=fips, values=values, scope=['usa'],
binning_endpoints=endpts, colorscale=colorscale,
show_state_data=False,
show_hover=True,
asp = 2.9,
legend_title = '% unemployed '+str(year)
)
fig.layout.paper_bgcolor="#252b33"
fig.layout.plot_bgcolor="#252b33"
return fig
elif checkbox == ['POP']:
print(checkbox)
print(year)
endpts = list(np.linspace(10000, 1000000, len(colorscale) - 1))
fips = df_unemp['FIPS'].tolist()
values = df_unemp['Pop '+str(year)].tolist()
fig = ff.create_choropleth(
fips=fips, values=values, scope=['usa'],
binning_endpoints=endpts, colorscale=colorscale,
show_state_data=False,
show_hover=True,
asp = 2.9,
legend_title = 'County Population '+str(year)
)
fig.layout.paper_bgcolor="#252b33"
fig.layout.plot_bgcolor="#252b33"
return fig
elif checkbox == ['GDP']:
print(checkbox)
print(year)
endpts = list(np.linspace(40000, 100000, len(colorscale) - 1))
fips = df_unemp['FIPS'].tolist()
values = df_unemp['GDP '+str(year)].tolist()
fig = ff.create_choropleth(
fips=fips, values=values, scope=['usa'],
binning_endpoints=endpts, colorscale=colorscale,
show_hover=True,
asp = 2.9,
legend_title = 'GDP '+str(year)
)
fig.layout.paper_bgcolor="#252b33"
fig.layout.plot_bgcolor="#252b33"
return fig
else:
endpts = list(np.linspace(1, 12, len(colorscale) - 1))
fips = df_unemp['FIPS'].tolist()
values = df_unemp['Unemployment Rate '+str(year)+' (%)'].tolist()
fig = ff.create_choropleth(
fips=fips, values=values, scope=['usa'],
binning_endpoints=endpts, colorscale=colorscale,
show_state_data=False,
show_hover=True,
asp = 2.9,
legend_title = '% unemployed '+str(year)
)
fig.layout.paper_bgcolor="#252b33"
        fig.layout.plot_bgcolor="#252b33"
return fig
# Callback for Map
@app.callback(Output('Scatterplot', 'figure'),
[Input('X-Axis Select', 'value'),
Input('Y-Axis Select','value')])
def update_scatter(x1,y1):
xval = '2019 GDP per capita'
yval = '2020 Population'
if x1 == '2019 GDP per capita':
xval = '2019 GDP per capita'
elif x1 == '2018 GDP per capita':
xval = '2018 GDP per capita'
elif x1 == '2017 GDP per capita':
xval = '2017 GDP per capita'
elif x1 == '2016 GDP per capita':
xval = '2016 GDP per capita'
elif x1 == '2015 GDP per capita':
xval = '2015 GDP per capita'
if y1 == '2020 Population':
yval = '2020 Population'
elif y1 == '2018 Population':
yval = '2018 Population'
elif y1 == '2010 Population':
yval = '2010 Population'
elif y1 == 'Log 2020 Population':
yval = 'Log 2020 Population'
    elif y1 == 'Log 2018 Population':
        yval = 'Log 2018 Population'
    elif y1 == 'Log 2010 Population':
        yval = 'Log 2010 Population'
figure = go.Figure(
data=px.scatter(df,
x=xval,
y=yval,
text="GeoName",
title="United States Data Comparison",
template='plotly_dark',
trendline = 'ols'))
figure.layout.paper_bgcolor="#252b33"
figure.layout.plot_bgcolor="#252b33"
return figure
if __name__ == "__main__":
app.run_server(debug=True)
```
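The unemployment frame builds its 5-digit county FIPS codes by zero-padding the state code to two digits and the county code to three, then concatenating, which is the key format `ff.create_choropleth` expects:

```python
state_fips, county_fips = 6, 75
fips = str(state_fips).zfill(2) + str(county_fips).zfill(3)
print(fips)  # '06075'
```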
|
{
"source": "jcw931/ppa-2-sw-testing-qa-spring-2018-team-1",
"score": 2
}
|
#### File: ppa-2-sw-testing-qa-spring-2018-team-1/app/app.py
```python
import logging
from flask import Flask, render_template, redirect, url_for, request, Blueprint
from .forms import gen_dist_form, gen_email_form, gen_bmi_form, gen_retire_form, gen_tip_form
from .distance import calc_distance
from .retirement import calc_retirement
from .email import verify_email
from .bmi import calc_bmi
from .tip import split_tip
main_blueprint = Blueprint('main', __name__)
@main_blueprint.route('/')
def index():
return render_template('index.jinja2')
@main_blueprint.route('/bmi', methods=['GET', 'POST'])
def bmi():
bmi_form = gen_bmi_form(request.form)
if request.method == 'POST':
try:
bmi = calc_bmi(bmi_form.f.data, bmi_form.i.data, bmi_form.p.data)
except Exception:
bmi = -1
return render_template('bmi.jinja2', form=bmi_form, bmi=bmi, post=1)
else:
return render_template('bmi.jinja2', form=bmi_form, distance=0, post=0)
@main_blueprint.route('/distance', methods=['GET', 'POST'])
def distance():
dist_form = gen_dist_form(request.form)
if request.method == 'POST':
distance = calc_distance(dist_form.x1.data, dist_form.y1.data, dist_form.x2.data, dist_form.y2.data)
return render_template('distance.jinja2', form=dist_form, distance=distance, post=1)
else:
return render_template('distance.jinja2', form=dist_form, distance=0, post=0)
@main_blueprint.route('/email', methods=['GET', 'POST'])
def email():
email_form = gen_email_form(request.form)
if request.method == 'POST':
email = verify_email(email_form.email_input.data)
return render_template('email.jinja2', form=email_form, email=email, post=True)
else:
return render_template('email.jinja2', form=email_form, email="", post=0)
@main_blueprint.route('/retirement', methods=['GET', 'POST'])
def retirement():
retire_form = gen_retire_form(request.form)
if request.method == 'POST':
age = calc_retirement(retire_form.age.data, retire_form.salary.data, retire_form.percent.data, retire_form.goal.data)
return render_template('retirement.jinja2', form=retire_form, age=age, post=1)
else:
return render_template('retirement.jinja2', form=retire_form, age=0, post=0)
@main_blueprint.route('/tip', methods=['GET', 'POST'])
def tip():
tip_form = gen_tip_form(request.form)
if request.method == 'POST':
bills = split_tip(tip_form.bill.data, tip_form.guests.data)
checks = list()
if bills != False:
guest = 1
for amount in bills:
temp_dict = dict({'guest': guest, 'amount': amount})
checks.append(temp_dict)
guest += 1
return render_template('tip.jinja2', form=tip_form, bills=checks, post=1)
else:
return render_template('tip.jinja2', form=tip_form, bills=0, post=0)
```
#### File: ppa-2-sw-testing-qa-spring-2018-team-1/app/distance.py
```python
def calc_distance(x1, y1, x2, y2):
# checking for non int or float inputs
try:
x1 = float(x1)
except Exception:
return False
try:
x2 = float(x2)
except Exception:
return False
try:
y1 = float(y1)
except Exception:
return False
try:
y2 = float(y2)
except Exception:
return False
# if the points are the same
if ((x1 == x2) and (y1 == y2)):
return 0
# Calc distance
else:
distance = ((((x2-x1)**2)+((y2-y1)**2))**0.5)
return round(distance, 4)
```
#### File: ppa-2-sw-testing-qa-spring-2018-team-1/app/email.py
```python
import re
def verify_email(email):
regex = r"^[a-zA-Z!$%*+\-=?^_{|}~]+(((\.[\w!$%*+\-=?^_{|}~]+)+|[\w!$%*+\-=?^_{|}~])+)@((?=[a-z0-9-]{1,63}\.)(xn--)?[a-z0-9]+(-[a-z0-9]+)*\.)+[a-z]{2,63}$"
is_email = re.fullmatch(regex, email, re.IGNORECASE)
if is_email:
return True
else:
return False
```
#### File: ppa-2-sw-testing-qa-spring-2018-team-1/app/retirement.py
```python
def calc_retirement(age, salary, percent, goal):
# cast each variable as its proper data type
try:
age = int(age)
salary = float(salary)
percent = float(percent)
goal = float(goal)
except Exception:
return (False)
# check each value to make sure it is in the proper range
if ((age < 15 or age > 99)
or (salary <= 0)
or (percent <= 0)
or (goal <= 0)):
return (False)
# savings from salary without employer's 35%
rawAnnualSavings = salary * (percent / 100)
# total annual savings including employer's 35%
annualSavings = rawAnnualSavings + (rawAnnualSavings * 0.35)
# total savings so far
currentSavings = 0.00
# add annual savings to total savings for each year until age 100
for i in range(age, 100):
currentSavings += annualSavings
if currentSavings >= goal:
return("You will meet your savings goal at age "+str(i))
# Notify user if they will not meet their goal
return("Sorry, your goal won't be met.")
```
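A worked example of the savings arithmetic above: a 30-year-old earning 50,000 who saves 10% toward a 100,000 goal puts away 5,000 a year, the employer's 35% brings that to 6,750 per year, and the 15th contribution (15 × 6,750 = 101,250) is the first to clear the goal, at age 44:

```python
print(calc_retirement(30, 50000, 10, 100000))
# -> "You will meet your savings goal at age 44"
```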
#### File: ppa-2-sw-testing-qa-spring-2018-team-1/tests/test_email.py
```python
from app.email import verify_email
def test_basic_email_text_only():
assert verify_email("<EMAIL>") == True
def test_basic_email_with_numbers():
assert verify_email("<EMAIL>") == True
def test_email_with_periods():
assert verify_email("<EMAIL>") == True
def test_email_with_begining_period():
assert verify_email(".<EMAIL>") == False
def test_email_with_ending_period():
assert verify_email("<EMAIL>") == False
def test_email_with_two_periods():
assert verify_email("na..<EMAIL>") == False
def test_email_starting_with_number():
assert verify_email("<EMAIL>") == False
def test_email_with_symbols():
assert verify_email("!$%*+-=?^_{|}~<EMAIL>") == True
def test_email_with_sub_domain():
assert verify_email("<EMAIL>") == True
def test_email_with_no_at_or_domain():
assert verify_email("not_email") == False
def test_email_with_no_domain():
assert verify_email("not_email@") == False
def test_email_with_no_at():
assert verify_email("not_emaildomain.com") == False
def test_email_with_hyphen_in_domain():
assert verify_email("<EMAIL>") == True
def test_email_with_hyphen_beginining_domain():
assert verify_email("[email protected]") == False
def test_email_with_hyphen_ending_domain():
assert verify_email("[email protected]") == False
def test_email_with_numbers_for_domain():
assert verify_email("<EMAIL>") == True
def test_email_with_1_letter_domain():
assert verify_email("<EMAIL>") == True
def test_email_with_1_number_domain():
assert verify_email("<EMAIL>") == True
def test_email_with_too_long_domain():
assert verify_email("<EMAIL>23456789012345678901234567890123456789012345678901234567890123456789<EMAIL>") == False
def test_email_with_punycode_domain():
assert verify_email("<EMAIL>") == True
def test_email_with_domain_and_subdomains():
assert verify_email("<EMAIL>") == True
```
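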
|
{
"source": "jcwang123/AwesomeContrastiveLearning",
"score": 2
}
|
#### File: AwesomeContrastiveLearning/Pixel/pixcl.py
```python
import copy
import random
from functools import wraps,partial
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import transforms as T
# helper functions
def default(val, def_val):
return def_val if val is None else val
def flatten(t):
return t.reshape(t.shape[0], -1)
def singleton(cache_key):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
instance = getattr(self, cache_key)
if instance is not None:
return instance
instance = fn(self, *args, **kwargs)
setattr(self, cache_key, instance)
return instance
return wrapper
return inner_fn
def get_module_device(module):
return next(module.parameters()).device
def set_requires_grad(model, val):
for p in model.parameters():
p.requires_grad = val
# loss fn
def loss_fn(x, y):
x = F.normalize(x, dim=-1, p=2)
y = F.normalize(y, dim=-1, p=2)
return 2 - 2 * (x * y).sum(dim=-1)
# augmentation utils
class RandomApply(nn.Module):
def __init__(self, fn, p):
super().__init__()
self.fn = fn
self.p = p
def forward(self, x):
if random.random() > self.p:
return x
return self.fn(x)
# exponential moving average
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
def update_moving_average(ema_updater, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = ema_updater.update_average(old_weight, up_weight)
# MLP class for projector and predictor
class ConvMLP(nn.Module):
def __init__(self, chan, chan_out = 256, inner_dim = 2048):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(chan, inner_dim, 1),
nn.BatchNorm2d(inner_dim),
nn.ReLU(),
nn.Conv2d(inner_dim, chan_out, 1)
)
def forward(self, x):
return self.net(x)
# a wrapper class for the base neural network
# will manage the interception of the hidden layer output
# and pipe it into the projecter and predictor nets
class NetWrapper(nn.Module):
def __init__(
self,
*,
net,
projection_size,
projection_hidden_size,
layer_pixel = -2,
):
super().__init__()
self.net = net
self.layer_pixel = layer_pixel
self.pixel_projector = None
self.projection_size = projection_size
self.projection_hidden_size = projection_hidden_size
self.hidden_pixel = None
self.hook_registered = False
def _find_layer(self, layer_id):
if type(layer_id) == str:
modules = dict([*self.net.named_modules()])
return modules.get(layer_id, None)
elif type(layer_id) == int:
children = [*self.net.children()]
return children[layer_id]
return None
def _hook(self, attr_name, _, __, output):
setattr(self, attr_name, output)
def _register_hook(self):
pixel_layer = self._find_layer(self.layer_pixel)
assert pixel_layer is not None, f'hidden layer ({self.layer_pixel}) not found'
pixel_layer.register_forward_hook(partial(self._hook, 'hidden_pixel'))
self.hook_registered = True
@singleton('pixel_projector')
def _get_pixel_projector(self, hidden):
_, dim, *_ = hidden.shape
projector = ConvMLP(dim, self.projection_size, self.projection_hidden_size)
return projector.to(hidden)
def get_representation(self, x):
if not self.hook_registered:
self._register_hook()
_ = self.net(x)
hidden_pixel = self.hidden_pixel
self.hidden_pixel = None
assert hidden_pixel is not None, f'hidden pixel layer {self.layer_pixel} never emitted an output'
return hidden_pixel
def forward(self, x):
pixel_representation = self.get_representation(x)
pixel_projector = self._get_pixel_projector(pixel_representation)
pixel_projection = pixel_projector(pixel_representation)
return pixel_projection
# main class
class PixCL(nn.Module):
def __init__(
self,
net,
image_size,
hidden_layer_pixel = -2,
projection_size = 256,
projection_hidden_size = 4096,
augment_fn = None,
augment_fn2 = None,
moving_average_decay = 0.99,
use_momentum = True
):
super().__init__()
self.net = net
# default SimCLR augmentation
DEFAULT_AUG = torch.nn.Sequential(
RandomApply(
T.ColorJitter(0.8, 0.8, 0.8, 0.2),
p = 0.3
),
T.RandomGrayscale(p=0.2),
RandomApply(
T.GaussianBlur((3, 3), (1.0, 2.0)),
p = 0.2
),
T.Resize((image_size, image_size)),
)
self.augment1 = default(augment_fn, DEFAULT_AUG)
self.augment2 = default(augment_fn2, self.augment1)
self.online_encoder = NetWrapper(
net=net,
projection_size=projection_size,
projection_hidden_size=projection_hidden_size,
layer_pixel=hidden_layer_pixel
)
self.use_momentum = use_momentum
self.target_encoder = None
self.target_ema_updater = EMA(moving_average_decay)
self.online_predictor = ConvMLP(projection_size, projection_size, projection_hidden_size)
# get device of network and make wrapper same device
device = get_module_device(net)
self.to(device)
# send a mock image tensor to instantiate singleton parameters
self.forward(torch.randn(2, 3, image_size, image_size, device=device))
@singleton('target_encoder')
def _get_target_encoder(self):
target_encoder = copy.deepcopy(self.online_encoder)
set_requires_grad(target_encoder, False)
return target_encoder
def reset_moving_average(self):
del self.target_encoder
self.target_encoder = None
def update_moving_average(self):
assert self.use_momentum, 'you do not need to update the moving average, since you have turned off momentum for the target encoder'
assert self.target_encoder is not None, 'target encoder has not been created yet'
update_moving_average(self.target_ema_updater, self.target_encoder, self.online_encoder)
def forward(self, x, return_embedding = False):
if return_embedding:
            # the wrapped online encoder returns the pixel-level projection
            return self.online_encoder(x)
image_one, image_two = self.augment1(x), self.augment2(x)
online_pix_proj_one = self.online_encoder(image_one)
online_pix_proj_two = self.online_encoder(image_two)
online_pred_one = self.online_predictor(online_pix_proj_one)
online_pred_two = self.online_predictor(online_pix_proj_two)
with torch.no_grad():
target_encoder = self._get_target_encoder() if self.use_momentum else self.online_encoder
target_proj_one = target_encoder(image_one)
target_proj_two = target_encoder(image_two)
loss_one = loss_fn(online_pred_one, target_proj_two.detach())
loss_two = loss_fn(online_pred_two, target_proj_one.detach())
loss = loss_one + loss_two
return loss.mean()
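# Minimal training sketch (not part of the original file; the backbone choice,
# layer name and hyper-parameters below are illustrative assumptions):
#
#   from torchvision import models
#
#   resnet = models.resnet50(pretrained=True)
#   learner = PixCL(resnet, image_size=256, hidden_layer_pixel='layer4')
#   opt = torch.optim.Adam(learner.parameters(), lr=3e-4)
#
#   images = torch.randn(4, 3, 256, 256)    # stand-in for a real batch
#   loss = learner(images)
#   opt.zero_grad()
#   loss.backward()
#   opt.step()
#   learner.update_moving_average()          # keep the target encoder in sync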
```
|
{
"source": "jcwchen/models",
"score": 2
}
|
#### File: models/workflow_scripts/test_models.py
```python
import onnx
from pathlib import Path
import subprocess
import sys
def run_lfs_install():
result = subprocess.run(['git', 'lfs', 'install'], cwd=cwd_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print("Git LFS install completed with return code=" + str(result.returncode))
def pull_lfs_file(file_name):
result = subprocess.run(['git', 'lfs', 'pull', '--include', file_name, '--exclude', '\"\"'], cwd=cwd_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print("LFS pull completed with return code=" + str(result.returncode))
cwd_path = Path.cwd()
# obtain list of added or modified files in this PR
obtain_diff = subprocess.Popen(['git', 'diff', '--name-only', '--diff-filter=AM', 'origin/master', 'HEAD'],
cwd=cwd_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdoutput, stderroutput = obtain_diff.communicate()
# decode the byte output and identify the changed ONNX models in the model zoo
diff_list = stdoutput.decode('utf-8').split()
model_list = [model for model in diff_list if '.onnx' in model]
# run lfs install before starting the tests
run_lfs_install()
print("\n=== Running ONNX Checker on added models ===\n")
# run checker on each model
failed_models = []
for model_path in model_list:
model_name = model_path.split('/')[-1]
print("Testing:", model_name)
try:
pull_lfs_file(model_path)
model = onnx.load(model_path)
onnx.checker.check_model(model)
print("Model", model_name, "has been successfully checked!")
except Exception as e:
print(e)
failed_models.append(model_path)
if len(failed_models) != 0:
print(str(len(failed_models)) +" models failed onnx checker.")
sys.exit(1)
print(len(model_list), "model(s) checked.")
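# Hypothetical local run (assumes a git checkout of the model zoo with git-lfs
# available): every .onnx file added or modified relative to origin/master is
# pulled through LFS and validated with onnx.checker.
#
#   python workflow_scripts/test_models.py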
```
|
{
"source": "j-c-w/EXPCAP_Process",
"score": 3
}
|
#### File: j-c-w/EXPCAP_Process/arrival_time_difference_plot.py
```python
import argparse
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import graph_utils
import os
import process_csv
import process_txt
import process_pcap
import sys
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument("first_file")
parser.add_argument("second_file")
args = parser.parse_args(args)
first_file = args.first_file
second_file = args.second_file
if args.first_file.endswith('.pcap'):
first_times = np.array(process_pcap.extract_times(first_file))
elif args.first_file.endswith('.csv'):
first_times = np.array(process_csv.extract_times(first_file))
else:
first_times = np.array(process_txt.extract_times(first_file))
if args.second_file.endswith('.pcap'):
second_times = np.array(process_pcap.extract_times(second_file))
elif args.second_file.endswith('.csv'):
second_times = np.array(process_csv.extract_times(second_file))
else:
second_times = np.array(process_txt.extract_times(second_file))
if len(first_times) != len(second_times):
print len(first_times), "in first trace"
print len(second_times), "in second trace"
print "Error: There are a different number of packets in each trace"
sys.exit(1)
# Now, go through each time and calculate the difference.
# Plot that difference in a histogram.
diffs = first_times - second_times
# Convert to ns:
diffs = diffs * (10 ** 9)
# Convert to floats so they can be plotted.
diffs = np.asarray(diffs, dtype='float')
print "Plottiong ", len(diffs), "packets"
print min(diffs), max(diffs)
bins = graph_utils.get_linspace(min(diffs), max(diffs))
plt.hist(diffs, cumulative=True, bins=bins, histtype='step', normed=True)
plt.xlabel("Difference (ns)")
plt.ylabel("CDF")
graph_utils.set_yax_max_one()
graph_utils.set_ticks()
plt.tight_layout()
filename = os.path.basename(first_file) + '_diff_' + \
os.path.basename(second_file) + '.eps'
plt.savefig(filename)
print "Figure saved in ", filename
if __name__ == "__main__":
main(sys.argv[1:])
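# Example invocation (hypothetical capture names); the cumulative histogram of
# per-packet arrival-time differences is saved as
# <first_file>_diff_<second_file>.eps in the working directory:
#
#   python arrival_time_difference_plot.py capture_a.csv capture_b.csv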
```
#### File: j-c-w/EXPCAP_Process/bandwidth_through_time.py
```python
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import process_csv
import graph_utils
import sys
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('--input-file', dest='input_files', nargs=2, action='append', required=True, help="csv file to plot. Needs a label as a second argument.")
parser.add_argument('--window-size', nargs=2, dest='window_size', action='append', help="How long to average over. In ps. (Also needs a label)", required=True)
parser.add_argument('--keep-temps', dest='keep_temps', default=False, action='store_true', help="Keep temp files")
    parser.add_argument('--server', dest='server_ip', required=True, help="IP of the machine that the card is directly connected to")
parser.add_argument('--output-name', dest='output_name', required=True)
parser.add_argument('--title', dest='title', required=False, default=None)
# This is to avoid issues with tcpdump hanging.
parser.add_argument('--packets', type=int, required=False,
default=None, dest='packets',
help="Number of packets to process from a pcap file")
args = parser.parse_args(args)
plt.figure(1)
plt.clf()
plt.figure(2)
plt.clf()
for (pcap_file, label) in args.input_files:
for (window_size, label_suffix) in args.window_size:
if pcap_file.endswith('.csv'):
incoming_x_values, incoming_bandwidths = \
process_csv.extract_bandwidths(pcap_file, window_size,
to_ip=args.server_ip)
outgoing_x_values, outgoing_bandwidths = \
process_csv.extract_bandwidths(pcap_file, window_size,
from_ip=args.server_ip)
# Handle the outgoing information first.
# Recenter the xvalues around zero.
zero_value = outgoing_x_values[0][0]
for i in range(len(outgoing_x_values)):
outgoing_x_values[i] = float(outgoing_x_values[i][0] - zero_value)
# Handle the incoming information next.
# Recenter the xvalues around zero.
zero_value = incoming_x_values[0][0]
for i in range(len(incoming_x_values)):
incoming_x_values[i] = float(incoming_x_values[i][0] - zero_value)
if len(incoming_x_values) < 3000000:
plt.figure(2)
plt.plot(incoming_x_values, incoming_bandwidths, label=label + ' ' + label_suffix)
else:
print "Error: Skipping line ", label + ' ' + label_suffix, " because it has more than 3 million entries."
if len(outgoing_x_values) < 3000000:
plt.figure(1)
plt.plot(outgoing_x_values, outgoing_bandwidths, label=label + ' ' + label_suffix)
else:
print "Error: Skipping line ", label + ' ' + label_suffix, " because it has more than 3 million entries."
if args.title:
plt.figure(1)
plt.title('Server Traffic: ' + args.title)
plt.figure(2)
plt.title('Client Traffic: ' + args.title)
label_count = len(args.input_files) * len(args.window_size)
graph_utils.latexify(bottom_label_rows=label_count / 2)
plt.figure(2)
plt.xlabel("Time (s)")
plt.ylabel("Bandwidth (Mbps)")
graph_utils.set_ticks()
graph_utils.set_non_negative_axes()
graph_utils.set_legend_below()
filename = args.output_name + '_incoming_bandwidth_windowed.eps'
plt.savefig(filename, format='eps')
print "Done! File is in ", filename
plt.figure(1)
plt.xlabel("Time (s)")
plt.ylabel("Bandwidth (Mbps)")
graph_utils.set_non_negative_axes()
graph_utils.set_ticks()
graph_utils.set_legend_below()
filename = args.output_name + '_outgoing_bandwidth_windowed.eps'
plt.savefig(filename, format='eps')
print "Done! File is in ", filename
if __name__ == "__main__":
main(sys.argv[1:])
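# Example invocation (hypothetical values): plot incoming and outgoing
# bandwidth for one capture, averaged over a 1 ms window (window sizes are
# given in picoseconds, so 1 ms = 1000000000 ps):
#
#   python bandwidth_through_time.py \
#       --input-file capture.csv port-1 \
#       --window-size 1000000000 1ms \
#       --server 10.0.0.1 \
#       --output-name capture_plot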
```
#### File: j-c-w/EXPCAP_Process/graph_utils.py
```python
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import sys
import matplotlib
import numpy as np
from math import sqrt
SPINE_COLOR = 'gray'
def no_zeroes(data):
non_zero = 0
for element in data:
if element > 0:
non_zero += 1
new_data = [0] * (non_zero)
insert_index = 0
for element in data:
if element > 0:
new_data[insert_index] = element
insert_index += 1
print "Removed ", len(data) - non_zero, "entries that were all zero"
print "Before this, there were ", len(data), "entries"
return new_data
def get_logspace(min_lim, max_lim):
assert min_lim != 0 # We can't handle zeroes...
small_diff_upper = max_lim / 10000.0
small_diff_lower = - (min_lim / 10000.0)
logspace_bins = np.append(np.logspace(np.log10(min_lim + small_diff_lower), np.log10(max_lim + small_diff_upper), 1000), np.inf)
return logspace_bins
def get_linspace(min_lim, max_lim):
small_diff_lower = - (abs(min_lim) / 10000.0)
small_diff_upper = max_lim / 10000.0
linspace_bins = \
np.append(np.linspace((min_lim + small_diff_lower),
(max_lim + small_diff_upper), 1000), np.inf)
return linspace_bins
def set_integer_ticks():
ax = plt.gca()
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
def set_log_x():
ax = plt.gca()
ax.set_xscale('log')
def set_log_y():
ax = plt.gca()
ax.set_yscale('log')
def set_legend_below(extra=0.0, ncol=2):
ax = plt.gca()
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.20 - extra),
fancybox=True, shadow=True, ncol=ncol)
def set_ticks():
(xmin, xmax) = plt.xlim()
(ymin, ymax) = plt.ylim()
if xmin == xmax or ymin == ymax:
print "Error, have you plotted anything yet?"
print " You need ot have plotted data before calling add_grid()"
sys.exit(1)
ax = plt.gca()
ax.set_axisbelow(True)
plt.grid()
def set_non_negative_axes():
(xmin, xmax) = plt.xlim()
(ymin, ymax) = plt.ylim()
if xmin < 0:
plt.xlim(0, xmax)
if ymin < 0:
plt.ylim(0, ymax)
def set_yax_max_one():
(ymin, ymax) = plt.ylim()
if ymax > 1:
plt.ylim(ymin, 1)
# This saves the multiple ways in which we might want to view
# a CDF.
def save_cdf(filename, use_xlims=None):
plt.savefig(filename + '.eps')
def legend_bottom_right():
plt.legend(loc='lower right')
def legend_upper_left():
plt.legend(loc='upper left')
def latexify(fig_width=None, fig_height=None, columns=2, space_below_graph=0.0, bottom_label_rows=0):
"""Set up matplotlib's RC params for LaTeX plotting.
Call this before plotting a figure.
Parameters
----------
fig_width : float, optional, inches
fig_height : float, optional, inches
columns : {1, 2}
"""
# code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples
# Width and max height in inches for IEEE journals taken from
# computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
assert(columns in [1,2])
if fig_width is None:
fig_width = 3.39 if columns==1 else 6.9 # width in inches
if fig_height is None:
golden_mean = (sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_height = fig_width*golden_mean # height in inches
# Add some height for the labels.
fig_height += space_below_graph
# Make the graph taller to account for the labels at the
# bottom of the graph.
fig_height += bottom_label_rows * 0.33
if bottom_label_rows > 0:
# And a bit to make up for the padding at the beginning
# and end of such a label box
fig_height += 0.2
MAX_HEIGHT_INCHES = 8.0
if fig_height > MAX_HEIGHT_INCHES:
print("WARNING: fig_height too large:" + fig_height +
"so will reduce to" + MAX_HEIGHT_INCHES + "inches.")
fig_height = MAX_HEIGHT_INCHES
params = {# 'backend': 'ps',
'text.latex.preamble': ['\\usepackage{gensymb}'],
'axes.labelsize': 12, # fontsize for x and y labels (was 10)
'axes.titlesize': 12,
# 'text.fontsize': 8, # was 10
'legend.fontsize': 12, # was 10
'xtick.labelsize': 12,
'ytick.labelsize': 12,
# 'text.usetex': True,
'figure.figsize': [fig_width,fig_height],
'font.family': 'serif',
'figure.autolayout': True,
'patch.linewidth': 1.3
}
matplotlib.rcParams.update(params)
def format_axes(ax):
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
for spine in ['left', 'bottom']:
ax.spines[spine].set_color(SPINE_COLOR)
ax.spines[spine].set_linewidth(0.5)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_tick_params(direction='out', color=SPINE_COLOR)
return ax
# Call this regardless.
print "Graph utils is overwriting the Matplotlib RC"
latexify()
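# Typical usage sketch from one of the plotting scripts in this repository
# (the data below is purely illustrative):
#
#   import matplotlib.pyplot as plt
#   import graph_utils
#
#   graph_utils.latexify(bottom_label_rows=1)
#   plt.plot([0, 1, 2], [0.1, 0.5, 0.9], label='example')
#   graph_utils.set_ticks()
#   graph_utils.set_non_negative_axes()
#   graph_utils.set_legend_below()
#   plt.savefig('example.eps', format='eps')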
```
#### File: j-c-w/EXPCAP_Process/interarrival_statistics.py
```python
import argparse
import numpy as np
import process_csv
import process_txt
import process_pcap
import sys
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('file')
args = parser.parse_args(args)
file = args.file
if file.endswith('.pcap'):
times = process_pcap.extract_deltas(file)
elif file.endswith('.csv'):
times = process_csv.extract_deltas(file)
else:
times = process_txt.extract_deltas(file)
print "Mean delta: ", np.mean(times), ", Median delta: ", np.median(times),
print ", deviation: ", np.std(times)
if __name__ == "__main__":
main(sys.argv[1:])
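# Example invocation (hypothetical capture file); prints the mean, median and
# standard deviation of the packet inter-arrival deltas:
#
#   python interarrival_statistics.py capture.csv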
```
#### File: j-c-w/EXPCAP_Process/plot_arrival_times.py
```python
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import process_csv
import process_txt
import graph_utils
import process_pcap
import sys
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('input_file')
parser.add_argument('--packets', type=int,
help='Number of packets to process',
required=False, dest='packets',
default=None)
args = parser.parse_args(args)
input_file = args.input_file
if input_file.endswith('.pcap'):
arrival_times = process_pcap.extract_times(input_file)
elif input_file.endswith('.csv'):
arrival_times = process_csv.extract_times(input_file)
else:
arrival_times = process_txt.extract_times(input_file)
x_values = range(0, len(arrival_times))
last_time = 0
for index in range(len(arrival_times)):
time = arrival_times[index]
if time <= last_time:
print "have time ", time, "at line ", index
last_time = time
arrival_times = np.asarray(arrival_times, dtype='float')
plt.plot(x_values, arrival_times)
plt.title("Arrival time")
plt.xlabel("Packet number")
plt.ylabel("Absolute Arrival Time")
plt.savefig(input_file + '_arrival_times.eps', format='eps')
if __name__ == "__main__":
main(sys.argv[1:])
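# Example invocation (hypothetical capture file); the plot is written to
# <input_file>_arrival_times.eps:
#
#   python plot_arrival_times.py capture.csv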
```
|
{
"source": "JcwGitHub/HuoBiApi_Python",
"score": 2
}
|
#### File: HuoBiApi_Python/PythonApplication/PythonApplicationUI.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(969, 693)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setStyleSheet("")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_16 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_16.setObjectName("gridLayout_16")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setStyleSheet("")
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setStyleSheet("")
self.tab.setObjectName("tab")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.tab)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.BTNBTC = QtWidgets.QPushButton(self.tab)
self.BTNBTC.setObjectName("BTNBTC")
self.horizontalLayout_3.addWidget(self.BTNBTC)
self.BTNETH = QtWidgets.QPushButton(self.tab)
self.BTNETH.setObjectName("BTNETH")
self.horizontalLayout_3.addWidget(self.BTNETH)
self.BTNEOS = QtWidgets.QPushButton(self.tab)
self.BTNEOS.setObjectName("BTNEOS")
self.horizontalLayout_3.addWidget(self.BTNEOS)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.widget = QtWidgets.QWidget(self.tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setAutoFillBackground(False)
self.widget.setStyleSheet("background-color: rgb(126, 126, 126);\n"
"background-color: rgb(220, 220, 220);\n"
"border-color: rgb(0, 255, 255);")
self.widget.setObjectName("widget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.info21 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info21.sizePolicy().hasHeightForWidth())
self.info21.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info21.setFont(font)
self.info21.setScaledContents(False)
self.info21.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info21.setWordWrap(False)
self.info21.setObjectName("info21")
self.gridLayout.addWidget(self.info21, 3, 2, 1, 1)
self.label_2 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 3, 0, 1, 1)
self.info1 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info1.sizePolicy().hasHeightForWidth())
self.info1.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info1.setFont(font)
self.info1.setScaledContents(False)
self.info1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info1.setWordWrap(False)
self.info1.setObjectName("info1")
self.gridLayout.addWidget(self.info1, 3, 1, 1, 1)
self.gridLayout.setColumnMinimumWidth(0, 1)
self.gridLayout.setColumnMinimumWidth(1, 1)
self.gridLayout.setColumnMinimumWidth(2, 1)
self.gridLayout.setColumnStretch(0, 1)
self.gridLayout.setColumnStretch(1, 1)
self.gridLayout.setColumnStretch(2, 1)
self.verticalLayout.addLayout(self.gridLayout)
self.gridLayout_14 = QtWidgets.QGridLayout()
self.gridLayout_14.setObjectName("gridLayout_14")
self.info4 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info4.sizePolicy().hasHeightForWidth())
self.info4.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info4.setFont(font)
self.info4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info4.setObjectName("info4")
self.gridLayout_14.addWidget(self.info4, 0, 1, 1, 1)
self.label_7 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_7.setFont(font)
self.label_7.setObjectName("label_7")
self.gridLayout_14.addWidget(self.label_7, 0, 0, 1, 1)
self.info24 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info24.sizePolicy().hasHeightForWidth())
self.info24.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info24.setFont(font)
self.info24.setScaledContents(False)
self.info24.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info24.setWordWrap(False)
self.info24.setObjectName("info24")
self.gridLayout_14.addWidget(self.info24, 0, 2, 1, 1)
self.gridLayout_14.setColumnMinimumWidth(0, 1)
self.gridLayout_14.setColumnMinimumWidth(1, 1)
self.gridLayout_14.setColumnMinimumWidth(2, 1)
self.gridLayout_14.setColumnStretch(0, 1)
self.gridLayout_14.setColumnStretch(1, 1)
self.gridLayout_14.setColumnStretch(2, 1)
self.verticalLayout.addLayout(self.gridLayout_14)
self.gridLayout_9 = QtWidgets.QGridLayout()
self.gridLayout_9.setObjectName("gridLayout_9")
self.label_9 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_9.setFont(font)
self.label_9.setObjectName("label_9")
self.gridLayout_9.addWidget(self.label_9, 0, 0, 1, 1)
self.info5 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info5.sizePolicy().hasHeightForWidth())
self.info5.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info5.setFont(font)
self.info5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info5.setObjectName("info5")
self.gridLayout_9.addWidget(self.info5, 0, 1, 1, 1)
self.info25 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info25.sizePolicy().hasHeightForWidth())
self.info25.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info25.setFont(font)
self.info25.setScaledContents(False)
self.info25.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info25.setWordWrap(False)
self.info25.setObjectName("info25")
self.gridLayout_9.addWidget(self.info25, 0, 2, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_9)
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_3 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.gridLayout_2.addWidget(self.label_3, 0, 0, 1, 1)
self.info2 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info2.sizePolicy().hasHeightForWidth())
self.info2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info2.setFont(font)
self.info2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info2.setObjectName("info2")
self.gridLayout_2.addWidget(self.info2, 0, 1, 1, 1)
self.info22 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info22.sizePolicy().hasHeightForWidth())
self.info22.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info22.setFont(font)
self.info22.setScaledContents(False)
self.info22.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info22.setWordWrap(False)
self.info22.setObjectName("info22")
self.gridLayout_2.addWidget(self.info22, 0, 2, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_2)
self.gridLayout_13 = QtWidgets.QGridLayout()
self.gridLayout_13.setObjectName("gridLayout_13")
self.label_5 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
self.gridLayout_13.addWidget(self.label_5, 0, 0, 1, 1)
self.info3 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info3.sizePolicy().hasHeightForWidth())
self.info3.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info3.setFont(font)
self.info3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info3.setObjectName("info3")
self.gridLayout_13.addWidget(self.info3, 0, 1, 1, 1)
self.info23 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info23.sizePolicy().hasHeightForWidth())
self.info23.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info23.setFont(font)
self.info23.setScaledContents(False)
self.info23.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info23.setWordWrap(False)
self.info23.setObjectName("info23")
self.gridLayout_13.addWidget(self.info23, 0, 2, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_13)
self.gridLayout_11 = QtWidgets.QGridLayout()
self.gridLayout_11.setObjectName("gridLayout_11")
self.label_11 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_11.setFont(font)
self.label_11.setObjectName("label_11")
self.gridLayout_11.addWidget(self.label_11, 0, 0, 1, 1)
self.info6 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info6.sizePolicy().hasHeightForWidth())
self.info6.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info6.setFont(font)
self.info6.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info6.setObjectName("info6")
self.gridLayout_11.addWidget(self.info6, 0, 1, 1, 1)
self.info21_6 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info21_6.sizePolicy().hasHeightForWidth())
self.info21_6.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info21_6.setFont(font)
self.info21_6.setScaledContents(False)
self.info21_6.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info21_6.setWordWrap(False)
self.info21_6.setObjectName("info21_6")
self.gridLayout_11.addWidget(self.info21_6, 0, 2, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_11)
self.gridLayout_8 = QtWidgets.QGridLayout()
self.gridLayout_8.setContentsMargins(0, -1, -1, -1)
self.gridLayout_8.setObjectName("gridLayout_8")
self.label_17 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_17.setFont(font)
self.label_17.setObjectName("label_17")
self.gridLayout_8.addWidget(self.label_17, 0, 0, 1, 1)
self.info7 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info7.sizePolicy().hasHeightForWidth())
self.info7.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info7.setFont(font)
self.info7.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info7.setObjectName("info7")
self.gridLayout_8.addWidget(self.info7, 0, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_8)
self.gridLayout_15 = QtWidgets.QGridLayout()
self.gridLayout_15.setObjectName("gridLayout_15")
self.label_15 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_15.setFont(font)
self.label_15.setObjectName("label_15")
self.gridLayout_15.addWidget(self.label_15, 0, 0, 1, 1)
self.info8 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info8.sizePolicy().hasHeightForWidth())
self.info8.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info8.setFont(font)
self.info8.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info8.setObjectName("info8")
self.gridLayout_15.addWidget(self.info8, 0, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_15)
self.gridLayout_12 = QtWidgets.QGridLayout()
self.gridLayout_12.setObjectName("gridLayout_12")
self.label_13 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_13.setFont(font)
self.label_13.setObjectName("label_13")
self.gridLayout_12.addWidget(self.label_13, 0, 0, 1, 1)
self.info9 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info9.sizePolicy().hasHeightForWidth())
self.info9.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info9.setFont(font)
self.info9.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info9.setObjectName("info9")
self.gridLayout_12.addWidget(self.info9, 0, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_12)
self.gridLayout_10 = QtWidgets.QGridLayout()
self.gridLayout_10.setObjectName("gridLayout_10")
self.label_19 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_19.setFont(font)
self.label_19.setObjectName("label_19")
self.gridLayout_10.addWidget(self.label_19, 0, 0, 1, 1)
self.info10 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info10.sizePolicy().hasHeightForWidth())
self.info10.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info10.setFont(font)
self.info10.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info10.setObjectName("info10")
self.gridLayout_10.addWidget(self.info10, 0, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_10)
self.gridLayout_6 = QtWidgets.QGridLayout()
self.gridLayout_6.setObjectName("gridLayout_6")
self.label_21 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_21.setFont(font)
self.label_21.setObjectName("label_21")
self.gridLayout_6.addWidget(self.label_21, 0, 0, 1, 1)
self.info11 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info11.sizePolicy().hasHeightForWidth())
self.info11.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info11.setFont(font)
self.info11.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info11.setObjectName("info11")
self.gridLayout_6.addWidget(self.info11, 0, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_6)
self.gridLayout_7 = QtWidgets.QGridLayout()
self.gridLayout_7.setObjectName("gridLayout_7")
self.label_23 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_23.setFont(font)
self.label_23.setObjectName("label_23")
self.gridLayout_7.addWidget(self.label_23, 0, 0, 1, 1)
self.info12 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info12.sizePolicy().hasHeightForWidth())
self.info12.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info12.setFont(font)
self.info12.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info12.setObjectName("info12")
self.gridLayout_7.addWidget(self.info12, 0, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_7)
self.gridLayout_3 = QtWidgets.QGridLayout()
self.gridLayout_3.setObjectName("gridLayout_3")
self.label_25 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_25.setFont(font)
self.label_25.setObjectName("label_25")
self.gridLayout_3.addWidget(self.label_25, 0, 0, 1, 1)
self.info13 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info13.sizePolicy().hasHeightForWidth())
self.info13.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info13.setFont(font)
self.info13.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info13.setObjectName("info13")
self.gridLayout_3.addWidget(self.info13, 0, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_3)
self.gridLayout_4 = QtWidgets.QGridLayout()
self.gridLayout_4.setObjectName("gridLayout_4")
self.label_27 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_27.setFont(font)
self.label_27.setObjectName("label_27")
self.gridLayout_4.addWidget(self.label_27, 0, 0, 1, 1)
self.info14 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info14.sizePolicy().hasHeightForWidth())
self.info14.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info14.setFont(font)
self.info14.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info14.setObjectName("info14")
self.gridLayout_4.addWidget(self.info14, 0, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_4)
self.gridLayout_5 = QtWidgets.QGridLayout()
self.gridLayout_5.setObjectName("gridLayout_5")
self.label_29 = QtWidgets.QLabel(self.widget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_29.setFont(font)
self.label_29.setObjectName("label_29")
self.gridLayout_5.addWidget(self.label_29, 0, 0, 1, 1)
self.info15 = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info15.sizePolicy().hasHeightForWidth())
self.info15.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info15.setFont(font)
self.info15.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.info15.setObjectName("info15")
self.gridLayout_5.addWidget(self.info15, 0, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_5)
self.horizontalLayout_2.addWidget(self.widget)
self.widget_2ASDASD = QtWidgets.QWidget(self.tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_2ASDASD.sizePolicy().hasHeightForWidth())
self.widget_2ASDASD.setSizePolicy(sizePolicy)
self.widget_2ASDASD.setAutoFillBackground(True)
self.widget_2ASDASD.setStyleSheet("")
self.widget_2ASDASD.setObjectName("widget_2ASDASD")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.widget_2ASDASD)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.widget_4 = QtWidgets.QWidget(self.widget_2ASDASD)
self.widget_4.setStyleSheet("")
self.widget_4.setObjectName("widget_4")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget_4)
self.horizontalLayout.setObjectName("horizontalLayout")
self.info51 = QtWidgets.QLabel(self.widget_4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info51.sizePolicy().hasHeightForWidth())
self.info51.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info51.setFont(font)
self.info51.setStyleSheet("")
self.info51.setScaledContents(False)
self.info51.setAlignment(QtCore.Qt.AlignCenter)
self.info51.setWordWrap(False)
self.info51.setObjectName("info51")
self.horizontalLayout.addWidget(self.info51)
self.order3 = QtWidgets.QLabel(self.widget_4)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.order3.setFont(font)
self.order3.setObjectName("order3")
self.horizontalLayout.addWidget(self.order3)
self.horizontalLayout.setStretch(0, 10)
self.horizontalLayout.setStretch(1, 10)
self.verticalLayout_4.addWidget(self.widget_4)
self.widget_6 = QtWidgets.QWidget(self.widget_2ASDASD)
self.widget_6.setStyleSheet("")
self.widget_6.setObjectName("widget_6")
self.gridLayout_17 = QtWidgets.QGridLayout(self.widget_6)
self.gridLayout_17.setObjectName("gridLayout_17")
self.order2 = QtWidgets.QComboBox(self.widget_6)
self.order2.setMinimumSize(QtCore.QSize(0, 25))
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.order2.setFont(font)
self.order2.setEditable(True)
self.order2.setDuplicatesEnabled(False)
self.order2.setObjectName("order2")
self.order2.addItem("")
self.order2.addItem("")
self.order2.addItem("")
self.order2.addItem("")
self.gridLayout_17.addWidget(self.order2, 0, 1, 1, 1)
self.order1 = QtWidgets.QComboBox(self.widget_6)
self.order1.setMinimumSize(QtCore.QSize(0, 25))
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.order1.setFont(font)
self.order1.setEditable(True)
self.order1.setDuplicatesEnabled(False)
self.order1.setObjectName("order1")
self.order1.addItem("")
self.order1.addItem("")
self.order1.addItem("")
self.gridLayout_17.addWidget(self.order1, 0, 0, 1, 1)
self.order4 = QtWidgets.QComboBox(self.widget_6)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.order4.setFont(font)
self.order4.setLayoutDirection(QtCore.Qt.LeftToRight)
self.order4.setObjectName("order4")
self.order4.addItem("")
self.order4.addItem("")
self.order4.addItem("")
self.order4.addItem("")
self.gridLayout_17.addWidget(self.order4, 0, 2, 1, 1)
self.order7 = QtWidgets.QPushButton(self.widget_6)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.order7.setFont(font)
self.order7.setStyleSheet("")
self.order7.setObjectName("order7")
self.gridLayout_17.addWidget(self.order7, 0, 3, 1, 1)
self.verticalLayout_4.addWidget(self.widget_6)
self.widget_5 = QtWidgets.QWidget(self.widget_2ASDASD)
self.widget_5.setObjectName("widget_5")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.widget_5)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.info53 = QtWidgets.QLabel(self.widget_5)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(14)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info53.setFont(font)
self.info53.setStyleSheet("color: rgb(0, 170, 0);")
self.info53.setObjectName("info53")
self.verticalLayout_5.addWidget(self.info53)
self.widget_7 = QtWidgets.QWidget(self.widget_5)
self.widget_7.setStyleSheet("")
self.widget_7.setObjectName("widget_7")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.widget_7)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_8 = QtWidgets.QLabel(self.widget_7)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_8.setFont(font)
self.label_8.setObjectName("label_8")
self.horizontalLayout_4.addWidget(self.label_8)
self.info41 = QtWidgets.QLabel(self.widget_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info41.sizePolicy().hasHeightForWidth())
self.info41.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info41.setFont(font)
self.info41.setScaledContents(False)
self.info41.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.info41.setWordWrap(False)
self.info41.setObjectName("info41")
self.horizontalLayout_4.addWidget(self.info41)
self.label_16 = QtWidgets.QLabel(self.widget_7)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_16.setFont(font)
self.label_16.setObjectName("label_16")
self.horizontalLayout_4.addWidget(self.label_16)
self.info46 = QtWidgets.QLabel(self.widget_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info46.sizePolicy().hasHeightForWidth())
self.info46.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info46.setFont(font)
self.info46.setScaledContents(False)
self.info46.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.info46.setWordWrap(False)
self.info46.setObjectName("info46")
self.horizontalLayout_4.addWidget(self.info46)
self.verticalLayout_6.addLayout(self.horizontalLayout_4)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.label_10 = QtWidgets.QLabel(self.widget_7)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_10.setFont(font)
self.label_10.setObjectName("label_10")
self.horizontalLayout_5.addWidget(self.label_10)
self.info42 = QtWidgets.QLabel(self.widget_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info42.sizePolicy().hasHeightForWidth())
self.info42.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info42.setFont(font)
self.info42.setScaledContents(False)
self.info42.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.info42.setWordWrap(False)
self.info42.setObjectName("info42")
self.horizontalLayout_5.addWidget(self.info42)
self.label_18 = QtWidgets.QLabel(self.widget_7)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_18.setFont(font)
self.label_18.setObjectName("label_18")
self.horizontalLayout_5.addWidget(self.label_18)
self.info47 = QtWidgets.QLabel(self.widget_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info47.sizePolicy().hasHeightForWidth())
self.info47.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info47.setFont(font)
self.info47.setScaledContents(False)
self.info47.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.info47.setWordWrap(False)
self.info47.setObjectName("info47")
self.horizontalLayout_5.addWidget(self.info47)
self.verticalLayout_6.addLayout(self.horizontalLayout_5)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.label_26 = QtWidgets.QLabel(self.widget_7)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_26.setFont(font)
self.label_26.setObjectName("label_26")
self.horizontalLayout_6.addWidget(self.label_26)
self.info43 = QtWidgets.QLabel(self.widget_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info43.sizePolicy().hasHeightForWidth())
self.info43.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info43.setFont(font)
self.info43.setScaledContents(False)
self.info43.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.info43.setWordWrap(False)
self.info43.setObjectName("info43")
self.horizontalLayout_6.addWidget(self.info43)
self.label_20 = QtWidgets.QLabel(self.widget_7)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_20.setFont(font)
self.label_20.setObjectName("label_20")
self.horizontalLayout_6.addWidget(self.label_20)
self.info48 = QtWidgets.QLabel(self.widget_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info48.sizePolicy().hasHeightForWidth())
self.info48.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info48.setFont(font)
self.info48.setScaledContents(False)
self.info48.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.info48.setWordWrap(False)
self.info48.setObjectName("info48")
self.horizontalLayout_6.addWidget(self.info48)
self.verticalLayout_6.addLayout(self.horizontalLayout_6)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.label_12 = QtWidgets.QLabel(self.widget_7)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_12.setFont(font)
self.label_12.setObjectName("label_12")
self.horizontalLayout_7.addWidget(self.label_12)
self.info44 = QtWidgets.QLabel(self.widget_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info44.sizePolicy().hasHeightForWidth())
self.info44.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info44.setFont(font)
self.info44.setScaledContents(False)
self.info44.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.info44.setWordWrap(False)
self.info44.setObjectName("info44")
self.horizontalLayout_7.addWidget(self.info44)
self.label_22 = QtWidgets.QLabel(self.widget_7)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_22.setFont(font)
self.label_22.setObjectName("label_22")
self.horizontalLayout_7.addWidget(self.label_22)
self.info49 = QtWidgets.QLabel(self.widget_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info49.sizePolicy().hasHeightForWidth())
self.info49.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info49.setFont(font)
self.info49.setScaledContents(False)
self.info49.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.info49.setWordWrap(False)
self.info49.setObjectName("info49")
self.horizontalLayout_7.addWidget(self.info49)
self.verticalLayout_6.addLayout(self.horizontalLayout_7)
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.label_14 = QtWidgets.QLabel(self.widget_7)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_14.setFont(font)
self.label_14.setObjectName("label_14")
self.horizontalLayout_8.addWidget(self.label_14)
self.info45 = QtWidgets.QLabel(self.widget_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info45.sizePolicy().hasHeightForWidth())
self.info45.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info45.setFont(font)
self.info45.setScaledContents(False)
self.info45.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.info45.setWordWrap(False)
self.info45.setObjectName("info45")
self.horizontalLayout_8.addWidget(self.info45)
self.label_24 = QtWidgets.QLabel(self.widget_7)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_24.setFont(font)
self.label_24.setObjectName("label_24")
self.horizontalLayout_8.addWidget(self.label_24)
self.info50 = QtWidgets.QLabel(self.widget_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.info50.sizePolicy().hasHeightForWidth())
self.info50.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.info50.setFont(font)
self.info50.setScaledContents(False)
self.info50.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.info50.setWordWrap(False)
self.info50.setObjectName("info50")
self.horizontalLayout_8.addWidget(self.info50)
self.verticalLayout_6.addLayout(self.horizontalLayout_8)
self.verticalLayout_5.addWidget(self.widget_7)
self.label_4 = QtWidgets.QLabel(self.widget_5)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.verticalLayout_5.addWidget(self.label_4)
self.widget_8 = QtWidgets.QWidget(self.widget_5)
self.widget_8.setStyleSheet("")
self.widget_8.setObjectName("widget_8")
self.verticalLayout_5.addWidget(self.widget_8)
self.verticalLayout_5.setStretch(0, 10)
self.verticalLayout_5.setStretch(1, 100)
self.verticalLayout_5.setStretch(2, 10)
self.verticalLayout_5.setStretch(3, 100)
self.verticalLayout_4.addWidget(self.widget_5)
self.verticalLayout_4.setStretch(0, 10)
self.verticalLayout_4.setStretch(1, 10)
self.verticalLayout_4.setStretch(2, 100)
self.horizontalLayout_2.addWidget(self.widget_2ASDASD)
self.horizontalLayout_2.setStretch(0, 20)
self.horizontalLayout_2.setStretch(1, 40)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.verticalLayout_2.setStretch(0, 1)
self.verticalLayout_2.setStretch(1, 100)
self.verticalLayout_3.addLayout(self.verticalLayout_2)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.formLayout = QtWidgets.QFormLayout(self.tab_2)
self.formLayout.setObjectName("formLayout")
        # Ui_HangQing is a custom widget class defined elsewhere in this project;
        # it must be imported alongside the PyQt5 modules above for setupUi to run.
        self.widget_3 = Ui_HangQing(self.tab_2)
self.widget_3.setMinimumSize(QtCore.QSize(500, 500))
self.widget_3.setObjectName("widget_3")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.SpanningRole, self.widget_3)
self.tabWidget.addTab(self.tab_2, "")
self.gridLayout_16.addWidget(self.tabWidget, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 969, 23))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.BTNBTC.setText(_translate("MainWindow", "BTC"))
self.BTNETH.setText(_translate("MainWindow", "ETH"))
self.BTNEOS.setText(_translate("MainWindow", "EOS"))
self.info21.setText(_translate("MainWindow", "0"))
self.label_2.setText(_translate("MainWindow", "账户总资产:"))
self.info1.setText(_translate("MainWindow", "0"))
self.info4.setText(_translate("MainWindow", "0"))
self.label_7.setText(_translate("MainWindow", "可用保证金:"))
self.info24.setText(_translate("MainWindow", "0"))
self.label_9.setText(_translate("MainWindow", "持仓保证金:"))
self.info5.setText(_translate("MainWindow", "0"))
self.info25.setText(_translate("MainWindow", "0"))
self.label_3.setText(_translate("MainWindow", "已实现盈亏:"))
self.info2.setText(_translate("MainWindow", "0"))
self.info22.setText(_translate("MainWindow", "0"))
self.label_5.setText(_translate("MainWindow", "未实现盈亏:"))
self.info3.setText(_translate("MainWindow", "0"))
self.info23.setText(_translate("MainWindow", "0"))
self.label_11.setText(_translate("MainWindow", "冻结保证金:"))
self.info6.setText(_translate("MainWindow", "0"))
self.info21_6.setText(_translate("MainWindow", "0"))
self.label_17.setText(_translate("MainWindow", "预估强平价:"))
self.info7.setText(_translate("MainWindow", "0"))
self.label_15.setText(_translate("MainWindow", "保证金率: "))
self.info8.setText(_translate("MainWindow", "0"))
self.label_13.setText(_translate("MainWindow", "调整系数: "))
self.info9.setText(_translate("MainWindow", "0"))
self.label_19.setText(_translate("MainWindow", "杠杠倍数:"))
self.info10.setText(_translate("MainWindow", "0"))
self.label_21.setText(_translate("MainWindow", "品种代码:"))
self.info11.setText(_translate("MainWindow", "0"))
self.label_23.setText(_translate("MainWindow", "账户权益:"))
self.info12.setText(_translate("MainWindow", "0"))
self.label_25.setText(_translate("MainWindow", "账户权益:"))
self.info13.setText(_translate("MainWindow", "0"))
self.label_27.setText(_translate("MainWindow", "账户权益:"))
self.info14.setText(_translate("MainWindow", "0"))
self.label_29.setText(_translate("MainWindow", "账户权益:"))
self.info15.setText(_translate("MainWindow", "0"))
self.info51.setText(_translate("MainWindow", "NONE"))
self.order3.setText(_translate("MainWindow", "总张数:NONE"))
self.order2.setItemText(0, _translate("MainWindow", "25%"))
self.order2.setItemText(1, _translate("MainWindow", "50%"))
self.order2.setItemText(2, _translate("MainWindow", "75%"))
self.order2.setItemText(3, _translate("MainWindow", "100%"))
self.order1.setItemText(0, _translate("MainWindow", "对手价"))
self.order1.setItemText(1, _translate("MainWindow", "最优5档"))
self.order1.setItemText(2, _translate("MainWindow", "最优10档"))
self.order4.setItemText(0, _translate("MainWindow", "开空"))
self.order4.setItemText(1, _translate("MainWindow", "开多"))
self.order4.setItemText(2, _translate("MainWindow", "平空"))
self.order4.setItemText(3, _translate("MainWindow", "平多"))
self.order7.setText(_translate("MainWindow", "一键下单"))
self.info53.setText(_translate("MainWindow", "已成交|多单"))
self.label_8.setText(_translate("MainWindow", "杠杆:"))
self.info41.setText(_translate("MainWindow", "0"))
self.label_16.setText(_translate("MainWindow", "未实现盈亏:"))
self.info46.setText(_translate("MainWindow", "0"))
self.label_10.setText(_translate("MainWindow", "开仓均价:"))
self.info42.setText(_translate("MainWindow", "0"))
self.label_18.setText(_translate("MainWindow", "收益:"))
self.info47.setText(_translate("MainWindow", "0"))
self.label_26.setText(_translate("MainWindow", "持仓均价:"))
self.info43.setText(_translate("MainWindow", "0"))
self.label_20.setText(_translate("MainWindow", "收益率:"))
self.info48.setText(_translate("MainWindow", "0"))
self.label_12.setText(_translate("MainWindow", "最新价:"))
self.info44.setText(_translate("MainWindow", "0"))
self.label_22.setText(_translate("MainWindow", "持仓量:"))
self.info49.setText(_translate("MainWindow", "0"))
self.label_14.setText(_translate("MainWindow", "持仓保证金:"))
self.info45.setText(_translate("MainWindow", "0"))
self.label_24.setText(_translate("MainWindow", "可平量:"))
self.info50.setText(_translate("MainWindow", "0"))
self.label_4.setText(_translate("MainWindow", "未成交"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", " 账户 "))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", " 信息 "))
from PythonHangQing import Ui_HangQing
```
#### File: HuoBiApi_Python/PythonApplication/PythonHangQing.py
```python
from pprint import pprint
from PyQt5.QtGui import QPainter
from PyQt5.QtWidgets import QStyleOption, QStyle, QWidget
from PythonHangQingUI import Ui_PythonHangQing
# Market quotes page
class Ui_HangQing(QWidget):
def __init__(self,parent=None):
super(Ui_HangQing, self).__init__(parent)
self.__Slate = Ui_PythonHangQing()
self.__Slate.setupUi(self)
def paintEvent(self, event):
# The following lines prevent the widget's painting (e.g. stylesheets) from breaking when it is nested and re-parented
opt = QStyleOption()
opt.initFrom(self)
p = QPainter(self)
self.style().drawPrimitive(QStyle.PE_Widget, opt, p, self)
```
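The `paintEvent` override above is the stock Qt recipe for making style sheets apply to a custom `QWidget` subclass. As a self-contained illustration of the same pattern (independent of `Ui_PythonHangQing`, which is not shown here), a minimal widget could look like this:
```python
import sys

from PyQt5.QtGui import QPainter
from PyQt5.QtWidgets import QApplication, QStyle, QStyleOption, QWidget


class StyledWidget(QWidget):
    """A plain QWidget subclass using the same paintEvent trick as Ui_HangQing."""

    def paintEvent(self, event):
        # Without this, style sheets set on a custom QWidget subclass are ignored.
        opt = QStyleOption()
        opt.initFrom(self)
        painter = QPainter(self)
        self.style().drawPrimitive(QStyle.PE_Widget, opt, painter, self)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    widget = StyledWidget()
    widget.setStyleSheet("background-color: #202020;")
    widget.resize(240, 120)
    widget.show()
    sys.exit(app.exec_())
```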
#### File: HuoBiApi_Python/PythonApplication/PythonSqliteRead.py
```python
import sqlite3
import time
from pprint import pprint
# Current time, used to build the monthly table name
curTime = time.localtime(time.time())
gTableName ='[' + str(curTime.tm_year) + str(curTime.tm_mon) + ']'
class HuoBiSqliteRead:
__sqlite = ''
__sqliteCur = ''
__tableCur = ''
__nums = 0
def __init__(self):
pass
# Open the database table
def open(self):
try:
self.__sqlite = sqlite3.connect('./DataBase/HuoBiOrder.db', isolation_level=None)
self.__sqlite.execute('pragma journal_mode=wal;')
#self.__sqliteCur = self.__sqlite.execute('SELECT * from {}'.format(gTableName))
self.__sqliteCur = self.__sqlite.cursor()
except Exception as e:
pprint(e)
def getOrderLastLines(self,lines):
# Table header
# self.__tableCur = self.__sqliteCur.execute('SELECT * FROM {}'.format(gTableName))
cur1 = time.time()
self.__tableCur = self.__sqliteCur.execute('SELECT * FROM {} ORDER BY id DESC limit {}'.format(gTableName,lines))
cur2 = time.time()
pprint(cur2 - cur1)
return self.__tableCur.fetchall()
# Quick test
if __name__ == '__main__':
sqlRead = HuoBiSqliteRead()
sqlRead.open()
while 1:
time.sleep(2)
#pprint(sqlRead.getAllLineNums())
pass
```
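A minimal usage sketch of the reader above, assuming `./DataBase/HuoBiOrder.db` exists and contains the monthly table (with an `id` column) that `gTableName` points at:
```python
from pprint import pprint

reader = HuoBiSqliteRead()
reader.open()
# Fetch the 10 most recent rows (newest first) as a list of row tuples.
pprint(reader.getOrderLastLines(10))
```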
|
{
"source": "jcwhittier/Simple-Stream-Processor",
"score": 4
}
|
#### File: Simple-Stream-Processor/StreamProcessor/CountWindow.py
```python
__author__ = 'jcwhittier'
class CountWindow(object):
"""Constructor to create a new count-based window of a specified size"""
def __init__(self, window_size):
self.window_size = window_size
self.window_tuples = []
def add_tuple(self, tuple):
"""Add a new tuple to the window and make sure that the oldest tuple is removed if the window is full"""
self.window_tuples.append(tuple)
if self.count_tuples_in_window() > self.window_size:
self.window_tuples.pop(0)
def get_tuples(self):
"""Get all the tuples currently in the window"""
return self.window_tuples
def clear_tuples(self):
"""Clear the tuples in the window"""
self.window_tuples = []
def count_tuples_in_window(self):
"""Get the number of tuples in the window"""
return len(self.window_tuples)
```
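A short usage example of the window above: once a fourth tuple is added to a window of size 3, the oldest tuple is evicted:
```python
window = CountWindow(window_size=3)
for tup in [("t1", 10), ("t2", 20), ("t3", 30), ("t4", 40)]:
    window.add_tuple(tup)

print(window.get_tuples())              # [('t2', 20), ('t3', 30), ('t4', 40)]
print(window.count_tuples_in_window())  # 3
window.clear_tuples()
print(window.count_tuples_in_window())  # 0
```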
#### File: Simple-Stream-Processor/StreamProcessor/Query.py
```python
__author__ = 'jcwhittier'
import sys
from enum import Enum
class Query(object):
class QueryOutputFormat(Enum):
STRING = 1
STREAM_TUPLE = 2
def __init__(self, *query_pipeline):
self.query_pipeline = None
for query_op in query_pipeline:
self.append_downstream(query_op)
self.output_file = sys.stdout
self.query_output_format = self.QueryOutputFormat.STREAM_TUPLE
def clear(self):
self.query_pipeline = None
def append_upstream(self, query_to_append):
if not self.query_pipeline:
self.query_pipeline = self.operator_in_query(query_to_append)
else:
self.query_pipeline.get_most_upstream().upstream_operator = self.operator_in_query(query_to_append)
def append_downstream(self, query_to_append):
new_query = self.operator_in_query(query_to_append)
if self.query_pipeline:
new_query.get_most_upstream().upstream_operator = self.query_pipeline
self.query_pipeline = new_query
def process_tuple(self, tup):
result = None
if self.query_pipeline:
result = self.query_pipeline.process_tuple(tup)
if result:
if self.query_output_format is self.QueryOutputFormat.STRING:
result = str(result)
print(result, file=self.output_file)
@staticmethod
def operator_in_query(query):
if query and isinstance(query, Query):
return query.query_pipeline
return query
```
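The `Query` class above only assumes that the objects passed to it expose `process_tuple`, `upstream_operator`, and `get_most_upstream`; the repository's actual operator classes are not shown here. A minimal hedged sketch with a hypothetical pass-through operator that satisfies that interface:
```python
class PassThroughOperator:
    """Hypothetical stand-in operator implementing the interface Query relies on."""

    def __init__(self):
        self.upstream_operator = None

    def get_most_upstream(self):
        # Walk the upstream chain until the first operator is reached.
        op = self
        while op.upstream_operator is not None:
            op = op.upstream_operator
        return op

    def process_tuple(self, tup):
        # Delegate to the upstream operator first (if any), then pass the tuple on.
        if self.upstream_operator is not None:
            tup = self.upstream_operator.process_tuple(tup)
        return tup


query = Query(PassThroughOperator(), PassThroughOperator())
query.process_tuple(("sensor-1", 42))  # prints ('sensor-1', 42) to stdout
```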
|
{
"source": "jcwill415/investment-calculator",
"score": 4
}
|
#### File: investment-calculator/python/investment-calculator.py
```python
import sys
from classlist import investment_models
from classlist import lifetime_earnings_models
def main():
'''
Introductory message briefly explaining the program.
'''
print("Welcome to the Investment Calculator."
"\nThis calculator contains basic models for ROI, NPV, PP, and DCF."
"\nAdditionally, it contains a model to calculate lifetime earnings, or earnings over n years."
"\nGiven a starting salary x, will provide you with an estimate of your total earnings over n years."
"\nYear-to-year salary increases are estimated at 3%."
"\nEnter the required data when prompted and you will receive a calculation for the chosen model."
"\nType 'exit' at any main prompt to exit the program."
)
print("")
choose()
def choose():
'''
Prompts user to choose between the two main calculators.
'''
choice = input("\nPress 1 for the investment models calculator or press 2 for the lifetime earnings calculator: ")
if choice == "1":
choice_investment_models()
if choice == "2":
choice_lifetime_earnings_model()
choice = choice.upper()
# Converts to uppercase to filter exit command
if choice == 'EXIT':
# Exits program
print("Goodbye.")
sys.exit()
else:
# Re-enters choose function if the user doesn't input a valid command
print("Invalid input.")
choose()
def choice_investment_models():
'''
Prompts user to choose an investment model for calculation.
'''
choice = input("\nWhat would you like to calculate first? (ROI, NPV, PP, or DCF): ")
choice = choice.upper()
# Converts to uppercase to filter input
if choice == "ROI":
ROI()
if choice == "NPV":
NPV()
if choice == "PP":
PP()
if choice == "DCF":
DCF()
if choice == 'EXIT':
print("Goodbye.")
sys.exit()
else:
# Re-enters choice_investment_models function if the user doesn't input a valid command
print("Invalid input.")
choice_investment_models()
def ROI():
'''
Input: user data required for the ROI model
Output: ROI calculation
'''
# Controls in place in case user does not enter a number
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
cost = abs(float(input("Enter the initial investment cost: ")))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
while True:
try:
# Converts from str to float for decimals and the calculation
gain = float(input("\nEnter the gain or loss from the investment: "))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
# Calls the calc_ROI function from the classlist file in order to calculate the ROI
print("\nYour ROI is " + str(investment_models.calc_ROI(cost, gain)) + "%.")
end()
def NPV():
'''
Input: user data required for the NPV model
Output: NPV calculation
'''
# Controls in place in case user does not enter a number
while True:
try:
# Converts from str to float for decimals and the calculation
# Rounds to the nearest whole number for calculation
# Abs is to control for negative inputs
time = abs(round(float(input("Enter the length of the project or investment in years (decimals will be rounded to the nearest whole number): "))))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
discount = abs(float(input("Enter the discount rate (please enter in decimal form): ")))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
cost = abs(float(input("Enter the initial investment cost: ")))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
# Calls the calc_NPV function from the classlist file in order to calculate the NPV
# Prints in dollar format
print("\nYour NPV is " + "${:,.2f}".format(investment_models.calc_NPV(time, discount, cost)))
end()
def PP():
'''
Input: user data required for the PP model
Output: PP calculation
'''
# Controls in place in case user does not enter a number
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
cost = abs(float(input("Enter the initial investment cost: ")))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
annual_gain = abs(float(input("Enter the annual net cash flow gained from the investment: ")))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
# Calls the calc_PP function from the classlist file in order to calculate the PP
print("\nYour PP is " + str(investment_models.calc_PP(cost, annual_gain)) + " years.")
end()
def DCF():
'''
Input: user data required for the DCF model
Output: DCF calculation
'''
# Controls in place in case user does not enter a number
while True:
try:
# Converts from str to float for decimals and the calculation
# Rounds to the nearest whole number for calculation
# Abs is to control for negative inputs
time = abs(round(float(input("Enter the length of the project or investment in years (decimals will be rounded to the nearest whole number): "))))
except ValueError:
print("Invalid input. Please enter an integer.")
continue
break
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
discount = abs(float(input("Enter the discount rate (please enter in decimal form): ")))
except ValueError:
print("Invalid input. Please enter an integer.")
continue
break
# Calls the calc_DCF function from the classlist file in order to calculate the DCF
# Prints in dollar format
print("\nYour DCF is " + "${:,.2f}".format(investment_models.calc_DCF(time, discount)))
end()
def choice_lifetime_earnings_model():
'''
Gives user option to include the cost of a degree or certification with the calculation.
'''
choice = (input("Would you like to calculate the earnings of a degree or certification? (Y/N): "))
choice = choice.upper()
# Converts to uppercase to filter exit command
if choice == "Y" or choice == "YES":
earnings_degree()
if choice == "N" or choice == "NO":
earnings()
if choice == "EXIT":
# Exits program
print("\nGoodbye.")
sys.exit()
else:
# Re-enters choice_lifetime_earningsModel function if the user doesn't input a valid command
print("\nInvalid input.")
choice_lifetime_earnings_model()
def earnings():
'''
Input: user data required for earnings calculation (no degree/cert)
Output: earnings calculation without degree/cert
'''
# Controls in place in case user does not enter a number
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
starting_salary = abs(float(input("Enter starting salary: ")))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
while True:
try:
# Converts from str to float for decimals and the calculation
# Rounds to the nearest whole number for calculation
# Abs is to control for negative inputs
years = abs(round(float(input("Enter the number of years you'd like to calculate (decimals will be rounded to the nearest whole number): "))))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
# Calls the calc_earnings function from the classlist file in order to calculate the earnings
# Prints in dollar format
print("\nYour earnings over a " + str(years) + " period, given a starting salary of " + "${:,.2f}".format(starting_salary) + ", total " + "${:,.2f}".format(lifetime_earnings_models.calc_earnings(starting_salary, years)))
end()
def earnings_degree():
'''
Input: user data required for earnings calculation
Output: earnings calculation with degree/cert
'''
# Controls in place in case user does not enter a number
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
cost = abs(float(input("Enter the total cost of the degree or certification: ")))
except ValueError:
print("Invalid input. Please enter a number.")
continue
break
while True:
try:
# Converts from str to float for decimals and the calculation
# Rounds to the nearest whole number for calculation
# Abs is to control for negative inputs
time = abs(round(float(input("Enter the number of years it will take you to complete your degree or certification (decimals will be rounded to the nearest whole number): "))))
except ValueError:
print("Invalid input. Please enter a number.")
continue
break
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
temp_salary = abs(float(input("Enter your yearly salary while you complete this degree or certification: ")))
except ValueError:
print("Invalid input. Please enter a number.")
continue
break
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
starting_salary = abs(float(input("Enter starting salary: ")))
except ValueError:
print("Invalid input. Please enter a number.")
continue
break
while True:
try:
# Converts from str to float for decimals and the calculation
# Rounds to the nearest whole number for calculation
# Abs is to control for negative inputs
years = abs(round(float(input("Enter the number of years you'd like to calculate (decimals will be rounded to the nearest whole number): "))))
except ValueError:
print("Invalid input. Please enter a number.")
continue
break
# Calls the calc_earnings_degree function from the classlist file in order to calculate the earnings with degree/cert
# Prints in dollar format
print("\nYour post-completion earnings over a " + str(years) + " period, given a starting salary of " + "${:,.2f}".format(starting_salary) + ", total " + "${:,.2f}".format(lifetime_earnings_models.calc_earnings_degree(temp_salary, cost, time, starting_salary, years)))
end()
def end():
'''
Gives the user the option to calculate something else or exit the program.
'''
choice = input("\nPress 1 to calculate something else or press 2 (or type 'exit') to exit the program: ")
choice = choice.upper()
# Converts to uppercase to filter exit command
if choice == "1":
choose()
if choice == "2" or choice == "EXIT":
# Exits program
print("Goodbye.")
sys.exit()
else:
# Re-enters end function if the user doesn't input a valid command
print("Invalid input.")
end()
main()
```
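The prompts above collect the inputs and delegate all arithmetic to `classlist.investment_models` and `classlist.lifetime_earnings_models`, which are not included in this file. Purely as a hedged sketch, two stand-ins consistent with the call sites `calc_ROI(cost, gain)` and `calc_PP(cost, annual_gain)` might look like this (the repository's real formulas may differ):
```python
class investment_models:
    """Hypothetical stand-in; the real module lives in classlist.py (not shown here)."""

    @staticmethod
    def calc_ROI(cost, gain):
        # Return on investment as a percentage, treating `gain` as the net gain/loss.
        return round(gain / cost * 100, 2)

    @staticmethod
    def calc_PP(cost, annual_gain):
        # Payback period in years: initial cost divided by annual net cash flow.
        return round(cost / annual_gain, 2)


print("Your ROI is " + str(investment_models.calc_ROI(1000, 250)) + "%.")     # 25.0%
print("Your PP is " + str(investment_models.calc_PP(1000, 250)) + " years.")  # 4.0 years
```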
|
{
"source": "jcwillox/java-to-pde",
"score": 2
}
|
#### File: jcwillox/java-to-pde/java-to-processing.py
```python
import sys
import os
import shutil
import distutils.core
import distutils.dir_util
import getopt
def usage():
print("usage: python java-to-processing.py <source dir> <dest dir> [options...]")
print("options: ")
print(" -o --overwrite Overwrite files in destination folder")
print(" --noformat Do not attempt some basic formatting,\n" +
" use if it produces weirdly formatted code")
print(" -h --help this cruft")
print(" --version displays the version of this script")
#print(" -v --verbose enable verbose logging")
sys.exit()
if (len(sys.argv) < 3):
if ("--version" in sys.argv):
print("[Version] 2.5.0")
sys.exit()
else:
usage()
def getJavaFiles(sourceFolder):
javaFiles = []
for (root, dirs, files) in os.walk(sourceFolder):
### LOCATE .JAVA FILES ###
for name in files:
if (os.path.splitext(name)[-1]==".java" and not name.__contains__("Sketch.java")):
print(" %s" % os.path.join(root, name))
javaFiles.append((os.path.join(root, name), name))
return javaFiles
def getDataFolder(sourceFolder):
for (root, dirs, files) in os.walk(sourceFolder):
### LOCATE DATA DIRECTORY ###
for name in dirs:
if (name=="data" and "src" in root):
return os.path.join(root, name)
return None
### IF DATA FOLDER PRESENT WRITE TO DEST ###
def copyDataFolder(dataFolder, destFolder, overwrite):
if (overwrite):
try:
distutils.dir_util.copy_tree(dataFolder, destFolder + "/data")
except distutils.core.DistutilsError as e:
print('Error: %s' % e)
else:
try:
shutil.copytree(dataFolder, destFolder + "/data")
# Directories are the same
except shutil.Error as e:
print('Directory not copied. Error: %s' % e)
# Any error saying that the directory doesn't exist
except OSError as e:
print('Directory not copied. Error: %s' % e)
### PARSING METHODS ###
def findEndBracket(lines, start):
openBrackets = 0
for x in range(start, len(lines)):
line = lines[x].replace(" ", "")
if (line.find("{", len(line)-2) > -1):
openBrackets += 1
if (line.find("}", 0, 1) > -1):
openBrackets -= 1
if (openBrackets==0):
return x
def getMainMethod(javaFiles):
for file, name in javaFiles:
file = open(file, "r")
lines = file.readlines()
if (isMainMethod(lines)):
return name
### Statement Parsers ###
def isImport(line):
line = line.replace(" ", "")
if (line.find("import", 0, 6) > -1):
return line.__contains__("processing.")
return False
def isPackage(line): return line.find("package", 0, 7) > -1
def isMainMethod(lines):
if (str(lines).__contains__("public static void main(")): return True
return False
### File Parsers ###
def parseGeneric(lines):
### CONVERT JAVA CLASS TO PROCESSING PDE ###
removeIndexes = []
for idx, line in enumerate(lines):
if (isPackage(line)):
removeIndexes.append(idx)
elif (isImport(line)):
removeIndexes.append(idx)
_lines = []
removeIndexes = set(removeIndexes)
for idx, line in enumerate(lines):
if (idx not in removeIndexes):
_lines.append(line)
return _lines
def formatMainMethod(line):
if (line.find(" ", 0, 4) > -1):
if (not line=="\n"):
line = line[4:-1] # Remove 4 spaces from the beginning of line, to account for removal of main class
line += "\n"
return line
def isClassHeader(line):
line = line.replace(" ", "")
if (line.find("extendsPApplet{", len(line)-16) > -1):
return True
return False
def isJavaMainFunction(line):
line = line.replace(" ", "")
if (line.find("publicstaticvoidmain(String", 0, 28) > -1):
return True
return False
def parseMainMethod(lines):
removeIndexes = []
for idx, line in enumerate(lines):
if (formatCode):
lines[idx] = formatMainMethod(line)
if (isClassHeader(line)):
removeIndexes.append(idx)
y = findEndBracket(lines, idx)
removeIndexes.append(y)
elif (isJavaMainFunction(line)):
y = findEndBracket(lines, idx)
#print(list(range(idx,y+1)))
#print([lines[x] for x in range(idx,y+1)])
removeIndexes.extend(range(idx,y+1))
# Return array minus the remove indexes
_lines = []
removeIndexes = set(removeIndexes)
for idx, line in enumerate(lines):
if (idx not in removeIndexes):
_lines.append(line)
return _lines
formatCode = True
def main():
global formatCode
### Script Vars ###
overwrite = False
verbose = False
sourceFolder = sys.argv[1]
destFolder = sys.argv[2]
### Parse Options ###
try:
opts, args = getopt.getopt(sys.argv[3:], "hvo", ["help", "version", "verbose", "noformat", "overwrite"])
except:
usage()
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
elif opt in ("-o", "--overwrite"):
print("-o")
overwrite = True
elif opt in ("--noformat"):
formatCode = False
elif opt in ("-v", "--verbose"):
verbose = True
print("[Java Files]")
javaFiles = getJavaFiles(sourceFolder)
mainMethod = getMainMethod(javaFiles)
destFolder = os.path.join(destFolder, os.path.splitext(mainMethod)[0])
print("[source] %s" % sourceFolder)
print("[dest] %s" % destFolder)
print("[overwrite] %s" % overwrite)
print("[formatCode] %s" % formatCode)
dataFolder = getDataFolder(sourceFolder)
print("[Data Folder] %s" % dataFolder)
print("[Main Method] %s" % mainMethod)
print()
answer = input("Is this information correct. Continue and Convert? [y/n]: ")
if (answer.lower()!="y"): quit()
if (dataFolder != None): copyDataFolder(dataFolder, destFolder, overwrite)
# Create folder for main method
if (not os.path.exists(destFolder)):
os.makedirs(destFolder)
for file, name in javaFiles:
file = open(file, "r")
lines = file.readlines()
if (name == mainMethod):
print("Parsing Main Method")
lines = parseMainMethod(lines)
lines = parseGeneric(lines)
#for line in lines:
#print(line, end='')
# pass
### Write Output File ###
destPath = "%s/%s.pde" % (destFolder, os.path.splitext(name)[0])
if (overwrite==False and os.path.isfile(destPath)):
print("File already exists, to overwrite use --overwrite")
else:
file = open(destPath, "w")
file.writelines(lines)
file.close()
if __name__ == '__main__':
main()
```
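A small, self-contained illustration of the parsing helpers above on an in-memory Java snippet; `parseGeneric` drops the `package` line and any `processing.*` import while keeping ordinary imports:
```python
sample = [
    "package com.example.sketch;\n",
    "import processing.core.PApplet;\n",
    "import java.util.List;\n",
    "void draw() {\n",
    "}\n",
]

print(parseGeneric(sample))
# ['import java.util.List;\n', 'void draw() {\n', '}\n']
```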
|
{
"source": "jcwilson/aladdin",
"score": 3
}
|
#### File: commands/python/arg_tools.py
```python
import os
def add_namespace_argument(arg_parser):
namespace_def = os.getenv("NAMESPACE", "default")
arg_parser.add_argument(
"--namespace",
"-n",
default=namespace_def,
help="namespace name, defaults to default current : [{}]".format(namespace_def),
)
```
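A minimal usage sketch with argparse; when the `NAMESPACE` environment variable is unset, the flag falls back to "default":
```python
import argparse

parser = argparse.ArgumentParser(prog="aladdin")
add_namespace_argument(parser)

args = parser.parse_args(["--namespace", "staging"])
print(args.namespace)  # staging

args = parser.parse_args([])
print(args.namespace)  # value of $NAMESPACE, or "default" if unset
```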
#### File: commands/python/cluster_rules.py
```python
import boto3
from libs.aws.certificate import search_certificate_arn, new_certificate_arn
from libs.aws.dns_mapping import (
get_hostedzone,
create_hostedzone,
get_ns_from_hostedzone,
check_ns_values,
fill_dns_dict,
)
from config import *
class ClusterRules(object):
def __init__(self, rules, namespace="default"):
self.rules = rules
self._namespace = namespace
def __getattr__(self, attr):
if attr in self.rules:
return self.rules.get(attr)
raise AttributeError(
"'{}' object has no attribute '{}'".format(self.__class__.__name__, attr)
)
def get_certificate_arn(self):
cert = self.values.get("service.certificateArn")
# Check against None to allow empty string
if cert is None:
cert = search_certificate_arn(self._boto, self.certificate_dns)
# Check against None to allow empty string
if cert is None:
cert = new_certificate_arn(self._boto, self.certificate_dns)
return cert
@property
def namespace(self):
return self._namespace
@property
def certificate_dns(self):
return "*.{}".format(self.service_dns_suffix)
@property
def sub_dns(self):
"""
The DNS name we are going to use (namespace.root_dns)
"""
return "{}.{}".format(self._namespace, self.root_dns)
@property
def service_dns_suffix(self):
return self.rules.get("service_dns_suffix", self.sub_dns)
@property
def check_branch(self):
return self.rules.get("check_branch", None)
@property
def is_local(self):
return self.rules.get("is_local", False)
@property
def is_prod(self):
return self.rules.get("is_prod", False)
@property
def ingress_info(self):
return self.rules.get("ingress_info", None)
@property
def namespace_init(self):
return self.rules.get("namespace_init", [])
@property
def cluster_init(self):
return self.rules.get("cluster_init", [])
@property
def dual_dns_prefix_annotation_name(self):
return self.rules.get("dual_dns_prefix_annotation_name", None)
@property
def _boto(self):
return boto3.Session(profile_name=self.aws_profile)
def fill_hostedzone(self, services_by_name):
# Apply our dns to the names
service_by_name = {"%s.%s" % (k, self.sub_dns): v for k, v in services_by_name.items()}
# Main DNS is on prod, sub DNS should be on sandbox
sub_dns_id = get_hostedzone(self._boto, self.sub_dns) or create_hostedzone(
self._boto, self.sub_dns
)
main_dns_id = get_hostedzone(self._boto, self.root_dns)
if main_dns_id is None:
raise KeyError("route 53 for [%s] not found" % self.root_dns)
dns_ns = get_ns_from_hostedzone(self._boto, sub_dns_id)
check_ns_values(self._boto, main_dns_id, self.sub_dns, dns_ns)
return fill_dns_dict(self._boto, sub_dns_id, service_by_name)
def cluster_rules(cluster=None, namespace="default"):
if cluster is None:
cluster = os.environ["CLUSTER_CODE"]
default_cluster_config = {}
cluster_config = {}
namespace_override_config = {}
try:
default_cluster_config = load_cluster_config("default")
except FileNotFoundError:
pass
try:
cluster_config = load_cluster_config(cluster)
except FileNotFoundError:
raise FileNotFoundError(f"Could not find config.json file for cluster {cluster}")
try:
namespace_override_config = load_namespace_override_config(cluster, namespace)
except FileNotFoundError:
pass
rules = dict(default_cluster_config)
_update_rules(rules, cluster_config)
if namespace_override_config:
_update_rules(rules, namespace_override_config)
allowed_namespaces = rules["allowed_namespaces"]
if allowed_namespaces != ["*"] and namespace not in allowed_namespaces:
raise KeyError(f"Namespace {namespace} is not allowed on cluster {cluster}")
return ClusterRules(rules, namespace)
def _update_rules(rules, override):
# Merge the inner "values" dictionary separately so the override does not replace it wholesale
values = rules.get("values", {})
values.update(override.get("values", {}))
rules.update(override)
# Put back updated values
rules["values"] = values
```
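A tiny illustration of the merge behaviour of `_update_rules` (ignoring the boto3/config imports the module needs): the nested "values" dictionary is merged key by key rather than being replaced wholesale by the override:
```python
rules = {"aws_profile": "default", "values": {"service.replicas": 1}}
override = {"values": {"service.certificateArn": "arn:aws:acm:..."}}

_update_rules(rules, override)
print(rules["values"])
# {'service.replicas': 1, 'service.certificateArn': 'arn:aws:acm:...'}
```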
#### File: python/command/tail.py
```python
import logging
from arg_tools import add_namespace_argument
from libs.k8s.kubernetes import Kubernetes
def parse_args(sub_parser):
subparser = sub_parser.add_parser("tail", help="Tail logs of multiple pods")
subparser.set_defaults(func=tail_args)
subparser.add_argument(
"--container", default=None, nargs="?", help="which container to view logs for"
)
subparser.add_argument(
"--color",
default="pod",
nargs="?",
choices=["pod", "line", "false"],
help=(
"specify how to colorize the logs, defaults to 'pod'."
" Options:"
" pod: Only the pod name is colorized, but the logged"
" text uses the terminal default color."
" line: The entire line is colorized."
" false: Do not colorize output at all"
),
)
add_namespace_argument(subparser)
pod_group = subparser.add_mutually_exclusive_group()
pod_group.add_argument(
"--deployment",
action="store",
nargs="?",
const="",
default=None,
help="deployment name for pods to view logs for",
)
pod_group.add_argument(
"--pod",
action="store",
nargs="?",
const="",
default=None,
help="full name of pod to view logs for",
)
def tail_args(args):
tail(args.container, args.color, args.deployment, args.pod, args.namespace)
def tail(container_name, color, deployment_name, pod_name, namespace=None):
k = Kubernetes(namespace=namespace)
deployment, pod = None, None
if pod_name is not None:
if pod_name == "":
pod = choose_pod(k)
pod_name = pod.metadata.name
else:
try:
pod = k.get_pod_by_name(pod_name)
except IndexError:
logging.warning(
"Could not find pod with given name, please choose from the available pods"
)
pod = choose_pod(k)
pod_name = pod.metadata.name
else:
if not deployment_name:
deployment = choose_deployment(k)
deployment_name = deployment.metadata.name
else:
deployment = k.get_deployment(deployment_name)
if deployment is None:
logging.warning(
"Could not find deployment with given app name, please choose from "
"the available deployments"
)
deployment = choose_deployment(k)
deployment_name = deployment.metadata.name
if not container_name:
containers = print_containers(k, deployment=deployment, pod=pod)
if len(containers) == 1:
container = containers[0]
else:
idx = int(input("Choose index for the container to tail logs for: "))
container = containers[idx]
container_name = container.name
k.tail_logs(
deployment_name=deployment_name,
pod_name=pod_name,
container_name=container_name,
color=color,
)
def choose_pod(k):
pods = print_pods(k)
idx = int(input("Choose index for the pod to tail logs for: "))
return pods[idx]
def choose_deployment(k):
deployments = print_deployments(k)
idx = int(input("Choose index for the deployment to tail logs for: "))
return deployments[idx]
def print_deployments(k):
deployments = k.get_deployments()
print("\r\nAvailable Deployments:")
print("--------------------")
idx = 0
for deployment in deployments:
print("{idx}: deployment {deployment}".format(idx=idx, deployment=deployment.metadata.name))
idx += 1
return deployments
def print_pods(k):
pods = k.get_pods()
print("\r\nAvailable Pods:")
print("---------------")
idx = 0
for pod in pods:
print("{idx}: pod: {pod_name}".format(idx=idx, pod_name=pod.metadata.name))
idx += 1
return pods
def print_containers(k, deployment=None, pod=None):
if deployment:
containers = deployment.spec.template.spec.containers
elif pod:
containers = pod.spec.containers
if len(containers) == 1:
return containers
idx = 0
for container in containers:
print("{idx}: container {container}".format(idx=idx, container=container.name))
idx += 1
return containers
```
#### File: commands/python/config.py
```python
import json
import os
def load_cluster_configs():
return load_config()["clusters"]
def load_cluster_config(cluster):
return load_config_from_file(f'{os.environ["ALADDIN_CONFIG_DIR"]}/{cluster}/config.json')
def load_namespace_override_config(cluster, namespace):
aladdin_config_dir = os.environ["ALADDIN_CONFIG_DIR"]
return load_config_from_file(
f"{aladdin_config_dir}/{cluster}/namespace-overrides/{namespace}/config.json"
)
def load_publish_configs():
return load_config()["publish"]
def load_kubernetes_configs():
return load_config()["kubernetes"]
def load_git_configs():
return load_config()["git"]
def load_config_from_file(file):
with open(file) as json_file:
json_data = json.load(json_file)
return json_data
def load_config():
return load_config_from_file(f'{os.environ["ALADDIN_CONFIG_DIR"]}/config.json')
```
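A usage sketch, assuming `ALADDIN_CONFIG_DIR` points at an aladdin config checkout containing a top-level `config.json` plus per-cluster `<cluster>/config.json` files (the path and cluster name below are hypothetical):
```python
import os

os.environ["ALADDIN_CONFIG_DIR"] = "/path/to/aladdin-config"  # hypothetical path

print(load_git_configs())            # the "git" section of the top-level config.json
print(load_cluster_config("LOCAL"))  # <ALADDIN_CONFIG_DIR>/LOCAL/config.json
```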
#### File: libs/k8s/helm.py
```python
import os
import subprocess
import logging
from botocore.exceptions import ClientError
from os.path import join, dirname, expanduser
logger = logging.getLogger(__name__)
class Helm(object):
PACKAGE_PATH = "helm_charts/0.0.0/{project_name}/{git_ref}/{project_name}.{git_ref}.tgz"
VALUE_PATH = "helm_charts/0.0.0/{project_name}/{git_ref}/values.{values_name}.yaml"
@property
def helm_home(self):
return join(expanduser("~"), ".helm")
def init(self):
# We need to have local helm initialized for it to work
subprocess.check_call(["helm", "init", "--client-only"])
def publish(self, name, publish_rules, helm_path, hash):
# HelmContext = namedtuple('HelmContext', ['chart_home', 'values_files', 'name'])
version = "0.0.0"
logger.info("Building package")
self.init()
subprocess.check_call(
["helm", "package", "--version", version, name], cwd=dirname(helm_path)
)
package_path = join(dirname(helm_path), "{}-{}.tgz".format(name, version))
bucket_path = self.PACKAGE_PATH.format(project_name=name, git_ref=hash)
logger.info("Uploading chart")
publish_rules.s3_bucket.upload_file(package_path, bucket_path)
os.remove(package_path)
def pull_package(self, project_name, publish_rules, git_ref, extract_dir):
extract_loc = "{}/{}.tgz".format(extract_dir, project_name)
try:
publish_rules.s3_bucket.download_file(
self.PACKAGE_PATH.format(project_name=project_name, git_ref=git_ref), extract_loc
)
except ClientError:
logger.error(
"Error downloading from S3: {}".format(
self.PACKAGE_PATH.format(project_name=project_name, git_ref=git_ref)
)
)
raise
subprocess.check_call(["tar", "-xvzf", extract_loc, "-C", extract_dir])
def find_values(self, chart_path, cluster_name, namespace):
values = []
# Find all possible values yaml files for override in increasing priority
cluster_values_path = join(chart_path, "values", "values.{}.yaml".format(cluster_name))
cluster_namespace_values_path = join(
chart_path, "values", "values.{}.{}.yaml".format(cluster_name, namespace)
)
site_values_path = join(chart_path, "values", "site.yaml") # Only usable on LOCAL
if os.path.isfile(cluster_values_path):
logger.info("Found cluster values file")
values.append(cluster_values_path)
if os.path.isfile(cluster_namespace_values_path):
logger.info("Found cluster namespace values file")
values.append(cluster_namespace_values_path)
if cluster_name == "LOCAL" and os.path.isfile(site_values_path):
logger.info("Found site values file")
values.append(site_values_path)
return values
def stop(self, helm_rules):
release_name = helm_rules.release_name
command = [
"helm",
"delete",
"--purge",
release_name,
]
if self.release_exists(release_name):
subprocess.run(command, check=True)
logger.info("Successfully removed release {}".format(release_name))
else:
logger.warning(
"Could not remove release {} because it doesn't exist".format(release_name)
)
def release_exists(self, release_name):
command = ["helm", "status", release_name]
ret_code = subprocess.run(
command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
).returncode
# If return code is 0, release exists
if ret_code == 0:
return True
else:
return False
def rollback_relative(self, helm_rules, num_versions):
release_name = helm_rules.release_name
# Parse the current revision number out of the `helm list` output (could be cleaner)
output = subprocess.Popen(("helm", "list"), stdout=subprocess.PIPE)
output = subprocess.check_output(("grep", release_name), stdin=output.stdout)
current_revision = int(output.decode("utf-8").replace(" ", "").split("\t")[1])
if num_versions > current_revision:
logger.warning("Can't rollback that far")
return
self.rollback(helm_rules, current_revision - num_versions)
def rollback(self, helm_rules, revision):
release_name = helm_rules.release_name
command = ["helm", "rollback", release_name, str(revision)]
subprocess.run(command, check=True)
def start(
self, helm_rules, chart_path, cluster_name, namespace, force=False, helm_args=None, **values
):
if helm_args is None:
helm_args = []
if force:
helm_args.append("--force")
return self._run(helm_rules, chart_path, cluster_name, namespace, helm_args, **values)
def dry_run(self, helm_rules, chart_path, cluster_name, namespace, helm_args=None, **values):
if helm_args is None:
helm_args = []
helm_args += ["--dry-run", "--debug"]
return self._run(helm_rules, chart_path, cluster_name, namespace, helm_args, **values)
def _run(self, helm_rules, chart_path, cluster_name, namespace, helm_args=None, **values):
release_name = helm_rules.release_name
command = [
"helm",
"upgrade",
release_name,
chart_path,
"--install",
"--namespace={}".format(namespace),
]
for path in self.find_values(chart_path, cluster_name, namespace):
command.append("--values={}".format(path))
for set_name, set_val in values.items():
command.extend(["--set", "{}={}".format(set_name, set_val)])
if helm_args:
command.extend(helm_args)
logger.info("Executing: " + " ".join(command))
subprocess.run(command, check=True)
```
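A hedged sketch of the values-file resolution order implemented by `find_values`, using a throwaway chart directory (only this method is exercised; the S3- and helm-dependent methods are left alone):
```python
import os
import tempfile

chart = tempfile.mkdtemp()
os.makedirs(os.path.join(chart, "values"))
for name in ("values.LOCAL.yaml", "values.LOCAL.default.yaml", "site.yaml"):
    open(os.path.join(chart, "values", name), "w").close()

helm = Helm()
print(helm.find_values(chart, "LOCAL", "default"))
# Cluster file, then cluster+namespace file, then site.yaml (LOCAL only),
# in increasing override priority.
```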
|
{
"source": "jcwilson/aladdin-demo",
"score": 2
}
|
#### File: commands_app/commands/get_pods.py
```python
import os
from kubernetes import client, config
def parse_args(sub_parser):
subparser = sub_parser.add_parser("get-pods", help="Get all aladdin-demo pods")
subparser.set_defaults(func=get_pods)
def get_pods(arg):
print(get_aladdin_demo_pods())
def get_aladdin_demo_pods():
config.load_incluster_config()
v1 = client.CoreV1Api()
res = v1.list_namespaced_pod(
namespace=os.environ["NAMESPACE"],
label_selector=f"project={os.environ['PROJECT_NAME']}")
return [r.metadata.name for r in res.items]
```
|
{
"source": "jcwilson/mohawk",
"score": 2
}
|
#### File: mohawk/mohawk/bewit.py
```python
from base64 import urlsafe_b64encode, b64decode
from collections import namedtuple
import logging
import re
import six
from .base import Resource
from .util import (calculate_mac,
strings_match,
utc_now,
validate_header_attr)
from .exc import (CredentialsLookupError,
InvalidBewit,
MacMismatch,
TokenExpired)
log = logging.getLogger(__name__)
def get_bewit(resource):
"""
Returns a bewit identifier for the resource as a string.
:param resource:
Resource to generate a bewit for
:type resource: `mohawk.base.Resource`
"""
if resource.method != 'GET':
raise ValueError('bewits can only be generated for GET requests')
if resource.nonce != '':
raise ValueError('bewits must use an empty nonce')
mac = calculate_mac(
'bewit',
resource,
None,
)
if isinstance(mac, six.binary_type):
mac = mac.decode('ascii')
if resource.ext is None:
ext = ''
else:
validate_header_attr(resource.ext, name='ext')
ext = resource.ext
# b64encode works only with bytes in python3, but all of our parameters are
# in unicode, so we need to encode them. The cleanest way to do this that
# works in both python 2 and 3 is to use string formatting to get a
# unicode string, and then explicitly encode it to bytes.
inner_bewit = u"{id}\\{exp}\\{mac}\\{ext}".format(
id=resource.credentials['id'],
exp=resource.timestamp,
mac=mac,
ext=ext,
)
inner_bewit_bytes = inner_bewit.encode('ascii')
bewit_bytes = urlsafe_b64encode(inner_bewit_bytes)
# Now decode the resulting bytes back to a unicode string
return bewit_bytes.decode('ascii')
bewittuple = namedtuple('bewittuple', 'id expiration mac ext')
def parse_bewit(bewit):
"""
Returns a `bewittuple` representing the parts of an encoded bewit string.
This has the following named attributes:
(id, expiration, mac, ext)
:param bewit:
A base64 encoded bewit string
:type bewit: str
"""
decoded_bewit = b64decode(bewit).decode('ascii')
bewit_parts = decoded_bewit.split("\\")
if len(bewit_parts) != 4:
raise InvalidBewit('Expected 4 parts to bewit: %s' % decoded_bewit)
return bewittuple(*bewit_parts)
def strip_bewit(url):
"""
Strips the bewit parameter out of a url.
Returns (encoded_bewit, stripped_url)
Raises InvalidBewit if no bewit found.
:param url:
The url containing a bewit parameter
:type url: str
"""
m = re.search('[?&]bewit=([^&]+)', url)
if not m:
raise InvalidBewit('no bewit data found')
bewit = m.group(1)
stripped_url = url[:m.start()] + url[m.end():]
return bewit, stripped_url
def check_bewit(url, credential_lookup, now=None):
"""
Validates the given bewit.
Returns True if the resource has a valid bewit parameter attached,
or raises a subclass of HawkFail otherwise.
:param credential_lookup:
Callable to look up the credentials dict by sender ID.
The credentials dict must have the keys:
``id``, ``key``, and ``algorithm``.
See :ref:`receiving-request` for an example.
:type credential_lookup: callable
:param now=None:
Unix epoch time for the current time to determine if bewit has expired.
If None, then the current time as given by utc_now() is used.
:type now=None: integer
"""
raw_bewit, stripped_url = strip_bewit(url)
bewit = parse_bewit(raw_bewit)
try:
credentials = credential_lookup(bewit.id)
except LookupError:
raise CredentialsLookupError('Could not find credentials for ID {0}'
.format(bewit.id))
res = Resource(url=stripped_url,
method='GET',
credentials=credentials,
timestamp=bewit.expiration,
nonce='',
ext=bewit.ext,
)
mac = calculate_mac('bewit', res, None)
mac = mac.decode('ascii')
if not strings_match(mac, bewit.mac):
raise MacMismatch('bewit with mac {bewit_mac} did not match expected mac {expected_mac}'
.format(bewit_mac=bewit.mac,
expected_mac=mac))
# Check that the timestamp isn't expired
if now is None:
# TODO: Add offset/skew
now = utc_now()
if int(bewit.expiration) < now:
# TODO: Refactor TokenExpired to handle this better
raise TokenExpired('bewit with UTC timestamp {ts} has expired; '
'it was compared to {now}'
.format(ts=bewit.expiration, now=now),
localtime_in_seconds=now,
www_authenticate=''
)
return True
```
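A usage sketch of the functions above; the `Resource` here is built the same way `check_bewit` builds one internally, and the credentials dict and URL are placeholders:
```python
import time

from mohawk.base import Resource
from mohawk.bewit import check_bewit, get_bewit

credentials = {'id': 'some-sender', 'key': 'a very secret key', 'algorithm': 'sha256'}

res = Resource(url='https://example.net/report?id=7',
               method='GET',
               credentials=credentials,
               timestamp=int(time.time()) + 60,  # bewit is valid for one minute
               nonce='')
bewit = get_bewit(res)
url = 'https://example.net/report?id=7&bewit=' + bewit

# The receiver side looks credentials up by sender id and validates the URL.
assert check_bewit(url, credential_lookup=lambda sender_id: credentials)
```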
#### File: mohawk/mohawk/exc.py
```python
class HawkFail(Exception):
"""
All Mohawk exceptions derive from this base.
"""
class MissingAuthorization(HawkFail):
"""
No authorization header was sent by the client.
"""
class InvalidCredentials(HawkFail):
"""
The specified Hawk credentials are invalid.
For example, the dict could be formatted incorrectly.
"""
class CredentialsLookupError(HawkFail):
"""
A :class:`mohawk.Receiver` could not look up the
credentials for an incoming request.
"""
class BadHeaderValue(HawkFail):
"""
There was an error with an attribute or value when parsing
or creating a Hawk header.
"""
class MacMismatch(HawkFail):
"""
The locally calculated MAC did not match the MAC that was sent.
"""
class MisComputedContentHash(HawkFail):
"""
The signature of the content did not match the actual content.
"""
class TokenExpired(HawkFail):
"""
The timestamp on a message received has expired.
You may also receive this message if your server clock is out of sync.
Consider synchronizing it with something like `TLSdate`_.
If you are unable to synchronize your clock universally,
The `Hawk`_ spec mentions how you can `adjust`_
your sender's time to match that of the receiver in the case
of unexpected expiration.
The ``www_authenticate`` attribute of this exception is a header
that can be returned to the client. If the value is not None, it
will include a timestamp HMAC'd with the sender's credentials.
This will allow the client
to verify the value and safely apply an offset.
.. _`Hawk`: https://github.com/hueniverse/hawk
.. _`adjust`: https://github.com/hueniverse/hawk#future-time-manipulation
.. _`TLSdate`: http://linux-audit.com/tlsdate-the-secure-alternative-for-ntpd-ntpdate-and-rdate/
"""
#: Current local time in seconds that was used to compare timestamps.
localtime_in_seconds = None
# A header containing an HMAC'd server timestamp that the sender can verify.
www_authenticate = None
def __init__(self, *args, **kw):
self.localtime_in_seconds = kw.pop('localtime_in_seconds')
self.www_authenticate = kw.pop('www_authenticate')
super(HawkFail, self).__init__(*args, **kw)
class AlreadyProcessed(HawkFail):
"""
The message has already been processed and cannot be re-processed.
See :ref:`nonce` for details.
"""
class InvalidBewit(HawkFail):
"""
The bewit is invalid; e.g. it doesn't contain the right number of
parameters.
"""
class MissingContent(HawkFail):
"""
A payload's `content` or `content_type` were not provided.
See :ref:`skipping-content-checks` for details.
"""
```
|
{
"source": "jcwilson/pyfakefs",
"score": 2
}
|
#### File: pyfakefs/tests/fake_filesystem_unittest_test.py
```python
import glob
import io
import multiprocessing
import os
import shutil
import sys
import tempfile
import unittest
from distutils.dir_util import copy_tree, remove_tree
from unittest import TestCase
import pyfakefs.tests.import_as_example
from pyfakefs import fake_filesystem_unittest, fake_filesystem
from pyfakefs.extra_packages import pathlib
from pyfakefs.fake_filesystem_unittest import Patcher, Pause, patchfs
from pyfakefs.tests.fixtures import module_with_attributes
class TestPatcher(TestCase):
def test_context_manager(self):
with Patcher() as patcher:
patcher.fs.create_file('/foo/bar', contents='test')
with open('/foo/bar') as f:
contents = f.read()
self.assertEqual('test', contents)
@patchfs
def test_context_decorator(self, fs):
fs.create_file('/foo/bar', contents='test')
with open('/foo/bar') as f:
contents = f.read()
self.assertEqual('test', contents)
class TestPyfakefsUnittestBase(fake_filesystem_unittest.TestCase):
def setUp(self):
"""Set up the fake file system"""
self.setUpPyfakefs()
class TestPyfakefsUnittest(TestPyfakefsUnittestBase): # pylint: disable=R0904
"""Test the `pyfakefs.fake_filesystem_unittest.TestCase` base class."""
def test_open(self):
"""Fake `open()` function is bound"""
self.assertFalse(os.path.exists('/fake_file.txt'))
with open('/fake_file.txt', 'w') as f:
f.write("This test file was created using the open() function.\n")
self.assertTrue(self.fs.exists('/fake_file.txt'))
with open('/fake_file.txt') as f:
content = f.read()
self.assertEqual(content, 'This test file was created using the '
'open() function.\n')
def test_io_open(self):
"""Fake io module is bound"""
self.assertFalse(os.path.exists('/fake_file.txt'))
with io.open('/fake_file.txt', 'w') as f:
f.write("This test file was created using the"
" io.open() function.\n")
self.assertTrue(self.fs.exists('/fake_file.txt'))
with open('/fake_file.txt') as f:
content = f.read()
self.assertEqual(content, 'This test file was created using the '
'io.open() function.\n')
def test_os(self):
"""Fake os module is bound"""
self.assertFalse(self.fs.exists('/test/dir1/dir2'))
os.makedirs('/test/dir1/dir2')
self.assertTrue(self.fs.exists('/test/dir1/dir2'))
def test_glob(self):
"""Fake glob module is bound"""
is_windows = sys.platform.startswith('win')
self.assertEqual(glob.glob('/test/dir1/dir*'),
[])
self.fs.create_dir('/test/dir1/dir2a')
matching_paths = glob.glob('/test/dir1/dir*')
if is_windows:
self.assertEqual(matching_paths, [r'\test\dir1\dir2a'])
else:
self.assertEqual(matching_paths, ['/test/dir1/dir2a'])
self.fs.create_dir('/test/dir1/dir2b')
matching_paths = sorted(glob.glob('/test/dir1/dir*'))
if is_windows:
self.assertEqual(matching_paths,
[r'\test\dir1\dir2a', r'\test\dir1\dir2b'])
else:
self.assertEqual(matching_paths,
['/test/dir1/dir2a', '/test/dir1/dir2b'])
def test_shutil(self):
"""Fake shutil module is bound"""
self.fs.create_dir('/test/dir1/dir2a')
self.fs.create_dir('/test/dir1/dir2b')
self.assertTrue(self.fs.exists('/test/dir1/dir2b'))
self.assertTrue(self.fs.exists('/test/dir1/dir2a'))
shutil.rmtree('/test/dir1')
self.assertFalse(self.fs.exists('/test/dir1'))
@unittest.skipIf(not pathlib, "only run if pathlib is available")
def test_fakepathlib(self):
with pathlib.Path('/fake_file.txt') as p:
with p.open('w') as f:
f.write('text')
is_windows = sys.platform.startswith('win')
if is_windows:
self.assertTrue(self.fs.exists(r'\fake_file.txt'))
else:
self.assertTrue(self.fs.exists('/fake_file.txt'))
class TestPatchingImports(TestPyfakefsUnittestBase):
def test_import_as_other_name(self):
file_path = '/foo/bar/baz'
self.fs.create_file(file_path)
self.assertTrue(self.fs.exists(file_path))
self.assertTrue(
pyfakefs.tests.import_as_example.check_if_exists1(file_path))
def test_import_path_from_os(self):
"""Make sure `from os import path` patches `path`."""
file_path = '/foo/bar/baz'
self.fs.create_file(file_path)
self.assertTrue(self.fs.exists(file_path))
self.assertTrue(
pyfakefs.tests.import_as_example.check_if_exists2(file_path))
if pathlib:
def test_import_path_from_pathlib(self):
file_path = '/foo/bar'
self.fs.create_dir(file_path)
self.assertTrue(
pyfakefs.tests.import_as_example.check_if_exists3(file_path))
def test_import_function_from_os_path(self):
file_path = '/foo/bar'
self.fs.create_dir(file_path)
self.assertTrue(
pyfakefs.tests.import_as_example.check_if_exists5(file_path))
def test_import_function_from_os_path_as_other_name(self):
file_path = '/foo/bar'
self.fs.create_dir(file_path)
self.assertTrue(
pyfakefs.tests.import_as_example.check_if_exists6(file_path))
def test_import_function_from_os(self):
file_path = '/foo/bar'
self.fs.create_file(file_path, contents=b'abc')
stat_result = pyfakefs.tests.import_as_example.file_stat1(file_path)
self.assertEqual(3, stat_result.st_size)
def test_import_function_from_os_as_other_name(self):
file_path = '/foo/bar'
self.fs.create_file(file_path, contents=b'abc')
stat_result = pyfakefs.tests.import_as_example.file_stat2(file_path)
self.assertEqual(3, stat_result.st_size)
def test_import_open_as_other_name(self):
file_path = '/foo/bar'
self.fs.create_file(file_path, contents=b'abc')
contents = pyfakefs.tests.import_as_example.file_contents1(file_path)
self.assertEqual('abc', contents)
def test_import_io_open_as_other_name(self):
file_path = '/foo/bar'
self.fs.create_file(file_path, contents=b'abc')
contents = pyfakefs.tests.import_as_example.file_contents2(file_path)
self.assertEqual('abc', contents)
class TestPatchingDefaultArgs(TestPyfakefsUnittestBase):
def test_path_exists_as_default_arg_in_function(self):
file_path = '/foo/bar'
self.fs.create_dir(file_path)
self.assertTrue(
pyfakefs.tests.import_as_example.check_if_exists4(file_path))
def test_path_exists_as_default_arg_in_method(self):
file_path = '/foo/bar'
self.fs.create_dir(file_path)
sut = pyfakefs.tests.import_as_example.TestDefaultArg()
self.assertTrue(sut.check_if_exists(file_path))
class TestAttributesWithFakeModuleNames(TestPyfakefsUnittestBase):
"""Test that module attributes with names like `path` or `io` are not
stubbed out.
"""
def test_attributes(self):
"""Attributes of module under test are not patched"""
self.assertEqual(module_with_attributes.os, 'os attribute value')
self.assertEqual(module_with_attributes.path, 'path attribute value')
self.assertEqual(module_with_attributes.pathlib,
'pathlib attribute value')
self.assertEqual(module_with_attributes.shutil,
'shutil attribute value')
self.assertEqual(module_with_attributes.io, 'io attribute value')
import math as path # noqa: E402 wanted import not at top
class TestPathNotPatchedIfNotOsPath(TestPyfakefsUnittestBase):
"""Tests that `path` is not patched if it is not `os.path`.
An own path module (in this case an alias to math) can be imported
and used.
"""
def test_own_path_module(self):
self.assertEqual(2, path.floor(2.5))
class FailedPatchingTest(TestPyfakefsUnittestBase):
"""Negative tests: make sure the tests for `modules_to_reload` and
`modules_to_patch` fail if not providing the arguments.
"""
@unittest.expectedFailure
def test_system_stat(self):
file_path = '/foo/bar'
self.fs.create_file(file_path, contents=b'test')
self.assertEqual(
4, pyfakefs.tests.import_as_example.system_stat(file_path).st_size)
class ReloadModuleTest(fake_filesystem_unittest.TestCase):
"""Make sure that reloading a module allows patching of classes not
patched automatically.
"""
def setUp(self):
"""Set up the fake file system"""
self.setUpPyfakefs(
modules_to_reload=[pyfakefs.tests.import_as_example])
class NoSkipNamesTest(fake_filesystem_unittest.TestCase):
"""Reference test for additional_skip_names tests:
make sure that the module is patched by default."""
def test_path_exists(self):
self.assertTrue(
pyfakefs.tests.import_as_example.exists_this_file())
class AdditionalSkipNamesTest(fake_filesystem_unittest.TestCase):
"""Make sure that modules in additional_skip_names are not patched.
Passes module name to `additional_skip_names`."""
def setUp(self):
self.setUpPyfakefs(
additional_skip_names=['pyfakefs.tests.import_as_example'])
def test_path_exists(self):
self.assertFalse(
pyfakefs.tests.import_as_example.exists_this_file())
class AdditionalSkipNamesModuleTest(fake_filesystem_unittest.TestCase):
"""Make sure that modules in additional_skip_names are not patched.
Passes module to `additional_skip_names`."""
def setUp(self):
self.setUpPyfakefs(
additional_skip_names=[pyfakefs.tests.import_as_example])
def test_path_exists(self):
self.assertFalse(
pyfakefs.tests.import_as_example.exists_this_file())
class FakeExampleModule:
"""Used to patch a function that uses system-specific functions that
cannot be patched automatically."""
_orig_module = pyfakefs.tests.import_as_example
def __init__(self, fs):
pass
def system_stat(self, filepath):
return os.stat(filepath)
def __getattr__(self, name):
"""Forwards any non-faked calls to the standard module."""
return getattr(self._orig_module, name)
class PatchModuleTest(fake_filesystem_unittest.TestCase):
"""Make sure that reloading a module allows patching of classes not
patched automatically.
"""
def setUp(self):
"""Set up the fake file system"""
self.setUpPyfakefs(
modules_to_patch={
'pyfakefs.tests.import_as_example': FakeExampleModule})
def test_system_stat(self):
file_path = '/foo/bar'
self.fs.create_file(file_path, contents=b'test')
self.assertEqual(
4, pyfakefs.tests.import_as_example.system_stat(file_path).st_size)
class PatchModuleTestUsingDecorator(unittest.TestCase):
"""Make sure that reloading a module allows patching of classes not
patched automatically - use patchfs decorator with parameter.
"""
@patchfs
@unittest.expectedFailure
def test_system_stat_failing(self, fs):
file_path = '/foo/bar'
fs.create_file(file_path, contents=b'test')
self.assertEqual(
4, pyfakefs.tests.import_as_example.system_stat(file_path).st_size)
@patchfs(modules_to_patch={
'pyfakefs.tests.import_as_example': FakeExampleModule})
def test_system_stat(self, fs):
file_path = '/foo/bar'
fs.create_file(file_path, contents=b'test')
self.assertEqual(
4, pyfakefs.tests.import_as_example.system_stat(file_path).st_size)
class NoRootUserTest(fake_filesystem_unittest.TestCase):
"""Test allow_root_user argument to setUpPyfakefs."""
def setUp(self):
self.setUpPyfakefs(allow_root_user=False)
def test_non_root_behavior(self):
"""Check that fs behaves as non-root user regardless of actual
user rights.
"""
self.fs.is_windows_fs = False
dir_path = '/foo/bar'
self.fs.create_dir(dir_path, perm_bits=0o555)
file_path = dir_path + 'baz'
self.assertRaises(OSError, self.fs.create_file, file_path)
file_path = '/baz'
self.fs.create_file(file_path)
os.chmod(file_path, 0o400)
self.assertRaises(OSError, open, file_path, 'w')
class PauseResumeTest(TestPyfakefsUnittestBase):
def test_pause_resume(self):
fake_temp_file = tempfile.NamedTemporaryFile()
self.assertTrue(self.fs.exists(fake_temp_file.name))
self.assertTrue(os.path.exists(fake_temp_file.name))
self.pause()
self.assertTrue(self.fs.exists(fake_temp_file.name))
self.assertFalse(os.path.exists(fake_temp_file.name))
real_temp_file = tempfile.NamedTemporaryFile()
self.assertFalse(self.fs.exists(real_temp_file.name))
self.assertTrue(os.path.exists(real_temp_file.name))
self.resume()
self.assertFalse(os.path.exists(real_temp_file.name))
self.assertTrue(os.path.exists(fake_temp_file.name))
def test_pause_resume_fs(self):
fake_temp_file = tempfile.NamedTemporaryFile()
self.assertTrue(self.fs.exists(fake_temp_file.name))
self.assertTrue(os.path.exists(fake_temp_file.name))
# resume does nothing if not paused
self.fs.resume()
self.assertTrue(os.path.exists(fake_temp_file.name))
self.fs.pause()
self.assertTrue(self.fs.exists(fake_temp_file.name))
self.assertFalse(os.path.exists(fake_temp_file.name))
real_temp_file = tempfile.NamedTemporaryFile()
self.assertFalse(self.fs.exists(real_temp_file.name))
self.assertTrue(os.path.exists(real_temp_file.name))
# pause does nothing if already paused
self.fs.pause()
self.assertFalse(self.fs.exists(real_temp_file.name))
self.assertTrue(os.path.exists(real_temp_file.name))
self.fs.resume()
self.assertFalse(os.path.exists(real_temp_file.name))
self.assertTrue(os.path.exists(fake_temp_file.name))
def test_pause_resume_contextmanager(self):
fake_temp_file = tempfile.NamedTemporaryFile()
self.assertTrue(self.fs.exists(fake_temp_file.name))
self.assertTrue(os.path.exists(fake_temp_file.name))
with Pause(self):
self.assertTrue(self.fs.exists(fake_temp_file.name))
self.assertFalse(os.path.exists(fake_temp_file.name))
real_temp_file = tempfile.NamedTemporaryFile()
self.assertFalse(self.fs.exists(real_temp_file.name))
self.assertTrue(os.path.exists(real_temp_file.name))
self.assertFalse(os.path.exists(real_temp_file.name))
self.assertTrue(os.path.exists(fake_temp_file.name))
def test_pause_resume_fs_contextmanager(self):
fake_temp_file = tempfile.NamedTemporaryFile()
self.assertTrue(self.fs.exists(fake_temp_file.name))
self.assertTrue(os.path.exists(fake_temp_file.name))
with Pause(self.fs):
self.assertTrue(self.fs.exists(fake_temp_file.name))
self.assertFalse(os.path.exists(fake_temp_file.name))
real_temp_file = tempfile.NamedTemporaryFile()
self.assertFalse(self.fs.exists(real_temp_file.name))
self.assertTrue(os.path.exists(real_temp_file.name))
self.assertFalse(os.path.exists(real_temp_file.name))
self.assertTrue(os.path.exists(fake_temp_file.name))
def test_pause_resume_without_patcher(self):
fs = fake_filesystem.FakeFilesystem()
self.assertRaises(RuntimeError, fs.resume)
class PauseResumePatcherTest(fake_filesystem_unittest.TestCase):
def test_pause_resume(self):
with Patcher() as p:
fake_temp_file = tempfile.NamedTemporaryFile()
self.assertTrue(p.fs.exists(fake_temp_file.name))
self.assertTrue(os.path.exists(fake_temp_file.name))
p.pause()
self.assertTrue(p.fs.exists(fake_temp_file.name))
self.assertFalse(os.path.exists(fake_temp_file.name))
real_temp_file = tempfile.NamedTemporaryFile()
self.assertFalse(p.fs.exists(real_temp_file.name))
self.assertTrue(os.path.exists(real_temp_file.name))
p.resume()
self.assertFalse(os.path.exists(real_temp_file.name))
self.assertTrue(os.path.exists(fake_temp_file.name))
def test_pause_resume_contextmanager(self):
with Patcher() as p:
fake_temp_file = tempfile.NamedTemporaryFile()
self.assertTrue(p.fs.exists(fake_temp_file.name))
self.assertTrue(os.path.exists(fake_temp_file.name))
with Pause(p):
self.assertTrue(p.fs.exists(fake_temp_file.name))
self.assertFalse(os.path.exists(fake_temp_file.name))
real_temp_file = tempfile.NamedTemporaryFile()
self.assertFalse(p.fs.exists(real_temp_file.name))
self.assertTrue(os.path.exists(real_temp_file.name))
self.assertFalse(os.path.exists(real_temp_file.name))
self.assertTrue(os.path.exists(fake_temp_file.name))
class TestCopyOrAddRealFile(TestPyfakefsUnittestBase):
"""Tests the `fake_filesystem_unittest.TestCase.copyRealFile()` method.
Note that `copyRealFile()` is deprecated in favor of
`FakeFilesystem.add_real_file()`.
"""
filepath = None
@classmethod
def setUpClass(cls):
filename = __file__
if filename.endswith('.pyc'): # happens on windows / py27
filename = filename[:-1]
cls.filepath = os.path.abspath(filename)
with open(cls.filepath) as f:
cls.real_string_contents = f.read()
with open(cls.filepath, 'rb') as f:
cls.real_byte_contents = f.read()
cls.real_stat = os.stat(cls.filepath)
@unittest.skipIf(sys.platform == 'darwin', 'Different copy behavior')
def test_copy_real_file(self):
"""Typical usage of deprecated copyRealFile()"""
# Use this file as the file to be copied to the fake file system
fake_file = self.copyRealFile(self.filepath)
self.assertTrue(
'class TestCopyOrAddRealFile(TestPyfakefsUnittestBase)'
in self.real_string_contents,
'Verify real file string contents')
self.assertTrue(
b'class TestCopyOrAddRealFile(TestPyfakefsUnittestBase)'
in self.real_byte_contents,
'Verify real file byte contents')
# note that real_string_contents may differ to fake_file.contents
# due to newline conversions in open()
self.assertEqual(fake_file.byte_contents, self.real_byte_contents)
self.assertEqual(oct(fake_file.st_mode), oct(self.real_stat.st_mode))
self.assertEqual(fake_file.st_size, self.real_stat.st_size)
self.assertAlmostEqual(fake_file.st_ctime,
self.real_stat.st_ctime, places=5)
self.assertAlmostEqual(fake_file.st_atime,
self.real_stat.st_atime, places=5)
self.assertLess(fake_file.st_atime, self.real_stat.st_atime + 10)
self.assertAlmostEqual(fake_file.st_mtime,
self.real_stat.st_mtime, places=5)
self.assertEqual(fake_file.st_uid, self.real_stat.st_uid)
self.assertEqual(fake_file.st_gid, self.real_stat.st_gid)
def test_copy_real_file_deprecated_arguments(self):
"""Deprecated copyRealFile() arguments"""
self.assertFalse(self.fs.exists(self.filepath))
# Specify redundant fake file path
self.copyRealFile(self.filepath, self.filepath)
self.assertTrue(self.fs.exists(self.filepath))
# Test deprecated argument values
with self.assertRaises(ValueError):
self.copyRealFile(self.filepath, '/different/filename')
with self.assertRaises(ValueError):
self.copyRealFile(self.filepath, create_missing_dirs=False)
def test_add_real_file(self):
"""Add a real file to the fake file system to be read on demand"""
# this tests only the basic functionality inside a unit test, more
# thorough tests are done in
# fake_filesystem_test.RealFileSystemAccessTest
fake_file = self.fs.add_real_file(self.filepath)
self.assertTrue(self.fs.exists(self.filepath))
self.assertIsNone(fake_file._byte_contents)
self.assertEqual(self.real_byte_contents, fake_file.byte_contents)
def test_add_real_directory(self):
"""Add a real directory and the contained files to the fake file system
to be read on demand"""
# This tests only the basic functionality inside a unit test,
# more thorough tests are done in
# fake_filesystem_test.RealFileSystemAccessTest.
# Note: this test fails (add_real_directory raises) if 'genericpath'
# is not added to SKIPNAMES
real_dir_path = os.path.split(os.path.dirname(self.filepath))[0]
self.fs.add_real_directory(real_dir_path)
self.assertTrue(self.fs.exists(real_dir_path))
self.assertTrue(self.fs.exists(
os.path.join(real_dir_path, 'fake_filesystem.py')))
def test_add_real_directory_with_backslash(self):
"""Add a real directory ending with a path separator."""
real_dir_path = os.path.split(os.path.dirname(self.filepath))[0]
self.fs.add_real_directory(real_dir_path + os.sep)
self.assertTrue(self.fs.exists(real_dir_path))
self.assertTrue(self.fs.exists(
os.path.join(real_dir_path, 'fake_filesystem.py')))
class TestPyfakefsTestCase(unittest.TestCase):
def setUp(self):
class TestTestCase(fake_filesystem_unittest.TestCase):
def runTest(self):
pass
self.test_case = TestTestCase('runTest')
def test_test_case_type(self):
self.assertIsInstance(self.test_case, unittest.TestCase)
self.assertIsInstance(self.test_case,
fake_filesystem_unittest.TestCaseMixin)
class TestTempFileReload(unittest.TestCase):
"""Regression test for #356 to make sure that reloading the tempfile
does not affect other tests."""
def test_fakefs(self):
with Patcher() as patcher:
patcher.fs.create_file('/mytempfile', contents='abcd')
def test_value(self):
v = multiprocessing.Value('I', 0)
self.assertEqual(v.value, 0)
class TestPyfakefsTestCaseMixin(unittest.TestCase,
fake_filesystem_unittest.TestCaseMixin):
def test_set_up_pyfakefs(self):
self.setUpPyfakefs()
self.assertTrue(hasattr(self, 'fs'))
self.assertIsInstance(self.fs, fake_filesystem.FakeFilesystem)
class TestShutilWithZipfile(fake_filesystem_unittest.TestCase):
"""Regression test for #427."""
def setUp(self):
self.setUpPyfakefs()
self.fs.create_file('foo/bar')
def test_a(self):
shutil.make_archive('archive', 'zip', root_dir='foo')
def test_b(self):
# used to fail because 'bar' could not be found
shutil.make_archive('archive', 'zip', root_dir='foo')
class TestDistutilsCopyTree(fake_filesystem_unittest.TestCase):
"""Regression test for #501."""
def setUp(self):
self.setUpPyfakefs()
self.fs.create_dir("./test/subdir/")
self.fs.create_dir("./test/subdir2/")
self.fs.create_file("./test2/subdir/1.txt")
def test_file_copied(self):
copy_tree("./test2/", "./test/")
remove_tree("./test2/")
self.assertTrue(os.path.isfile('./test/subdir/1.txt'))
self.assertFalse(os.path.isdir('./test2/'))
def test_file_copied_again(self):
# used to fail because 'test2' could not be found
self.assertTrue(os.path.isfile('./test2/subdir/1.txt'))
copy_tree("./test2/", "./test/")
remove_tree("./test2/")
self.assertTrue(os.path.isfile('./test/subdir/1.txt'))
self.assertFalse(os.path.isdir('./test2/'))
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "jcwimer/openstack-exporter",
"score": 2
}
|
#### File: jcwimer/openstack-exporter/openstack_exporter.py
```python
import prometheus_client as prom
import traceback
import openstack
import time
import argparse
import sys
import os
from lib import instance_deploy
from lib import api_metrics
from lib import hypervisor_metrics
from lib import horizon
def openstack_connection():
if os.environ.get('OS_CLOUD_NAME') is not None:
conn = openstack.connect(cloud=os.environ.get('OS_CLOUD_NAME'))
else:
conn = openstack.connect(cloud='envvars')
return conn
# Set up argparse
def parse_cli_arguments():
parser = argparse.ArgumentParser(
description='Openstack Prometheus Exporter',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
required = parser.add_argument_group(title='required arguments')
required.add_argument(
'--cloud_name',
required=True,
help='Give the cloud a name.',
)
# This gets the optional arguments to print below the required ones.
optional = parser.add_argument_group(title='optional arguments')
optional.add_argument(
'--instance_deploy',
action="store_true",
dest='instancedeploy',
default=False,
help='Enables instance deploy metrics. Requires --flavor --network and --image.'
)
optional.add_argument(
'--flavor',
dest='flavor',
type=str,
help='Flavor name or ID to use for instance deploy metrics.'
)
optional.add_argument(
'--network',
dest='network',
type=str,
help='Pingable (via TCP) network to use for instance deploy metrics.'
)
optional.add_argument(
'--image',
dest='image',
type=str,
help='Image name or ID to use for instance deploy metrics.'
)
optional.add_argument(
'--horizon_url',
dest='horizon_url',
type=str,
help='Url for Horizon.'
)
args = parser.parse_args()
# Validation
if args.instancedeploy is True and (args.image is None or args.flavor is None or args.network is None):
parser.error("argument --instance_deploy: requires --image, --flavor, and --network.")
elif args.image and (args.instancedeploy is False or args.flavor is None or args.network is None):
parser.error("argument --image: requires --instance_deploy, --flavor, and --network.")
elif args.network and (args.instancedeploy is False or args.flavor is None or args.image is None):
parser.error("argument --network: requires --instance_deploy, --flavor, and --image.")
elif args.flavor and (args.instancedeploy is False or args.image is None or args.network is None):
parser.error("argument --flavor: requires --instance_deploy, --image, and --network.")
return args
if __name__ == '__main__':
print("Starting server on port 8000")
prom.start_http_server(port=8000, addr='0.0.0.0')
args = parse_cli_arguments()
while True:
try:
print("Gathering metrics...")
connection = openstack_connection()
api_metrics.generate_nova_metrics(connection,args.cloud_name)
api_metrics.generate_neutron_metrics(connection,args.cloud_name)
api_metrics.generate_cinder_metrics(connection,args.cloud_name)
hypervisor_metrics.generate_hypervisor_metrics(connection,args.cloud_name)
if args.instancedeploy and args.flavor and args.image and args.network:
instance_deploy.get_metrics(connection, args.flavor, args.image, args.network,args.cloud_name)
if args.horizon_url is not None:
horizon.get_metrics(args.horizon_url, args.cloud_name)
connection.close()
print("Waiting 30 seconds to gather more metrics.")
time.sleep(30)
except Exception:
connection.close()
print(traceback.print_exc())
print("Waiting 30 seconds to gather more metrics.")
time.sleep(30)
finally:
connection.close()
```
|
{
"source": "jcwojdel/my_pi_skill",
"score": 3
}
|
#### File: my_pi_skill/tests/test_polish_radio.py
```python
import json
import os
import unittest
from functools import wraps
import mock
from lambdas import my_pi_lambda
EVENTS = json.load(open(os.path.join(os.path.dirname(__file__), 'sample_events.json')))
def forall_events(f):
@wraps(f)
def wrapper(*args, **kwds):
for event_meta in EVENTS:
kwds['event'] = event_meta['event']
kwds['event_name'] = event_meta['name']
return f(*args, **kwds)
return wrapper
def find_event_by_name(name):
for event_meta in EVENTS:
if event_meta['name'] == name:
return event_meta['event']
raise KeyError(name)
class TestEventParsing(unittest.TestCase):
@forall_events
def test_get_intent(self, event, event_name):
intent = my_pi_lambda.get_intent(event)
self.assertTrue(intent.endswith('Intent'), 'Failed to parse intent in event {}'.format(event_name))
def test_get_intent_failure(self):
event = {
'request': {
'type': 'NotIntent'
}
}
with self.assertRaises(ValueError):
intent = my_pi_lambda.get_intent(event)
@forall_events
def test_get_slots(self, event, event_name):
slots = my_pi_lambda.get_slots(event)
self.assertIsInstance(slots, dict, 'Failed to parse slots in event {}'.format(event_name))
def test_get_slots_play_3(self):
event = find_event_by_name('PLAY_3')
slots = my_pi_lambda.get_slots(event)
self.assertEqual(slots, {'Number': '3'})
class TestPolishRadio(unittest.TestCase):
def setUp(self):
self.event = find_event_by_name('PLAY_3')
def test_play(self):
with mock.patch.object(my_pi_lambda.PiController, 'request_method') as request_mock:
res = my_pi_lambda.lambda_handler(self.event, None)
request_mock.assert_called()
self.assertEqual(res['version'], '1.0')
self.assertIn('response', res)
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "jcwon0/BlurHPE",
"score": 3
}
|
#### File: core/evaluation/mesh_eval.py
```python
import numpy as np
def compute_similarity_transform(source_points, target_points):
"""Computes a similarity transform (sR, t) that takes a set of 3D points
source_points (N x 3) closest to a set of 3D points target_points, where R
is an 3x3 rotation matrix, t 3x1 translation, s scale. And return the
transformed 3D points source_points_hat (N x 3). i.e. solves the orthogonal
Procrutes problem.
Notes:
Points number: N
Args:
source_points (np.ndarray([N, 3])): Source point set.
target_points (np.ndarray([N, 3])): Target point set.
Returns:
source_points_hat (np.ndarray([N, 3])): Transformed source point set.
"""
assert target_points.shape[0] == source_points.shape[0]
assert target_points.shape[1] == 3 and source_points.shape[1] == 3
source_points = source_points.T
target_points = target_points.T
# 1. Remove mean.
mu1 = source_points.mean(axis=1, keepdims=True)
mu2 = target_points.mean(axis=1, keepdims=True)
X1 = source_points - mu1
X2 = target_points - mu2
# 2. Compute variance of X1 used for scale.
var1 = np.sum(X1**2)
# 3. The outer product of X1 and X2.
K = X1.dot(X2.T)
# 4. Solution that Maximizes trace(R'K) is R=U*V', where U, V are
# singular vectors of K.
U, _, Vh = np.linalg.svd(K)
V = Vh.T
# Construct Z that fixes the orientation of R to get det(R)=1.
Z = np.eye(U.shape[0])
Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))
# Construct R.
R = V.dot(Z.dot(U.T))
# 5. Recover scale.
scale = np.trace(R.dot(K)) / var1
# 6. Recover translation.
t = mu2 - scale * (R.dot(mu1))
# 7. Transform the source points:
source_points_hat = scale * R.dot(source_points) + t
source_points_hat = source_points_hat.T
return source_points_hat
```
#### File: core/evaluation/pose3d_eval.py
```python
import numpy as np
from .mesh_eval import compute_similarity_transform
def keypoint_mpjpe(pred, gt, mask):
"""Calculate the mean per-joint position error (MPJPE) and the error after
rigid alignment with the ground truth (P-MPJPE).
batch_size: N
num_keypoints: K
keypoint_dims: C
Args:
pred (np.ndarray[N, K, C]): Predicted keypoint location.
gt (np.ndarray[N, K, C]): Groundtruth keypoint location.
mask (np.ndarray[N, K]): Visibility of the target. False for invisible
joints, and True for visible. Invisible joints will be ignored for
accuracy calculation.
Returns:
tuple: A tuple containing joint position errors
- mpjpe (float|np.ndarray[N]): mean per-joint position error.
- p-mpjpe (float|np.ndarray[N]): mpjpe after rigid alignment with the
ground truth
"""
assert mask.any()
pred_aligned = np.stack(
compute_similarity_transform(pred_i, gt_i)
for pred_i, gt_i in zip(pred, gt))
mpjpe = np.linalg.norm(pred - gt, ord=2, axis=-1)[mask].mean()
p_mpjpe = np.linalg.norm(pred_aligned - gt, ord=2, axis=-1)[mask].mean()
return mpjpe, p_mpjpe
```
#### File: models/mesh_heads/hmr_head.py
```python
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import xavier_init
from ..registry import HEADS
from .geometric_layers import rot6d_to_rotmat
@HEADS.register_module()
class MeshHMRHead(nn.Module):
"""SMPL parameters regressor head of simple baseline paper
ref: <NAME>. ``End-to-end Recovery of Human Shape and Pose''.
Args:
in_channels (int): Number of input channels
in_res (int): The resolution of input feature map.
smpl_mean_parameters (str): The file name of the mean SMPL parameters
n_iter (int): The iterations of estimating delta parameters
"""
def __init__(self, in_channels, smpl_mean_params=None, n_iter=3):
super().__init__()
self.in_channels = in_channels
self.n_iter = n_iter
npose = 24 * 6
nbeta = 10
ncam = 3
hidden_dim = 1024
self.fc1 = nn.Linear(in_channels + npose + nbeta + ncam, hidden_dim)
self.drop1 = nn.Dropout()
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.drop2 = nn.Dropout()
self.decpose = nn.Linear(hidden_dim, npose)
self.decshape = nn.Linear(hidden_dim, nbeta)
self.deccam = nn.Linear(hidden_dim, ncam)
# Load mean SMPL parameters
if smpl_mean_params is None:
init_pose = torch.zeros([1, npose])
init_shape = torch.zeros([1, nbeta])
init_cam = torch.FloatTensor([[1, 0, 0]])
else:
mean_params = np.load(smpl_mean_params)
init_pose = torch.from_numpy(
mean_params['pose'][:]).unsqueeze(0).float()
init_shape = torch.from_numpy(
mean_params['shape'][:]).unsqueeze(0).float()
init_cam = torch.from_numpy(
mean_params['cam']).unsqueeze(0).float()
self.register_buffer('init_pose', init_pose)
self.register_buffer('init_shape', init_shape)
self.register_buffer('init_cam', init_cam)
def forward(self, x):
"""Forward function.
x is the image feature map and is expected to be in shape (batch size x
channel number x height x width)
"""
batch_size = x.shape[0]
# extract the global feature vector by average along
# spatial dimension.
x = x.mean(dim=-1).mean(dim=-1)
init_pose = self.init_pose.expand(batch_size, -1)
init_shape = self.init_shape.expand(batch_size, -1)
init_cam = self.init_cam.expand(batch_size, -1)
pred_pose = init_pose
pred_shape = init_shape
pred_cam = init_cam
for _ in range(self.n_iter):
xc = torch.cat([x, pred_pose, pred_shape, pred_cam], 1)
xc = self.fc1(xc)
xc = self.drop1(xc)
xc = self.fc2(xc)
xc = self.drop2(xc)
pred_pose = self.decpose(xc) + pred_pose
pred_shape = self.decshape(xc) + pred_shape
pred_cam = self.deccam(xc) + pred_cam
pred_rotmat = rot6d_to_rotmat(pred_pose).view(batch_size, 24, 3, 3)
out = (pred_rotmat, pred_shape, pred_cam)
return out
def init_weights(self):
"""Initialize model weights."""
xavier_init(self.decpose, gain=0.01)
xavier_init(self.decshape, gain=0.01)
xavier_init(self.deccam, gain=0.01)
```
#### File: mmpose/utils/collect_env.py
```python
from mmcv.utils import collect_env as collect_basic_env
from mmcv.utils import get_git_hash
import mmpose
def collect_env():
env_info = collect_basic_env()
env_info['MMPose'] = (mmpose.__version__ + '+' + get_git_hash(digits=7))
return env_info
if __name__ == '__main__':
for name, val in collect_env().items():
print(f'{name}: {val}')
```
#### File: tests/test_backbones/test_alexnet.py
```python
import torch
from mmpose.models.backbones import AlexNet
def test_alexnet_backbone():
"""Test alexnet backbone."""
model = AlexNet(-1)
model.train()
imgs = torch.randn(1, 3, 256, 192)
feat = model(imgs)
assert feat.shape == (1, 256, 7, 5)
model = AlexNet(1)
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat.shape == (1, 1)
```
#### File: tests/test_loss/test_bottom_up_losses.py
```python
import pytest
import torch
def test_multi_loss_factory():
from mmpose.models import build_loss
# test heatmap loss
loss_cfg = dict(type='HeatmapLoss')
loss = build_loss(loss_cfg)
with pytest.raises(AssertionError):
fake_pred = torch.zeros((2, 3, 64, 64))
fake_label = torch.zeros((1, 3, 64, 64))
fake_mask = torch.zeros((1, 64, 64))
loss(fake_pred, fake_label, fake_mask)
fake_pred = torch.zeros((1, 3, 64, 64))
fake_label = torch.zeros((1, 3, 64, 64))
fake_mask = torch.zeros((1, 64, 64))
assert torch.allclose(
loss(fake_pred, fake_label, fake_mask), torch.tensor(0.))
fake_pred = torch.ones((1, 3, 64, 64))
fake_label = torch.zeros((1, 3, 64, 64))
fake_mask = torch.zeros((1, 64, 64))
assert torch.allclose(
loss(fake_pred, fake_label, fake_mask), torch.tensor(0.))
fake_pred = torch.ones((1, 3, 64, 64))
fake_label = torch.zeros((1, 3, 64, 64))
fake_mask = torch.ones((1, 64, 64))
assert torch.allclose(
loss(fake_pred, fake_label, fake_mask), torch.tensor(1.))
# test AE loss
fake_tags = torch.zeros((1, 18, 1))
fake_joints = torch.zeros((1, 3, 2, 2), dtype=torch.int)
loss_cfg = dict(type='AELoss', loss_type='exp')
loss = build_loss(loss_cfg)
assert torch.allclose(loss(fake_tags, fake_joints)[0], torch.tensor(0.))
assert torch.allclose(loss(fake_tags, fake_joints)[1], torch.tensor(0.))
fake_tags[0, 0, 0] = 1.
fake_tags[0, 10, 0] = 0.
fake_joints[0, 0, 0, :] = torch.IntTensor((0, 1))
fake_joints[0, 0, 1, :] = torch.IntTensor((10, 1))
loss_cfg = dict(type='AELoss', loss_type='exp')
loss = build_loss(loss_cfg)
assert torch.allclose(loss(fake_tags, fake_joints)[0], torch.tensor(0.))
assert torch.allclose(loss(fake_tags, fake_joints)[1], torch.tensor(0.25))
fake_tags[0, 0, 0] = 0
fake_tags[0, 7, 0] = 1.
fake_tags[0, 17, 0] = 1.
fake_joints[0, 1, 0, :] = torch.IntTensor((7, 1))
fake_joints[0, 1, 1, :] = torch.IntTensor((17, 1))
loss_cfg = dict(type='AELoss', loss_type='exp')
loss = build_loss(loss_cfg)
assert torch.allclose(loss(fake_tags, fake_joints)[1], torch.tensor(0.))
loss_cfg = dict(type='AELoss', loss_type='max')
loss = build_loss(loss_cfg)
assert torch.allclose(loss(fake_tags, fake_joints)[0], torch.tensor(0.))
with pytest.raises(ValueError):
loss_cfg = dict(type='AELoss', loss_type='min')
loss = build_loss(loss_cfg)
loss(fake_tags, fake_joints)
# test MultiLossFactory
with pytest.raises(AssertionError):
loss_cfg = dict(
type='MultiLossFactory',
num_joints=2,
num_stages=1,
ae_loss_type='exp',
with_ae_loss=True,
push_loss_factor=[0.001],
pull_loss_factor=[0.001],
with_heatmaps_loss=[True],
heatmaps_loss_factor=[1.0])
loss = build_loss(loss_cfg)
with pytest.raises(AssertionError):
loss_cfg = dict(
type='MultiLossFactory',
num_joints=2,
num_stages=1,
ae_loss_type='exp',
with_ae_loss=[True],
push_loss_factor=0.001,
pull_loss_factor=[0.001],
with_heatmaps_loss=[True],
heatmaps_loss_factor=[1.0])
loss = build_loss(loss_cfg)
with pytest.raises(AssertionError):
loss_cfg = dict(
type='MultiLossFactory',
num_joints=2,
num_stages=1,
ae_loss_type='exp',
with_ae_loss=[True],
push_loss_factor=[0.001],
pull_loss_factor=0.001,
with_heatmaps_loss=[True],
heatmaps_loss_factor=[1.0])
loss = build_loss(loss_cfg)
with pytest.raises(AssertionError):
loss_cfg = dict(
type='MultiLossFactory',
num_joints=2,
num_stages=1,
ae_loss_type='exp',
with_ae_loss=[True],
push_loss_factor=[0.001],
pull_loss_factor=[0.001],
with_heatmaps_loss=True,
heatmaps_loss_factor=[1.0])
loss = build_loss(loss_cfg)
with pytest.raises(AssertionError):
loss_cfg = dict(
type='MultiLossFactory',
num_joints=2,
num_stages=1,
ae_loss_type='exp',
with_ae_loss=[True],
push_loss_factor=[0.001],
pull_loss_factor=[0.001],
with_heatmaps_loss=[True],
heatmaps_loss_factor=1.0)
loss = build_loss(loss_cfg)
loss_cfg = dict(
type='MultiLossFactory',
num_joints=17,
num_stages=1,
ae_loss_type='exp',
with_ae_loss=[False],
push_loss_factor=[0.001],
pull_loss_factor=[0.001],
with_heatmaps_loss=[False],
heatmaps_loss_factor=[1.0])
loss = build_loss(loss_cfg)
fake_outputs = [torch.zeros((1, 34, 64, 64))]
fake_heatmaps = [torch.zeros((1, 17, 64, 64))]
fake_masks = [torch.ones((1, 64, 64))]
fake_joints = [torch.zeros((1, 30, 17, 2))]
heatmaps_losses, push_losses, pull_losses = \
loss(fake_outputs, fake_heatmaps, fake_masks, fake_joints)
assert heatmaps_losses == [None]
assert pull_losses == [None]
assert push_losses == [None]
loss_cfg = dict(
type='MultiLossFactory',
num_joints=17,
num_stages=1,
ae_loss_type='exp',
with_ae_loss=[True],
push_loss_factor=[0.001],
pull_loss_factor=[0.001],
with_heatmaps_loss=[True],
heatmaps_loss_factor=[1.0])
loss = build_loss(loss_cfg)
heatmaps_losses, push_losses, pull_losses = \
loss(fake_outputs, fake_heatmaps, fake_masks, fake_joints)
assert len(heatmaps_losses) == 1
```
#### File: tests/test_model/test_heatmap_3d_head.py
```python
import numpy as np
import torch
from mmpose.models import HeatMap3DHead
def test_heatmap_3d_head():
"""Test interhand 3d head."""
input_shape = (1, 512, 8, 8)
inputs = torch.rand(input_shape, dtype=torch.float32)
target_heatmap3d = inputs.new_zeros([1, 20, 64, 64, 64])
target_weight = inputs.new_ones([1, 20, 1])
img_metas = [{
'img_shape': (224, 224, 3),
'center': np.array([112, 112]),
'scale': np.array([0.5, 0.5]),
'bbox_score': 1.0,
'bbox_id': 0,
'flip_pairs': [],
'inference_channel': np.arange(17),
'image_file': '<demo>.png',
}]
# test 3D heatmap head
head3d = HeatMap3DHead(
in_channels=512,
out_channels=20 * 64,
depth_size=64,
num_deconv_layers=3,
num_deconv_filters=(256, 256, 256),
num_deconv_kernels=(4, 4, 4),
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True),
)
head3d.init_weights()
heatmap3d = head3d(inputs)
assert heatmap3d.shape == torch.Size([1, 20, 64, 64, 64])
loss_3d = head3d.get_loss(heatmap3d, target_heatmap3d, target_weight)
assert 'heatmap_loss' in loss_3d
# test inference model
output = head3d.inference_model(inputs, [(0, 1)])
assert isinstance(output, np.ndarray)
assert output.shape == (1, 20, 64, 64, 64)
# test decode
result = head3d.decode(img_metas, output)
assert 'preds' in result
```
#### File: tests/test_model/test_layer.py
```python
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import build_conv_layer, build_upsample_layer
def test_build_upsample_layer():
layer1 = nn.ConvTranspose2d(
in_channels=3,
out_channels=10,
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
bias=False)
layer2 = build_upsample_layer(
dict(type='deconv'),
in_channels=3,
out_channels=10,
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
bias=False)
layer2.load_state_dict(layer1.state_dict())
input_shape = (1, 3, 32, 32)
inputs = _demo_inputs(input_shape)
out1 = layer1(inputs)
out2 = layer2(inputs)
assert torch.equal(out1, out2)
def test_build_conv_layer():
layer1 = nn.Conv2d(
in_channels=3, out_channels=10, kernel_size=3, stride=1, padding=1)
layer2 = build_conv_layer(
cfg=dict(type='Conv2d'),
in_channels=3,
out_channels=10,
kernel_size=3,
stride=1,
padding=1)
layer2.load_state_dict(layer1.state_dict())
input_shape = (1, 3, 32, 32)
inputs = _demo_inputs(input_shape)
out1 = layer1(inputs)
out2 = layer2(inputs)
assert torch.equal(out1, out2)
def _demo_inputs(input_shape=(1, 3, 64, 64)):
"""Create a superset of inputs needed to run backbone.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 3, 64, 64).
Returns:
Random input tensor with the size of input_shape.
"""
inps = np.random.random(input_shape)
inps = torch.FloatTensor(inps)
return inps
```
#### File: tests/test_model/test_temporal_regression_head.py
```python
import numpy as np
import pytest
import torch
from mmpose.models import TemporalRegressionHead
def test_temporal_regression_head():
"""Test temporal head."""
head = TemporalRegressionHead(
in_channels=1024,
num_joints=17,
loss_keypoint=dict(type='MPJPELoss', use_target_weight=True))
head.init_weights()
with pytest.raises(AssertionError):
# ndim of the input tensor should be 3
input_shape = (1, 1024, 1, 1)
inputs = _demo_inputs(input_shape)
_ = head(inputs)
with pytest.raises(AssertionError):
# size of the last dim should be 1
input_shape = (1, 1024, 3)
inputs = _demo_inputs(input_shape)
_ = head(inputs)
input_shape = (1, 1024, 1)
inputs = _demo_inputs(input_shape)
out = head(inputs)
assert out.shape == torch.Size([1, 17, 3])
loss = head.get_loss(out, out, torch.ones_like(out))
assert torch.allclose(loss['reg_loss'], torch.tensor(0.))
_ = head.inference_model(inputs)
_ = head.inference_model(inputs, [(0, 1), (2, 3)])
acc = head.get_accuracy(out, out, torch.ones_like(out))
assert acc['mpjpe'] == 0.
np.testing.assert_almost_equal(acc['p_mpjpe'], 0.)
def _demo_inputs(input_shape=(1, 1024, 1)):
"""Create a superset of inputs needed to run head.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 1024, 1).
Returns:
Random input tensor with the size of input_shape.
"""
inps = np.random.random(input_shape)
inps = torch.FloatTensor(inps)
return inps
```
#### File: BlurHPE/tests/test_optimizer.py
```python
import torch
import torch.nn as nn
from mmpose.core import build_optimizers
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.model1 = nn.Conv2d(3, 8, kernel_size=3)
self.model2 = nn.Conv2d(3, 4, kernel_size=3)
def forward(self, x):
return x
def test_build_optimizers():
base_lr = 0.0001
base_wd = 0.0002
momentum = 0.9
# basic config with ExampleModel
optimizer_cfg = dict(
model1=dict(
type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum),
model2=dict(
type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum))
model = ExampleModel()
optimizers = build_optimizers(model, optimizer_cfg)
param_dict = dict(model.named_parameters())
assert isinstance(optimizers, dict)
for i in range(2):
optimizer = optimizers[f'model{i+1}']
param_groups = optimizer.param_groups[0]
assert isinstance(optimizer, torch.optim.SGD)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['momentum'] == momentum
assert optimizer.defaults['weight_decay'] == base_wd
assert len(param_groups['params']) == 2
assert torch.equal(param_groups['params'][0],
param_dict[f'model{i+1}.weight'])
assert torch.equal(param_groups['params'][1],
param_dict[f'model{i+1}.bias'])
# basic config with Parallel model
model = torch.nn.DataParallel(ExampleModel())
optimizers = build_optimizers(model, optimizer_cfg)
param_dict = dict(model.named_parameters())
assert isinstance(optimizers, dict)
for i in range(2):
optimizer = optimizers[f'model{i+1}']
param_groups = optimizer.param_groups[0]
assert isinstance(optimizer, torch.optim.SGD)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['momentum'] == momentum
assert optimizer.defaults['weight_decay'] == base_wd
assert len(param_groups['params']) == 2
assert torch.equal(param_groups['params'][0],
param_dict[f'module.model{i+1}.weight'])
assert torch.equal(param_groups['params'][1],
param_dict[f'module.model{i+1}.bias'])
# basic config with ExampleModel (one optimizer)
optimizer_cfg = dict(
type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
model = ExampleModel()
optimizer = build_optimizers(model, optimizer_cfg)
param_dict = dict(model.named_parameters())
assert isinstance(optimizers, dict)
param_groups = optimizer.param_groups[0]
assert isinstance(optimizer, torch.optim.SGD)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['momentum'] == momentum
assert optimizer.defaults['weight_decay'] == base_wd
assert len(param_groups['params']) == 4
assert torch.equal(param_groups['params'][0], param_dict['model1.weight'])
assert torch.equal(param_groups['params'][1], param_dict['model1.bias'])
assert torch.equal(param_groups['params'][2], param_dict['model2.weight'])
assert torch.equal(param_groups['params'][3], param_dict['model2.bias'])
# basic config with Parallel model (one optimizer)
model = torch.nn.DataParallel(ExampleModel())
optimizer = build_optimizers(model, optimizer_cfg)
param_dict = dict(model.named_parameters())
assert isinstance(optimizers, dict)
param_groups = optimizer.param_groups[0]
assert isinstance(optimizer, torch.optim.SGD)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['momentum'] == momentum
assert optimizer.defaults['weight_decay'] == base_wd
assert len(param_groups['params']) == 4
assert torch.equal(param_groups['params'][0],
param_dict['module.model1.weight'])
assert torch.equal(param_groups['params'][1],
param_dict['module.model1.bias'])
assert torch.equal(param_groups['params'][2],
param_dict['module.model2.weight'])
assert torch.equal(param_groups['params'][3],
param_dict['module.model2.bias'])
```
#### File: BlurHPE/tests/test_version.py
```python
import mmpose
def test_version():
version = mmpose.__version__
assert isinstance(version, str)
assert isinstance(mmpose.short_version, str)
assert mmpose.short_version in version
```
|
{
"source": "jcwoods/BroLog",
"score": 3
}
|
#### File: jcwoods/BroLog/BroLog.py
```python
import sys
import codecs
import ipaddress as ip
import pandas as pd
from datetime import datetime as dt
class BroLogFile:
def doSeparator(self, fields):
sep = fields[1]
if len(sep) == 1: # a literal?
self.separator = sep
elif sep[:2] == '\\x': # a hexadecimal (ASCII) value?
self.separator = chr(int(sep[2:], 16))
else:
raise ValueError('invalid separator format in log file')
return
def default_transform(self, fields):
ntypes = len(self.field_types)
for fno in range(ntypes):
if fields[fno] == self.unset_field:
fields[fno] = None
continue
elif fields[fno] == self.empty_field:
fields[fno] = ''
continue
elif self.field_types[fno] == 'count' or self.field_types[fno] == 'port':
try:
val = int(fields[fno])
fields[fno] = val
except:
pass
elif self.field_types[fno] == 'interval':
try:
val = float(fields[fno])
fields[fno] = val
except:
pass
#elif self.field_types[fno] == 'addr':
# try:
# ip_addr = ip.ip_address(fields[fno])
# fields[fno] = int(ip_addr)
# except ValueError:
# # IPv6 address? TBD...
# fields[fno] = 0
elif self.field_types[fno] == 'time':
ts = float(fields[fno])
t = dt.fromtimestamp(ts).isoformat()
fields[fno] = t
return
def __init__(self, fname, row_transform = None, row_filter = None):
"""
Crete a new Pandas DataFrame from the given file.
fname is the name of the file to be opened.
row_transform is an (optional) function function which will be applied
to each row as it is read. It may modify the individual column
values, such as by performing integer conversions on exptected
numeric fields. This function does not return a value.
row_filter is an (optional) function which will be used to test each
input row. It is executed after row_transform (if one exists),
and must return a boolean value. If True, the row will be
included in the result. If False, the row will be suppressed.
May generate an exception if the file could not be opened or if an
invalid format is found in the separator value.
"""
self.row_transform = row_transform
self.row_filter = row_filter
self.field_names = []
self.field_types = []
self.empty_field = '(empty)'
self.unset_field = '-'
self.set_separator = ','
self.separator = ' '
self.rows = []
self.field_map = None
#f = file(fname, 'r')
f = codecs.open(fname, 'r', encoding = 'utf-8')
line = f.readline()
while line[0] == '#':
fields = line[1:].strip().split(self.separator)
if fields[0] == 'separator':
self.doSeparator(fields)
elif fields[0] == 'empty_field':
self.empty_field = fields[1]
elif fields[0] == 'unset_field':
self.unset_field = fields[1]
elif fields[0] == 'fields':
self.field_names = fields[1:]
elif fields[0] == 'types':
self.field_types = fields[1:]
line = f.readline()
for line in f:
if line[0] == '#': continue
fields = line.rstrip("\r\n").split(self.separator)
if self.row_transform is not None:
self.row_transform(fields)
else:
self.default_transform(fields)
if self.row_filter is not None:
if self.row_filter(fields, self.field_types, self.field_names) is False: continue
self.rows.append(fields)
return
def asDataFrame(self):
df = pd.DataFrame(self.rows, columns = self.field_names)
return df
def __len__(self):
return len(self.rows)
def conn_filter(fields, types, names):
return fields[6] == 'tcp'
def main(argv):
con = BroLogFile(argv[1])
#for n in range(10):
# print(con.rows[n])
df = con.asDataFrame()
print(df.head(10))
print(df.describe())
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
```
|
{
"source": "jcwoods/datagen",
"score": 3
}
|
#### File: datagen/datagen/phonegen.py
```python
import random
import sys
from datagen.entitygenerator import EntityElement, SimpleElement
class PhoneElement(SimpleElement):
def __init__(self, formatted=False, **kwargs):
SimpleElement.__init__(self, **kwargs)
self.formatted = formatted
return
# Rules taken from "Modern plan" found at:
# https://en.wikipedia.org/wiki/North_American_Numbering_Plan
#
# Stated plainly:
# 1. There are three sections to a phone number, NPA (area code), NXX
# (exchange), and XXXX (line number).
# 2. For the NPA:
# a. three digits
# b. 2-9 for first digit, 0-8 for second, and 0-9 for third digits
# (middle digit may not be a '9', which would be a trunk prefix)
# d. when 2nd and 3rd digits are the same, it's classified an ERC
# which, while not invalid, we will choose to avoid.
# 3. For the NXX:
# a. three digits
# b. [2-9] for first digit and [0-9] for second and third digits
# c. second and third digits may not both be '1'
# d. 555 should generally be avoided (used for informational or
# fictional numbers)
# e. 958/959 (testing) and 950/976 (service) should be avoided.
# f. should not match the NPA.
def create(self, **kwargs):
while True:
npai = int(random.random() * 800) + 200
npa = '{0:03d}'.format(npai)
if npa[1] != npa[2] and npa[1] != 9: break
while True:
nxxi = int(random.random() * 800) + 200
nxx = '{0:03d}'.format(nxxi)
if nxx[1:] != '11' and \
nxx not in [ '555', '958', '959', '950', '976' ]:
continue
break
linei = int(random.random() * 10000)
line = '{0:04d}'.format(linei)
if not self.formatted:
p = npa + nxx + line
else:
p = '-'.join((npa,nxx,line))
return p
def main(argv):
phone = PhoneElement(formatted=True)
for n in range(10):
print(phone.create())
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
```
|
{
"source": "jcwoods/DataRake",
"score": 3
}
|
#### File: DataRake/datarake/__init__.py
```python
import argparse
import base64
import csv
import hashlib
import json
import math
import os
import re
import sys
from collections import Counter, OrderedDict
'''
A forensics tool which scans a directory structure for elements such as:
- host names (matching specified domains)
- full URLs/URIs
- email addresses
- keywords (eg, "username", "pw", "authTok", etc)
- key patterns (eg, "Basic <base64 encoded data>", URI encodings, etc.)
We should be allowed to limit searches of files either by:
- whitelist (only authorized file extensions)
- blacklist (exclude unauthorized file extensions)
Eventually, we should look into decoding base64 or URI encoded data and
returning the content rather than the encoded data.
Report output should include:
- type of match
- location data (file name, line number)
- matched vaule (sensitive data), including offset and length within line
- matched context, including offset and length within line
Assumptions:
- input files are UTF-8
- lines are delim by '\n'
- a pattern must exist entirely on a single line
- multiple matches of any type may occur within a line. We want them all.
'''
# Rakes are added to RakeSets. RakeSets are applied to files.
# There are two types of Rakes: File context and Content.
#
# A file context Rake will be executed ONCE for each file. It will evaluate
# properties of the file, such as path, name, or extension.
#
# A content Rake will be executed once for each line in a file. If a file
# is found to contain non-text (specifically, non-UTF8) data, processing will
# be aborted immediately.
#
# While the two types of Rakes may be run independently, the RakeSet
# orchestrates the running of a large number of rakes (of both types) on a
# single file.
#
# How filtering works: Any Rake may add a 'filter' method accepting a
# RakeMatch argument as a single parameter. This method will be called
# (in RakeSet.match()) after the Rake has been executed but before the
# result is added to the output result set. A default filter() method has
# been added to the top level Rake class which will pass all matches if no
# specific filter is added to a rake.
#
# The filter() method will have access to all fields in the RakeMatch to
# use to determine whether the result should be filtered or not. If the match
# should be filtered, the method should return False (this is consistent with
# the expectation of the python built-in filter() method). Otherwise, the
# method should return True (the match is kept).
#
# Note that in the RakePattern class that the match groups from the re.findall
# call are preserved. THis provides pre-parsed fields beyond what is
# available in the RakeMatch value and context fields without having to re-
# parse the text.
# Some neat ideas which might get implemented one day:
# TODO - look inside archive files (.zip, .tgz)?
# TODO - search for passwords in XML
# Design changes
# TODO - all rakes have severity, type, and desc, move to the Rake class.
# Forward declaration of RakeMatch to resolve circular dependency between
# Rake and RakeMatch.
class RakeMatch:
pass
class DirectoryWalker:
def __init__(self, path:str=".", blacklist=None, verbose:bool=False):
'''
path is the path to be traversed.
blacklist is a list of DIRECTORIES to be excluded. By default, source
control directories (.svn, .git) will be used.
'''
if blacklist is None:
blacklist = [ '.svn', '.git', '__pycache__' ]
self.blacklist = blacklist
self.basepath = path
self.verbose = verbose
return
def __iter__(self):
self.w = os.walk(self.basepath)
self.t = None # current tuple (from os.walk).
self.i = 0 # index into file list
return self
def __next__(self):
while self.t is None:
t = self.w.__next__()
# apply blacklist to directories prior to recursion
t[1][:] = [d for d in t[1] if d not in self.blacklist]
if len(t[2]) == 0:
continue
self.i = 0
self.t = t
t = self.t
i = self.i
self.i += 1
if self.i >= len(self.t[2]):
self.t = None
path = t[0]
fnam = t[2][i]
# determine file extension, if any
parts = fnam.split(".")
ext = parts[-1] if len(parts) > 1 else None
# TODO: should this be a top-level RakeContext class?
context = { "basepath": self.basepath,
"path": path,
"filename": fnam,
"fullpath": os.path.join(path, fnam),
"filetype": ext }
# TODO: possibly add size, mode, date to context. They're not
# wanted/needed now, so we're not going to waste the iops.
if self.verbose:
print("* New context: " + str(context), file=sys.stderr)
return context
class Rake(object):
'''
A Rake is an abstract "issue finder". Its subclasses do all of the real
work. When applied or executed, it creates RakeMatch objects. Rake
objects are grouped in RakeSet collections when many Rakes will be
applied repeatedly.
'''
# some common values used in Rake filters
common_usernames = [ 'username', 'usern', 'user' ]
common_passwords = [ 'password', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>' ]
def __init__(self, ptype:str, pdesc:str, severity:str, part:str='content'):
if part not in ['content', 'filemeta']:
raise RuntimeError(f"Invalid part in Rake initializer: {part}")
self.name = self.__class__.__name__
self.ptype = ptype # rake type (password, token, private key, etc)
self.pdesc = pdesc # long(er) description of rake
self.severity = severity # finding severity
self.part = part # where is rake applied? (content, filemeta, etc.)
return
def __str__(self):
return f"<Rake({self.name}, {self.ptype}, {self.part})>"
@staticmethod
def relPath(basepath:str, fullpath:str):
bplen = len(basepath)
relpath = fullpath[bplen:]
while relpath[0] == '/':
relpath = relpath[1:]
return relpath
def filter(self, m:RakeMatch):
'''
A generic filter method. If filter() returns false (eg, a match should
be filtered), the result will not be added to the result set.
Note that this is only a fail-safe, and that filters must be
implemented within the context of a specific Rake-type. The 'm'
(match) parameter may be of different types and must be interpreted
differently.
'''
return True
class RakeFileMeta(Rake):
'''
Creates RakeMatch based on file metadata (name, path, extension) rather
than content.
If self.all_required is set to true, all patterns which have been
defined MUST return true. Otherwise, we will return a positive
result when the first match is made.
Being a Rake applied to context (once per file rather than once per line),
the match() method associated with a RakeFileMeta-based class must return
either a single RakeMatch object or None (no match).
'''
def __init__(self, ptype:str, pdesc:str, severity:str,
path:str=None, # pattern applied to path (dirname)
file:str=None, # pattern applied to file name (basename)
ext:str=None, # pattern applied to file extension
all:str=True, # pattern applied to path
ignorecase:bool=True, # set re.IGNORECASE on regex matching
**kwargs):
Rake.__init__(self, ptype, pdesc, severity, part='filemeta', **kwargs)
f = re.IGNORECASE if ignorecase else 0
self.path_pattern = None if path is None else re.compile(path, flags=f)
self.file_pattern = None if file is None else re.compile(file, flags=f)
self.ext_pattern = None if ext is None else re.compile(ext, flags=f)
self.all_required = all
return
def match(self, context:dict):
path = context.get('path', None)
fnam = context.get('filename', None)
ext = context.get('filetype', None)
full = context.get('fullpath', None)
# create the match up front, just in case!
rm = RakeMatch(self, file=full, line=None)
any_part = False
if self.path_pattern is not None:
pm = self.path_pattern.match(path)
if not self.all_required and pm is not None: return rm
if self.all_required and pm is None: return None
any_part = True
if self.file_pattern is not None:
fm = self.file_pattern.match(fnam)
if not self.all_required and fm is not None: return rm
if self.all_required and fm is None: return None
any_part = True
if self.ext_pattern is not None and ext is not None:
xm = self.ext_pattern.match(ext)
if not self.all_required and xm is not None: return rm
if self.all_required and xm is None: return None
any_part = True
if self.filter(rm): return None
if any_part: return rm
return None
class RakeSSHIdentity(RakeFileMeta):
'''
SSH identity files (eg, "private keys")
'''
def __init__(self):
RakeFileMeta.__init__(self, 'ssh identity file',
'file (likely) containing an ssh private key',
'HIGH',
file=r"^id_(rsa1?|dsa|ecdsa|ed25519)$",
ignorecase=False)
return
class RakeNetrc(RakeFileMeta):
'''
Network credential storage.
'''
def __init__(self):
RakeFileMeta.__init__(self, 'netrc file',
'file containing network credentials',
'HIGH',
file=r".?netrc$",
ext=r"netrc",
all=False)
return
class RakePKIKeyFiles(RakeFileMeta):
'''
Files often related with PKI/X509 keys.
'''
def __init__(self):
RakeFileMeta.__init__(self, 'x509 key file',
'files often related to PKI/X509 (server certificates and/or keys)',
'MEDIUM',
ext=r"^(pem|pfx|p12|p7b|key)$")
return
class RakeKeystoreFiles(RakeFileMeta):
'''
Files often related with Java keystores.
TODO: test default/simple passwords (changeit, changeme, password)
'''
def __init__(self):
RakeFileMeta.__init__(self, 'java keystore',
'patterns in file name are associated with Java keystores',
'MEDIUM',
file=r"^keystore$",
ext=r"^(jks|keystore)$",
all=False)
return
class RakeHtpasswdFiles(RakeFileMeta):
'''
Apache htpasswd files.
'''
def __init__(self):
RakeFileMeta.__init__(self, 'apache htpasswd',
'may contain credentials used to access Apache resources',
'LOW',
file=r"^\.?htpasswd$")
return
class RakePattern(Rake):
'''
This is a basic pattern. It will be compiled into a regex before use in
matching.
Note that the re.findall() method is used rather than a re.search() or
re.match(). This affects the grouping and counting of the groups within
the regex.
Being a Rake applied to content (once per line rather than once per file),
the match() method associated with a RakePattern-based class must return
either a list of matches or an empty list. All of the results returned
in the list will be aggregated and returned as a combined group in RakeSet.
'''
def __init__(self, pattern:str, ptype:str, pdesc:str, severity:str,
ctx_group:int=None, val_group:int=None, ignorecase:bool=True):
'''
pattern is the pattern to be matched in the input text.
ptype is the type of the pattern, supplied by the subclass.
'''
Rake.__init__(self, ptype, pdesc, severity, part='content')
flags = 0
if ignorecase:
flags = re.IGNORECASE
self.pattern = re.compile(pattern, flags=flags)
self.ctx_group = ctx_group # position (group) of context match in output tuple
self.val_group = val_group # position (group) of value match in output tuple
self.regex_filters = list()
return
def addRegexFilter(self, regex:str, ftype:str="value", ignorecase:bool=False):
'''
Adds a pattern which will be used to filter matches later. The
filterType may be used to specify HOW the match is applied (eg, value
or context).
ftype must be one of ["value", "context", "file"] and will specify
which part of the match will be 'matched' by the regex. "value" will
match against the secret value, "context" will match against the
secret context (eg, "password=""<PASSWORD>"""), and "meta" will match
against the file path/name/line etc.
'''
if ftype not in ['value', 'context', 'file']:
raise RuntimeError(f"Invalid filter type: {ftype}")
flags = re.I if ignorecase else 0
r = re.compile(regex, flags=flags)
f = { "type": ftype, "pattern": r, "text": regex }
self.regex_filters.append(f)
return
def match(self, context:dict, text:str):
mset = []
relpath = None
offset = 0
for m in self.pattern.findall(text):
if isinstance(m, tuple):
val = m[self.val_group] if self.val_group is not None else None
ctx = m[self.ctx_group] if self.ctx_group is not None else None
else:
val = m if self.val_group is not None else None
ctx = m if self.ctx_group is not None else None
if relpath is None:
relpath = self.relPath(context['basepath'], context['fullpath'])
rm = RakeMatch(self,
file=relpath,
line=context['lineno'])
val_off = 0
if val is not None:
val_off = text.find(val, offset)
val_len = len(val)
rm.set_value(val, val_off, val_len)
ctx_off = 0
if ctx is not None:
ctx_off = text.find(ctx, offset)
ctx_len = len(ctx)
rm.set_context(ctx, ctx_off, ctx_len)
offset = max(ctx_off, val_off) + 1
rm.match_groups = m # save the groups for later use in filters
rm.full_context = context # save the context for later use in filters
mset.append(rm)
results = filter(self.filter, mset)
return results
def filter(self, m:RakeMatch):
'''
return False if result should be filtered.
'''
# check all filters. A single positive match is enough to return
# False (indicating result should be filtered).
for rf in self.regex_filters:
mf = rf['type'] # match field (to be matched vs. pattern)
try:
val = m.__getattr__(mf)
except AttributeError:
continue
if val is None: continue
if rf['pattern'].match(val): return False
return True
class FiletypeContextRake(Rake):
'''
Manages a set of Rakes, applying each based on file type (extension).
'''
def __init__(self, ptype:str, # type of rake (short desc)
pdesc:str, # description of rake (long desc)
severity:str, # default description (LOW, MEDIUM, HIGH)
blacklist:list=None, # list of file types to skip
**kwargs):
Rake.__init__(self, ptype, pdesc, severity, part='content', **kwargs)
self.blacklist = blacklist if blacklist is not None else []
self.rakes = dict()
return
def addRakePattern(self, filetype:str, rake:RakePattern):
self.rakes[filetype] = rake
return
def match(self, context:dict, text:str):
filetype = context.get("filetype", None)
if filetype in self.blacklist: return []
rake = None
if filetype is not None:
# find a rake matching file type
filetype = filetype.lower()
rake = self.rakes.get(filetype, None)
if rake is None:
# is there a default rake?
rake = self.rakes.get(None, None)
if rake is None:
# no default, empty match set
return []
mset = rake.match(context, text)
return filter(self.filter, mset)
class RakeToken(FiletypeContextRake):
'''
Detect tokens embedded in code or configurations. This is context
sensitive (based on file type).
'''
def __init__(self, minlength:int=6, **kwargs):
FiletypeContextRake.__init__(self, 'token',
'possible token, authentication key, or similar',
'HIGH',
blacklist=['htm', 'html'],
**kwargs)
self.minlength = minlength
# add the default pattern (no other match)
r = r"(([\"']?)[a-z0-9_]{0,32}tok(en)?(\2)[ \t]*[=:][ \t]*(['\"]?)([\x21\x23-\x26\x28-\x7e]{" + str(minlength) + r",})(\5))"
rake = RakePattern(r, self.ptype, self.pdesc, self.severity, ctx_group=0, val_group=5)
rake.addRegexFilter(r'\$\{[a-z_]+\}["\'`]?[.,;]?$', ignorecase=True) # remove ${VARIABLE} hits (w/ optional trailing punctuation)
rake.addRegexFilter(r'\{\{\s*[a-z_.]+\s*\}\}["\'`]?[.,;]?$', ignorecase=True) # remove {{TEMPLATE}} hits (w/ optional trailing punctuation)
rake.addRegexFilter(r'\$[a-z_]+\$?$', ignorecase=True) # remove $VARIABLE hits
rake.addRegexFilter(r'("?)[pP]ublicKeyToken(\1)\s*=', ftype='context', ignorecase=False) # common in csproj/resx/csproj files
self.addRakePattern(None, rake)
# c, c++, java
r = r'([a-z0-9_]{0,32}tok(en)?[ \t]*=[ \t]*"([\x21\x23-\x26\x28-\x7e]{' + str(minlength) + r',})")'
rake = RakePattern(r, self.ptype, self.pdesc, self.severity, ctx_group=0, val_group=2)
self.addRakePattern("c", rake)
self.addRakePattern("h", rake)
self.addRakePattern("cc", rake)
self.addRakePattern("cpp", rake)
self.addRakePattern("hpp", rake)
self.addRakePattern("cs", rake)
self.addRakePattern("groovy", rake)
self.addRakePattern("java", rake)
# js, ts, py
r = r"([a-z0-9_]{0,32}tok(en)?[ \t]*=[ \t]*(['\"])([\x21\x23-\x26\x28-\x7e]{" + str(minlength) + r",})(\3))"
rake = RakePattern(r, self.ptype, self.pdesc, self.severity, ctx_group=0, val_group=3)
self.addRakePattern("js", rake)
self.addRakePattern("ts", rake)
self.addRakePattern("py", rake)
# yaml, yml
r = r"([a-z0-9_]{0,32}tok(en)?[ \t]*:[ \t]*(['\"]?)([\x21\x23-\x26\x28-\x7e]{" + str(minlength) + r",})(\3))"
rake = RakePattern(r, self.ptype, self.pdesc, self.severity, ctx_group=0, val_group=3)
self.addRakePattern("yaml", rake)
self.addRakePattern("yml", rake)
# shell, ini
r = r"([a-z0-9_]{0,32}tok(en)?[ \t]*=[ \t]*(['\"]?)([^\$][\x21\x23-\x26\x28-\x7e]{" + str(minlength-1) + r",})(\3))"
rake = RakePattern(r, self.ptype, self.pdesc, self.severity, ctx_group=0, val_group=3)
self.addRakePattern("sh", rake)
self.addRakePattern("ini", rake)
# json
r = r"(\"[a-z0-9_]{0,32}tok(en)?\"[ \t]*:[ \t]*\"([\x21\x23-\x26\x28-\x7e]{" + str(minlength) + r",})\")"
rake = RakePattern(r, self.ptype, self.pdesc, self.severity, ctx_group=0, val_group=2)
rake.addRegexFilter(r'\$\{[a-z_]+\}["\'`]?[.,;]?$', ignorecase=True) # remove ${VARIABLE} hits (w/ optional trailing punctuation)
rake.addRegexFilter(r'#\{[0-9a-z_\.]+\}["\'`]?[.,;]?$', ignorecase=True) # remove #{TEMPLATE} hits (w/ optional trailing punctuation)
rake.addRegexFilter(r'\{\{\s*[a-z_]+\s*\}\}["\'`]?[.,;]?$', ignorecase=True) # remove {{TEMPLATE}} hits (w/ optional trailing punctuation)
rake.addRegexFilter(r'.*package-lock\.json$', ftype='file') # ignore anything in package-lock.json
self.addRakePattern("json", rake)
# cfm, cfc (ColdFusion)
r = r"([a-z0-9_]{0,32}tok(en)\s*[=:]\s*(\"?)([\x21\x23-\x26\x28-\x7e]{6,})(\4))"
rake = RakePattern(r, self.ptype, self.pdesc, self.severity, ctx_group=0, val_group=3)
rake.addRegexFilter(r'#[\x21\x23-\x26\x28-\x7e]+#', ignorecase=True) # remove #TEMPLATE# hits
self.addRakePattern("cfm", rake)
self.addRakePattern("cfc", rake)
return
def filter(self, m:RakeMatch=None):
return True
class RakePassword(FiletypeContextRake):
'''
Detect passwords embedded in code or configurations. This is context
sensitive (based on file type).
TODO: support php "key => value" syntax
'''
def __init__(self, minlength:int=6, **kwargs):
FiletypeContextRake.__init__(self, 'password',
'possible plaintext password',
'HIGH',
blacklist=['htm', 'html'],
**kwargs)
self.minlength = minlength
# add the default pattern (no other match)
r = r"(([\"']?)[a-z0-9]{0,32}pass(w(ord)?)?(\2)[ \t]*[=:][ \t]*(['\"]?)([\x21\x23-\x26\x28-\x7e]{" + str(minlength) + r",})(\6))"
rake = RakePattern(r, self.ptype, self.pdesc, self.severity, ctx_group=0, val_group=6)
rake.addRegexFilter(r'\$\{[a-z_]+\}["\'`]?[.,;]?$', ignorecase=True) # remove ${VARIABLE} hits (w/ optional trailing punctuation)
rake.addRegexFilter(r'\{\{\s*[0-9a-z_.]+\s*\}\}["\'`]?[.,;]?$', ignorecase=True) # remove {{TEMPLATE}} hits (w/ optional trailing punctuation)
rake.addRegexFilter(r'\$[a-z_]+\$?$', ignorecase=True) # remove $VARIABLE hits
self.addRakePattern(None, rake)
# c, c++, java
r = r'([a-z0-9_]{0,32}pass(w(ord)?)?[ \t]*=[ \t]*"([\x21\x23-\x26\x28-\x7e]{' + str(minlength) + r',})")'
rake = RakePattern(r, self.ptype, self.pdesc, self.severity, ctx_group=0, val_group=3)
self.addRakePattern("c", rake)
self.addRakePattern("h", rake)
self.addRakePattern("cc", rake)
self.addRakePattern("cpp", rake)
self.addRakePattern("hpp", rake)
self.addRakePattern("cs", rake)
self.addRakePattern("groovy", rake)
self.addRakePattern("java", rake)
# js, ts, py
r = r"([a-z0-9_]{0,32}pass(w(ord)?)?[ \t]*=[ \t]*(['\"])([\x21\x23-\x26\x28-\x7e]{" + str(minlength) + r",})(\4))"
rake = RakePattern(r, self.ptype, self.pdesc, self.severity, ctx_group=0, val_group=4)
self.addRakePattern("js", rake)
self.addRakePattern("ts", rake)
self.addRakePattern("py", rake)
# yaml, yml
r = r"([a-z0-9_]{0,32}pass(w(ord)?)?[ \t]*:[ \t]*(['\"]?)([\x21\x23-\x26\x28-\x7e]{" + str(minlength) + r",})(\4))"
rake = RakePattern(r, self.ptype, self.pdesc, self.severity, ctx_group=0, val_group=4)
rake.addRegexFilter(r'\{\{\s*[a-z_]+\s*\}\}$', ignorecase=True) # remove {{TEMPLATE}} hits
self.addRakePattern("yaml", rake)
self.addRakePattern("yml", rake)
# shell, ini
r = r"([a-z0-9_]{0,32}pass(w(ord)?)?[ \t]*=[ \t]*(['\"]?)([^\$][\x21\x23-\x26\x28-\x7e]{" + str(minlength-1) + r",})(\4))"
rake = RakePattern(r, self.ptype, self.pdesc, self.severity, ctx_group=0, val_group=4)
self.addRakePattern("sh", rake)
self.addRakePattern("ini", rake)
# json
r = r"(\"[a-z0-9_]{0,32}pass(w(ord)?)?\"[ \t]*:[ \t]*\"([\x21\x23-\x26\x28-\x7e]{" + str(minlength) + r",})\")"
rake = RakePattern(r, self.ptype, self.pdesc, self.severity, ctx_group=0, val_group=3)
rake.addRegexFilter(r'\$\{[a-z_]+\}["\'`]?[.,;]?$', ignorecase=True) # remove ${VARIABLE} hits (w/ optional trailing punctuation)
rake.addRegexFilter(r'#\{[a-z_]+\}["\'`]?[.,;]?$', ignorecase=True) # remove #{TEMPLATE} hits (w/ optional trailing punctuation)
rake.addRegexFilter(r'\{\{\s*[a-z_]+\s*\}\}["\'`]?[.,;]?$', ignorecase=True) # remove {{TEMPLATE}} hits (w/ optional trailing punctuation)
rake.addRegexFilter(r'.*package-lock\.json$', ftype='file') # ignore anything in package-lock.json
self.addRakePattern("json", rake)
# cfm, cfc (ColdFusion)
r = r"([a-z0-9_]{0,32}pass(w(ord))\s*[=:]\s*(\"?)([\x21\x23-\x26\x28-\x7e]{6,})(\4))"
rake = RakePattern(r, self.ptype, self.pdesc, self.severity, ctx_group=0, val_group=4)
rake.addRegexFilter(r'#[\x21\x23-\x26\x28-\x7e]+#', ignorecase=True) # remove #TEMPLATE# hits
self.addRakePattern("cfm", rake)
self.addRakePattern("cfc", rake)
return
def filter(self, m:RakeMatch):
val = m.value
if val is None: return True
if val.lower() in Rake.common_passwords: return False
return super().filter(m)
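# --- Added illustrative sketch (not part of the original module) ---
# A minimal, standalone demonstration of how the shell/ini password pattern
# registered above lines up its capture groups, assuming the default
# minlength of 6.  The function name and sample line are hypothetical.
def _demo_password_pattern_groups():
    import re
    pat = re.compile(
        r"([a-z0-9_]{0,32}pass(w(ord)?)?[ \t]*=[ \t]*(['\"]?)([^\$][\x21\x23-\x26\x28-\x7e]{5,})(\4))",
        re.IGNORECASE)
    m = pat.search('DB_PASSWORD="s3cr3t-value"')
    # group(1) is the full key=value context, group(5) is the captured secret,
    # mirroring ctx_group=0 / val_group=4 in the 0-indexed groups() tuple.
    return m.group(1), m.group(5)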
class RakeSet(object):
'''
A wrapper (list) of RakePattern objects. Each pattern in this list will
be evaluated against each line of input text.
'''
def __init__(self, verbose:bool=False):
self.content_rakes = list()
self.meta_rakes = list()
self.verbose = verbose
# metrics for this rake set
self.total_files = 0
self.total_lines = 0
self.total_hits = 0
self.total_size = 0
return
def add(self, rake:Rake):
if self.verbose:
print("* Adding new Rake: " + str(rake), file=sys.stderr)
if rake.part == 'filemeta':
self.meta_rakes.append(rake)
return
if rake.part == 'content':
self.content_rakes.append(rake)
return
raise RuntimeError("Unknown rake type")
def match_context(self, context:dict):
hits = list()
for rake in self.meta_rakes:
if rake.match(context):
rm = RakeMatch(rake,
file=Rake.relPath(context['basepath'],
context['fullpath']),
line=None)
if rake.filter(rm) is False: continue
hits.append(rm)
return hits
def match_content(self, context, text:str):
matches = []
for rake in self.content_rakes:
if self.verbose: print(f"using rake: {rake} at {context}: {text}")
mset = rake.match(context, text)
for m in mset:
if rake.filter(m) is False: continue
matches.append(m)
return matches
def match(self, context:dict, blacklist=None):
if blacklist is None:
blacklist = [".exe", ".dll", ".jpg", ".jpeg", ".png", ".gif", ".bmp",
".tiff", ".zip", ".doc", ".docx", ".xls", ".xlsx",
".pdf", ".tar", ".tgz", ".gz", ".tar.gz",
".jar", ".war", "ear", ".class", ".css" ]
if self.verbose:
print(f"* New context: {str(context)}", file=sys.stderr)
path = context.get("path", None)
filename = context.get("filename", None)
context['lineno'] = None
findings = list()
if path is None or filename is None:
# log an error here
if self.verbose:
print("* Context is invalid?", file=sys.stderr)
return findings
fullpath = context.get("fullpath", None)
if fullpath is None:
fullpath = os.path.join(path, filename)
for ext in blacklist:
# log message here
if ext == filename[-len(ext):].lower():
if self.verbose:
print(f"* File matches blacklisted extension: {ext}", file=sys.stderr)
return findings
if self.verbose:
print("* Applying context Rakes", file=sys.stderr)
context_hits = self.match_context(context)
if len(context_hits) > 0:
findings.extend(context_hits)
try:
fd = open(fullpath, encoding="utf-8")
except FileNotFoundError:
if self.verbose:
print(f"* Unable to open file: {fullpath}", file=sys.stderr)
return findings
if self.verbose:
print(f"* Applying content Rakes", file=sys.stderr)
try:
lineno = 1
for line in fd:
if self.verbose and lineno % 100 == 0:
print(f"* {lineno} lines processed ({fullpath})", file=sys.stderr)
context['lineno'] = lineno
hits = self.match_content(context, line)
findings.extend(hits)
lineno += 1
except UnicodeDecodeError:
# simply can't process this file due to encoding -- skip it.
lineno = 0
fd.close()
self.total_files += 1
self.total_lines += lineno
self.total_hits += len(findings)
try:
self.total_size += os.stat(fullpath).st_size
except (FileNotFoundError, PermissionError):
pass
return findings
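# --- Added illustrative sketch (not part of the original module) ---
# A minimal example of how a RakeSet might be assembled and run against a
# single file.  Only the context keys visibly consumed by RakeSet.match()
# ('basepath', 'path', 'filename', 'fullpath') are shown; individual rakes
# may expect additional keys (for example the file type).  The paths used
# here are hypothetical.
def _demo_rakeset_usage(basepath="/tmp/project", filename="settings.ini"):
    rs = RakeSet(verbose=False)
    rs.add(RakePassword(minlength=6))
    context = {"basepath": basepath,
               "path": basepath,
               "filename": filename,
               "fullpath": os.path.join(basepath, filename)}
    return rs.match(context)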
class RakeMatch(object):
'''
Metadata used along with matches and match sets recording where the match
came from. Offset will be measured in characters, not bytes (for UTF-8).
An offset of 1 is the first column of the line.
'''
    # list of fields to be included (or not included) in output.  See
    # set_secure(), disable_context() and disable_value() below for the
    # methods that toggle these flags.
fields = OrderedDict((('file', True),
('line', True),
('label', True),
('severity', True),
('description', True),
('value_offset', True),
('value_length', True),
('value', True),
('context_offset', True),
('context_length', True),
('context', True)))
_secure = False
_disable_context = False
_disable_value = False
_has_been_read = False
def __init__(self, rake:Rake, file:str=None, line:int=0):
self._label = rake.ptype
self._description = rake.pdesc
self._severity = rake.severity
self._file = file
self._line = line
# these will be set by set_value() and set_context()
self._value = None
self._context = None
return
def secureContext(self):
'''
        Produce a hash which can be used to (reasonably) securely track a
        secret if it moves within a file.  The hash is an MD5 digest of the
        file name and the literal value of the context, separated by a NUL
        byte.
'''
ctx = self._context[2]
if ctx is None:
return ""
md5 = hashlib.md5()
md5.update(self._file.encode('utf-8'))
md5.update(bytes([0x00]))
md5.update(ctx.encode('utf-8')) # context value
return md5.hexdigest()
def __eq__(self, match):
if self._value is None and match._value is not None: return False
if self._value is not None and match._value is None: return False
if self._value is not None and match._value is not None:
if self._value[0] != match._value[0]: return False # offset
if self._value[1] != match._value[1]: return False # length
if self._value[2] != match._value[2]: return False # value
if self._context is None and match._context is not None: return False
if self._context is not None and match._context is None: return False
if self._context is not None and match._context is not None:
if self._context[0] != match._context[0]: return False # offset
if self._context[1] != match._context[1]: return False # length
if self._context[2] != match._context[2]: return False # value
if self._label != match._label: return False
if self._description != match._description: return False
if self._severity != match._severity: return False
if self._file != match._file: return False
if self._line != match._line: return False
return True
def __getattr__(self, k):
RakeMatch._has_been_read = True
if k in RakeMatch.fields.keys():
if k == 'file': return self._file
if k == 'line': return self._line
if k == 'label': return self._label
if k == 'severity': return self._severity
if k == 'description': return self._description
if k == 'value_offset':
if self._value is None: return None
return self._value[0]
if k == 'value_length':
if self._value is None: return None
return self._value[1]
if k == 'value':
if RakeMatch._secure or self._value is None: return None
return self._value[2]
if k == 'context_offset':
if self._context is None: return None
return self._context[0]
if k == 'context_length':
if self._context is None: return None
return self._context[1]
if k == 'context':
if self._context is None: return None
if RakeMatch._secure: return self.secureContext()
return self._context[2]
if k == 'external_id':
i = "\u001e".join(map(lambda x: str(x), self.aslist())) # \u001e is information (field) separator
return hashlib.md5(i.encode('utf-8')).hexdigest()
raise KeyError(f"Invalid key for RakeMatch: {k}")
@staticmethod
def csv_header():
RakeMatch._has_been_read = True
fields = []
for f in RakeMatch.fields.keys():
if RakeMatch.fields[f]:
fields.append(f)
return fields
@staticmethod
def set_secure():
if RakeMatch._has_been_read:
raise RuntimeError("must not modify RakeMatch structure after read")
RakeMatch._secure = True
RakeMatch.fields['context'] = True
RakeMatch.fields['value'] = False
return
@staticmethod
def disable_context():
if RakeMatch._has_been_read:
raise RuntimeError("must not modify RakeMatch structure after read")
RakeMatch._disable_context = True
RakeMatch.fields['context_offset'] = False
RakeMatch.fields['context_length'] = False
RakeMatch.fields['context'] = False
return
@staticmethod
def disable_value():
if RakeMatch._has_been_read:
raise RuntimeError("must not modify RakeMatch structure after read")
RakeMatch._disable_value = True
RakeMatch.fields['value_offset'] = False
RakeMatch.fields['value_length'] = False
RakeMatch.fields['value'] = False
return
def set_value(self, value:str=None, offset:int=None, length:int=None):
if length is None:
length = len(value)
self._value = (offset, length, value)
return
def set_context(self, value:str=None, offset:int=None, length:int=None):
if length is None:
            length = len(value)
self._context = (offset, length, value)
return
def __str__(self):
RakeMatch._has_been_read = True
return "|".join(map(lambda x: str(x), self.aslist()))
def aslist(self):
RakeMatch._has_been_read = True
outp = []
if RakeMatch.fields['file']: outp.append(self.file)
if RakeMatch.fields['line']: outp.append(self.line)
if RakeMatch.fields['label']: outp.append(self.label)
if RakeMatch.fields['severity']: outp.append(self.severity)
if RakeMatch.fields['description']: outp.append(self.description)
if RakeMatch.fields['value_offset']: outp.append(self.value_offset)
if RakeMatch.fields['value_length']: outp.append(self.value_length)
val = self.value if not RakeMatch._secure else None
if RakeMatch.fields['value']: outp.append(val)
if RakeMatch.fields['context_offset']: outp.append(self.context_offset)
if RakeMatch.fields['context_length']: outp.append(self.context_length)
ctx = self.context if not RakeMatch._disable_context else None
if RakeMatch.fields['context']: outp.append(ctx)
return outp
def asdict(self):
RakeMatch._has_been_read = True
d = { "path": self.file,
"line": int(self.line) if self.line is not None else None,
"type": self.label,
"description": self.description,
"severity": self.severity }
if not RakeMatch._disable_context:
d['context'] = { "value": self.context,
"offset": self.context_offset,
"length": self.context_length }
if not RakeMatch._disable_value:
d['value'] = { "value": self.value if not RakeMatch._secure else None,
"offset": self.value_offset,
"length": self.value_length }
return d
class RakeHostname(RakePattern):
'''
A RakeHostname acts as a 'root', meaning that it will match any valid hosts
in the domain which share the root value. For example, root="abc.com"
will match not only "abc.com", but also "xyz.abc.com" and
"foo.xyz.abc.com".
A domain name may include A-Z, 0-9, and '-'. The '-' may not appear at
the beginning or end of the name. A hostname must be less than 255
characters in length, and no individual component of the hostname can
exceed 63 characters.
Any number of subdomains (equal to or beyond the depth inherent in the
root) are supported.
'''
# a list of TLDs for hostname checks (these account for more than 99% of
# hosts on the internet)
TLDs = [ 'au', 'br', 'cn', 'com', 'de', 'edu', 'gov', 'in', 'info', 'ir',
'mil', 'net', 'nl', 'org', 'ru', 'tk', 'top', 'uk', 'xyz' ]
def __init__(self, domain:str=None, **kwargs):
if domain is not None:
d = re.escape(domain)
r = r'\b(([a-z1-9\-]{1,63}\.)+' + d + r')\b'
else:
# going to make an arbitrary call here... domain must be 2 or
# more "parts". A name will need to be "host.d1.d2", We'll miss
# things like "localhost.localdomain" but that should be
# acceptable since we're not picking up 'a.b'-type symbols. If
# you don't like this, change the "{2,}" below to a simple "+".
r = r'\b([a-z1-9\-]{1,63}(\.[a-z1-9\-]{1,63}){2,6})\b'
rdesc = 'a hostname (possible information disclosure)'
if domain is not None:
rdesc += f" matching domain '{domain}'"
RakePattern.__init__(self, r,
'hostname',
rdesc,
"LOW",
ctx_group=0,
val_group=0,
**kwargs)
return
@staticmethod
def isValidHostname(fqdn:str, minparts:int=3):
# length of FQDN must be <= 255
l = len(fqdn)
if l < 2 or l > 255: return False
labels = fqdn.split(".")
if len(labels) < minparts: return False
# last label must be a valid TLD (we'll default to "common" here!)
if labels[-1].lower() not in RakeHostname.TLDs:
return False
# each individual
for label in labels:
if len(label) > 63: return False
return True
def filter(self, m:RakeMatch):
fqdn = m.value
if not RakeHostname.isValidHostname(fqdn):
return False
return super().filter(m)
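# --- Added illustrative sketch (not part of the original module) ---
# A few concrete cases showing what the static validator above accepts:
# at least `minparts` dot-separated labels, each at most 63 characters,
# with the last label drawn from the short TLD list.
def _demo_is_valid_hostname():
    assert RakeHostname.isValidHostname("www.example.com") is True
    assert RakeHostname.isValidHostname("example.com") is False              # only two labels, minparts defaults to 3
    assert RakeHostname.isValidHostname("example.com", minparts=2) is True
    assert RakeHostname.isValidHostname("host.example.lan", minparts=2) is False  # 'lan' is not in RakeHostname.TLDs
    return True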
class RakeURL(RakePattern):
'''
Detect URLs containing basic auth credentials.
'''
def __init__(self, **kwargs):
'''
a very crude pattern to match URLs, roughly of the pattern:
xxx://user:[email protected]:ppp/zzz+
'''
r = r'\b([a-z]{2,8}://(([a-z0-9%+/=\-]+):([a-z0-9%+/=\-]+))@([A-Z0-9_-]{1,63}(\.[A-Z0-9_-]{1,63}){1,6})(:\d{1,5})?(/(\S*)?)?)\b'
RakePattern.__init__(self,
r,
'auth url',
'URL containing credentials (basic auth)',
'HIGH',
ctx_group=0, val_group=3, **kwargs)
return
def filter(self, m:RakeMatch=None):
'''
If this method returns False, the match (m) will be suppressed. See
the filter() method on the Rake class for more information.
'''
if m is None: return True
groups = m.match_groups
usern = groups[2]
passw = groups[3]
host = groups[4]
if usern.lower() in Rake.common_usernames and \
passw.lower() in Rake.common_passwords:
return False
dparts = host.lower().split('.')
if len(dparts) >= 2 and \
dparts[-2] in ['example', 'host', 'hostname', 'domain', 'domainname'] and \
dparts[-1] in ['org', 'com', 'net']: return False
return super().filter(m)
class RakeEmail(RakePattern):
'''
Detect email addresses. If domain is not None, the domain associated with
the email account must match the specified domain.
'''
def __init__(self, domain:str=None, **kwargs):
if domain is not None:
d = re.escape(domain)
            r = r'([a-zA-Z0-9_.\-]{1,63}@' + d + r')'
else:
r = r'([a-zA-Z0-9_.\-]{1,63}@[A-Za-z0-9_\-]{1,63}(\.[A-Za-z0-9_\-]{1,63}){1,6})'
rdesc = 'an email address (possible information disclosure)'
if domain is not None:
rdesc += f" matching domain '{domain}'"
RakePattern.__init__(self, r,
'email',
rdesc,
'LOW',
ctx_group=0, val_group=0,
**kwargs)
return
def filter(self, m:RakeMatch):
email = m.value
try:
user, host = email.split("@")
except ValueError:
return False
if not RakeHostname.isValidHostname(host, minparts=2):
return False
return super().filter(m)
class RakePrivateKey(RakePattern):
'''
Find PEM headers for private key files (SSH, X.509, etc). One of:
-----BEGIN PRIVATE KEY-----
-----BEGIN RSA PRIVATE KEY-----
-----BEGIN DSA PRIVATE KEY-----
-----BEGIN EC PRIVATE KEY-----
-----BEGIN OPENSSH PRIVATE KEY-----
TODO: certificates are (often) base64-encoded DER. Can we
specifically detect a private key based on the DER?
'''
def __init__(self, **kwargs):
kp = r'^(-----BEGIN ([A-Z0-9]{2,} )?PRIVATE KEY-----)$'
RakePattern.__init__(self, kp,
'private key',
'header indicating presence of a private key in a PEM format',
'HIGH',
ctx_group=0, ignorecase=False, **kwargs)
return
class RakeBearerAuth(RakePattern):
'''
Find likely Bearer auth tokens (as used in HTTP headers). Eg,
Authorization: Bearer <PASSWORD>
'''
def __init__(self, **kwargs):
kp = r'(Bearer\s+([^\'"]\S{7,}))'
RakePattern.__init__(self, kp,
'auth bearer',
'possible value used with an Authorization: header',
'HIGH',
ctx_group=0, val_group=1, ignorecase=False, **kwargs)
self.addRegexFilter(r'\$\{[a-z_]+\}["\'`]?[.,;]?$', ignorecase=True) # remove ${VARIABLE} hits (w/ optional trailing punctuation)
self.addRegexFilter(r'\{\{\s*[a-z_]+\s*\}\}["\'`]?[.,;]?$', ignorecase=True) # remove {{TEMPLATE}} hits (w/ optional trailing punctuation)
self.addRegexFilter(r'\$[a-z_]+\$?$', ignorecase=True) # remove $VARIABLE hits
return
class RakeBasicAuth(RakePattern):
'''
Find likely Basic auth tokens (as used in HTTP headers). Eg,
Authorization: Basic dXNlcjpwYXNzd29yZAo=
Note that we use a minimum (practical) length of 16 when matching
base64 data patterns. If a potential base64-encoded value is found,
we will decode it and make sure we have a ':' somewhere in the string
as a minimal check.
'''
def __init__(self, minlen:int=16, encoding:str='utf-8', **kwargs):
kp = r'(Basic ([A-Za-z0-9+/]{'+ str(minlen) + r',}={0,8}))$'
RakePattern.__init__(self, kp,
'auth basic',
'possible value used with an Authorization: header',
'HIGH',
ctx_group=0, val_group=1, ignorecase=False, **kwargs)
self.encoding = encoding
return
def match(self, context:dict, text:str):
mset = []
for match in self.pattern.findall(text):
# we may or may not have something juicy... let's attempt to
# decode it and see if it checks out!
try:
encoded = match[1] # skip leading "Basic " label
val = base64.b64decode(encoded, validate=True).decode(self.encoding).strip()
except Exception:
# not base64 means this probably isn't Basic auth
continue
# does it smell like basic auth? (user:pass)
if not val.isprintable() or val.find(":") < 1:
continue
m = RakeMatch(self,
file=self.relPath(context['basepath'], context['fullpath']),
line=context['lineno'])
m.set_context(match[0], offset=0, length=len(match[0]))
m.set_value(value=match[1], offset=6, length=len(match[1]))
mset.append(m)
return filter(self.filter, mset)
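# --- Added illustrative sketch (not part of the original module) ---
# The helper below mirrors the minimal "does this smell like Basic auth?"
# check performed in RakeBasicAuth.match(): the payload must be valid
# base64 and must decode to a printable string containing a ':' after at
# least one character (i.e. "user:password").
def _looks_like_basic_auth(encoded:str, encoding:str='utf-8'):
    try:
        val = base64.b64decode(encoded, validate=True).decode(encoding).strip()
    except Exception:
        return False
    return val.isprintable() and val.find(":") >= 1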
class RakeJWTAuth(RakePattern):
'''
Find likely JWT tokens. Eg,
<KEY>
This is three sections of data, formatted: header.payload.signature
The header and payload must be base64-encoded JSON. We assume that the
third section is either the signature or is non-standard, so we will make
no attempt to decode or otherise validate it.
Also note that we use a minimum (practical) length of 24 when matching
base64 data patterns. In reality, it would be difficult to encode a
header or payload in this length, but it serves as an effective filter.
JWT tokens are not supposed to include sensitive data, but they might
still have been generated on a server and saved for use in later
authorizations. This STORAGE of JWT is dangerous and should be flagged.
'''
def __init__(self, encoding:str='utf-8', **kwargs):
kp = r'\b(([A-Za-z0-9+/]{24,}={0,2})\.([A-Za-z0-9+/]{24,}={0,2})\.([A-Za-z0-9+/_-]{24,}={0,2}))\b'
RakePattern.__init__(self, kp,
'auth jwt',
                             'possible JSON web token (JWT)',
'MEDIUM',
ignorecase=False, **kwargs)
self.encoding = encoding
return
def match(self, context:dict, text:str):
mset = []
relfile = None
for m in self.pattern.findall(text):
# we may or may not have something juicy... let's attempt to
# decode it and see if it checks out!
if self.encoding is not None:
try:
# all we care is that the base64 and JSON decode works on
# the first two parts of the token. If either fail, this
# isn't JWT.
for st in [ m[1], m[2] ]: # st is subtoken
                        npad = -len(st) % 4     # npad is num padding chars (req'd)
st = st + ("=" * npad)
td = base64.b64decode(st).decode('utf-8') # td is token data
json.loads(td)
except Exception:
continue
token = m[0]
if relfile is None:
relfile = self.relPath(context['basepath'], context['fullpath'])
rm = RakeMatch(self,
file=relfile,
line=context['lineno'])
rm.set_value(value=token, length=len(token), offset=text.find(token))
mset.append(rm)
return filter(self.filter, mset)
class RakeAWSHMACAuth(RakePattern):
'''
Find AWS4-HMAC-SHA256 authorization headers, eg:
Authorization: AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request, SignedHeaders=host;range;x-amz-date, Signature=fe5f80f77d5fa3beca038a248ff027d0445342fe2855ddc963176630326f1024
See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html
'''
def __init__(self, **kwargs):
        kp = r'(AWS4-HMAC-SHA256 (.+))$'
RakePattern.__init__(self, kp,
'auth aws-hmac-sha256',
'possible AWS HMAC-SHA256 authorization key',
'HIGH',
ctx_group=0, val_group=1, ignorecase=False, **kwargs)
return
class RakeSSHPass(RakePattern):
'''
Find uses of sshpass command (non-interactive ssh authentication).
'''
def __init__(self, **kwargs):
kp = r'\b(sshpass .*-p\s?([\'"]?)(\S+)(\2))'
RakePattern.__init__(self, kp,
'sshpass use',
'Use of sshpass using hard-coded password',
'HIGH',
ctx_group=0, val_group=2, ignorecase=False, **kwargs)
return
```
|
{
"source": "jcwoods/timefn",
"score": 3
}
|
#### File: jcwoods/timefn/timefn.py
```python
import atexit
import time
class TimeCollector(object):
'''
Only one TimeCollector object (a singleton). This class tracks all of
the FunctionTimeAccumulator objects allocated during the run.
'''
instance = None
class __InnerTimeCollector(object):
def __init__(self):
self.tracked_functions = [] # list of FTA objects
atexit.register(self.report) # make sure we dump stats last.
def report(self):
for tao in TimeCollector.instance.tracked_functions:
tao.show_report()
return
def __init__(self):
if TimeCollector.instance is None:
TimeCollector.instance = TimeCollector.__InnerTimeCollector()
def add_accumulator(self, fta):
'''
Add the given FunctionTimeAccumulator to the TimeCollector
'''
TimeCollector.instance.tracked_functions.append(fta)
def report(self):
'''
Generate a report for each FunctionTimeAccumulator.
'''
TimeCollector.instance.report()
return
class FunctionTimeAccumulator(object):
def __init__(self, fn):
self.func = fn
self.count = 0
self.timings = []
def show_report(self):
n = 0
total = 0.0
min_val = None
max_val = None
min_pos = None
max_pos = None
for t in self.timings:
if min_val is None or t < min_val:
min_val = t
min_pos = n
if max_val is None or t > max_val:
max_val = t
max_pos = n
total += t
n += 1
print("")
print("Report for: " + self.func.__name__)
print(" times called: {0:8d}".format(n))
if n == 0:
return
average = total / n
print(" min value: {0:3.6f} [{1:d}]".format(min_val, min_pos))
print(" max value: {0:3.6f} [{1:d}]".format(max_val, max_pos))
print(" avg value: {0:3.6f}".format(average))
return
def __call__(self, *args, **kwargs):
st = time.time()
result = self.func(*args, **kwargs)
et = time.time() - st
self.timings.append(et)
self.count += 1
return result
def timefn(fn):
'''
This is the annotation used to mark functions for tracking. For example,
@timereport
def myadder(a, b):
return a + b
The report will be displayed when the process is shutting down (atexit).
'''
tc = TimeCollector()
fta = FunctionTimeAccumulator(fn)
tc.add_accumulator(fta)
return fta
'''
Example usage:
@timefn
def doadd(a, b):
return a + b
@timefn
def dosub(a, b):
return a - b
for i in range(1, 100):
doadd(i, i+1)
for i in range(1, 100):
dosub(i, i+1)
'''
```
|
{
"source": "jcwright77/pleiades",
"score": 2
}
|
#### File: pleiades/analysis/helpers.py
```python
import copy
import numpy as np
import scipy.integrate
from scipy.interpolate import interp1d, interp2d, RectBivariateSpline, UnivariateSpline, griddata
from scipy.interpolate import splprep, splev
from scipy.spatial import Delaunay, ConvexHull
from scipy.optimize import fmin
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.path import Path
from pleiades.analysis.math import get_gpsi
#import analysis.datahelpers as dh
def scale_patches(patches,scale):
"""scale patches by desired factor."""
t = mpl.transforms.Affine2D().scale(scale)
for p in patches:
p.set_transform(p.get_transform()+t)
return patches
def get_mirror_patches(patches,axis=0,scale=1):
"""Get mirror image patches across desired axis.
axis = 0 means reflect across x axis, axis = 1
means reflect across y axis. Optional scale
argument can be used as well."""
x_ref = np.array([[1,0,0],[0,-1,0],[0,0,1]])
y_ref = np.array([[-1,0,0],[0,1,0],[0,0,1]])
if axis == 0:
matrix = x_ref
else:
matrix = y_ref
t = mpl.transforms.Affine2D(matrix=matrix).scale(scale)
mirror_patches = []
for p in patches:
m_patch = copy.copy(p)
m_patch.set_transform(m_patch.get_transform()+t)
mirror_patches.append(m_patch)
return mirror_patches
def transpose_patches(patches):
"""Transpose patches (reflect across line y=x)."""
transpose = np.array([[0,1,0],[1,0,0],[0,0,1]])
t = mpl.transforms.Affine2D(matrix=transpose)
    for p in patches:
        p.set_transform(p.get_transform()+t)
    return patches
class Boundary(object):
def __init__(self,vertices):
self._interpolate_verts(vertices)
def _interpolate_verts(self,vertices,u=None,s=0.0,npts=200):
tck,u = splprep(vertices.T,u=u,s=s)
u_new = np.linspace(u.min(),u.max(),npts)
self.tck = tck
self.u = u_new
r_new,z_new = splev(u_new,tck,der=0)
self.verts = np.vstack((r_new,z_new)).T
def interpolate(self,u):
return splev(u,self.tck,der=0)
class FieldLine(object):
def __init__(self,psi,verts):
self.psi = psi
self._verts = verts
self._interpolate_verts()
self.reorder_verts()
def is_closed(self):
return np.all(self._verts[0,:] == self._verts[-1,:])
def _interpolate_verts(self,u=None,s=0.0,k=2,npts=1000):
if self.is_closed():
per = 1
else:
per = 0
tck,u = splprep(self._verts.T,u=u,k=k,s=s,per=per)
u_new = np.linspace(u.min(),u.max(),npts)
self.tck = tck
self.u = u_new
r_new,z_new = splev(u_new,tck,der=0)
self.verts = np.vstack((r_new,z_new)).T
def interpolate(self,u):
return splev(u,self.tck,der=0)
def reorder_verts(self,steptol=0.1):
if not self.is_closed():
istart = np.argmin(self.verts[:,0])
tmpvert = np.roll(self.verts,-istart,axis=0)
if (tmpvert[1,1]-tmpvert[0,1])**2 + (tmpvert[1,0]-tmpvert[0,0])**2 > steptol**2:
tmpvert = np.roll(tmpvert,-1,axis=0)[::-1,:]
self.verts = tmpvert
def get_svec(self):
s = np.zeros(self.verts.shape[0])
r,z = self.verts[:,0], self.verts[:,1]
s[1:] = np.cumsum(np.sqrt((r[1:]-r[0:-1])**2+(z[1:]-z[0:-1])**2))
return s
def get_length(self):
return self.get_svec()[-1]
def get_ds(self):
r,z = self.verts[:,0], self.verts[:,1]
return np.sqrt((r[1]-r[0])**2+(z[1]-z[0])**2)
def interpolate_onto(self,R,Z,Q,method="cubic"):
return griddata((R.ravel(),Z.ravel()),Q.ravel(),xi=(self.verts[:,0],self.verts[:,1]),method=method)
def get_kappa_n(self,R,Z,BR,BZ,method="cubic"):
modB = np.sqrt(BR**2+BZ**2)
bhatr, bhatz = BR/modB, BZ/modB
bhatr_terp = self.interpolate_onto(R,Z,bhatr)
bhatz_terp = self.interpolate_onto(R,Z,bhatz)
signb = np.sign(self.verts[0,0]*bhatr_terp[0] + self.verts[0,1]*bhatz_terp[0])
kap_r, kap_z = signb*self.d_ds(bhatr_terp), signb*self.d_ds(bhatz_terp)
return kap_r, kap_z
def d_ds(self,Q):
ds = self.get_ds()
res = np.zeros_like(Q)
res[1:-1] = (Q[2:] - Q[:-2]) / (2*ds)
res[0] = (-3.0/2.0*Q[0] + 2*Q[1] - 1.0/2.0*Q[2]) / ds
res[-1] = (1.0/2.0*Q[-3] - 2*Q[-2] + 3.0/2.0*Q[-1]) / ds
return res
def get_gradpsi(self,R,Z,BR,BZ,method="cubic"):
gradpsi_r = self.interpolate_onto(R,Z,R*BZ)
gradpsi_z = -self.interpolate_onto(R,Z,R*BR)
return gradpsi_r,gradpsi_z
def intersection(self,boundary):
def _distfunc(self,boundary,s1,s2):
rfl,zfl = self.interpolate(s1)
rb,zb = boundary.interpolate(s2)
return (rfl-rb)**2 + (zfl-zb)**2
distfunc = lambda x0: _distfunc(self,boundary,x0[0],x0[1])
res = fmin(distfunc,[.5,.5],disp=0)
return res[0]
def apply_boundary(self,b1,b2):
self.ubound0 = self.intersection(b1)
self.ubound1 = self.intersection(b2)
def get_bounded_fl(self,npts=1000):
return self.interpolate(np.linspace(self.ubound0,self.ubound1,npts))
def contour_points(contourset):
condict = {}
for ilev, lev in enumerate(contourset.levels):
condict[lev] = [FieldLine(lev,seg) for seg in contourset.allsegs[ilev]]
return condict
def regular_grid(xx,yy,*args,**kwargs):
nx = kwargs.pop("nx",200)
ny = kwargs.pop("ny",200)
xi = kwargs.pop("xi",None)
yi = kwargs.pop("yi",None)
method = kwargs.pop("method","linear")
""" interpolate irregularly gridded data onto regular grid."""
if xi is not None and yi is not None:
pass
else:
x = np.linspace(xx.min(), xx.max(), nx)
y = np.linspace(yy.min(), yy.max(), ny)
xi, yi = np.meshgrid(x,y,indexing="ij")
#then, interpolate your data onto this grid:
points = np.vstack((xx.flatten(),yy.flatten())).T
zs = []
for z in args:
zi = griddata(points,z.flatten(),(xi,yi),method=method)
zs.append(zi)
return (xi,yi) + tuple(zs)
def get_deltapsi(data,Req,Zeq):
""" Returns contribution to psi from fast ion currents.
Args:
data (netcdf4 Dataset object)
Req (2D R grid from eqdsk)
Zeq (2D Z grid from eqdsk)
Returns:
deltapsi (psi from fast ion currents on eqdsk grid)
"""
var = data.variables
dims = data.dimensions
nrj = dims["nreqadim"].size
nzj = dims["nzeqadim"].size
req = np.linspace(np.min(Req),np.max(Req),nrj)
zeq = np.linspace(np.min(Zeq),np.max(Zeq),nzj)
dr,dz = req[1]-req[0], zeq[1]-zeq[0]
rr,zz = np.meshgrid(req,zeq)
gpsi_jphi = get_gpsi(rr,zz)
jphi = var["curr_diamcurv_phi"][:]
if len(jphi.shape) > 2:
jphi = np.sum(jphi,axis=0)
jphi*=1E4 # A/cm^2 to A/m^2
Iphi = jphi*dr*dz
deltapsi = (gpsi_jphi.dot(Iphi.flatten())).reshape(rr.shape)
_,_,deltapsi = regular_grid(rr,zz,deltapsi,xi=Req,yi=Zeq)
return deltapsi
def poly_fit(x,y,order=3):
n = order+1
m = len(y)
basis_fns = [(lambda z,i=i: z**i) for i in range(n)]
A = np.zeros((m,n))
for i in range(m):
for j in range(n):
A[i,j] = basis_fns[j](x[i])
(u,s,vt) = np.linalg.svd(A)
Sinv = np.zeros((n,m))
s[ s<1.0e-10 ] = 0.0
s[ s>=1.0e-10 ] = 1.0/s[ s>=1.0e-10]
Sinv[:n,:n] = np.diag(s)
c = np.dot(Sinv,u.T)
c = np.dot(vt.T,c)
c = np.dot(c,y)
return basis_fns,c
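# --- Added illustrative sketch (not part of the original module) ---
# poly_fit() returns the monomial basis functions and their SVD-based
# least-squares coefficients; the fitted curve is the weighted sum of the
# basis functions.  Example: recover y = 1 + 2x + 3x^2 exactly.
def _demo_poly_fit():
    x = np.linspace(-1.0, 1.0, 20)
    y = 1.0 + 2.0*x + 3.0*x**2
    basis_fns, c = poly_fit(x, y, order=2)
    yfit = sum(ci*f(x) for ci, f in zip(c, basis_fns))
    return np.allclose(yfit, y)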
def reflect_and_hstack(Rho, Z, *args):
"""Reflect and concatenate grid and quantities in args to plot both half
planes (rho>=0 and rho<=0). Currently this function only reflects across
the z axis since that is the symmetry convention we've taken for the
machine.
Parameters
----------
Rho : np.array
2D array for the R coordinates of the grid
Z : np.array
2D array for the Z coordinates of the grid
args : tuple
2D arrays of any quantity on this grid you wish to plot in both half
planes
"""
Rho_total = np.hstack((-Rho[:,-1:0:-1],Rho))
Z_total = np.hstack((Z[:,-1:0:-1],Z))
arglist = []
for arg in args:
assert arg.shape == Rho.shape
arglist.append(np.hstack((arg[:,-1:0:-1],arg)))
return (Rho_total,Z_total)+tuple(arglist)
def get_concave_hull(Rho,Z,Q):
points = np.array([[rho0,z0] for rho0,z0,q in zip(Rho.flatten(),Z.flatten(),Q.flatten()) if ~np.isnan(q)])
tri = Delaunay(points)
# Make a list of line segments:
# edge_points = [ ((x1_1, y1_1), (x2_1, y2_1)),
# ((x1_2, y1_2), (x2_2, y2_2)),
# ... ]
edge_points = []
edges = set()
def add_edge(i, j):
"""Add a line between the i-th and j-th points, if not in the list already"""
if (i, j) in edges or (j, i) in edges:
# already added
return
edges.add( (i, j) )
edge_points.append(points[ [i, j] ])
# loop over triangles:
# ia, ib, ic = indices of corner points of the triangle
    for ia, ib, ic in tri.simplices:
add_edge(ia, ib)
add_edge(ib, ic)
add_edge(ic, ia)
# plot it: the LineCollection is just a (maybe) faster way to plot lots of
# lines at once
lines = LineCollection(edge_points)
plt.figure()
plt.title('Delaunay triangulation')
plt.gca().add_collection(lines)
#plt.plot(points[:,0], points[:,1], 'o', hold=1)
plt.xlim(-20, 20)
plt.ylim(-10, 10)
plt.show()
def transform_to_rtheta(Rho,Z,rho_component,z_component):
"""Transform Rho Z grid and rho,z components of vector field to polar coordinates"""
R = np.sqrt(Rho**2+Z**2)
    Theta = np.pi/2 - np.arctan2(Z,Rho)  # polar angle measured from the +z axis
sin_t = np.sin(Theta)
cos_t = np.cos(Theta)
r_component = rho_component*sin_t + z_component*cos_t
theta_component = rho_component*cos_t - z_component*sin_t
return R,Theta,r_component,theta_component
def transform_to_rhoz(R,Theta,r_component,theta_component):
"""Transform R Theta grid and r theta components of vector field to cylindrical coordinates"""
Rho = R*np.sin(Theta)
Z = R*np.cos(Theta)
rho_component = (r_component*Rho + theta_component*Z)/R
z_component = (r_component*Z - theta_component*Rho)/R
return Rho,Z,rho_component,z_component
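# --- Added illustrative sketch (not part of the original module) ---
# transform_to_rhoz() maps spherical (R, theta) back to cylindrical (rho, z)
# with theta measured from the +z axis: a point at R=1, theta=pi/2 carrying
# a purely radial unit vector comes back as rho=1, z=0 with a purely
# rho-directed unit vector.
def _demo_transform_to_rhoz():
    R, Theta = np.array([1.0]), np.array([np.pi/2])
    vr, vt = np.array([1.0]), np.array([0.0])
    Rho, Z, vrho, vz = transform_to_rhoz(R, Theta, vr, vt)
    return (np.allclose(Rho, 1.0) and np.allclose(Z, 0.0, atol=1e-12)
            and np.allclose(vrho, 1.0) and np.allclose(vz, 0.0, atol=1e-12))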
def locs_to_vals(X,Y,Q,coord_list):
"""Picks values of field Q at desired coordinates.
Args:
X (2D array): grid representing column coordinates of Q
Y (2D array): grid representing row coordinates of Q
Q (2D array): value of Q on grid
coord_list (list):list of tuples (x,y) for desired coordinates
"""
q_vals = []
for x,y in coord_list:
x0_idx,y0_idx = np.argmin(np.abs(X[0,:]-x)),np.argmin(np.abs(Y[:,0]-y))
q_vals.append(Q[y0_idx,x0_idx])
return q_vals
def locs_to_vals_griddata(X,Y,Q,coord_list):
"""Picks values of field Q at desired coordinates.
Args:
X (2D array): grid representing column coordinates of Q
Y (2D array): grid representing row coordinates of Q
Q (2D array): value of Q on grid
coord_list (list):list of tuples (x,y) for desired coordinates
"""
xi,yi = zip(*coord_list)
return griddata((X.flatten(),Y.flatten()),Q.flatten(),(xi,yi))
def locs_to_vals1D(X,Y,Q,coord_list):
"""Picks values of field Q at desired coordinates.
Args:
X (2D array): grid representing column coordinates of Q
Y (2D array): grid representing row coordinates of Q
Q (2D array): value of Q on grid
coord_list (list):list of tuples (x,y) for desired coordinates
"""
q_vals = []
for x,y in coord_list:
idx = ((X-x)**2 + (Y-y)**2).argmin()
q_vals.append(Q[idx])
return q_vals
def get_fieldlines(contourset,level,start_coord=None,end_coord=None,clockwise=True,idx_check=[]):
"""Return coordinates for segments comprising a flux surface (Nx2 array).
Args:
contourset (matplotlib.contour.QuadContourSet instance): i.e.
ax.contour call
level (float): desired contour level
start_coord (tuple): coordinate (x,y) at which to start the field line
end_coord (tuple): coordinate (x,y) at which to end the field line
clockwise (bool): whether to order the field line coordinates clockwise or
counterclockwise
"""
## Find desired flux surface and get its vertices
assert level in list(contourset.levels), "level: {0} not found in contourset".format(level)
idx = list(contourset.levels).index(level)
segs = contourset.allsegs[idx]
len_list = [s.shape[0] for s in segs]
max_idx = len_list.index(max(len_list))
flpoints = parse_segment(segs[max_idx],start_coord=start_coord,end_coord=end_coord,clockwise=clockwise)
# if idx in idx_check:
# fig_pts,ax_pts = plt.subplots()
# fig_B,ax_B = plt.subplots()
# for i,pts in enumerate(segs):
# tmp_pts = parse_segment(pts,start_coord=start_coord,end_coord=end_coord,clockwise=clockwise)
# ax_pts.plot(tmp_pts[:,0],tmp_pts[:,1],"o")
# ax_pts.plot(tmp_pts[0,0],tmp_pts[0,1],"go")
# ax_pts.plot(tmp_pts[-1,0],tmp_pts[-1,1],"ro")
# ax_pts.set_xlim(0,2.50)
# ax_pts.set_ylim(-1.25,1.25)
# ax_pts.set_aspect(1)
# B_interp = interp(Rho,Z,B,tmp_pts)
# s = get_fieldline_distance(tmp_pts)
# ax_B.plot(s,B_interp,"o")
# plt.show()
return flpoints
def parse_segment(flpoints,start_coord=None,end_coord=None,clockwise=True):
if start_coord != None:
i_start = np.argmin(np.array([x0**2+y0**2 for x0,y0 in zip(flpoints[:,0]-start_coord[0],flpoints[:,1]-start_coord[1])]))
flpoints = np.roll(flpoints,-i_start,axis=0)
## Find out if curve is cw or ccw
x = flpoints[:,0]
y = flpoints[:,1]
iscw = np.sum((x[1:]-x[0:-1])*(y[1:]+y[0:-1]))+(x[0]-x[-1])*(y[0]+y[-1]) > 0
if clockwise != iscw:
flpoints = np.roll(flpoints[::-1,:],1,axis=0)
i_end = len(x)-1
if end_coord != None:
i_end = np.argmin(np.array([x0**2+y0**2 for x0,y0 in zip(flpoints[:,0]-end_coord[0],flpoints[:,1]-end_coord[1])]))
if i_end < len(x)-1:
i_end += 1
flpoints = flpoints[0:i_end,:]
return flpoints
def get_fieldline_distance(flpoints):
"""Return cumulative field line distance vector
"""
s = np.zeros(flpoints.shape[0])
x = flpoints[:,0]
y = flpoints[:,1]
s[1:] = np.cumsum(np.sqrt((x[1:]-x[0:-1])**2+(y[1:]-y[0:-1])**2))
return s
def interp(Rho,Z,Q,flpoints):
"""interpolate quantity Q on Rho, Z grid onto flpoints (Nx2 array of x,y pairs).
"""
x0 = Rho[0,:].squeeze()
y0 = Z[:,0].squeeze()
f = RectBivariateSpline(y0,x0,Q)
return np.array([float(f(yi,xi)[0]) for xi,yi in zip(flpoints[:,0],flpoints[:,1])])
def flux_surface_avg(Rho,Z,B,flpoints,Q=None):
"""Compute flux surface average of quantity Q or return dVdpsi (dl_B)
"""
## Interpolate B and quantity Q onto flux line
B_interp = interp(Rho,Z,B,flpoints)
s = get_fieldline_distance(flpoints)
dl_B = scipy.integrate.trapz(y=1.0/B_interp,x=s)
    if Q is not None:
Q_interp = interp(Rho,Z,Q,flpoints)
fsa = 1/dl_B*scipy.integrate.trapz(y=Q_interp/B_interp,x=s)
return fsa
else:
return dl_B
def diff_central(x, y):
x0 = x[:-2]
x1 = x[1:-1]
x2 = x[2:]
y0 = y[:-2]
y1 = y[1:-1]
y2 = y[2:]
f = (x2 - x1)/(x2 - x0)
return (1-f)*(y2 - y1)/(x2 - x1) + f*(y1 - y0)/(x1 - x0)
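# --- Added illustrative sketch (not part of the original module) ---
# diff_central() is a weighted central difference valid on non-uniform
# grids; it returns the derivative at the interior points only (length N-2).
# For y = x**2 the formula is exact, giving 2*x at each interior point.
def _demo_diff_central():
    x = np.array([0.0, 0.1, 0.3, 0.6, 1.0, 1.5])   # deliberately non-uniform
    y = x**2
    dydx = diff_central(x, y)
    return np.allclose(dydx, 2.0*x[1:-1])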
# need to remove datahelpers dependency from this before using
#def get_F(Rho,Z,psi,B,min_rho,max_rho,start_coord=None,end_coord=None,clockwise=True,plotit=False,dfl_tol=.1):
# gamma = 5.0/3.0
# figf,axf = plt.subplots()
# psi_min,psi_edge = locs_to_vals(Rho,Z,psi,[(min_rho,0),(max_rho,0)])
# psi_levels = np.linspace(psi_min,psi_edge,500)
# cff = axf.contour(Rho,Z,psi,tuple(psi_levels))
# dl_B_list = []
# for psi0 in psi_levels:
# if psi0 == 0.0:
# flpoints = get_fieldlines(cff,psi0,start_coord=start_coord,end_coord=end_coord,clockwise=False)
# else:
# flpoints = get_fieldlines(cff,psi0,start_coord=start_coord,end_coord=end_coord,clockwise=clockwise)
# s = get_fieldline_distance(flpoints)
# if np.max(s[1:]-s[0:-1]) > dfl_tol:
# raise ValueError("fieldline distance between successive points greater than dfl_tol: {0}m".format(dfl_tol))
# if plotit:
# x,y = flpoints[:,0],flpoints[:,1]
# axf.plot(x,y,'bo')
# axf.plot(x[0],y[0],'go')
# axf.plot(x[-1],y[-1],'ro')
# dl_B_list.append(flux_surface_avg(Rho,Z,B,flpoints))
# psi_new = psi_levels
# dl_B_new = np.array(dl_B_list)
# dl_B_new = dh.smooth(dl_B_new,5,mode="valid")
# psi_new = dh.smooth(psi_new,5,mode="valid")
# U = UnivariateSpline(psi_new,dl_B_new,k=4,s=0)
# dUdpsi = UnivariateSpline(psi_new[1:-1],diff_central(psi_new,dl_B_new),k=1,s=0,ext="const")
# d2Udpsi2 = UnivariateSpline(psi_new[2:-2],diff_central(psi_new[1:-1],diff_central(psi_new,dl_B_new)),k=1,s=0,ext="const")
# #dUdpsi = UnivariateSpline(psi_new[1:-1],diff_central(psi_new,dl_B_new),k=3,s=0,ext="const")
# #d2Udpsi2 = UnivariateSpline(psi_new[2:-2],diff_central(psi_new[1:-1],diff_central(psi_new,dl_B_new)),k=3,s=0,ext="const")
# term1 = lambda x: gamma*x/U(x)*(dUdpsi(x))**2
# term2 = lambda x: dUdpsi(x) + x*d2Udpsi2(x)
# F = lambda x: term1(x) - term2(x)
# if plotit:
# z0_idx = np.abs(Z[:,0]).argmin()
# end_idx = np.abs(Rho[z0_idx,:]-max_rho).argmin()
# R_of_psi = UnivariateSpline(psi[z0_idx,0:end_idx],Rho[z0_idx,0:end_idx],s=0)
# psi_test = np.linspace(psi_min,psi_edge,1000)
# psi_norm = psi_test/psi_edge
# fig9,(ax9a,ax9b,ax9c) = plt.subplots(3,1,sharex="col")
# ax9a.plot(psi_new,dl_B_new,"o")
# ax9a.plot(psi_test,U(psi_test),"r")
# ax9b.plot(psi_new[1:-1],dUdpsi(psi_new[1:-1]),"o")
# ax9b.plot(psi_test,dUdpsi(psi_test),"r")
# ax9c.plot(psi_new[2:-2],d2Udpsi2(psi_new[2:-2]),"o")
# ax9c.plot(psi_test,d2Udpsi2(psi_test),"r")
# fig0,((ax0,ax1),(ax2,ax3)) = plt.subplots(2,2,sharex="all",figsize=(18,9))
# ax0.plot(psi_new/psi_edge,dl_B_new,"o")
# ax0.plot(psi_norm,U(psi_test),lw=2)
# ax0.set_ylabel("U")
# ax0top = ax0.twiny()
# new_labels = ["{0:1.2f}".format(r) for r in R_of_psi(psi_edge*np.array(ax0.get_xticks()))]
# ax0top.set_xticklabels(new_labels)
# ax0top.set_xlabel("R (m)")
# ax1.plot(psi_norm,dUdpsi(psi_test),'o')
# ax1.set_ylabel("U'")
# ax1top = ax1.twiny()
# ax1top.set_xticklabels(new_labels)
# ax1top.set_xlabel("R (m)")
# ax2.plot(psi_norm,term1(psi_test),'o')
# ax2.plot(psi_norm,term2(psi_test),'o')
# ax2.set_xlabel("$\\psi/\\psi_{lim}$")
# ax2.set_ylabel("Term1 and term2")
# F_clean = dh.smooth(F(psi_test),20)
# ax3.plot(psi_norm,F_clean,lw=2)
# ax3.set_xlabel("$\\psi/\\psi_{lim}$")
# ax3.set_ylabel("F($\\psi$)")
# #ax3.set_ylim(-1.2,1.2)
# plt.tight_layout()
# return F
# Added by Roger some simple check for field lines looping back on them selves
# def get_F_v2(Rho,Z,psi,B,min_rho,max_rho,start_coord=None,end_coord=None,clockwise=True,plotit=False,plotdots=True,thresh=.2,num_psi=500):
# gamma = 5.0/3.0
# figf,axf = plt.subplots()
# psi_min,psi_edge = locs_to_vals(Rho,Z,psi,[(min_rho,0),(max_rho,0)])
# psi_levels = np.linspace(psi_min,psi_edge,num_psi)
# cff = axf.contour(Rho,Z,psi,tuple(psi_levels))
# dl_B_list = []
# for psi0 in psi_levels:
# if psi0 == 0.0:
# flpoints = get_fieldlines(cff,psi0,start_coord=start_coord,end_coord=end_coord,clockwise=False)
# else:
# flpoints = get_fieldlines(cff,psi0,start_coord=start_coord,end_coord=end_coord,clockwise=clockwise)
# if np.abs(flpoints[0][0]-start_coord[0]) > thresh:
# raise ValueError("I think some of these contours start after the separatrix.")
# if plotdots:
# x,y = flpoints[:,0],flpoints[:,1]
# axf.plot(x,y,'bo')
# axf.plot(x[0],y[0],'go')
# axf.plot(x[-1],y[-1],'ro')
# else:
# plt.close()
# dl_B_list.append(flux_surface_avg(Rho,Z,B,flpoints))
# psi_new = psi_levels
# dl_B_new = np.array(dl_B_list)
# dl_B_new = dh.smooth(dl_B_new,5,mode="valid")
# psi_new = dh.smooth(psi_new,5,mode="valid")
# U = UnivariateSpline(psi_new,dl_B_new,k=4,s=0)
# dUdpsi = UnivariateSpline(psi_new[1:-1],diff_central(psi_new,dl_B_new),k=1,s=0,ext="const")
# d2Udpsi2 = UnivariateSpline(psi_new[2:-2],diff_central(psi_new[1:-1],diff_central(psi_new,dl_B_new)),k=1,s=0,ext="const")
# #dUdpsi = UnivariateSpline(psi_new[1:-1],diff_central(psi_new,dl_B_new),k=3,s=0,ext="const")
# #d2Udpsi2 = UnivariateSpline(psi_new[2:-2],diff_central(psi_new[1:-1],diff_central(psi_new,dl_B_new)),k=3,s=0,ext="const")
# term1 = lambda x: gamma*x/U(x)*(dUdpsi(x))**2
# term2 = lambda x: dUdpsi(x) + x*d2Udpsi2(x)
# F = lambda x: term1(x) - term2(x)
# if plotit:
# z0_idx = np.abs(Z[:,0]).argmin()
# end_idx = np.abs(Rho[z0_idx,:]-max_rho).argmin()
# R_of_psi = UnivariateSpline(psi[z0_idx,0:end_idx],Rho[z0_idx,0:end_idx],s=0)
# psi_test = np.linspace(psi_min,psi_edge,1000)
# psi_norm = psi_test/psi_edge
# fig9,(ax9a,ax9b,ax9c) = plt.subplots(3,1,sharex="col")
# ax9a.plot(psi_new,dl_B_new,"o")
# ax9a.plot(psi_test,U(psi_test),"r")
# ax9b.plot(psi_new[1:-1],dUdpsi(psi_new[1:-1]),"o")
# ax9b.plot(psi_test,dUdpsi(psi_test),"r")
# ax9c.plot(psi_new[2:-2],d2Udpsi2(psi_new[2:-2]),"o")
# ax9c.plot(psi_test,d2Udpsi2(psi_test),"r")
# fig0,((ax0,ax1),(ax2,ax3)) = plt.subplots(2,2,sharex="all",figsize=(18,9))
# ax0.plot(psi_new/psi_edge,dl_B_new,"o")
# ax0.plot(psi_norm,U(psi_test),lw=2)
# ax0.set_ylabel("U")
# ax0top = ax0.twiny()
# new_labels = ["{0:1.2f}".format(r) for r in R_of_psi(psi_edge*np.array(ax0.get_xticks()))]
# ax0top.set_xticklabels(new_labels)
# ax0top.set_xlabel("R (m)")
# ax1.plot(psi_norm,dUdpsi(psi_test),'o')
# ax1.set_ylabel("U'")
# ax1top = ax1.twiny()
# ax1top.set_xticklabels(new_labels)
# ax1top.set_xlabel("R (m)")
# ax2.plot(psi_norm,term1(psi_test),'o')
# ax2.plot(psi_norm,term2(psi_test),'o')
# ax2.set_xlabel("$\\psi/\\psi_{lim}$")
# ax2.set_ylabel("Term1 and term2")
# F_clean = dh.smooth(F(psi_test),20)
# ax3.plot(psi_norm,F_clean,lw=2)
# ax3.set_xlabel("$\\psi/\\psi_{lim}$")
# ax3.set_ylabel("F($\\psi$)")
# #ax3.set_ylim(-1.2,1.2)
# plt.tight_layout()
# return F
```
#### File: pleiades/pleiades/fields.py
```python
from abc import ABCMeta, abstractmethod, abstractproperty
from collections.abc import Iterable
from warnings import warn, simplefilter
import math
import numpy as np
from scipy.special import ellipk, ellipe
from multiprocessing import Pool, sharedctypes
from pleiades.mesh import Mesh
import pleiades.checkvalue as cv
from pleiades.transforms import rotate
class FieldsOperator(metaclass=ABCMeta):
"""Mixin class for computing fields on meshes
Parameters
----------
mesh : pleiades.Mesh object, optional
The mesh to use for calculating fields
rank : int (1 or 2)
Indicator of whether the current attribute is a scalar or vector
Variables
---------
current : float or ndarray
Current values in this object
rzw : ndarray or list of ndarray
Nx3 arrays of centroid positions and weights
mesh : pleiades.Mesh object
The mesh to use for calculating fields
"""
def __init__(self, mesh=None, rank=1, **kwargs):
# mesh should accept 2d, 3d or 2 1d or 2 2d)
self._gpsi = None
self._gBR = None
self._gBZ = None
if rank == 1:
self._uptodate = False
self.rank = rank
self.mesh = mesh
@abstractproperty
def current(self):
pass
@abstractproperty
def rzw(self):
pass
@property
def mesh(self):
return self._mesh
@mesh.setter
@cv.flag_greens_on_set
def mesh(self, mesh):
if not isinstance(mesh, Mesh) and mesh is not None:
mesh = Mesh.from_array(mesh)
self._mesh = mesh
def gpsi(self, mesh=None):
"""Compute the Green's function for magnetic flux, :math:`psi`.
Parameters
----------
mesh : ndarray, optional
An Nx2 array of points representing (R, Z) coordinates at which to
calculate the magnetic flux. Defaults to None, in which case the
CurrentFilamentSet.mesh attribute is used.
Returns
-------
gpsi : ndarray
1D array representing the Green's function for flux and whose size
is equal to the number of mesh.
"""
if mesh is None:
if not self._uptodate:
self._compute_greens()
return self._gpsi
return compute_greens(self.rzw, Mesh.to_points(mesh))[0]
def gBR(self, mesh=None):
"""Compute the Green's function for the radial magnetic field, BR
Parameters
----------
mesh : ndarray, optional
An Nx2 array of points representing (R, Z) coordinates at which to
calculate BR. Defaults to None, in which case the
CurrentFilamentSet.mesh attribute is used.
Returns
-------
gBR : ndarray
1D array representing the Green's function for BR and whose size
is equal to the number of mesh.
"""
if mesh is None:
if not self._uptodate:
self._compute_greens()
return self._gBR
return compute_greens(self.rzw, Mesh.to_points(mesh))[1]
def gBZ(self, mesh=None):
"""Compute the Green's function for the vertical magnetic field, BZ
Parameters
----------
mesh : ndarray, optional
An Nx2 array of points representing (R, Z) coordinates at which to
calculate BZ. Defaults to None, in which case the
CurrentFilamentSet.mesh attribute is used.
Returns
-------
gBZ : ndarray
1D array representing the Green's function for BZ and whose size
is equal to the number of mesh.
"""
if mesh is None:
if not self._uptodate:
self._compute_greens()
return self._gBZ
return compute_greens(self.rzw, Mesh.to_points(mesh))[2]
def psi(self, current=None, mesh=None):
"""Compute the magnetic flux, :math:`psi`.
Parameters
----------
current : float, optional
Specify a current value in amps to use instead of
CurrentFilamentSet.current. Defaults to None, in which case the
current attribute is used to calculate the flux.
mesh : ndarray, optional
An Nx2 array of points representing (R, Z) coordinates at which to
calculate the magnetic flux. Defaults to None, in which case the
CurrentFilamentSet.mesh attribute is used.
Returns
-------
psi : ndarray
"""
current = current if current is not None else self.current
if self.rank == 1:
return current*self.gpsi(mesh=mesh)
if self.rank == 2:
return current @ self.gpsi(mesh=mesh)
def BR(self, current=None, mesh=None):
"""Compute the radial component of the magnetic field, BR.
Parameters
----------
current : float, optional
Specify a current value to override the current attribute for
calculating the field. Defaults to None, which causes the current
attribute to be used for the calculation
Returns
-------
BR : np.array
"""
current = current if current is not None else self.current
if self.rank == 1:
return current*self.gBR(mesh=mesh)
if self.rank == 2:
return current @ self.gBR(mesh=mesh)
def BZ(self, current=None, mesh=None):
"""Compute the z component of the magnetic field, BZ.
Parameters
----------
current : float, optional
Specify a current value to override the current attribute for
calculating the field. Defaults to None, which causes the current
attribute to be used for the calculation
Returns
-------
BZ : np.array
"""
current = current if current is not None else self.current
if self.rank == 1:
return current*self.gBZ(mesh=mesh)
if self.rank == 2:
return current @ self.gBZ(mesh=mesh)
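    # Added usage note (illustrative; 'coil' and 'assembly' are hypothetical
    # names): a rank-1 object (a single current set) returns its current times
    # its Green's function, while a rank-2 object (a collection) contracts a
    # current vector with a matrix of Green's functions:
    #   coil.psi()               -> current * gpsi evaluated on coil.mesh
    #   coil.psi(current=100.0)  -> same, overriding the stored current
    #   coil.BR(mesh=my_points)  -> evaluate on an Nx2 array of (R, Z) points
    #   assembly.BZ()            -> current @ gBZ (one matrix row per current set)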
def _compute_greens(self):
"""Compute and assign the Green's functions for psi, BR, and BZ"""
# Calculate Green's functions
if self.rank == 1:
gpsi, gBR, gBZ = compute_greens(self.rzw, Mesh.to_points(self.mesh))
if self.rank == 2:
m = len(self.current)
n = len(self.R.ravel())
gpsi = np.empty((m, n))
gBR = np.empty((m, n))
gBZ = np.empty((m, n))
for i, cset in enumerate(self):
gpsi[i, :] = cset.gpsi().ravel()
gBR[i, :] = cset.gBR().ravel()
gBZ[i, :] = cset.gBZ().ravel()
self._gpsi = gpsi
self._gBR = gBR
self._gBZ = gBZ
# Notify instance that the Green's functions are up to date only if it's
# rank 1. Rank 2 FieldOperators get their status from associated rank 1s
if self.rank == 1:
self._uptodate = True
def compute_greens(rzw, rz_pts):
"""Compute axisymmetric Green's functions for magnetic fields
Parameters
----------
rzw: ndarray or iterable of ndarray
An Nx3 array whose columns are r locations, z locations, and current
weights respectively for the current filaments.
rz_pts: Nx2 np.array
An Nx2 array whose columns are r locations and z locations for the mesh
points where we want to calculate the Green's functions.
Returns
-------
tuple :
3-tuple of 1D np.array representing the Green's function for psi, BR,
and Bz respectively.
"""
if isinstance(rzw, list):
return _compute_greens_2d(rzw, rz_pts)
else:
return _compute_greens_1d(rzw, rz_pts)
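# --- Added illustrative sketch (not part of the original module) ---
# Minimal usage of compute_greens(): one unit-weight filament at
# (R=1 m, Z=0) evaluated at two observation points.  Multiplying the
# returned arrays by a coil current (in amperes) yields psi, BR and BZ.
def _demo_compute_greens():
    rzw = np.array([[1.0, 0.0, 1.0]])
    rz_pts = np.array([[0.5, 0.0],
                       [1.5, 0.5]])
    gpsi, gBR, gBZ = compute_greens(rzw, rz_pts)
    return gpsi.shape == gBR.shape == gBZ.shape == (2,)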
def _compute_greens_1d(rzw, rz_pts):
"""Compute axisymmetric Green's functions for magnetic fields
Parameters
----------
rzw: Nx3 np.array
An Nx3 array whose columns are r locations, z locations, and current
weights respectively for the current filaments.
rz_pts: Nx2 np.array
An Nx2 array whose columns are r locations and z locations for the mesh
points where we want to calculate the Green's functions.
Returns
-------
tuple :
3-tuple of 1D np.array representing the Green's function for psi, BR,
and Bz respectively.
"""
simplefilter('ignore', RuntimeWarning)
# Begin calculation of Green's functions based on vector potential
# psi = R*A_phi from a current loop at r0, z0 on a mesh specified by
# r and z in cylindrical coordinates and with SI units.
r, z = rz_pts[:, 0], rz_pts[:, 1]
n = len(r)
gpsi = np.zeros(n)
gBR = np.zeros(n)
gBZ = np.zeros(n)
r2 = r*r
# Prefactor c1 for vector potential is mu_0/4pi = 1E-7
c1 = 1E-7
for r0, z0, wgt in rzw:
# Check if the coil position is close to 0 if so skip it
if np.isclose(r0, 0, rtol=0, atol=1E-12):
continue
# Compute factors that are reused in equations
fac0 = (z - z0)*(z - z0)
d = np.sqrt(fac0 + (r + r0)*(r + r0))
d_ = np.sqrt(fac0 + (r - r0)*(r - r0))
k_2 = 4*r*r0 / (d*d)
K = ellipk(k_2)
E = ellipe(k_2)
denom = d*d_ *d_
fac1 = K*d_ *d_
fac2 = (fac0 + r2 + r0*r0)*E
# Compute Green's functions for psi, BR, BZ
gpsi_tmp = wgt*c1*r*r0*4 / d / k_2*((2 - k_2)*K - 2*E)
gBR_tmp = -2*wgt*c1*(z - z0)*(fac1 - fac2) / (r*denom)
gBZ_tmp = 2*wgt*c1*(fac1 - (fac2 - 2*r0*r0*E)) / denom
# Correct for infinities and add sum
gpsi_tmp[~np.isfinite(gpsi_tmp)] = 0
gpsi += gpsi_tmp
gBR_tmp[~np.isfinite(gBR_tmp)] = 0
gBR += gBR_tmp
gBZ_tmp[~np.isfinite(gBZ_tmp)] = 0
gBZ += gBZ_tmp
return gpsi, gBR, gBZ
def _compute_greens_2d(rzw_list, rz_pts):
"""Compute axisymmetric Green's functions for magnetic fields
Parameters
----------
rzw: list
A list of Nx3 arrays whose columns are r locations, z locations, and
current weights respectively for the current filaments.
rz_pts: Nx2 np.array
An Nx2 array whose columns are r locations and z locations for the mesh
points where we want to calculate the Green's functions.
Returns
-------
tuple :
3-tuple of 1D np.array representing the Green's function for psi, BR,
and Bz respectively.
"""
simplefilter('ignore', RuntimeWarning)
# Begin calculation of Green's functions based on vector potential
# psi = R*A_phi from a current loop at r0, z0 on a mesh specified by
# r and z in cylindrical coordinates and with SI units.
r, z = rz_pts[:, 0], rz_pts[:, 1]
n = len(r)
m = len(rzw_list)
    gpsi = np.zeros((m, n))
    gBR = np.zeros((m, n))
    gBZ = np.zeros((m, n))
r2 = r*r
# Prefactor c1 for vector potential is mu_0/4pi = 1E-7
c1 = 1E-7
for i in range(m):
for r0, z0, wgt in rzw_list[i]:
# Check if the coil position is close to 0 if so skip it
if np.isclose(r0, 0, rtol=0, atol=1E-12):
continue
# Compute factors that are reused in equations
fac0 = (z - z0)*(z - z0)
d = np.sqrt(fac0 + (r + r0)*(r + r0))
d_ = np.sqrt(fac0 + (r - r0)*(r - r0))
k_2 = 4*r*r0 / (d*d)
K = ellipk(k_2)
E = ellipe(k_2)
denom = d*d_ *d_
fac1 = K*d_ *d_
fac2 = (fac0 + r2 + r0*r0)*E
# Compute Green's functions for psi, BR, BZ
gpsi_tmp = wgt*c1*r*r0*4 / d / k_2*((2 - k_2)*K - 2*E)
gBR_tmp = -2*wgt*c1*(z - z0)*(fac1 - fac2) / (r*denom)
gBZ_tmp = 2*wgt*c1*(fac1 - (fac2 - 2*r0*r0*E)) / denom
# Correct for infinities and add sum
gpsi_tmp[~np.isfinite(gpsi_tmp)] = 0
gpsi[i, :] += gpsi_tmp
gBR_tmp[~np.isfinite(gBR_tmp)] = 0
gBR[i, :] += gBR_tmp
gBZ_tmp[~np.isfinite(gBZ_tmp)] = 0
gBZ[i, :] += gBZ_tmp
return gpsi, gBR, gBZ
def _compute_greens_mp(rzw, rz_pts):
# Multiprocessing version
size = rz_pts.shape[0]
block_size = 100000
r, z = rz_pts[:,0], rz_pts[:, 1]
r2 = r*r
result = np.ctypeslib.as_ctypes(np.zeros((3, size)))
shared_array = sharedctypes.RawArray(result._type, result)
def fill_per_window(window_y):
tmp = np.ctypeslib.as_array(shared_array)
simplefilter('ignore', RuntimeWarning)
# Prefactor c1 for vector potential is mu_0/4pi = 1E-7
c1 = 1E-7
for idx_y in range(window_y, window_y + block_size):
for r0, z0, wgt in rzw:
# Check if the coil position is close to 0 if so skip it
if np.isclose(r0, 0, rtol=0, atol=1E-12):
continue
# Compute factors that are reused in equations
fac0 = (z - z0)*(z - z0)
d = np.sqrt(fac0 + (r + r0)*(r + r0))
d_ = np.sqrt(fac0 + (r - r0)*(r - r0))
k_2 = 4*r*r0 / (d*d)
K = ellipk(k_2)
E = ellipe(k_2)
denom = d*d_ *d_
fac1 = K*d_ *d_
fac2 = (fac0 + r2 + r0*r0)*E
# Compute Green's functions for psi, BR, BZ
gpsi_tmp = wgt*c1*r*r0*4 / d / k_2*((2 - k_2)*K - 2*E)
gBR_tmp = -2*wgt*c1*(z - z0)*(fac1 - fac2) / (r*denom)
gBZ_tmp = 2*wgt*c1*(fac1 - (fac2 - 2*r0*r0*E)) / denom
gpsi_tmp[~np.isfinite(gpsi_tmp)] = 0
gBR_tmp[~np.isfinite(gBR_tmp)] = 0
gBZ_tmp[~np.isfinite(gBZ_tmp)] = 0
tmp[0, idx_y] += gpsi_tmp
tmp[1, idx_y] += gBR_tmp
tmp[2, idx_y] += gBZ_tmp
window_idxs = [(i, j) for i, j in
zip(range(0, size, block_size),
range(block_size, size + block_size, block_size))]
p = Pool()
res = p.map(fill_per_window, window_idxs)
result = np.ctypeslib.as_array(shared_array)
return result[0, :], result[1, :], result[2, :]
```
#### File: pleiades/pleiades/transforms.py
```python
import math
import numpy as np
def rotate(pts, angle, pivot=(0., 0.)):
pivot = np.asarray(pivot)
angle = math.pi*angle/180
c, s = np.cos(angle), np.sin(angle)
rotation = np.array([[c, -s], [s, c]])
return (np.asarray(pts) - pivot) @ rotation + pivot
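# --- Added illustrative sketch (not part of the original module) ---
# rotate() works on row-vector point arrays: points are right-multiplied by
# the rotation matrix, so with the convention used here a positive angle of
# 90 degrees about the origin maps (1, 0) to (0, -1).
def _demo_rotate():
    pts = np.array([[1.0, 0.0]])
    out = rotate(pts, 90.0)
    return np.allclose(out, [[0.0, -1.0]])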
```
|
{
"source": "jcydlxc/czsc",
"score": 2
}
|
#### File: czsc/czsc/factors.py
```python
from collections import OrderedDict
from pyecharts.charts import Tab
from pyecharts.components import Table
from pyecharts.options import ComponentTitleOpts
from .signals import KlineSignals
from .utils.kline_generator import KlineGeneratorBy1Min, KlineGeneratorByTick
from .utils.plot import ka_to_echarts
class KlineFactors:
"""缠中说禅技术分析理论之多级别联立因子"""
freqs = ['1分钟', '5分钟', '30分钟', '日线']
def __init__(self, kg: [KlineGeneratorByTick, KlineGeneratorBy1Min], bi_mode="new", max_count=1000):
"""
        :param kg: K-line generator built from tick data or 1-minute bars
        :param bi_mode: bi (stroke) construction mode; "new" uses the new-style bi, "old" the old-style bi
        :param max_count: maximum number of K-lines kept per timeframe
"""
        assert max_count >= 1000, "max_count must not be set below 1000, otherwise the factors cannot be computed reliably"
self.kg = kg
klines = self.kg.get_klines({k: max_count for k in self.freqs})
self.kas = {k: KlineSignals(klines[k], name=k, bi_mode=bi_mode, max_count=max_count,
use_xd=False, use_ta=False) for k in klines.keys()}
self.symbol = self.kas["1分钟"].symbol
self.end_dt = self.kas["1分钟"].end_dt
self.latest_price = self.kas["1分钟"].latest_price
self.s = self._calculate_factors()
self.cache = OrderedDict()
def take_snapshot(self, file_html=None, width="1400px", height="580px"):
"""获取快照
:param file_html: str
交易快照保存的 html 文件名
:param width: str
图表宽度
:param height: str
图表高度
:return:
"""
tab = Tab(page_title="{}@{}".format(self.symbol, self.end_dt.strftime("%Y-%m-%d %H:%M")))
for freq in self.freqs:
chart = ka_to_echarts(self.kas[freq], width, height)
tab.add(chart, freq)
t1 = Table()
t1.add(["名称", "数据"], [[k, v] for k, v in self.s.items() if "_" in k])
t1.set_global_opts(title_opts=ComponentTitleOpts(title="缠中说禅信号表", subtitle=""))
tab.add(t1, "信号表")
t2 = Table()
t2.add(["名称", "数据"], [[k, v] for k, v in self.s.items() if "_" not in k])
t2.set_global_opts(title_opts=ComponentTitleOpts(title="缠中说禅因子表", subtitle=""))
tab.add(t2, "因子表")
if file_html:
tab.render(file_html)
else:
return tab
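    # Usage sketch (illustrative, not part of the original file): given a factors
    # object `kf` built from a KlineGeneratorBy1Min, a snapshot can be written to
    # disk or returned as a pyecharts Tab for inline rendering:
    #
    #   kf.take_snapshot("snapshot.html")   # renders the four frequency charts plus the two tables
    #   tab = kf.take_snapshot()            # no file name -> returns the Tab object instead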
def _calculate_signals(self):
"""计算信号"""
s = OrderedDict(self.kas['1分钟'].kline_raw[-1])
for freq, ks in self.kas.items():
if freq in ["日线", '30分钟', '15分钟', '5分钟', '1分钟']:
s.update(ks.get_signals())
return s
def _calculate_factors(self):
"""计算因子"""
s = self._calculate_signals()
if "5分钟" in self.freqs and "1分钟" in self.freqs:
s.update({
"1分钟最近三根K线站稳5分钟第N笔上沿": False,
"1分钟最近三根K线跌破5分钟第N笔下沿": False,
"5分钟笔多头右侧开仓A": False,
"5分钟笔多头右侧开仓B": False,
"5分钟笔多头右侧开仓C": False,
"5分钟笔多头右侧开仓D": False,
"5分钟笔多头左侧平仓A": False,
"5分钟笔多头左侧平仓B": False,
"5分钟笔多头右侧平仓A": False,
"5分钟笔多头右侧平仓B": False,
"5分钟笔多头右侧平仓C": False,
})
if sum([x['low'] > s['5分钟_第N笔结束标记的上边沿'] for x in self.kas['1分钟'].kline_raw[-3:]]) == 3:
s['1分钟最近三根K线站稳5分钟第N笔上沿'] = True
if sum([x['high'] < s['5分钟_第N笔结束标记的下边沿'] for x in self.kas['1分钟'].kline_raw[-3:]]) == 3:
s['1分钟最近三根K线跌破5分钟第N笔下沿'] = True
            # Bi long open signals ABCD
long_open_right_a = s['1分钟最近三根K线站稳5分钟第N笔上沿'] or s['5分钟_第N笔结束标记的分型强弱'] == 'strong'
long_open_right_b = s['1分钟_当下笔多头两重有效阻击'] or s['1分钟_当下笔多头三重有效阻击']
long_open_must = (not s['5分钟_第N笔向下发生破坏']) and s['dt'].minute % 1 == 0
if long_open_must:
if s['5分钟_当下笔多头两重有效阻击']:
if long_open_right_a:
s['5分钟笔多头右侧开仓A'] = True
if long_open_right_b:
s['5分钟笔多头右侧开仓B'] = True
if s['5分钟_当下笔多头三重有效阻击']:
if long_open_right_a:
s['5分钟笔多头右侧开仓C'] = True
if long_open_right_b:
s['5分钟笔多头右侧开仓D'] = True
            # Bi long close signals ABCD
long_close_left_a = (s['1分钟_第N笔出井'] == '向上大井' or s['1分钟_五笔趋势类背驰'] == 'up') \
and s['1分钟_第N笔结束标记的分型强弱'] == 'strong'
long_close_left_b = s['1分钟_第N笔涨跌力度'] == '向上笔新高盘背' and s['1分钟_第N笔结束标记的分型强弱'] == 'strong'
long_close_right_a = s['1分钟最近三根K线跌破5分钟第N笔下沿'] and s['5分钟_第N笔结束标记的分型强弱'] == 'strong'
long_close_right_b = s['1分钟_第N笔结束标记的上边沿'] < s['5分钟_第N笔结束标记的下边沿'] and "向上" in s['1分钟_第N笔涨跌力度']
long_close_right_c = s['1分钟_当下笔空头两重有效阻击'] or s['1分钟_当下笔空头三重有效阻击']
long_close_must = (not s['5分钟_第N笔向上发生破坏']) and s['dt'].minute % 5 == 0
if long_close_must:
if s['5分钟_第N笔涨跌力度'] in ['向上笔不创新高', "向上笔新高盘背"]:
if long_close_left_a:
s['5分钟笔多头左侧平仓A'] = True
if long_close_left_b:
s['5分钟笔多头左侧平仓B'] = True
if long_close_right_a:
s['5分钟笔多头右侧平仓A'] = True
if long_close_right_b:
s['5分钟笔多头右侧平仓B'] = True
if long_close_right_c:
s['5分钟笔多头右侧平仓C'] = True
if "30分钟" in self.freqs and "5分钟" in self.freqs and "1分钟" in self.freqs:
s.update({
"5分钟最近三根K线站稳30分钟第N笔上沿": False,
"5分钟最近三根K线跌破30分钟第N笔下沿": False,
"30分钟笔多头右侧开仓A": False,
"30分钟笔多头右侧开仓B": False,
"30分钟笔多头右侧开仓C": False,
"30分钟笔多头右侧开仓D": False,
"30分钟笔多头左侧平仓A": False,
"30分钟笔多头左侧平仓B": False,
"30分钟笔多头右侧平仓A": False,
"30分钟笔多头右侧平仓B": False,
"30分钟笔多头右侧平仓C": False,
})
if sum([x['low'] > s['30分钟_第N笔结束标记的上边沿'] for x in self.kas['5分钟'].kline_raw[-3:]]) == 3:
s['5分钟最近三根K线站稳30分钟第N笔上沿'] = True
if sum([x['high'] < s['30分钟_第N笔结束标记的下边沿'] for x in self.kas['5分钟'].kline_raw[-3:]]) == 3:
s['5分钟最近三根K线跌破30分钟第N笔下沿'] = True
            # Bi long open signals ABCD
long_open_right_a = s['5分钟最近三根K线站稳30分钟第N笔上沿'] or s['30分钟_第N笔结束标记的分型强弱'] == 'strong'
long_open_right_b = s['5分钟_当下笔多头两重有效阻击'] or s['5分钟_当下笔多头三重有效阻击']
long_open_must = (not s['30分钟_第N笔向下发生破坏']) and s['dt'].minute % 5 == 0
if long_open_must:
if s['30分钟_当下笔多头两重有效阻击']:
if long_open_right_a:
s['30分钟笔多头右侧开仓A'] = True
if long_open_right_b:
s['30分钟笔多头右侧开仓B'] = True
if s['30分钟_当下笔多头三重有效阻击']:
if long_open_right_a:
s['30分钟笔多头右侧开仓C'] = True
if long_open_right_b:
s['30分钟笔多头右侧开仓D'] = True
            # Bi long close signals ABCD
long_close_left_a = (s['5分钟_第N笔出井'] == '向上大井' or s['5分钟_五笔趋势类背驰'] == 'up') \
and s['5分钟_第N笔结束标记的分型强弱'] == 'strong'
long_close_left_b = s['5分钟_第N笔涨跌力度'] == '向上笔新高盘背' and s['5分钟_第N笔结束标记的分型强弱'] == 'strong'
long_close_right_a = s['5分钟最近三根K线跌破30分钟第N笔下沿'] and s['30分钟_第N笔结束标记的分型强弱'] == 'strong'
long_close_right_b = s['5分钟_第N笔结束标记的上边沿'] < s['30分钟_第N笔结束标记的下边沿'] \
and "向上" in s['5分钟_第N笔涨跌力度']
long_close_right_c = s['5分钟_当下笔空头两重有效阻击'] or s['5分钟_当下笔空头三重有效阻击']
long_close_must = (not s['30分钟_第N笔向上发生破坏']) and s['dt'].minute % 5 == 0
if long_close_must:
if s['30分钟_第N笔涨跌力度'] in ['向上笔不创新高', "向上笔新高盘背"]:
if long_close_left_a:
s['30分钟笔多头左侧平仓A'] = True
if long_close_left_b:
s['30分钟笔多头左侧平仓B'] = True
if long_close_right_a:
s['30分钟笔多头右侧平仓A'] = True
if long_close_right_b:
s['30分钟笔多头右侧平仓B'] = True
if long_close_right_c:
s['30分钟笔多头右侧平仓C'] = True
if "日线" in self.freqs and "30分钟" in self.freqs and "5分钟" in self.freqs:
s.update({
"30分钟最近三根K线站稳日线第N笔上沿": False,
"30分钟最近三根K线跌破日线第N笔下沿": False,
"日线笔多头右侧开仓A": False,
"日线笔多头右侧开仓B": False,
"日线笔多头右侧开仓C": False,
"日线笔多头右侧开仓D": False,
"日线笔多头左侧平仓A": False,
"日线笔多头左侧平仓B": False,
"日线笔多头右侧平仓A": False,
"日线笔多头右侧平仓B": False,
"日线笔多头右侧平仓C": False,
})
if sum([x['low'] > s['日线_第N笔结束标记的上边沿'] for x in self.kas['30分钟'].kline_raw[-3:]]) == 3 \
and "向下" in s['日线_第N笔涨跌力度']:
s['30分钟最近三根K线站稳日线第N笔上沿'] = True
if sum([x['high'] < s['日线_第N笔结束标记的下边沿'] for x in self.kas['30分钟'].kline_raw[-3:]]) == 3 \
and "向上" in s['日线_第N笔涨跌力度']:
s['30分钟最近三根K线跌破日线第N笔下沿'] = True
            # Bi long open signals ABCD
long_open_right_a = s['日线_第N笔结束标记的分型强弱'] == 'strong' and s['30分钟最近三根K线站稳日线第N笔上沿']
long_open_right_b = s['5分钟_第N笔出井'] == '向下大井' \
or ("向下小井" in s['5分钟_第N笔出井'] and "向下小井" in s['5分钟_第N-2笔出井']) \
or ((s['5分钟_当下笔多头两重有效阻击'] or s['5分钟_当下笔多头三重有效阻击'])
and s['5分钟_第N笔涨跌力度'] == "向下笔新低盘背")
long_open_right_c = s['日线_最近一个分型类型'] == 'd' \
and (s['5分钟_当下笔多头三重有效阻击'] or s['5分钟_当下笔多头两重有效阻击']) \
and s['5分钟_第N笔涨跌力度'] == "向下笔不创新低"
long_open_right_d = s['5分钟_最近两个笔中枢状态'] == '向下' \
and (s['5分钟_当下笔多头三重有效阻击'] or s['5分钟_当下笔多头两重有效阻击'])
long_open_must = (not s['日线_第N笔向下发生破坏']) \
and (s['日线_当下笔多头两重有效阻击'] or s['日线_当下笔多头三重有效阻击'])
if long_open_must:
if long_open_right_a:
s['日线笔多头右侧开仓A'] = True
if long_open_right_b:
s['日线笔多头右侧开仓B'] = True
if long_open_right_c:
s['日线笔多头右侧开仓C'] = True
if long_open_right_d:
s['日线笔多头右侧开仓D'] = True
            # Bi long close signals ABCD
long_close_left_a = (s['30分钟_第N笔出井'] == '向上大井' or s['30分钟_五笔趋势类背驰'] == 'up') \
and s['30分钟_第N笔结束标记的分型强弱'] == 'strong'
long_close_left_b = s['30分钟_第N笔涨跌力度'] == '向上笔新高盘背' and s['30分钟_第N笔结束标记的分型强弱'] == 'strong'
long_close_right_a = s['30分钟最近三根K线跌破日线第N笔下沿'] and s['日线_第N笔结束标记的分型强弱'] == 'strong'
long_close_right_b = s['30分钟_第N笔结束标记的上边沿'] < s['日线_第N笔结束标记的下边沿'] and "向上" in s['30分钟_第N笔涨跌力度']
long_close_right_c = s['30分钟_当下笔空头两重有效阻击'] or s['30分钟_当下笔空头三重有效阻击']
long_close_must = (not s['日线_第N笔向上发生破坏']) and s['dt'].minute % 30 == 0
if long_close_must:
if s['日线_第N笔涨跌力度'] in ['向上笔不创新高', "向上笔新高盘背"]:
if long_close_left_a:
s['日线笔多头左侧平仓A'] = True
if long_close_left_b:
s['日线笔多头左侧平仓B'] = True
if long_close_right_a:
s['日线笔多头右侧平仓A'] = True
if long_close_right_b:
s['日线笔多头右侧平仓B'] = True
if long_close_right_c:
s['日线笔多头右侧平仓C'] = True
return s
def update_factors(self, data):
"""更新多级别联立因子"""
for row in data:
self.kg.update(row)
klines_one = self.kg.get_klines({k: 1 for k in self.freqs})
for freq, klines_ in klines_one.items():
k = klines_[-1]
self.kas[freq].update(k)
self.symbol = self.kas["1分钟"].symbol
self.end_dt = self.kas["1分钟"].end_dt
self.latest_price = self.kas["1分钟"].latest_price
self.s = self._calculate_factors()
```
|
{
"source": "jcyk/greedyCWS",
"score": 2
}
|
#### File: greedyCWS/src/model.py
```python
import random,time,os
from collections import Counter, namedtuple
import numpy as np
import dynet as dy
from tools import initCemb, prepareData
from test import test
np.random.seed(970)
Sentence = namedtuple('Sentence',['score','score_expr','LSTMState','y','prevState','wlen','golden'])
class CWS (object):
def __init__(self,Cemb,character_idx_map,options):
model = dy.Model()
self.trainer = dy.MomentumSGDTrainer(model,options['lr'],options['momentum'],options['edecay']) # we use Momentum SGD
self.params = self.initParams(model,Cemb,options)
self.options = options
self.model = model
self.character_idx_map = character_idx_map
self.known_words = None
def load(self,filename):
self.model.load(filename)
def save(self,filename):
self.model.save(filename)
def use_word_embed(self,known_words):
self.known_words = known_words
self.params['word_embed'] = self.model.add_lookup_parameters((len(known_words),self.options['word_dims']))
def initParams(self,model,Cemb,options):
# initialize the model parameters
params = dict()
params['embed'] = model.add_lookup_parameters(Cemb.shape)
for row_num,vec in enumerate(Cemb):
params['embed'].init_row(row_num, vec)
params['lstm'] = dy.LSTMBuilder(1,options['word_dims'],options['nhiddens'],model)
params['reset_gate_W'] = []
params['reset_gate_b'] = []
params['com_W'] = []
params['com_b'] = []
params['word_score_U'] = model.add_parameters(options['word_dims'])
params['predict_W'] = model.add_parameters((options['word_dims'],options['nhiddens']))
params['predict_b'] = model.add_parameters(options['word_dims'])
for wlen in xrange(1,options['max_word_len']+1):
params['reset_gate_W'].append(model.add_parameters((wlen*options['char_dims'],wlen*options['char_dims'])))
params['reset_gate_b'].append(model.add_parameters(wlen*options['char_dims']))
params['com_W'].append(model.add_parameters((options['word_dims'],wlen*options['char_dims'])))
params['com_b'].append(model.add_parameters(options['word_dims']))
params['<BoS>'] = model.add_parameters(options['word_dims'])
return params
def renew_cg(self):
# renew the compute graph for every single instance
dy.renew_cg()
param_exprs = dict()
param_exprs['U'] = dy.parameter(self.params['word_score_U'])
param_exprs['pW'] = dy.parameter(self.params['predict_W'])
param_exprs['pb'] = dy.parameter(self.params['predict_b'])
param_exprs['<bos>'] = dy.parameter(self.params['<BoS>'])
self.param_exprs = param_exprs
def word_repr(self, char_seq, cembs):
# obtain the word representation when given its character sequence
wlen = len(char_seq)
if 'rgW%d'%wlen not in self.param_exprs:
self.param_exprs['rgW%d'%wlen] = dy.parameter(self.params['reset_gate_W'][wlen-1])
self.param_exprs['rgb%d'%wlen] = dy.parameter(self.params['reset_gate_b'][wlen-1])
self.param_exprs['cW%d'%wlen] = dy.parameter(self.params['com_W'][wlen-1])
self.param_exprs['cb%d'%wlen] = dy.parameter(self.params['com_b'][wlen-1])
chars = dy.concatenate(cembs)
reset_gate = dy.logistic(self.param_exprs['rgW%d'%wlen] * chars + self.param_exprs['rgb%d'%wlen])
word = dy.tanh(self.param_exprs['cW%d'%wlen] * dy.cmult(reset_gate,chars) + self.param_exprs['cb%d'%wlen])
if self.known_words is not None and tuple(char_seq) in self.known_words:
return (word + dy.lookup(self.params['word_embed'],self.known_words[tuple(char_seq)]))/2.
return word
def greedy_search(self, char_seq, truth = None, mu =0.):
init_state = self.params['lstm'].initial_state().add_input(self.param_exprs['<bos>'])
init_y = dy.tanh(self.param_exprs['pW'] * init_state.output() + self.param_exprs['pb'])
init_score = dy.scalarInput(0.)
init_sentence = Sentence(score=init_score.scalar_value(),score_expr=init_score,LSTMState =init_state, y= init_y , prevState = None, wlen=None, golden=True)
if truth is not None:
cembs = [ dy.dropout(dy.lookup(self.params['embed'],char),self.options['dropout_rate']) for char in char_seq ]
else:
cembs = [dy.lookup(self.params['embed'],char) for char in char_seq ]
start_agenda = init_sentence
agenda = [start_agenda]
for idx, _ in enumerate(char_seq,1): # from left to right, character by character
now = None
for wlen in xrange(1,min(idx,self.options['max_word_len'])+1): # generate word candidate vectors
# join segmentation sent + word
word = self.word_repr(char_seq[idx-wlen:idx], cembs[idx-wlen:idx])
sent = agenda[idx-wlen]
if truth is not None:
word = dy.dropout(word,self.options['dropout_rate'])
word_score = dy.dot_product(word,self.param_exprs['U'])
if truth is not None:
golden = sent.golden and truth[idx-1]==wlen
margin = dy.scalarInput(mu*wlen if truth[idx-1]!=wlen else 0.)
score = margin + sent.score_expr + dy.dot_product(sent.y, word) + word_score
else:
golden = False
score = sent.score_expr + dy.dot_product(sent.y, word) + word_score
good = (now is None or now.score < score.scalar_value())
if golden or good:
new_state = sent.LSTMState.add_input(word)
new_y = dy.tanh(self.param_exprs['pW'] * new_state.output() + self.param_exprs['pb'])
new_sent = Sentence(score=score.scalar_value(),score_expr=score,LSTMState=new_state,y=new_y, prevState=sent, wlen=wlen, golden=golden)
if good:
now = new_sent
if golden:
golden_sent = new_sent
agenda.append(now)
if truth is not None and truth[idx-1]>0 and (not now.golden):
return (now.score_expr - golden_sent.score_expr)
if truth is not None:
return (now.score_expr - golden_sent.score_expr)
return agenda
def forward(self, char_seq):
self.renew_cg()
agenda = self.greedy_search(char_seq)
now = agenda[-1]
ans = []
while now.prevState is not None:
ans.append(now.wlen)
now = now.prevState
return reversed(ans)
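    # Example of how the returned word lengths encode a segmentation (illustrative):
    # for an input of 5 characters c0..c4, forward() returning [2, 1, 2] means the
    # sentence is segmented as (c0 c1) (c2) (c3 c4).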
def backward(self, char_seq, truth):
self.renew_cg()
loss = self.greedy_search(char_seq,truth,self.options['margin_loss_discount'])
res = loss.scalar_value()
loss.backward()
return res
def dy_train_model(
max_epochs = 30,
batch_size = 256,
char_dims = 50,
word_dims = 100,
nhiddens = 50,
dropout_rate = 0.2,
margin_loss_discount = 0.2,
max_word_len = 4,
load_params = None,
max_sent_len = 60,
shuffle_data = True,
train_file = '../data/train',
dev_file = '../data/dev',
lr = 0.5,
edecay = 0.1,
momentum = 0.5,
pre_trained = '../w2v/char_vecs_100',
word_proportion = 0.5
):
options = locals().copy()
print 'Model options:'
for kk,vv in options.iteritems():
print '\t',kk,'\t',vv
Cemb, character_idx_map = initCemb(char_dims,train_file,pre_trained)
cws = CWS(Cemb,character_idx_map,options)
if load_params is not None:
cws.load(load_params)
test(cws, dev_file, 'result')
return
char_seq, _ , truth = prepareData(character_idx_map,train_file)
if max_sent_len is not None:
survived = []
for idx,seq in enumerate(char_seq):
if len(seq)<=max_sent_len and len(seq)>1:
survived.append(idx)
char_seq = [ char_seq[idx] for idx in survived]
truth = [ truth[idx] for idx in survived]
if word_proportion > 0:
word_counter = Counter()
for chars,labels in zip(char_seq,truth):
word_counter.update(tuple(chars[idx-label:idx]) for idx,label in enumerate(labels,1))
known_word_count = int(word_proportion*len(word_counter))
known_words = dict(word_counter.most_common()[:known_word_count])
idx = 0
for word in known_words:
known_words[word] = idx
idx+=1
cws.use_word_embed(known_words)
n = len(char_seq)
print 'Total number of training instances:',n
print 'Start training model'
start_time = time.time()
nsamples = 0
for eidx in xrange(max_epochs):
idx_list = range(n)
if shuffle_data:
np.random.shuffle(idx_list)
for idx in idx_list:
loss = cws.backward(char_seq[idx],truth[idx])
if np.isnan(loss):
                print 'something went wrong, loss is nan.'
return
nsamples += 1
if nsamples % batch_size == 0:
cws.trainer.update(1.)
cws.trainer.update_epoch(1.)
end_time = time.time()
print 'Trained %s epoch(s) (%d samples) took %.lfs per epoch'%(eidx+1,nsamples,(end_time-start_time)/(eidx+1))
test(cws,dev_file,'../result/dev_result%d'%(eidx+1))
os.system('python score.py %s %d %d'%(dev_file,eidx+1,eidx+1))
cws.save('epoch%d'%(eidx+1))
print 'Current model saved'
```
|
{
"source": "jcyongqin/MerryChristmas2016",
"score": 4
}
|
#### File: MerryChristmas2016/APP/__init__.py
```python
print('<NAME>mas!!!')
import sys
#
# int main(int argc, char* argv[]) {
# int n = argc > 1 ? atoi(argv[1]) : 4;
# for (int j = 1; j <= n; j++) {
# int s = 1 << j, k = (1 << n) - s, x;
# for (int y = s - j; y >= 0; y--, putchar('\n')) {
# for (x = 0; x < y + k; x++) printf(" ");
# for (x = 0; x + y < s; x++) printf("%c ", '!' ^ y & x);
# for (x = 1; x + y < s; x++) printf("%c ", '!' ^ y & (s - y - x - 1));
# }
# }
# }
def main(*args):
# """上面的是我尝试尽量用最少代码来画一个抽象一点的圣诞树,因此树干都没有."""
if args.__len__() > 1:
n = args[1]
else:
n = 4
for j in range(n):
s = 1 << j
k = (1 << n) - s
x = 0
for y in range(s - j)[::-1]:
for x in range(y + k):
print(" ", end="")
for x in range(s - y):
print("%s " % chr(ord('!') ^ y & x), end="")
for x in range(1, s - y + 1):
print("%s " % chr(ord('!') ^ y & (s - y - x - 1)), end="")
print("")
if __name__ == "__main__":
    main(*sys.argv)
```
|
{
"source": "jcyrss/hytest",
"score": 3
}
|
#### File: src/utils/signal.py
```python
class Signal:
_clients = []
_curMethodName = None
def register(self, client):
if isinstance(client,list):
self._clients += client
else:
self._clients.append(client)
def _broadcast(self,*arg,**kargs):
for logger in self._clients:
method = getattr(logger,self._curMethodName,None)
if method:
method(*arg,**kargs)
def __getattr__(self, attr):
self._curMethodName = attr
return self._broadcast
signal = Signal()
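# Minimal usage sketch (the logger class below is hypothetical, not part of this file):
#
#   class ConsoleLogger:
#       def case_pass(self, name):
#           print('PASS', name)
#
#   signal.register(ConsoleLogger())
#   signal.case_pass('login-test')   # broadcast to every registered client that defines case_pass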
```
|
{
"source": "jczaja/Paddle",
"score": 2
}
|
#### File: cluster/vgg16/vgg16_v2.py
```python
import gzip
import paddle.v2.dataset.cifar as cifar
import paddle.v2 as paddle
import time
import os
DATA_DIM = 3 * 32 * 32
CLASS_DIM = 10
BATCH_SIZE = os.getenv("BATCH_SIZE")
if BATCH_SIZE:
BATCH_SIZE = int(BATCH_SIZE)
else:
BATCH_SIZE = 128
print "batch_size", BATCH_SIZE
NODE_COUNT = int(os.getenv("TRAINERS"))
ts = 0
def vgg(input, nums, class_dim):
def conv_block(input, num_filter, groups, num_channels=None):
return paddle.networks.img_conv_group(
input=input,
num_channels=num_channels,
pool_size=2,
pool_stride=2,
conv_num_filter=[num_filter] * groups,
conv_filter_size=3,
conv_act=paddle.activation.Relu(),
pool_type=paddle.pooling.Max())
assert len(nums) == 5
# the channel of input feature is 3
conv1 = conv_block(input, 64, nums[0], 3)
conv2 = conv_block(conv1, 128, nums[1])
conv3 = conv_block(conv2, 256, nums[2])
conv4 = conv_block(conv3, 512, nums[3])
conv5 = conv_block(conv4, 512, nums[4])
fc_dim = 512
fc1 = paddle.layer.fc(input=conv5,
size=fc_dim,
act=paddle.activation.Relu(),
layer_attr=paddle.attr.Extra(drop_rate=0.5))
fc2 = paddle.layer.fc(input=fc1,
size=fc_dim,
act=paddle.activation.Relu(),
layer_attr=paddle.attr.Extra(drop_rate=0.5))
out = paddle.layer.fc(input=fc2,
size=class_dim,
act=paddle.activation.Softmax())
return out
def vgg13(input, class_dim):
nums = [2, 2, 2, 2, 2]
return vgg(input, nums, class_dim)
def vgg16(input, class_dim):
nums = [2, 2, 3, 3, 3]
return vgg(input, nums, class_dim)
def vgg19(input, class_dim):
nums = [2, 2, 4, 4, 4]
return vgg(input, nums, class_dim)
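# The `nums` list gives the number of stacked 3x3 conv layers in each of the five
# conv blocks. For example, vgg16 uses [2, 2, 3, 3, 3]: 13 conv layers plus the
# three fully connected layers built in vgg() give the 16 weight layers of VGG-16.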
def main():
global ts
paddle.init(use_gpu=False)
image = paddle.layer.data(
name="image", type=paddle.data_type.dense_vector(DATA_DIM))
lbl = paddle.layer.data(
name="label", type=paddle.data_type.integer_value(CLASS_DIM))
extra_layers = None
# NOTE: for v2 distributed training need averaging updates.
learning_rate = 1e-3 / NODE_COUNT
out = vgg16(image, class_dim=CLASS_DIM)
cost = paddle.layer.classification_cost(input=out, label=lbl)
# Create parameters
parameters = paddle.parameters.create(cost)
# Create optimizer
optimizer = paddle.optimizer.Momentum(
momentum=0.9,
regularization=paddle.optimizer.L2Regularization(rate=0.0005 *
BATCH_SIZE),
learning_rate=learning_rate / BATCH_SIZE,
learning_rate_decay_a=0.1,
learning_rate_decay_b=128000 * 35,
learning_rate_schedule="discexp", )
train_reader = paddle.batch(
paddle.reader.shuffle(
cifar.train10(),
# To use other data, replace the above line with:
# reader.train_reader('train.list'),
buf_size=1000),
batch_size=BATCH_SIZE)
test_reader = paddle.batch(
cifar.test10(),
# To use other data, replace the above line with:
# reader.test_reader('val.list'),
batch_size=BATCH_SIZE)
# Create trainer
trainer = paddle.trainer.SGD(cost=cost,
parameters=parameters,
update_equation=optimizer,
extra_layers=extra_layers,
is_local=False)
# End batch and end pass event handler
def event_handler(event):
global ts, ts_pass
if isinstance(event, paddle.event.BeginPass):
ts_pass = time.time()
if isinstance(event, paddle.event.BeginIteration):
ts = time.time()
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 1 == 0:
print "\nPass %d, Batch %d, Cost %f, %s, spent: %f" % (
event.pass_id, event.batch_id, event.cost, event.metrics,
time.time() - ts)
if isinstance(event, paddle.event.EndPass):
print "Pass %d end, spent: %f" % (event.pass_id,
time.time() - ts_pass)
result = trainer.test(reader=test_reader)
print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)
trainer.train(
reader=train_reader, num_passes=200, event_handler=event_handler)
if __name__ == '__main__':
main()
```
#### File: tensorflow/image/googlenet.py
```python
from six.moves import xrange
from datetime import datetime
import math
import time
import tensorflow.python.platform
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 128, """Batch size.""")
tf.app.flags.DEFINE_integer('num_batches', 100, """Number of batches to run.""")
tf.app.flags.DEFINE_boolean('forward_only', False,
"""Only run the forward pass.""")
tf.app.flags.DEFINE_boolean('forward_backward_only', False,
"""Only run the forward-forward pass.""")
tf.app.flags.DEFINE_string('data_format', 'NCHW',
"""The data format for Convnet operations.
Can be either NHWC or NCHW.
""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
parameters = []
conv_counter = 1
pool_counter = 1
affine_counter = 1
def _conv(inpOp, nIn, nOut, kH, kW, dH, dW, padType, wd=0.0005):
global conv_counter
global parameters
name = 'conv' + str(conv_counter)
conv_counter += 1
with tf.name_scope(name) as scope:
kernel = tf.Variable(
tf.truncated_normal(
[kH, kW, nIn, nOut], dtype=tf.float32, stddev=1e-1),
name='weights')
if wd is not None and wd > 0:
weight_decay = tf.mul(tf.nn.l2_loss(kernel), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
if FLAGS.data_format == 'NCHW':
strides = [1, 1, dH, dW]
else:
strides = [1, dH, dW, 1]
conv = tf.nn.conv2d(
inpOp,
kernel,
strides,
padding=padType,
data_format=FLAGS.data_format)
biases = tf.Variable(
tf.constant(
0.0, shape=[nOut], dtype=tf.float32),
trainable=True,
name='biases')
bias = tf.reshape(
tf.nn.bias_add(
conv, biases, data_format=FLAGS.data_format),
conv.get_shape())
conv1 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
return conv1
def _affine(inpOp, nIn, nOut, act=True, wd=0.0005):
global affine_counter
global parameters
name = 'affine' + str(affine_counter)
affine_counter += 1
with tf.name_scope(name) as scope:
kernel = tf.Variable(
tf.truncated_normal(
[nIn, nOut], dtype=tf.float32, stddev=1e-1),
name='weights')
if wd is not None and wd > 0:
weight_decay = tf.mul(tf.nn.l2_loss(kernel), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
biases = tf.Variable(
tf.constant(
0.0, shape=[nOut], dtype=tf.float32),
trainable=True,
name='biases')
affine1 = tf.nn.relu_layer(
inpOp, kernel, biases,
name=name) if act else tf.matmul(inpOp, kernel) + biases
parameters += [kernel, biases]
return affine1
def _mpool(inpOp, kH, kW, dH, dW, padding):
global pool_counter
global parameters
name = 'pool' + str(pool_counter)
pool_counter += 1
if FLAGS.data_format == 'NCHW':
ksize = [1, 1, kH, kW]
strides = [1, 1, dH, dW]
else:
ksize = [1, kH, kW, 1]
strides = [1, dH, dW, 1]
return tf.nn.max_pool(
inpOp,
ksize=ksize,
strides=strides,
padding=padding,
data_format=FLAGS.data_format,
name=name)
def _apool(inpOp, kH, kW, dH, dW, padding):
global pool_counter
global parameters
name = 'pool' + str(pool_counter)
pool_counter += 1
if FLAGS.data_format == 'NCHW':
ksize = [1, 1, kH, kW]
strides = [1, 1, dH, dW]
else:
ksize = [1, kH, kW, 1]
strides = [1, dH, dW, 1]
return tf.nn.avg_pool(
inpOp,
ksize=ksize,
strides=strides,
padding=padding,
data_format=FLAGS.data_format,
name=name)
def _inception(inp, inSize, o1s, o2s1, o2s2, o3s1, o3s2, o4s1, o4s2):
conv1 = _conv(inp, inSize, o1s, 1, 1, 1, 1, 'VALID')
conv3_ = _conv(inp, inSize, o2s1, 1, 1, 1, 1, 'VALID')
conv3 = _conv(conv3_, o2s1, o2s2, 3, 3, 1, 1, 'SAME')
conv5_ = _conv(inp, inSize, o3s1, 1, 1, 1, 1, 'VALID')
conv5 = _conv(conv5_, o3s1, o3s2, 5, 5, 1, 1, 'SAME')
pool_ = _mpool(inp, o4s1, o4s1, 1, 1, 'SAME')
pool = _conv(pool_, inSize, o4s2, 1, 1, 1, 1, 'VALID')
if FLAGS.data_format == 'NCHW':
channel_dim = 1
else:
channel_dim = 3
incept = tf.concat(channel_dim, [conv1, conv3, conv5, pool])
return incept
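# Channel bookkeeping for the inception block above: the output depth is
# o1s + o2s2 + o3s2 + o4s2. For instance, the first call below,
# _inception(pool3, 192, 64, 96, 128, 16, 32, 3, 32), concatenates
# 64 + 128 + 32 + 32 = 256 channels, which is why incept3b is built with inSize=256.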
def loss(logits, labels):
batch_size = tf.size(labels)
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
concated = tf.concat(1, [indices, labels])
onehot_labels = tf.sparse_to_dense(concated,
tf.pack([batch_size, 1000]), 1.0, 0.0)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
logits, onehot_labels, name='xentropy')
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
return loss
def inference(images):
# stage 1
conv1 = _conv(images, 3, 64, 7, 7, 2, 2, 'SAME')
pool1 = _mpool(conv1, 3, 3, 2, 2, 'SAME')
# stage 2
conv2 = _conv(pool1, 64, 64, 1, 1, 1, 1, 'VALID')
conv3 = _conv(conv2, 64, 192, 3, 3, 1, 1, 'SAME')
pool3 = _mpool(conv3, 3, 3, 2, 2, 'SAME')
# stage 3
incept3a = _inception(pool3, 192, 64, 96, 128, 16, 32, 3, 32)
incept3b = _inception(incept3a, 256, 128, 128, 192, 32, 96, 3, 64)
pool4 = _mpool(incept3b, 3, 3, 2, 2, 'SAME')
# stage 4
incept4a = _inception(pool4, 480, 192, 96, 208, 16, 48, 3, 64)
incept4b = _inception(incept4a, 512, 160, 112, 224, 24, 64, 3, 64)
incept4c = _inception(incept4b, 512, 128, 128, 256, 24, 64, 3, 64)
incept4d = _inception(incept4c, 512, 112, 144, 288, 32, 64, 3, 64)
incept4e = _inception(incept4d, 528, 256, 160, 320, 32, 128, 3, 128)
pool5 = _mpool(incept4e, 3, 3, 2, 2, 'SAME')
# stage 5
incept5a = _inception(pool5, 832, 256, 160, 320, 32, 128, 3, 128)
incept5b = _inception(incept5a, 832, 384, 192, 384, 48, 128, 3, 128)
pool6 = _apool(incept5b, 7, 7, 1, 1, 'VALID')
# output 1
resh1 = tf.reshape(pool6, [-1, 1024])
drop = tf.nn.dropout(resh1, 0.4)
affn1 = _affine(resh1, 1024, 1000, act=False)
return affn1
def time_tensorflow_run(session, target, info_string):
num_steps_burn_in = 10
total_duration = 0.0
total_duration_squared = 0.0
if not isinstance(target, list):
target = [target]
target_op = tf.group(*target)
for i in range(FLAGS.num_batches + num_steps_burn_in):
start_time = time.time()
_ = session.run(target_op)
duration = time.time() - start_time
if i > num_steps_burn_in:
if not i % 10:
print('%s: step %d, duration = %.3f' %
(datetime.now(), i - num_steps_burn_in, duration))
total_duration += duration
total_duration_squared += duration * duration
mn = total_duration / FLAGS.num_batches
vr = total_duration_squared / FLAGS.num_batches - mn * mn
sd = math.sqrt(vr)
print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
(datetime.now(), info_string, FLAGS.num_batches, mn, sd))
def run_benchmark():
global parameters
with tf.Graph().as_default():
# Generate some dummy images.
image_size = 224
if FLAGS.data_format == 'NCHW':
image_shape = [FLAGS.batch_size, 3, image_size, image_size]
else:
image_shape = [FLAGS.batch_size, image_size, image_size, 3]
images = tf.get_variable(
'image',
image_shape,
initializer=tf.truncated_normal_initializer(
stddev=0.1, dtype=tf.float32),
dtype=tf.float32,
trainable=False)
labels = tf.get_variable(
'label', [FLAGS.batch_size],
initializer=tf.constant_initializer(1),
dtype=tf.int32,
trainable=False)
# Build a Graph that computes the logits predictions from the
# inference model.
last_layer = inference(images)
objective = loss(last_layer, labels)
# Compute gradients.
# opt = tf.train.GradientDescentOptimizer(0.001)
opt = tf.train.MomentumOptimizer(0.001, 0.9)
grads = opt.compute_gradients(objective)
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(
0.0, dtype=tf.float32),
trainable=False,
dtype=tf.float32)
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(0.9, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables(
))
# Build an initialization operation.
init = tf.initialize_all_variables()
# Start running operations on the Graph.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
run_forward = True
run_forward_backward = True
if FLAGS.forward_only and FLAGS.forward_backward_only:
raise ValueError("Cannot specify --forward_only and "
"--forward_backward_only at the same time.")
if FLAGS.forward_only:
run_forward_backward = False
elif FLAGS.forward_backward_only:
run_forward = False
if run_forward:
# Run the forward benchmark.
time_tensorflow_run(sess, last_layer, "Forward")
if run_forward_backward:
with tf.control_dependencies(
[apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
time_tensorflow_run(sess, [train_op, objective], "Forward-backward")
def main(_):
run_benchmark()
if __name__ == '__main__':
tf.app.run()
```
#### File: local/src/reduce_min_pool_size.py
```python
@provider(min_pool_size=0, ...)
def process(settings, filename):
os.system('shuf %s > %s.shuf' % (filename, filename)) # shuffle before.
with open('%s.shuf' % filename, 'r') as f:
for line in f:
yield get_sample_from_line(line)
```
#### File: v2/fluid/distribute_transpiler_simple.py
```python
import framework
from framework import Program, default_main_program, Parameter, Variable
import optimizer
from layer_helper import LayerHelper
def hash_name_to_server(params_grads, pserver_endpoints):
"""
    :param params_grads: list of (parameter, gradient) pairs to be placed
    :param pserver_endpoints: list of parameter server endpoints
    :return: a map of pserver endpoint ->
        params -> [param list]
        grads -> [grad list]
"""
def _hash_param(param_name, total):
return hash(param_name) % total
param_grad_map = dict()
for param, grad in params_grads:
if param.trainable is True and grad is not None:
server_id = _hash_param(param.name, len(pserver_endpoints))
server_for_param = pserver_endpoints[server_id]
if not param_grad_map.has_key(server_for_param):
param_grad_map[server_for_param] = {"params": [], "grads": []}
param_grad_map[server_for_param]["params"].append(param)
param_grad_map[server_for_param]["grads"].append(grad)
return param_grad_map
def round_robin(params_grads, pserver_endpoints):
assert (len(params_grads) > len(pserver_endpoints))
param_grad_map = dict()
pserver_idx = 0
for param, grad in params_grads:
if param.trainable is True:
server_for_param = pserver_endpoints[pserver_idx]
if not param_grad_map.has_key(server_for_param):
param_grad_map[server_for_param] = {"params": [], "grads": []}
param_grad_map[server_for_param]["params"].append(param)
param_grad_map[server_for_param]["grads"].append(grad)
pserver_idx += 1
if pserver_idx >= len(pserver_endpoints):
pserver_idx = 0
return param_grad_map
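# Illustration of the two split policies (endpoint names are made up): with
# pserver_endpoints = ["ps0:6174", "ps1:6174"] and five trainable parameters
# p0..p4, round_robin assigns p0, p2, p4 to ps0 and p1, p3 to ps1, while
# hash_name_to_server picks the endpoint as hash(param.name) % 2, so the
# placement depends only on the parameter names.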
class SimpleDistributeTranspiler:
def transpile(self,
optimize_ops,
params_grads,
program=None,
pservers="127.0.0.1:6174",
trainers=1,
split_method=round_robin):
"""
        Transpile the program into distributed data-parallelism programs.
        The main_program will be transformed to use a remote parameter server
        to do parameter optimization, and the optimization graph will be put
        into a parameter server program.
        Different methods can be used to split trainable variables across
        parameter servers.
Example to run:
exe = fluid.Executor(place)
t = fluid.DistributeTranspiler()
t.transpile(optimize_ops, params_grads, pservers="127.0.0.1:6174", trainers=1)
pserver_endpoint = os.getenv("PSERVER")
if pserver_endpoint:
pserver_prog = t.get_pserver_program(pserver_endpoint, optimize_ops)
exe.run(fluid.default_startup_program())
exe.run(pserver_prog)
else:
feeder = fluid.DataFeeder(feed_list=[images, label], place=place)
exe.run(fluid.default_startup_program())
for pass_id in range(PASS_NUM):
...
:param optimize_ops: op list of optimization, should be the
return value of Optimizer.minimize
:type optimize_ops: list
:param program: program to optimize, default default_main_program
:param pservers: parameter server endpoints like "m1:6174,m2:6174"
:type pservers: string
:return: return a list of programs
"""
if program is None:
program = default_main_program()
self.program = program
self.trainers = trainers
self.optimize_ops = optimize_ops
self._optimize_distributed(
optimize_ops,
program,
params_grads,
pservers=pservers,
trainers=trainers,
split_method=split_method)
def _clone_param(self, block, v):
assert isinstance(v, Parameter)
new_p = Parameter(
block=block,
shape=v.shape,
dtype=v.dtype,
type=v.type,
lod_level=v.lod_level,
stop_gradient=v.stop_gradient,
trainable=v.trainable,
optimize_attr=v.optimize_attr,
regularizer=v.regularizer,
name=v.name)
block.vars[new_p.name] = new_p
def _clone_var(self, block, var):
assert isinstance(var, Variable)
return block.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
lod_level=var.lod_level,
persistable=var.persistable)
def _optimize_distributed(self, optimize_ops, program, params_and_grads,
**kwargs):
if kwargs.has_key("split_method"):
split_method = kwargs["split_method"]
else:
split_method = round_robin
assert (callable(split_method))
pserver_endpoints = kwargs["pservers"].split(",")
self.param_grad_map = split_method(params_and_grads, pserver_endpoints)
send_op_ordered_inputs = []
send_op_ordered_outputs = []
epmap = []
for ep, v in self.param_grad_map.iteritems():
send_op_ordered_inputs.extend(v["grads"])
send_op_ordered_outputs.extend(v["params"])
for i in v["grads"]:
epmap.append(ep)
send_op = program.global_block().append_op(
type="send",
inputs={"X": send_op_ordered_inputs
}, # inputs is a list of tensors to be send
outputs={"Out": send_op_ordered_outputs},
attrs={"endpoints": pserver_endpoints,
"epmap": epmap})
def get_trainer_program(self):
# remove optimize ops and add a send op to main_program
self.program.global_block().delete_ops(self.optimize_ops)
return self.program
def _create_var_for_trainers(self, block, var, trainers):
var_list = []
for i in xrange(trainers):
var_each = block.create_var(
name="%s.trainer_%d" % (var.name, i),
                persistable=var.persistable,
dtype=var.dtype,
shape=var.shape)
var_list.append(var_each)
return var_list
def get_pserver_program(self, endpoint, optimize_ops):
pserver_program = Program()
for v in self.param_grad_map[endpoint]["params"]:
self._clone_param(pserver_program.global_block(), v)
optimize_sub_program = Program()
grad_var_names = [
var.name for var in self.param_grad_map[endpoint]["grads"]
]
for opt_op in optimize_ops:
for _, var in opt_op.inputs.iteritems():
# NOTE: append operators to merge gradients from multiple
# trainers. If trainers == 1, this is not needed.
if self.trainers > 1 and var.name in grad_var_names:
vars2merge = self._create_var_for_trainers(
optimize_sub_program.global_block(), var, self.trainers)
merged_var = optimize_sub_program.global_block().create_var(
name=var.name,
persistable=var.persistable,
dtype=var.dtype,
shape=var.shape)
optimize_sub_program.global_block().append_op(
type="sum",
inputs={"X": vars2merge},
outputs={"Out": merged_var})
optimize_sub_program.global_block().append_op(
type="scale",
inputs={"X": merged_var},
outputs={"Out": merged_var},
attrs={"scale": 1.0 / float(self.trainers)})
else:
optimize_sub_program.global_block().create_var(
name=var.name,
persistable=var.persistable,
dtype=var.dtype,
shape=var.shape)
if opt_op.inputs.has_key("Grad"):
if opt_op.inputs["Grad"].name in grad_var_names:
optimize_sub_program.global_block().append_op(
type=opt_op.type,
inputs=opt_op.inputs,
outputs=opt_op.outputs,
attrs=opt_op.attrs)
else:
optimize_sub_program.global_block().append_op(
type=opt_op.type,
inputs=opt_op.inputs,
outputs=opt_op.outputs,
attrs=opt_op.attrs)
pserver_program.global_block().append_op(
type="recv",
inputs={"RX":
self.param_grad_map[endpoint]["grads"]}, # grads to recv
outputs={},
attrs={
"OptimizeBlock": optimize_sub_program.global_block(),
"endpoint": endpoint,
"ParamList":
[p.name for p in self.param_grad_map[endpoint]["params"]],
"GradList":
[p.name for p in self.param_grad_map[endpoint]["grads"]],
"Trainers": self.trainers
})
pserver_program.sync_with_cpp()
return pserver_program
```
#### File: v2/fluid/graphviz.py
```python
import os
import random
import subprocess
import logging
def crepr(v):
if type(v) is str or type(v) is unicode:
return '"%s"' % v
return str(v)
class Rank(object):
def __init__(self, kind, name, priority):
'''
kind: str
name: str
priority: int
'''
self.kind = kind
self.name = name
self.priority = priority
self.nodes = []
def __str__(self):
if not self.nodes:
return ''
return '{' + 'rank={};'.format(self.kind) + \
','.join([node.name for node in self.nodes]) + '}'
class Graph(object):
rank_counter = 0
def __init__(self, title, **attrs):
self.title = title
self.attrs = attrs
self.nodes = []
self.edges = []
self.rank_groups = {}
def code(self):
return self.__str__()
def rank_group(self, kind, priority):
name = "rankgroup-%d" % Graph.rank_counter
Graph.rank_counter += 1
rank = Rank(kind, name, priority)
self.rank_groups[name] = rank
return name
def node(self, label, prefix, description="", **attrs):
node = Node(label, prefix, description, **attrs)
if 'rank' in attrs:
rank = self.rank_groups[attrs['rank']]
del attrs['rank']
rank.nodes.append(node)
self.nodes.append(node)
return node
def edge(self, source, target, **attrs):
edge = Edge(source, target, **attrs)
self.edges.append(edge)
return edge
def compile(self, dot_path):
file = open(dot_path, 'w')
file.write(self.__str__())
image_path = os.path.join(
os.path.dirname(__file__), dot_path[:-3] + "pdf")
cmd = ["dot", "-Tpdf", dot_path, "-o", image_path]
subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
logging.warning("write block debug graph to {}".format(image_path))
return image_path
def show(self, dot_path):
image = self.compile(dot_path)
cmd = ["open", image]
subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def _rank_repr(self):
ranks = sorted(
self.rank_groups.items(),
cmp=lambda a, b: a[1].priority > b[1].priority)
repr = []
for x in ranks:
repr.append(str(x[1]))
return '\n'.join(repr) + '\n'
def __str__(self):
reprs = [
'digraph G {',
'title = {}'.format(crepr(self.title)),
]
for attr in self.attrs:
reprs.append("{key}={value};".format(
key=attr, value=crepr(self.attrs[attr])))
reprs.append(self._rank_repr())
random.shuffle(self.nodes)
reprs += [str(node) for node in self.nodes]
for x in self.edges:
reprs.append(str(x))
reprs.append('}')
return '\n'.join(reprs)
class Node(object):
counter = 1
def __init__(self, label, prefix, description="", **attrs):
self.label = label
self.name = "%s_%d" % (prefix, Node.counter)
self.description = description
self.attrs = attrs
Node.counter += 1
def __str__(self):
reprs = '{name} [label={label} {extra} ];'.format(
name=self.name,
label=self.label,
extra=',' + ','.join("%s=%s" % (key, crepr(value))
for key, value in self.attrs.items())
if self.attrs else "")
return reprs
class Edge(object):
def __init__(self, source, target, **attrs):
'''
Link source to target.
:param source: Node
:param target: Node
:param graph: Graph
        :param attrs: dict
'''
self.source = source
self.target = target
self.attrs = attrs
def __str__(self):
repr = "{source} -> {target} {extra}".format(
source=self.source.name,
target=self.target.name,
extra="" if not self.attrs else
"[" + ','.join("{}={}".format(attr[0], crepr(attr[1]))
for attr in self.attrs.items()) + "]")
return repr
class GraphPreviewGenerator(object):
'''
Generate a graph image for ONNX proto.
'''
def __init__(self, title):
# init graphviz graph
self.graph = Graph(
title,
layout="dot",
concentrate="true",
rankdir="TB", )
self.op_rank = self.graph.rank_group('same', 2)
self.param_rank = self.graph.rank_group('same', 1)
self.arg_rank = self.graph.rank_group('same', 0)
def __call__(self, path='temp.dot', show=False):
if not show:
self.graph.compile(path)
else:
self.graph.show(path)
def add_param(self, name, data_type, shape, highlight=False):
label = '\n'.join([
'<<table cellpadding="5">',
' <tr>',
' <td bgcolor="#2b787e">',
' <b>',
name,
' </b>',
' </td>',
' </tr>',
' <tr>',
' <td>',
str(data_type),
' </td>'
' </tr>',
' <tr>',
' <td>',
'[%s]' % 'x'.join(shape),
' </td>'
' </tr>',
'</table>>',
])
return self.graph.node(
label,
prefix="param",
description=name,
shape="none",
style="rounded,filled,bold",
width="1.3",
color="#148b97" if not highlight else "orange",
fontcolor="#ffffff",
fontname="Arial")
def add_op(self, opType, **kwargs):
highlight = False
if 'highlight' in kwargs:
highlight = kwargs['highlight']
del kwargs['highlight']
return self.graph.node(
"<<B>%s</B>>" % opType,
prefix="op",
description=opType,
shape="box",
style="rounded, filled, bold",
color="#303A3A" if not highlight else "orange",
fontname="Arial",
fontcolor="#ffffff",
width="1.3",
height="0.84", )
def add_arg(self, name, highlight=False):
return self.graph.node(
crepr(name),
prefix="arg",
description=name,
shape="box",
style="rounded,filled,bold",
fontname="Arial",
fontcolor="#999999",
color="#dddddd" if not highlight else "orange")
def add_edge(self, source, target, **kwargs):
highlight = False
if 'highlight' in kwargs:
highlight = kwargs['highlight']
del kwargs['highlight']
return self.graph.edge(
source,
target,
color="#00000" if not highlight else "orange",
**kwargs)
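    # Usage sketch (node names are illustrative): build a tiny preview and write it out.
    #
    #   g = GraphPreviewGenerator("block preview")
    #   w = g.add_param("fc_w", "float32", ["784", "10"])
    #   op = g.add_op("mul")
    #   g.add_edge(w, op)
    #   g("preview.dot")   # writes preview.dot and invokes the `dot` binary to produce a PDF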
```
#### File: fluid/tests/notest_csp.py
```python
import unittest
import paddle.v2.fluid as fluid
class TestCSPFramework(unittest.TestCase):
def daisy_chain(self):
n = 10000
leftmost = fluid.make_channel(dtype=int)
right = leftmost
left = leftmost
with fluid.While(steps=n):
right = fluid.make_channel(dtype=int)
with fluid.go():
fluid.send(left, 1 + fluid.recv(right))
left = right
with fluid.go():
fluid.send(right, 1)
fluid.Print(fluid.recv(leftmost))
if __name__ == '__main__':
unittest.main()
```
#### File: fluid/tests/test_auc_op.py
```python
import unittest
import numpy as np
from op_test import OpTest
class TestAucOp(OpTest):
def setUp(self):
self.op_type = "auc"
pred = np.random.random((128, 2)).astype("float32")
indices = np.random.randint(0, 2, (128, 2))
labels = np.random.randint(0, 2, (128, 1))
num_thresholds = 200
self.inputs = {'Out': pred, 'Indices': indices, 'Label': labels}
self.attrs = {'curve': 'ROC', 'num_thresholds': num_thresholds}
# NOTE: sklearn use a different way to generate thresholds
# which will cause the result differs slightly:
# from sklearn.metrics import roc_curve, auc
# fpr, tpr, thresholds = roc_curve(labels, pred)
# auc_value = auc(fpr, tpr)
        # we calculate AUC again using numpy for testing
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds - 2)]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
        # calculate TP, FN, TN, FP counts
tp_list = np.ndarray((num_thresholds, ))
fn_list = np.ndarray((num_thresholds, ))
tn_list = np.ndarray((num_thresholds, ))
fp_list = np.ndarray((num_thresholds, ))
for idx_thresh, thresh in enumerate(thresholds):
tp, fn, tn, fp = 0, 0, 0, 0
for i, lbl in enumerate(labels):
if lbl:
if pred[i, 0] >= thresh:
tp += 1
else:
fn += 1
else:
if pred[i, 0] >= thresh:
fp += 1
else:
tn += 1
tp_list[idx_thresh] = tp
fn_list[idx_thresh] = fn
tn_list[idx_thresh] = tn
fp_list[idx_thresh] = fp
epsilon = 1e-6
tpr = (tp_list.astype("float32") + epsilon) / (
tp_list + fn_list + epsilon)
fpr = fp_list.astype("float32") / (fp_list + tn_list + epsilon)
rec = (tp_list.astype("float32") + epsilon) / (
tp_list + fp_list + epsilon)
x = fpr[:num_thresholds - 1] - fpr[1:]
y = (tpr[:num_thresholds - 1] + tpr[1:]) / 2.0
auc_value = np.sum(x * y)
self.outputs = {'AUC': auc_value}
def test_check_output(self):
self.check_output()
if __name__ == "__main__":
unittest.main()
```
#### File: fluid/tests/test_iou_similarity_op.py
```python
import unittest
import numpy as np
import sys
import math
from op_test import OpTest
class TestIOUSimilarityOp(OpTest):
def test_check_output(self):
self.check_output()
def setUp(self):
self.op_type = "iou_similarity"
self.boxes1 = np.array(
[[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]).astype('float32')
self.boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]]).astype('float32')
self.output = np.array(
[[2.0 / 16.0, 0, 6.0 / 400.0],
[1.0 / 16.0, 0.0, 5.0 / 400.0]]).astype('float32')
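        # Worked example for the first entry: boxes1[0] = [4, 3, 7, 5] (area 6) and
        # boxes2[0] = [3, 4, 6, 8] (area 12) overlap on the rectangle (4, 4)-(6, 5),
        # an intersection of area 2, so IoU = 2 / (6 + 12 - 2) = 2/16.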
self.inputs = {'X': self.boxes1, 'Y': self.boxes2}
self.outputs = {'Out': self.output}
class TestIOUSimilarityOpWithLoD(TestIOUSimilarityOp):
def test_check_output(self):
self.check_output()
def setUp(self):
super(TestIOUSimilarityOpWithLoD, self).setUp()
self.boxes1_lod = [[0, 1, 2]]
self.output_lod = [[0, 1, 2]]
self.inputs = {'X': (self.boxes1, self.boxes1_lod), 'Y': self.boxes2}
self.outputs = {'Out': (self.output, self.output_lod)}
if __name__ == '__main__':
unittest.main()
```
#### File: fluid/tests/test_learning_rate_decay.py
```python
import unittest
import math
import copy
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid as fluid
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.learning_rate_decay as lr_decay
def exponential_decay(learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False):
exponent = float(global_step) / float(decay_steps)
if staircase:
exponent = math.floor(exponent)
return learning_rate * decay_rate**exponent
def natural_exp_decay(learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False):
exponent = float(global_step) / float(decay_steps)
if staircase:
exponent = math.floor(exponent)
return learning_rate * math.exp(-1 * decay_rate * exponent)
def inverse_time_decay(learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False):
temp = float(global_step) / float(decay_steps)
if staircase:
temp = math.floor(temp)
return learning_rate / (1 + decay_rate * temp)
def polynomial_decay(learning_rate,
global_step,
decay_steps,
end_learning_rate=0.0001,
power=1.0,
cycle=False):
if cycle:
div = math.ceil(global_step / float(decay_steps))
if div == 0:
div = 1
decay_steps = decay_steps * div
else:
global_step = min(global_step, decay_steps)
return (learning_rate - end_learning_rate) * \
((1 - float(global_step) / float(decay_steps)) ** power) + end_learning_rate
def piecewise_decay(global_step, boundaries, values):
assert len(boundaries) + 1 == len(values)
for i in range(len(boundaries)):
if global_step < boundaries[i]:
return values[i]
return values[len(values) - 1]
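# Numeric spot checks for the reference implementations above (step counts are
# illustrative): with learning_rate=1.0, decay_steps=5, decay_rate=0.5 and
# staircase=True, exponential_decay gives 1.0 at steps 0-4 and 0.5 at steps 5-9,
# while piecewise_decay(global_step=7, boundaries=[3, 6, 9], values=[0.1, 0.2, 0.3, 0.4])
# returns 0.3.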
class TestLearningRateDecay(unittest.TestCase):
def check_decay(self, python_decay_fn, fluid_decay_fn, kwargs):
global_step = layers.create_global_var(
shape=[1], value=0.0, dtype='float32', persistable=True)
decayed_lr = fluid_decay_fn(global_step=global_step, **kwargs)
layers.increment(global_step, 1.0)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for step in range(10):
step_val, lr_val = exe.run(fluid.default_main_program(),
feed=[],
fetch_list=[global_step, decayed_lr])
python_decayed_lr = python_decay_fn(global_step=step, **kwargs)
self.assertAlmostEqual(python_decayed_lr, lr_val[0])
def test_decay(self):
common_kwargs_true = {
"learning_rate": 1.0,
"decay_steps": 5,
"decay_rate": 0.5,
"staircase": True
}
common_kwargs_false = copy.deepcopy(common_kwargs_true)
common_kwargs_false["staircase"] = False
decay_fns = [
(exponential_decay, lr_decay.exponential_decay, common_kwargs_true),
(exponential_decay, lr_decay.exponential_decay,
common_kwargs_false),
(natural_exp_decay, lr_decay.natural_exp_decay, common_kwargs_true),
(natural_exp_decay, lr_decay.natural_exp_decay,
common_kwargs_false),
(inverse_time_decay, lr_decay.inverse_time_decay,
common_kwargs_true),
(inverse_time_decay, lr_decay.inverse_time_decay,
common_kwargs_false),
(polynomial_decay, lr_decay.polynomial_decay, {
"learning_rate": 1.0,
"decay_steps": 5,
"cycle": True
}),
(polynomial_decay, lr_decay.polynomial_decay, {
"learning_rate": 1.0,
"decay_steps": 5,
"cycle": False
}),
(piecewise_decay, lr_decay.piecewise_decay, {
"boundaries": [3, 6, 9],
"values": [0.1, 0.2, 0.3, 0.4]
}),
]
for py_decay_fn, fluid_decay_fn, kwargs in decay_fns:
print("decay_fn=" + py_decay_fn.__name__ + " kwargs=" + str(kwargs))
main_program = framework.Program()
startup_program = framework.Program()
with framework.program_guard(main_program, startup_program):
self.check_decay(py_decay_fn, fluid_decay_fn, kwargs)
if __name__ == '__main__':
unittest.main()
```
#### File: fluid/tests/test_split_and_merge_lod_tensor_op.py
```python
import unittest
import paddle.v2.fluid.core as core
import numpy as np
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.framework import Program, program_guard
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.backward import append_backward
class TestCPULoDTensorArrayOps(unittest.TestCase):
def place(self):
return core.CPUPlace()
def test_split_and_merge_lod_tensor_no_lod(self):
tensor = core.LoDTensor()
tensor.set(np.arange(10).reshape(10, 1).astype('int32'), self.place())
mask_np = np.array([0, 0, 1, 1, 1, 1, 0, 0, 0, 0]).astype('bool')
mask_np = np.expand_dims(mask_np, axis=1)
mask = core.LoDTensor()
mask.set(mask_np, self.place())
expect_true_tensor = np.array([2, 3, 4, 5]).astype('int32')
expect_true_tensor = np.expand_dims(expect_true_tensor, axis=1)
expect_true = core.LoDTensor()
expect_true.set(expect_true_tensor, self.place())
expect_false_tensor = np.array([0, 1, 6, 7, 8, 9]).astype('int32')
expect_false_tensor = np.expand_dims(expect_false_tensor, axis=1)
expect_false = core.LoDTensor()
expect_false.set(expect_false_tensor, self.place())
self.main(
tensor=tensor,
mask=mask,
expect_true=expect_true,
expect_false=expect_false,
expect_out=tensor)
def test_split_and_merge_lod_tensor_level_0(self):
tensor = core.LoDTensor()
tensor.set(np.arange(10).reshape(10, 1).astype('int32'), self.place())
tensor.set_lod([[0, 3, 9, 10]])
mask_np = np.array([0, 1, 0]).astype('bool')
mask_np = np.expand_dims(mask_np, axis=1)
mask = core.LoDTensor()
mask.set(mask_np, self.place())
expect_true_tensor = np.array([3, 4, 5, 6, 7, 8]).astype('int32')
expect_true_tensor = np.expand_dims(expect_true_tensor, axis=1)
expect_true = core.LoDTensor()
expect_true.set(expect_true_tensor, self.place())
expect_true.set_lod([[0, 6]])
expect_false_tensor = np.array([0, 1, 2, 9]).astype('int32')
expect_false_tensor = np.expand_dims(expect_false_tensor, axis=1)
expect_false_lod = [[0, 3, 4]]
expect_false = core.LoDTensor()
expect_false.set(expect_false_tensor, self.place())
expect_false.set_lod(expect_false_lod)
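        # How the level-0 LoD [[0, 3, 9, 10]] is read: sequence 0 covers rows 0-2,
        # sequence 1 covers rows 3-8, sequence 2 covers row 9. The mask [0, 1, 0]
        # therefore routes sequence 1 to the "true" branch and sequences 0 and 2
        # (rows 0-2 and 9) to the "false" branch, matching the expected tensors above.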
self.main(
tensor=tensor,
mask=mask,
expect_true=expect_true,
expect_false=expect_false,
expect_out=tensor)
def main(self, tensor, mask, expect_true, expect_false, expect_out,
level=0):
place = self.place()
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[1])
x.persistable = True
y = layers.data(name='y', shape=[1])
y.persistable = True
out_true, out_false = layers.split_lod_tensor(
input=x, mask=y, level=level)
out_true.persistable = True
out_false.persistable = True
out = layers.merge_lod_tensor(
in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
out.persistable = True
exe = Executor(place)
scope = core.Scope()
exe.run(program,
feed={'x': tensor,
'y': mask},
scope=scope,
return_numpy=False)
var_true = scope.find_var(out_true.name).get_tensor()
var_false = scope.find_var(out_false.name).get_tensor()
var_out = scope.find_var(out.name).get_tensor()
self.check_tensor_same(var_true, expect_true)
self.check_tensor_same(var_false, expect_false)
self.check_tensor_same(var_out, expect_out)
def check_tensor_same(self, actual, expect):
self.assertTrue(np.allclose(np.array(actual), np.array(expect)))
self.assertEqual(actual.lod(), expect.lod())
class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase):
def test_grad(self):
place = core.CPUPlace()
program = Program()
with program_guard(program):
x = layers.data(
name='x', shape=[1], dtype='float32', stop_gradient=False)
y = layers.data(
name='y', shape=[1], dtype='bool', stop_gradient=False)
level = 0
out_true, out_false = layers.split_lod_tensor(
input=x, mask=y, level=level)
out = layers.merge_lod_tensor(
in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
mean = layers.mean(x=out)
append_backward(mean)
tensor = core.LoDTensor()
tensor.set(np.arange(10).reshape(10, 1).astype('float32'), place)
tensor.set_lod([[0, 3, 9, 10]])
mask_np = np.array([0, 1, 0]).astype('bool')
mask_np = np.expand_dims(mask_np, axis=1)
mask = core.LoDTensor()
mask.set(mask_np, place)
exe = Executor(place)
scope = core.Scope()
g_vars = program.global_block().var(x.name + "@GRAD")
g_out = [
item.sum()
for item in map(np.array,
exe.run(program,
feed={'x': tensor,
'y': mask},
fetch_list=[g_vars],
scope=scope,
return_numpy=False))
]
g_out_sum = np.array(g_out).sum()
self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JCZ-Automatisering/autobuild",
"score": 3
}
|
#### File: JCZ-Automatisering/autobuild/helpers.py
```python
import platform
import sys
import re
import os
import tempfile
import subprocess
import config
import helpers
OS_TYPE = platform.system()
OS_TYPE_WINDOWS = "Windows"
def error(msg, return_code=1):
"""
Print (fatal) error message and exit the process using return code
:param msg: Message to print before exiting
:param return_code: Optional return code to exit with
:return:
"""
print("FATAL ERROR: %s" % msg)
sys.exit(return_code)
__TRUE_LIST = ("yes", "true", "1")
def string_to_bool(data: str):
"""
Try to convert a string to a boolean, defaults to False
:param data: The data to "scan"
:return: True if detected true, False otherwise
"""
if data.lower() in __TRUE_LIST:
return True
return False
def strip_comments(text):
"""
Strip comments from input text
:param text:
:return: Stripped text
"""
    result = re.sub(r'//.*?\n|/\*.*?\*/', '', text, flags=re.S)
if result.startswith("//"):
return ""
return result
def line_contains_any_of(the_line, items):
"""
Determine if any of the members of items is present in the string, if so, return True
:param the_line: The input line to check against
:param items: The (list of) check items
:return: True if at least one item found, False otherwise
"""
for the_item in items:
if the_item in the_line:
return True
return False
def line_contains_all(the_line, items):
"""
Determine if all of the members of items are present in the string, if so, return True
:param the_line: The input line to check against
:param items: The (list of) check items
:return: True if all items found, False otherwise
"""
for the_item in items:
if the_item not in the_line:
return False
return True
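# For example, line_contains_any_of("FROM python:3.9", ("FROM", "RUN")) is True,
# while line_contains_all("FROM python:3.9", ("FROM", "RUN")) is False because
# "RUN" does not occur in the line.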
def execute(command, optional_error_message=None):
"""
Execute a command and exit with a fatal error (message) when it fails
:param command: The command to execute
:param optional_error_message: The optional error message to print when the command fails
:return:
"""
print("EXEC: %s" % command)
r = os.system(command)
if not r == 0:
print(" FAILURE! (r=%s)" % r)
print(" COMMAND=\n\n%s\n" % command)
if optional_error_message:
print(optional_error_message)
sys.exit(1)
def run_command_get_output(command):
"""
Execute command and return output (as string)
:param command: The command to execute
:return: Result as string
"""
result = subprocess.run(command.split(" "), stdout=subprocess.PIPE)
return result.stdout.decode('utf-8').strip()
def __generate_variables_string(environment_variables_pass_through=(),
environment_variables_set={},
hostname=None):
"""
Helper function to generate a variables string to pass to Docker. Variables defined in the configuration but not
set in the environment are not added.
:param environment_variables_pass_through: The list of environment variable which should be relayed
:param environment_variables_set: The dict of variables to set if not yet present in environment
:param hostname: Optional hostname to add
:return: String with all variables & hostname set (of applicable)
"""
result = ""
for var_item in environment_variables_pass_through:
var_item_content = os.getenv(var_item)
if var_item_content:
result += "-e %s=%s " % (var_item, var_item_content)
for var_item in environment_variables_set:
check = "%s=" % var_item
if check not in result:
result += "-e %s=%s " % (var_item, environment_variables_set[var_item])
# special variable: hostname
if hostname:
result += "-h %s" % hostname
return result
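# For illustration (variable names and values made up): with BUILD_ID=42 exported
# in the calling environment,
#   __generate_variables_string(("BUILD_ID", "UNSET_VAR"), {"CI": "1"}, hostname="builder")
# returns "-e BUILD_ID=42 -e CI=1 -h builder"; UNSET_VAR is skipped because it has no value.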
def __escape_local_volume(the_volume):
"""
Escape the local volume if running on Windows, otherwise, just return it as is.
:param the_volume: Volume
:return: Escaped volume
"""
if OS_TYPE_WINDOWS in OS_TYPE:
return str(the_volume).replace("\"", "/")
return the_volume
__script_name = "/tmp/the_script"
def execute_in_docker(command, the_config: config.Config, interactive=False, optional_error_message=None):
"""
Execute a command in a Docker container
:param command: The command to execute in the Docker container
:param the_config: Instance of Config()
:param interactive: Run with interactive flag (True) or not
:param optional_error_message: Optional error message to print when command fails
:return:
"""
__tmp_name = ""
__tmp_fp = None
try:
__tmp_fp, __tmp_name = tempfile.mkstemp()
with open(__tmp_name, "w+", newline="\n") as fp:
fp.write("#!/bin/sh\n\n%s\n" % command)
print("command: %s" % command)
command = "/bin/sh %s" % __tmp_name
if os.getenv("NO_DOCKER"):
# just run it without docker...
execute(command, optional_error_message=optional_error_message)
else:
if OS_TYPE_WINDOWS not in OS_TYPE:
home = os.getenv("HOME")
if not home:
error("HOME not set!")
home_vol_and_var = "-v {home}:{home} -e HOME={home}".format(home=home)
else:
home_vol_and_var = ""
other_volumes = ""
try_volumes = ("/etc/localtime", "/usr/share/zoneinfo", "/etc/passwd", "/etc/group", "/tmp")
for vol_item in try_volumes:
if os.path.exists(vol_item):
other_volumes = "-v %s:%s %s" %\
(__escape_local_volume(vol_item), vol_item, other_volumes)
# script "volume":
other_volumes = "%s -v %s:%s" % (other_volumes, __escape_local_volume(__tmp_name), __script_name)
for vol_item in the_config.extra_volumes:
if os.path.exists(vol_item):
other_volumes = "-v %s:%s %s" % (__escape_local_volume(vol_item), vol_item, other_volumes)
else:
print("WARNING: requested to add volume %s to container, but directory/file not found!" % vol_item)
docker_base = "docker run --rm --name {docker_name} {home_vol_and_var}".format(
docker_name=the_config.docker_name,
home_vol_and_var=home_vol_and_var
)
if interactive:
docker_base = "%s -it" % docker_base
verbose_var = os.getenv("VERBOSE", None)
if verbose_var:
verbose_var = "-e VERBOSE=%s" % verbose_var
else:
verbose_var = ""
local_dir = os.getcwd()
if OS_TYPE_WINDOWS in OS_TYPE:
# because we are running on windows, we cannot use our path in the container; use something different
# in that case, /code
if the_config.volume_one_up:
print("Volume one up not yet supported on windows, skipping...")
remote_dir = "/code"
work_dir = remote_dir
# and we also do not specify user settings -u
user_settings = ""
else:
work_dir = local_dir
if the_config.volume_one_up:
remote_dir = os.path.dirname(local_dir) # one up
local_dir = remote_dir
else:
remote_dir = local_dir
user_id = run_command_get_output("id -u")
group_id = run_command_get_output("id -g")
user_settings = f"-u {user_id}:{group_id}"
if the_config.extra_docker_run_args:
extra_args = the_config.extra_docker_run_args
else:
extra_args = ""
variables = __generate_variables_string(the_config.environment_variables_pass_through,
the_config.set_environment_variables)
docker_cmd = f"{docker_base} {extra_args} {variables} -v {local_dir}:{remote_dir} {verbose_var} " \
f"{other_volumes} -w {work_dir} " \
f"{user_settings} {the_config.docker_name} /bin/sh {__script_name}"
if os.getenv("WAIT"):
print("\npress any key to continue...\n")
input()
execute(docker_cmd, optional_error_message=optional_error_message)
    except Exception as e:
        error("Exception during docker assembling/run: %s" % e)
    finally:
        if __tmp_fp is not None:
            os.close(__tmp_fp)
        if __tmp_name:
            os.unlink(__tmp_name)
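# --- usage sketch (illustrative only) ---
# Assumes config.Config exposes the attributes referenced above (docker_name,
# extra_volumes, volume_one_up, extra_docker_run_args,
# environment_variables_pass_through, set_environment_variables); how a Config
# instance is constructed depends on the real config module.
#
#   cfg = config.Config(...)                 # hypothetical construction
#   execute_in_docker("make build", cfg,
#                     optional_error_message="Build inside the container failed")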
```
|
{
"source": "jczestochowska/emb2emb",
"score": 3
}
|
#### File: emb2emb/autoencoders/autoencoder.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from .noise import noisy
class Encoder(nn.Module):
def __init__(self, config):
super(Encoder, self).__init__()
self.config = config
def encode(self, x, lengths, train=False):
pass
class Decoder(nn.Module):
def __init__(self, config):
super(Decoder, self).__init__()
self.config = config
def decode(self, x, train=False, actual=None, batch_lengths=None, beam_width=1):
pass
def decode_teacher_forcing(self, x, actual, lengths):
pass
class AutoEncoder(nn.Module):
def __init__(self, encoder, decoder, tokenizer, config):
super(AutoEncoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.tokenizer = tokenizer
self.config = config
self.desired_length = config.desired_length
self.adversarial = config.adversarial
self.variational = config.variational
self.denoising = config.denoising
self.rae_regularization = config.rae_regularization
if self.config.share_embedding:
self.decoder.embedding = self.encoder.embedding
if self.adversarial:
self.discriminator = nn.Sequential(
torch.nn.Linear(config.hidden_size,
config.hidden_size, bias=False),
torch.nn.SELU(),
torch.nn.Linear(config.hidden_size,
config.hidden_size, bias=False),
torch.nn.SELU(),
torch.nn.Linear(config.hidden_size, 1, bias=True),
torch.nn.Sigmoid()
)
self.optimD = torch.optim.Adam(
self.discriminator.parameters(), lr=config.discriminator_lr)
def forward(self, x, lengths):
# denoising
if self.training and self.denoising:
x_, lengths_, orig_indices = noisy(
self.tokenizer, x, self.config.p_drop)
else:
x_, lengths_ = x, lengths
# x shape: (batch, seq_len)
encoded = self.encoder.encode(x_, lengths_, train=True)
if self.training and self.denoising:
encoded = encoded[orig_indices]
if self.variational:
encoded, mean, logv = encoded
# encoded shape: (batch, hidden_size)
# add small gaussian noise during training
if self.training and self.config.gaussian_noise_std > 0.:
encoded = encoded + \
torch.randn_like(encoded) * self.config.gaussian_noise_std
if self.config.teacher_forcing_batchwise and self.config.teacher_forcing_ratio > random.random():
decoded_pred = self.decoder.decode_teacher_forcing(
encoded, x, lengths)
else:
decoded_pred = self.decoder.decode(encoded)
# ret: (batch, seq_len, classes)
if self.variational:
return decoded_pred, mean, logv, encoded
if self.adversarial:
# it's important to detach the encoded embedding before feeding into the
# discriminator so that when updating the discriminator, it doesn't
# backprop through the generator
encoded_det = encoded.detach().clone()
prior_data = torch.randn_like(encoded)
return decoded_pred, self.discriminator(encoded), self.discriminator(encoded_det), self.discriminator(prior_data), encoded
else:
return decoded_pred, encoded
def encode(self, x, lengths):
return self.encoder.encode(x, lengths, reparameterize=False)
def decode(self, x, beam_width=1, batch_lengths=None):
return self.decoder.decode(x, beam_width=beam_width, desired_length=self.desired_length, batch_lengths=batch_lengths)
def decode_training(self, h, actual, lengths):
"""
Decoding step to be used for downstream training
"""
return self.decoder.decode(h)
def loss(self, predictions, embeddings, labels, reduction="mean"):
# predictions: (batch, seq_len, classes)
# labels: (batch, seq_len)
l_rec = F.cross_entropy(
predictions.reshape(-1, predictions.shape[2]), labels.reshape(-1), ignore_index=0, reduction=reduction)
# regularize embeddings
if self.rae_regularization > 0.:
l_reg = ((embeddings.norm(dim=-1) ** 2) / 2.).mean()
l = l_reg * self.rae_regularization + l_rec
return l
else:
return l_rec
def loss_variational(self, predictions, embeddings, labels, mu, z_var, lambda_r=1, lambda_kl=1, reduction="mean"):
recon_loss = self.loss(predictions, embeddings,
labels, reduction=reduction)
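        # KL(q(z|x) || N(0, I)) for a diagonal Gaussian:
        # 0.5 * sum(exp(logvar) + mu^2 - 1 - logvar)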
raw_kl_loss = torch.exp(z_var) + mu**2 - 1.0 - z_var
if reduction == "mean":
kl_loss = 0.5 * torch.mean(raw_kl_loss)
elif reduction == "sum":
kl_loss = 0.5 * torch.sum(raw_kl_loss)
return lambda_r * recon_loss + lambda_kl * kl_loss, recon_loss, kl_loss
def loss_adversarial(self, predictions, embeddings, labels, fake_z_g, fake_z_d, true_z, lambda_a=1):
r_loss = self.loss(predictions, embeddings, labels)
d_loss = (F.binary_cross_entropy(true_z, torch.ones_like(true_z)) +
F.binary_cross_entropy(fake_z_d, torch.zeros_like(fake_z_d))) / 2
g_loss = F.binary_cross_entropy(fake_z_g, torch.ones_like(fake_z_g))
# we need to update discriminator and generator independently, otherwise
# we will update the generator to produce better distinguishable embeddings,
# which we do not want
return (r_loss + lambda_a * g_loss), r_loss, d_loss, g_loss
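    # Note: this eval() shadows nn.Module.eval(); it runs a full encode/decode
    # pass instead of switching the module to evaluation mode.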
def eval(self, x, lengths, teacher_forcing=False, beam_width=1):
encoded = self.encoder.encode(x, lengths)
# encoded shape: (batch, hidden_size)
if teacher_forcing:
return self.decoder.decode_teacher_forcing(encoded, x, lengths)
else:
return self.decoder.decode(encoded, beam_width=beam_width)
```
#### File: emb2emb/emb2emb/trainer.py
```python
import time
from random import choices
import torch
from torch import nn
from additive_noise import additive_noise
from .fgim import fast_gradient_iterative_modification
MODE_EMB2EMB = "mapping"
MODE_SEQ2SEQ = "seq2seq"
MODE_FINETUNEDECODER = "finetune_decoder"
MODE_SEQ2SEQFREEZE = "seq2seq_freeze"
# THIS IS PHI
class Emb2Emb(nn.Module):
"""This class encapsulates the computations happening in the Task-Learning phase of the Emb2Emb framework during
training and inference.
    The basic flow in the Emb2Emb framework is as follows:
    #. Train an autoencoder to obtain an encoder and a decoder.
#. Freeze the encoder and decoder.
#. Train a mapping in the autoencoder embedding space that maps the
encoding of the input to the encoding of the (desired) output.
#. At inference time, encode the input, plug it into the mapping, (optionally)
apply Fast-Gradient-Iterative-Modification, and plug the result into the decoder.
This class implements steps 2, 3, and 4. Emb2Emb can be used with any pretrained
autoencoder, so the encoder and decoder are passed for initialization.
Moreover, initialization expects the specific mapping (architecture) and loss function to be used
for training.
Learning in the embedding space has the disadvantage that the produced outputs
are not necessarily such that the decoder can deal with them. To mitigate this
issue, training in Emb2Emb uses an optional adversarial loss term that encourages
the mapping to keep its outputs on the manifold of the autoencoder such that the
decoder can more likely handle them well.
:param encoder: Used for encoding the input and, if provided, the output sequence.
:type encoder: class:`mapping.encoding.Encoder`
:param decoder: Used for decoding the output of the mapping.
:type decoder: class:`mapping.encoding.Decoder`
:param mapping: Used for transforming the embedding of the input to the embedding of the output.
:type mapping: class:`emb2emb.mapping.Mapping`
:param loss_fn: A loss function for regression problems, i.e., it must take as input
a pair (predicted, true) of embedding tensors of shape [batch size, embedding_dim].
:type loss_fn: class:`torch.nn.Module`
:param mode:
:type mode:
:param use_adversarial_term: If set, adversarial regularization term will be used.
    :param adversarial_lambda: Weight of the adversarial loss term.
:param device: The device to initialize tensors on.
:param critic_lr: Learning rate for training the discriminator/critic.
:param embedding_dim: Dimensionality of fixed-size bottleneck embedding.
:param critic_hidden_units: Hidden units in the discriminator MLP.
:param critic_hidden_layers: Number of hidden layers in the discriminator MLP.
:param real_data: If set to "input", the discriminator will receive target sequences
as the "true" embeddings. Otherwise, the parameter will be interpreted as a path
        to a file containing a corpus, with one sentence per line. Positive examples for
        the discriminator are then randomly chosen from that corpus.
:param fast_gradient_iterative_modification: Whether to use FGIM at inference time.
:param binary_classifier: The binary classifier that FGIM takes the derivative of with
        respect to the input. The gradient is followed towards classifying the input as
'1'.
    :param fgim_decay: Rate by which to decay the FGIM step sizes.
    :param fgim_threshold: How far from the target '1' the classifier output may still be
        for the modification to be considered finished.
:param fgim_weights: Step sizes to be applied in parallel (list).
:param fgim_use_training_loss: If set to true, the training loss is also followed
at inference time (i.e., including the adversarial term if active).
:param fgim_start_at_y: Instead of computing FGIM gradients starting from the output
of the mapping, we start from the embedding of the target (which is the same as
the input in the unsupervised case).
"""
def __init__(self, encoder, decoder, mapping, loss_fn, mode,
use_adversarial_term=False,
adversarial_lambda=0.,
device=None,
critic_lr=0.001,
embedding_dim=512,
critic_hidden_units=512,
critic_hidden_layers=1,
real_data="input",
fast_gradient_iterative_modification=False,
binary_classifier=None,
fgim_decay=1.0,
fgim_weights=[10e0, 10e1, 10e2, 10e3],
fgim_loss_f=None,
fgim_criterion_f=None,
fgim_start_at_y=False,
fgim_max_steps=30,
emb2emb_additive_noise=False):
"""Constructor method"""
super(Emb2Emb, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.mapping = mapping
self.loss_fn = loss_fn
self.fgim_decay = fgim_decay
self.fgim_loss_f = fgim_loss_f
self.fgim_criterion_f = fgim_criterion_f
self.fgim_start_at_y = fgim_start_at_y
self.fgim_max_steps = fgim_max_steps
self.fgim_weights = fgim_weights
self.change_mode(mode)
self.track_input_output_distance = False
self.use_adversarial_term = use_adversarial_term
self.fast_gradient_iterative_modification = fast_gradient_iterative_modification
self.binary_classifier = binary_classifier
self.total_time_fgim = 0.
self.total_emb2emb_time = 0.
self.total_inference_time = 0.
self.emb2emb_additive_noise = emb2emb_additive_noise
if mode in [MODE_FINETUNEDECODER, MODE_EMB2EMB, MODE_SEQ2SEQFREEZE]:
for p in self.encoder.parameters():
p.requires_grad = False
if mode in [MODE_FINETUNEDECODER]:
for p in self.mapping.parameters():
p.requires_grad = False
if mode in [MODE_EMB2EMB, MODE_SEQ2SEQFREEZE]:
for p in self.decoder.parameters():
p.requires_grad = False
if use_adversarial_term:
hidden = critic_hidden_units
critic_layers = [nn.Linear(embedding_dim,
hidden),
nn.ReLU()]
for _ in range(critic_hidden_layers):
critic_layers.append(nn.Linear(hidden, hidden))
critic_layers.append(nn.ReLU())
critic_layers.append(nn.Linear(hidden, 2))
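            # The critic is kept in a plain Python list, presumably so that it is not
            # registered as a submodule of Emb2Emb; its parameters are handled by the
            # dedicated critic_optimizer and moved to the device explicitly below.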
self.critic = [nn.Sequential(*critic_layers)]
self.real_data = real_data
self.critic_loss = nn.CrossEntropyLoss()
self.critic_optimizer = torch.optim.Adam(
self._get_critic().parameters(), lr=critic_lr)
dev = device
self._get_critic().to(dev)
self.critic_loss.to(dev)
self.adversarial_lambda = adversarial_lambda
def _get_critic(self):
return self.critic[0]
def change_mode(self, new_mode):
if not new_mode in [MODE_EMB2EMB, MODE_SEQ2SEQ, MODE_FINETUNEDECODER, MODE_SEQ2SEQFREEZE]:
raise ValueError("Invalid mode.")
self.mode = new_mode
def _decode(self, output_embeddings, target_batch=None, Y_embeddings=None, batch_lengths=None):
if self.mode == MODE_EMB2EMB or not self.training:
if self.fast_gradient_iterative_modification:
# Fast Gradient Iterative Modification
start_time = time.time()
# configure FGIM
if self.fgim_start_at_y:
starting_point = Y_embeddings
else:
starting_point = output_embeddings
output_embeddings = fast_gradient_iterative_modification(
starting_point, lambda x: self.fgim_loss_f(
x, Y_embeddings), self.fgim_criterion_f,
self.fgim_weights, self.fgim_decay, self.fgim_max_steps)
self.total_time_fgim = self.total_time_fgim + \
(time.time() - start_time)
outputs = self.decoder(output_embeddings, batch_lengths=batch_lengths)
return outputs
elif self.mode in [MODE_SEQ2SEQ, MODE_FINETUNEDECODER, MODE_SEQ2SEQFREEZE]:
outputs, targets = self.decoder(
output_embeddings, target_batch=target_batch)
vocab_size = outputs.size(-1)
outputs = outputs.view(-1, vocab_size)
targets = targets.view(-1)
return outputs, targets
else:
raise ValueError(
"Undefined behavior for encoding in mode " + self.mode)
def _encode(self, S_batch):
if self.mode in [MODE_EMB2EMB, MODE_FINETUNEDECODER, MODE_SEQ2SEQ, MODE_SEQ2SEQFREEZE]:
embeddings, batch_lengths = self.encoder(S_batch)
return embeddings, batch_lengths
else:
raise ValueError(
"Undefined behavior for encoding in mode " + self.mode)
def _train_critic(self, real_embeddings, generated_embeddings):
self._get_critic().train()
# need to detach from the current computation graph, because critic has
# its own computation graph
real_embeddings = real_embeddings.detach().clone()
generated_embeddings = generated_embeddings.detach().clone()
# get predictions from critic
all_embeddings = torch.cat(
[real_embeddings, generated_embeddings], dim=0)
critic_logits = self._get_critic()(all_embeddings)
# compute critic loss
true_labels = torch.ones(
(real_embeddings.shape[0]), device=real_embeddings.device, dtype=torch.long)
false_labels = torch.zeros(
(generated_embeddings.shape[0]), device=generated_embeddings.device, dtype=torch.long)
labels = torch.cat([true_labels, false_labels], dim=0)
loss = self.critic_loss(critic_logits, labels)
# train critic
self.critic_optimizer.zero_grad()
loss.backward()
self.critic_optimizer.step()
return loss
def _test_critic(self, embeddings):
self._get_critic().eval()
# with torch.no_grad():
# do not detach embeddings, because we need to propagate through critic
# within the same computation graph
critic_logits = self._get_critic()(embeddings)
labels = torch.zeros(
(embeddings.shape[0]), device=embeddings.device, dtype=torch.long)
loss = self.critic_loss(critic_logits, labels)
return loss
def _adversarial_training(self, loss, output_embeddings, Y_embeddings):
# train the discriminator
# NOTE: We need to train the discriminator first, because otherwise we would
# backpropagate through the critic after changing it
train_critic_loss = self._train_critic(
Y_embeddings, output_embeddings)
task_loss = loss.clone()
# what does the discriminator say about the predicted output
# embeddings?
critic_loss = self._test_critic(output_embeddings)
        # we want to fool the critic, i.e., we want its loss to be high =>
        # subtract adversarial loss
loss = loss - self.adversarial_lambda * critic_loss
return loss, task_loss, critic_loss, train_critic_loss
def compute_emb2emb(self, Sx_batch, next_x_batch):
# encode input
sent_batch = Sx_batch
if self.emb2emb_additive_noise and next_x_batch:
Sx_batch = additive_noise(
sent_batch=sent_batch,
# Tokenize to get lengths
lengths=[len(self.encoder.model.tokenizer.encode("<SOS>" + s + "<EOS>").ids) for s in Sx_batch],
next_batch=next_x_batch,
)
X_embeddings, batch_lengths = self._encode(Sx_batch)
# mapping step
if not self.training: # measure the time it takes to run through mapping, but only at inference time
s_time = time.time()
output_embeddings = self.mapping(X_embeddings)
if not self.training:
self.total_emb2emb_time = self.total_emb2emb_time + \
(time.time() - s_time)
return output_embeddings, X_embeddings, batch_lengths
def compute_loss(self, output_embeddings, Y_embeddings):
loss = self.loss_fn(output_embeddings, Y_embeddings)
if self.use_adversarial_term:
if self.real_data == "input":
real_data = Y_embeddings
else:
                # otherwise, sample "real" sentences from the configured corpus and
                # encode them; _encode returns (embeddings, batch_lengths)
                real_data, _ = self._encode(
                    choices(self.real_data, k=Y_embeddings.size(0)))
return self._adversarial_training(loss, output_embeddings, real_data)
return loss
def forward(self, Sx_batch, Sy_batch, next_x_batch=None):
"""
Propagates through the mapping framework. Takes as input two lists of
texts corresponding to the input and outputs. Returns loss (single scalar)
if in training mode, otherwise returns texts.
"""
# measure inference time it takes
if not self.training:
s_time = time.time()
output_embeddings, X_embeddings, batch_lengths = self.compute_emb2emb(Sx_batch, next_x_batch)
if self.training:
# compute loss depending on the mode
if self.mode == MODE_EMB2EMB:
Y_embeddings, _ = self._encode(Sy_batch)
loss = self.compute_loss(output_embeddings, Y_embeddings)
if self.use_adversarial_term:
loss, task_loss, critic_loss, train_critic_loss = loss
if self.track_input_output_distance:
input_output_distance = self.loss_fn(
X_embeddings, Y_embeddings)
print(input_output_distance)
elif self.mode in [MODE_SEQ2SEQ, MODE_FINETUNEDECODER, MODE_SEQ2SEQFREEZE]:
# for training with CE
outputs, targets = self._decode(
output_embeddings, target_batch=Sy_batch)
loss = self.loss_fn(outputs, targets)
if self.use_adversarial_term:
return loss, task_loss, critic_loss, train_critic_loss
else:
return loss
else:
# return textual output
out = self._decode(output_embeddings, Y_embeddings=X_embeddings, batch_lengths=batch_lengths)
self.total_inference_time = self.total_inference_time + \
(time.time() - s_time)
return out
```
|
{
"source": "jczhang/2dglasses",
"score": 3
}
|
#### File: 2dglasses/tools/classify.py
```python
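# Usage string inferred from the argument handling in command_line() below;
# print(__doc__) falls back to this docstring when no image paths are given.
"""Classify images as 2D or 3D with the bootstrapped Caffe model.

Usage: classify.py IMAGE [IMAGE ...]
"""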
from __future__ import print_function
import numpy as np
import sys
import os
# insert pycaffe into pythonpath
caffe_path = os.path.abspath(os.getenv('CAFFE'))
pycaffe_path = caffe_path + '/python'
if pycaffe_path not in sys.path:
sys.path.insert(0, pycaffe_path)
# suppress wall of text logging
os.environ['GLOG_minloglevel'] = '2'
import caffe
from caffe.proto import caffe_pb2
IM_MEAN_PATH = './data/val_mean.binaryproto'
DEPLOY_PATH = './models/bootstrap/deploy.prototxt'
CAFFEMODEL_PATH = './snapshots/bootstrap/train_iter_10000.caffemodel'
def build_net(im_mean_path, deploy_path, caffemodel_path, mode=caffe.TEST):
caffe.set_mode_cpu()
net = caffe.Net(deploy_path, caffemodel_path, mode)
im_mean_blob = caffe_pb2.BlobProto()
with open(im_mean_path, 'rb') as im_mean_file:
data = im_mean_file.read()
im_mean_blob.ParseFromString(data)
im_mean = np.squeeze(caffe.io.blobproto_to_array(im_mean_blob), axis=(0,))
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2, 0, 1))
transformer.set_mean('data', im_mean.mean(1).mean(1))
transformer.set_raw_scale('data', 255)
transformer.set_channel_swap('data', (2, 1, 0))
net.blobs['data'].reshape(1, 3, 227, 227)
return (net, transformer)
def classify_image(net, transformer, im):
net.blobs['data'].data[...] = transformer.preprocess('data', im)
p = net.forward()['prob']
return (p[0][0], p[0][1])
def command_line(images):
net, transformer = build_net(IM_MEAN_PATH, DEPLOY_PATH, CAFFEMODEL_PATH)
    for image_path in images:
im = caffe.io.load_image(image_path)
p_0, p_1 = classify_image(net, transformer, im)
print("{} [2d: {}, 3d: {}]".format(image_path, p_0, p_1))
if __name__ == '__main__':
if len(sys.argv) < 2:
print(__doc__)
else:
command_line(sys.argv[1:])
```
|
{
"source": "jczhao001/winkDetection",
"score": 3
}
|
#### File: jczhao001/winkDetection/test.py
```python
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import modle1
import os
import time
def get_image(test):
file = os.listdir(test)
n = len(file)
ind = np.random.randint(0,n)
imgdir = os.path.join(test,file[ind])
image = Image.open(imgdir)
plt.imshow(image)
plt.show()
image = image.resize([80,80])
image = np.array(image)
return image
def evalu_image():
train_dir_path = "E:/test/eyeclt/2/test/"
image_array = get_image(train_dir_path)
with tf.Graph().as_default():
BATCH_SIZE = 1
N_CLASSES = 2
image = tf.cast(image_array,tf.float32)
image = tf.image.per_image_standardization(image)
image = tf.reshape(image,[1,80,80,3])
logit = modle1.inference(image,BATCH_SIZE,N_CLASSES)
logit = tf.nn.softmax(logit)
logs_train_dir = "E:/test/eyelid/data/logs/"
saver = tf.train.Saver()
with tf.Session() as sess:
print("reading...")
ckpt = tf.train.get_checkpoint_state(logs_train_dir)
if ckpt and ckpt.model_checkpoint_path:
                globals_step = ckpt.model_checkpoint_path.split("/")[-1].split("-")[-1]
saver.restore(sess,ckpt.model_checkpoint_path)
print("loading success,global_step %s " % globals_step)
else:
print("No found.")
start = time.time()
            # the input image is already part of the graph as a constant, so no feed is needed
            prediction = sess.run(logit)
max_index = np.argmax(prediction)
if max_index == 0:
print("this is close: %.6f "% prediction[:,0])
else:
print("this is a open: %.6f" % prediction[:,1])
end = time.time()
print("time:%.8f" % (end - start))
evalu_image()
```
#### File: winkDetection/venv/train2.py
```python
import os
import numpy as np
import tensorflow as tf
import input1
import modle1
N_CLASSES = 2
IMG_H = 28
IMG_W = 28
BATCH_SIZE = 30
CAPACITY = 2000
MAX_STEP = 1500
learning_rate = 0.00008
def training():
train_dir_path = "E:/test/eyecld/0/0.1/"
logs_train_dir_path = "E:/test/eyelid/data/logs/"
train,train_label = input1.getFile(train_dir_path)
train_batch,train_label_batch = input1.getBatch(train,
train_label,
IMG_W,
IMG_H,
BATCH_SIZE,
CAPACITY
)
train_logits = modle1.inference(train_batch,BATCH_SIZE,N_CLASSES)
train_loss = modle1.losses(train_logits,train_label_batch)
train_op = modle1.train(train_loss,learning_rate)
train_acc = modle1.evalution(train_logits,train_label_batch)
summary_op = tf.summary.merge_all()
sess = tf.Session()
train_writer = tf.summary.FileWriter(logs_train_dir_path,sess.graph)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess,coord=coord)
try:
for step in np.arange(MAX_STEP):
if coord.should_stop():
break
_,tra_loss,tra_cc = sess.run([train_op,train_loss,train_acc])
            if step % 100 == 0:
                print("step %d, train loss = %.2f, train acc = %.2f" % (step, tra_loss, tra_cc))
summary_str = sess.run(summary_op)
train_writer.add_summary(summary_str,step)
if step % 2000 == 0 or (step + 1) == MAX_STEP:
checkpoint_path = os.path.join(logs_train_dir_path,"model.ckpt")
saver.save(sess,checkpoint_path,global_step=step)
except tf.errors.OutOfRangeError:
print("Done--limit reached.")
finally:
coord.request_stop()
coord.join(threads)
sess.close()
training()
```
|
{
"source": "jczic/BLEAdvReader",
"score": 2
}
|
#### File: jczic/BLEAdvReader/bleAdvReader.py
```python
from ustruct import pack, unpack
from ubinascii import hexlify
class BLEAdvReader :
# ============================================================================
# ===( Constants )============================================================
# ============================================================================
DATA_TYPE_FLAGS = 0x01
DATA_TYPE_INCOMP_16BITS_UUIDS = 0x02
DATA_TYPE_COMP_16BITS_UUIDS = 0x03
DATA_TYPE_INCOMP_32BITS_UUIDS = 0x04
DATA_TYPE_COMP_32BITS_UUIDS = 0x05
DATA_TYPE_INCOMP_128BITS_UUIDS = 0x06
DATA_TYPE_COMP_128BITS_UUIDS = 0x07
DATA_TYPE_SHORT_NAME = 0x08
DATA_TYPE_COMPLETE_NAME = 0x09
DATA_TYPE_TX_POWER_LEVEL = 0x0A
DATA_TYPE_DEVICE_CLASS = 0x0B
DATA_TYPE_SMP_PAIR_HASH_C = 0x0C
DATA_TYPE_SMP_PAIR_HASH_C192 = 0x0D
DATA_TYPE_SMP_PAIR_RAND_R = 0x0E
DATA_TYPE_SMP_PAIR_RAND_R192 = 0x0F
DATA_TYPE_DEVICE_ID = 0x10
DATA_TYPE_SECU_MNGR_TK_VAL = 0x11
DATA_TYPE_SECU_MNGR_OOB_FLAGS = 0x12
DATA_TYPE_SLAVE_CONN_INT_RNG = 0x13
DATA_TYPE_16BITS_SVC_SOL_UUIDS = 0x14
DATA_TYPE_128BITS_SVC_SOL_UUIDS = 0x15
DATA_TYPE_SVC_DATA = 0x16
DATA_TYPE_SVC_DATA_16BITS_UUID = 0x17
DATA_TYPE_PUB_TARGET_ADDR = 0x18
DATA_TYPE_RAND_TARGET_ADDR = 0x19
DATA_TYPE_APPEARANCE = 0x1A
DATA_TYPE_ADV_INT = 0x1B
DATA_TYPE_LE_BLT_DEVICE_ADDR = 0x1C
DATA_TYPE_LE_ROLE = 0x1D
DATA_TYPE_SMP_PAIR_HASH_C256 = 0x1E
DATA_TYPE_SMP_PAIR_RAND_R256 = 0x1F
DATA_TYPE_32BITS_SVC_SOL_UUIDS = 0x20
DATA_TYPE_SVC_DATA_32BITS_UUID = 0x21
DATA_TYPE_SVC_DATA_128BITS_UUID = 0x22
DATA_TYPE_LE_SECU_CONN_RAND_VAL = 0x23
DATA_TYPE_URI = 0x24
DATA_TYPE_INDOOR_POS = 0x25
DATA_TYPE_TRANS_DISCOV_DATA = 0x26
DATA_TYPE_LE_SUPPORT_FEAT = 0x27
DATA_TYPE_CHAN_MAP_UPD_INDIC = 0x28
DATA_TYPE_PB_ADV = 0x29
DATA_TYPE_MESH_MSG = 0x2A
DATA_TYPE_MESH_BEACON = 0x2B
DATA_TYPE_3D_INFO_DATA = 0x3D
DATA_TYPE_MANUFACTURER_DATA = 0xFF
MEMBER_UUID_GOOGLE_EDDYSTONE = 0xFEAA
COMPANY_ID_APPLE = 0x004C
APPLE_TYPE_IBEACON = 0x02
APPLE_TYPE_AIRDROP = 0x05
APPLE_TYPE_AIRPODS = 0x07
APPLE_TYPE_AIRPLAY_DEST = 0x09
APPLE_TYPE_AIRPLAY_SRC = 0x0A
APPLE_TYPE_HANDOFF = 0x0C
APPLE_TYPE_NEARBY = 0x10
# ============================================================================
# ===( Class InvalidAdvData )=================================================
# ============================================================================
class InvalidAdvData(Exception) :
pass
# ============================================================================
# ===( Constructor )==========================================================
# ============================================================================
def __init__(self, advertisingData) :
self._advData = dict()
self._advObj = [ ]
self._advDataProcess(advertisingData)
self._advDataElementsProcess()
self._advKnownElementsProcess()
# ============================================================================
# ===( Functions )============================================================
# ============================================================================
@staticmethod
def _hex(data) :
if data :
return hexlify(data).decode().upper()
return ''
# ----------------------------------------------------------------------------
@staticmethod
def _twosComp(val, bits) :
if val < 2**bits :
return val - int((val << 1) & 2**bits)
raise ValueError('Value %s out of range of %s-bit value.' % (val, bits))
# ----------------------------------------------------------------------------
@staticmethod
def _accum88(data16b) :
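        # signed 8.8 fixed-point value (big-endian), e.g. the Eddystone TLM temperature field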
if isinstance(data16b, bytes) and len(data16b) == 2 :
return BLEAdvReader._twosComp(data16b[0], 8) + \
BLEAdvReader._twosComp(data16b[1], 16) / 256
raise ValueError('%s is not a 16 bits data value.' % data16b)
# ----------------------------------------------------------------------------
@staticmethod
def _128bitsUUID(uuidBytes) :
if uuidBytes and len(uuidBytes) == 16 :
s = hexlify(uuidBytes).decode()
return s[:8] + '-' + s[8:12] + '-' + s[12:16] + '-' + s[16:20] + '-' + s[20:]
return ''
# ----------------------------------------------------------------------------
@staticmethod
def _decodeURIBeacon(data) :
schemes = {
0x00 : 'http://www.', 0x01 : 'https://www.',
0x02 : 'http://', 0x03 : 'https://'
}
expansions = {
0x00 : '.com/', 0x01 : '.org/', 0x02 : '.edu/', 0x03 : '.net/',
0x04 : '.info/', 0x05 : '.biz/', 0x06 : '.gov/', 0x07 : '.com',
0x08 : '.org', 0x09 : '.edu', 0x0A : '.net',
0x0B : '.info', 0x0C : '.biz', 0x0D : '.gov'
}
try :
url = schemes[data[0]]
for b in data[1:] :
url += expansions[b] if b in expansions else chr(b)
return url
except :
return ''
# ----------------------------------------------------------------------------
def _advDataProcess(self, advData) :
if advData :
advDataLen = len(advData)
idx = 0
while idx < advDataLen :
dataLen = advData[idx]
idx += 1
if dataLen > 0 :
idxEnd = idx + dataLen
                    if idxEnd <= advDataLen :
dataType = advData[idx]
data = advData[idx+1:idxEnd]
self._advData[dataType] = data
else :
raise self.InvalidAdvData('Data element invalid size')
idx = idxEnd
# ----------------------------------------------------------------------------
def _advDataElementsProcess(self) :
if not self._advData :
raise self.InvalidAdvData('No advertising data element')
for dataType in self._advData :
data = self._advData[dataType]
advObj = None
if dataType == self.DATA_TYPE_FLAGS :
try :
advObj = self.Flags(ord(data))
except :
raise self.InvalidAdvData('Invalid flags data element')
elif dataType == self.DATA_TYPE_COMP_16BITS_UUIDS :
try :
advObj = self.AdoptedService16bits(unpack('<H', data)[0])
except :
raise self.InvalidAdvData('Invalid adopted service 16bits data element')
elif dataType == self.DATA_TYPE_COMP_32BITS_UUIDS :
try :
advObj = self.AdoptedService32bits(unpack('<I', data)[0])
except :
raise self.InvalidAdvData('Invalid adopted service 32bits data element')
elif dataType == self.DATA_TYPE_COMP_128BITS_UUIDS :
try :
advObj = self.AdoptedService128bits(data)
except :
raise self.InvalidAdvData('Invalid adopted service 128bits data element')
elif dataType == self.DATA_TYPE_SHORT_NAME :
try :
advObj = self.ShortName(data.decode())
except :
raise self.InvalidAdvData('Invalid short name data element')
elif dataType == self.DATA_TYPE_COMPLETE_NAME :
try :
advObj = self.CompleteName(data.decode())
except :
raise self.InvalidAdvData('Invalid complete name data element')
elif dataType == self.DATA_TYPE_TX_POWER_LEVEL :
try :
advObj = self.TXPowerLevel(unpack('<b', data)[0])
except :
raise self.InvalidAdvData('Invalid TX power level data element')
elif dataType == self.DATA_TYPE_SVC_DATA :
try :
advObj = self.ServiceData(unpack('<H', data[0:2])[0], data[2:])
except :
raise self.InvalidAdvData('Invalid service data element')
elif dataType == self.DATA_TYPE_MANUFACTURER_DATA :
try :
advObj = self.ManufacturerData(unpack('<H', data[0:2])[0], data[2:])
except :
raise self.InvalidAdvData('Invalid manufacturer data element')
if advObj :
self._advObj.append(advObj)
# ----------------------------------------------------------------------------
def _advKnownElementsProcess(self) :
for advObj in self._advObj :
if isinstance(advObj, self.AdoptedService16bits) :
if advObj.UUID == self.MEMBER_UUID_GOOGLE_EDDYSTONE :
try :
advObjToAdd = None
for ao in self._advObj :
if isinstance(ao, self.ServiceData) and \
ao.UUID == self.MEMBER_UUID_GOOGLE_EDDYSTONE :
advObjToAdd = self._getAdvObjForGoogleEddyStoneData(ao.Data)
break
if advObjToAdd :
self._advObj.append(advObjToAdd)
else :
raise Exception()
except :
raise self.InvalidAdvData('Invalid Google EddyStone data')
elif isinstance(advObj, self.ManufacturerData) :
if advObj.CompanyID == self.COMPANY_ID_APPLE :
try :
advObjToAdd = self._getAdvObjForAppleCompanyData(advObj.Data)
if advObjToAdd :
self._advObj.append(advObjToAdd)
else :
raise Exception()
except :
raise self.InvalidAdvData('Invalid Apple manufacturer data')
# ----------------------------------------------------------------------------
def _getAdvObjForAppleCompanyData(self, data) :
appleType = data[0]
dataLen = data[1]
data = data[2:]
if appleType == self.APPLE_TYPE_IBEACON :
return self.AppleIBeacon( data[:16],
unpack('>H', data[16:18])[0],
unpack('>H', data[18:20])[0],
                                      unpack('<b', bytes([data[20]]))[0] )
elif appleType == self.APPLE_TYPE_AIRDROP :
return self.AppleService('AirDrop', data)
elif appleType == self.APPLE_TYPE_AIRPODS :
return self.AppleService('AirPods', data)
elif appleType == self.APPLE_TYPE_AIRPLAY_DEST :
return self.AppleService('AirPlay Destination', data)
elif appleType == self.APPLE_TYPE_AIRPLAY_SRC :
return self.AppleService('AirPlay Source', data)
elif appleType == self.APPLE_TYPE_HANDOFF :
return self.AppleService('HandOff', data)
elif appleType == self.APPLE_TYPE_NEARBY :
return self.AppleService('Nearby', data)
return self.AppleService()
# ----------------------------------------------------------------------------
def _getAdvObjForGoogleEddyStoneData(self, data) :
frameType = data[0]
if frameType == 0x00 :
txPower = unpack('<b', bytes([data[1]]))[0]
namespace = data[2:12]
instance = data[12:18]
return self.EddyStoneUID(txPower, namespace, instance)
elif frameType == 0x10 :
txPower = unpack('<b', bytes([data[1]]))[0]
url = self._decodeURIBeacon(data[2:])
return self.EddyStoneURL(txPower, url)
elif frameType == 0x20 :
version = data[1]
if version == 0x00 :
vbatt = unpack('>H', data[2:4])[0]
temp = BLEAdvReader._accum88(data[4:6])
advCnt = unpack('>I', data[6:10])[0]
secCnt = unpack('>I', data[10:14])[0]
return self.EddyStoneTLMUnencrypted(vbatt, temp, advCnt, secCnt)
elif version == 0x01 :
etlm = data[2:14]
salt = unpack('>H', data[14:16])[0]
mic = unpack('>H', data[16:18])[0]
return self.EddyStoneTLMEncrypted(etlm, salt, mic)
elif frameType == 0x30 :
txPower = unpack('<b', bytes([data[1]]))[0]
encryptedID = data[2:10]
return self.EddyStoneEID(txPower, encryptedID)
return None
# ----------------------------------------------------------------------------
def GetDataByDataType(self, dataType) :
return self._advData.get(dataType)
# ----------------------------------------------------------------------------
def GetAllElements(self) :
return self._advObj
# ----------------------------------------------------------------------------
def GetElementByClass(self, elementType) :
for advObj in self._advObj :
if isinstance(advObj, elementType) :
return advObj
return None
# ============================================================================
# ===( Class Flags )==========================================================
# ============================================================================
class Flags :
FLAG_LE_LIMITED_DISC_MODE = 0x01
FLAG_LE_GENERAL_DISC_MODE = 0x02
FLAG_BR_EDR_NOT_SUPPORTED = 0x04
FLAG_LE_BR_EDR_CONTROLLER = 0x08
FLAG_LE_BR_EDR_HOST = 0x10
FLAGS_LE_ONLY_LIMITED_DISC_MODE = 0x01 | 0x04
FLAGS_LE_ONLY_GENERAL_DISC_MODE = 0x02 | 0x04
def __init__(self, flags=0x00) :
self._flags = flags
def __str__(self) :
return '{0:08b}'.format(self._flags)
@property
def LE_LIMITED_DISC_MODE(self) :
return bool(self._flags & self.FLAG_LE_LIMITED_DISC_MODE)
@property
def LE_GENERAL_DISC_MODE(self) :
return bool(self._flags & self.FLAG_LE_GENERAL_DISC_MODE)
@property
def BR_EDR_NOT_SUPPORTED(self) :
return bool(self._flags & self.FLAG_BR_EDR_NOT_SUPPORTED)
@property
def LE_BR_EDR_CONTROLLER(self) :
return bool(self._flags & self.FLAG_LE_BR_EDR_CONTROLLER)
@property
def LE_BR_EDR_HOST(self) :
return bool(self._flags & self.FLAG_LE_BR_EDR_HOST)
@property
def LE_ONLY_LIMITED_DISC_MODE(self) :
return bool(self._flags & self.FLAGS_LE_ONLY_LIMITED_DISC_MODE)
@property
def LE_ONLY_GENERAL_DISC_MODE(self) :
return bool(self._flags & self.FLAGS_LE_ONLY_GENERAL_DISC_MODE)
# ============================================================================
# ===( Class AdoptedService16bits )===========================================
# ============================================================================
class AdoptedService16bits :
def __init__(self, svcUUID=0x0000) :
self._svcUUID = svcUUID
def __str__(self) :
return 'Adopted Service (16bits UUID=%s)' % self.StrUUID
@property
def UUID(self) :
return self._svcUUID
@property
def StrUUID(self) :
return BLEAdvReader._hex(pack('<H', self._svcUUID))
# ============================================================================
# ===( Class AdoptedService32bits )===========================================
# ============================================================================
class AdoptedService32bits :
def __init__(self, svcUUID=0x00000000) :
self._svcUUID = svcUUID
def __str__(self) :
return 'Adopted Service (32bits UUID=%s)' % self.StrUUID
@property
def UUID(self) :
return self._svcUUID
@property
def StrUUID(self) :
return BLEAdvReader._hex(pack('<I', self._svcUUID))
# ============================================================================
# ===( Class AdoptedService128bits )==========================================
# ============================================================================
class AdoptedService128bits :
def __init__(self, svcUUID=b'') :
self._svcUUID = svcUUID
def __str__(self) :
return 'Adopted Service (128bits UUID=%s)' % self.StrUUID
@property
def UUID(self) :
return self._svcUUID
@property
def StrUUID(self) :
return BLEAdvReader._128bitsUUID(self._svcUUID)
# ============================================================================
# ===( Class ShortName )======================================================
# ============================================================================
class ShortName :
def __init__(self, shortName='') :
self._shortName = shortName
def __str__(self) :
return self._shortName
# ============================================================================
# ===( Class CompleteName )===================================================
# ============================================================================
class CompleteName :
def __init__(self, completeName='') :
self._completeName = completeName
def __str__(self) :
return self._completeName
# ============================================================================
# ===( Class TXPowerLevel )===================================================
# ============================================================================
class TXPowerLevel :
def __init__(self, txPowerLvl=0) :
self._txPowerLvl= txPowerLvl
def __str__(self) :
return '%sdBm' % self._txPowerLvl
def GetProximityByLogTX(self, rssi, n_PathLossExp=2) :
return BLEAdvReader.ProximityHelper. \
LogTX(rssi, self._txPowerLvl, n_PathLossExp)
def GetProximityByOldBconTX(self, rssi) :
return BLEAdvReader.ProximityHelper. \
OldBconTX(rssi, self._txPowerLvl)
def GetProximityByNewBconTX(self, rssi) :
return BLEAdvReader.ProximityHelper. \
NewBconTX(rssi, self._txPowerLvl)
@property
def DBM(self) :
return self._txPowerLvl
# ============================================================================
# ===( Class ServiceData )====================================================
# ============================================================================
class ServiceData :
def __init__(self, uuid16bits=0x0000, data=b'') :
self._uuid16bits = uuid16bits
self._data = data
def __str__(self) :
return 'Service data of UUID %s (%s)' % ( self.StrUUID,
BLEAdvReader._hex(self._data) )
@property
def UUID(self) :
return self._uuid16bits
@property
def StrUUID(self) :
return BLEAdvReader._hex(pack('<H', self._uuid16bits))
@property
def Data(self) :
return self._data
# ============================================================================
# ===( Class ManufacturerData )===============================================
# ============================================================================
class ManufacturerData :
def __init__(self, companyID=0x0000, data=b'') :
self._companyID = companyID
self._data = data
def __str__(self) :
return 'Manufacturer data from company %s (%s)' % ( self.StrCompanyID,
BLEAdvReader._hex(self._data) )
@property
def CompanyID(self) :
return self._companyID
@property
def StrCompanyID(self) :
return BLEAdvReader._hex(pack('<H', self._companyID))
@property
def Data(self) :
return self._data
# ============================================================================
# ===( Class AppleService )===================================================
# ============================================================================
class AppleService :
def __init__(self, typeName='', data=b'') :
self._typeName = typeName
self._data = data
def __str__(self) :
if self._typeName :
return 'Apple Service %s (%s)' % ( self._typeName,
BLEAdvReader._hex(self._data) )
return 'Unknown Apple Service'
@property
def TypeName(self) :
return self._typeName
@property
def Data(self) :
return self._data
# ============================================================================
# ===( Class AppleIBeacon )===================================================
# ============================================================================
class AppleIBeacon :
def __init__(self, uuid=None, major=0, minor=0, txPower=-1) :
self._uuid = uuid
self._major = major
self._minor = minor
self._txPower = txPower
def __str__(self) :
return 'Apple iBeacon %s, %s.%s, %sdBm' % ( self.StrUUID,
self._major,
self._minor,
self._txPower )
def GetProximityByLogTX(self, rssi, n_PathLossExp=2) :
return BLEAdvReader.ProximityHelper. \
LogTX(rssi, self._txPower, n_PathLossExp)
def GetProximityByOldBconTX(self, rssi) :
return BLEAdvReader.ProximityHelper. \
OldBconTX(rssi, self._txPower)
def GetProximityByNewBconTX(self, rssi) :
return BLEAdvReader.ProximityHelper. \
NewBconTX(rssi, self._txPower)
@property
def UUID(self) :
return self._uuid
@property
def StrUUID(self) :
return BLEAdvReader._128bitsUUID(self._uuid)
@property
def Major(self) :
return self._major
@property
def Minor(self) :
return self._minor
@property
def TxPower(self) :
return self._txPower
# ============================================================================
# ===( Class EddyStoneUID )===================================================
# ============================================================================
class EddyStoneUID :
def __init__(self, txPower=-1, namespace=b'', instance=b'') :
self._txPower = txPower
self._namespace = namespace
self._instance = instance
def __str__(self) :
return 'EddyStone UID %sdBm, %s, %s' % ( self._txPower,
BLEAdvReader._hex(self._namespace),
BLEAdvReader._hex(self._instance) )
def GetProximityByLogTX(self, rssi, n_PathLossExp=2) :
return BLEAdvReader.ProximityHelper. \
LogTX(rssi, self._txPower, n_PathLossExp)
def GetProximityByOldBconTX(self, rssi) :
return BLEAdvReader.ProximityHelper. \
OldBconTX(rssi, self._txPower)
def GetProximityByNewBconTX(self, rssi) :
return BLEAdvReader.ProximityHelper. \
NewBconTX(rssi, self._txPower)
@property
def TxPower(self) :
return self._txPower
@property
def Namespace(self) :
return self._namespace
@property
def Instance(self) :
return self._instance
# ============================================================================
# ===( Class EddyStoneURL )===================================================
# ============================================================================
class EddyStoneURL :
def __init__(self, txPower=-1, url='') :
self._txPower = txPower
self._url = url
def __str__(self) :
return 'EddyStone URL %sdBm, %s' % (self._txPower, self._url)
def GetProximityByLogTX(self, rssi, n_PathLossExp=2) :
return BLEAdvReader.ProximityHelper. \
LogTX(rssi, self._txPower, n_PathLossExp)
def GetProximityByOldBconTX(self, rssi) :
return BLEAdvReader.ProximityHelper. \
OldBconTX(rssi, self._txPower)
def GetProximityByNewBconTX(self, rssi) :
return BLEAdvReader.ProximityHelper. \
NewBconTX(rssi, self._txPower)
@property
def TxPower(self) :
return self._txPower
@property
def URL(self) :
return self._url
# ============================================================================
# ===( Class EddyStoneTLMUnencrypted )==========================================
# ============================================================================
class EddyStoneTLMUnencrypted :
def __init__(self, vbatt=0, temp=0, advCnt=0, secCnt=0) :
self._vbatt = vbatt
self._temp = temp
self._advCnt = advCnt
self._secCnt = secCnt
def __str__(self) :
return 'EddyStone TLM Unencrypted %smV, %s°C, %s, %s' % ( self._vbatt,
self._temp,
self._advCnt,
self._secCnt )
@property
def VBatt(self) :
return self._vbatt
@property
def Temp(self) :
return self._temp
@property
def AdvCnt(self) :
return self._advCnt
@property
def SecCnt(self) :
return self._secCnt
# ============================================================================
# ===( Class EddyStoneTLMEncrypted )==========================================
# ============================================================================
class EddyStoneTLMEncrypted :
def __init__(self, etlm=b'', salt=0, mic=0) :
self._etlm = etlm
self._salt = salt
self._mic = mic
def __str__(self) :
return 'EddyStone TLM Encrypted %s, %s, %s' % ( BLEAdvReader._hex(self._etlm),
self._salt,
self._mic )
@property
def ETLM(self) :
return self._etlm
@property
def SALT(self) :
return self._salt
@property
def MIC(self) :
return self._mic
# ============================================================================
# ===( Class EddyStoneEID )===================================================
# ============================================================================
class EddyStoneEID :
def __init__(self, txPower=-1, encryptedID=b'') :
self._txPower = txPower
self._encryptedID = encryptedID
def __str__(self) :
return 'EddyStone EID %sdBm, %s' % ( self._txPower,
BLEAdvReader._hex(self._encryptedID) )
def GetProximityByLogTX(self, rssi, n_PathLossExp=2) :
return BLEAdvReader.ProximityHelper. \
LogTX(rssi, self._txPower, n_PathLossExp)
def GetProximityByOldBconTX(self, rssi) :
return BLEAdvReader.ProximityHelper. \
OldBconTX(rssi, self._txPower)
def GetProximityByNewBconTX(self, rssi) :
return BLEAdvReader.ProximityHelper. \
NewBconTX(rssi, self._txPower)
@property
def TxPower(self) :
return self._txPower
@property
def EncryptedID(self) :
return self._encryptedID
# ============================================================================
# ===( Class ProximityHelper )================================================
# ============================================================================
class ProximityHelper :
@staticmethod
def _txFormula(A, B, C, r, t) :
return A * ( (r/t) ** B ) + C
@staticmethod
def LogTX(rssi, rssiTX, n_PathLossExp=2) :
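            # log-distance path loss model: distance = 10 ** ((rssiTX - rssi) / (10 * n_PathLossExp))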
return 10.0 ** ( (rssi-rssiTX) / (-10*n_PathLossExp) )
@staticmethod
def OldBconTX(rssi, rssiTX) :
return BLEAdvReader.ProximityHelper. \
_txFormula(0.89976, 7.7095, 0.111, rssi, rssiTX)
@staticmethod
def NewBconTX(rssi, rssiTX) :
return BLEAdvReader.ProximityHelper. \
_txFormula(0.42093, 6.9476, 0.54992, rssi, rssiTX)
# ============================================================================
# ============================================================================
# ============================================================================
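# --- usage sketch (illustrative only, values are made up) ---
# The raw advertising payload is a sequence of AD structures
# (length byte, AD type byte, data bytes):
#
#   advData = bytes([2, 0x01, 0x06]) + bytes([5, 0x09]) + b'Test' + b'\x00'
#   reader  = BLEAdvReader(advData)
#   for elt in reader.GetAllElements() :
#       print(elt)
#   name = reader.GetElementByClass(BLEAdvReader.CompleteName)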
```
|
{
"source": "jczic/IoTSocket-Concentrator",
"score": 2
}
|
#### File: jczic/IoTSocket-Concentrator/iotSocketRouter.py
```python
from iotSocketStruct import IoTSocketStruct
from secrets import randbelow, token_bytes
from _thread import allocate_lock
from binascii import hexlify, unhexlify
from threading import Timer
from time import time
from datetime import datetime
import hmac
import hashlib
import json
class IoTSocketRouter :
def __init__(self, aclFilename, centralAuthKey, keepSessionSec) :
self._aclFilename = aclFilename
self._centralAuthKey = centralAuthKey
self._centralAuthKeyHex = hexlify(centralAuthKey).decode()
self._keepSessionSec = keepSessionSec
self._centralSession = None
self._groups = { }
self._acl = { }
self._objectsSessions = { }
self._keepSessionsData = { }
self._centralHTTPRequests = { }
self._telemetryTokens = { }
self._onGetWebHookRequest = None
self._onGetWebHookTelemetry = None
self._lock = allocate_lock()
self._processing = True
self._startTimerCheck()
self.Log('ROUTER > STARTED')
def _startTimerCheck(self) :
Timer(1, self._timerCheckSeconds).start()
def _timerCheckSeconds(self) :
nowSec = time()
with self._lock :
if self._keepSessionsData :
for uid in list(self._keepSessionsData) :
if nowSec >= self._keepSessionsData[uid][1] :
del self._keepSessionsData[uid]
if self._centralHTTPRequests :
for trackingNbr in list(self._centralHTTPRequests) :
httpReq, exp = self._centralHTTPRequests[trackingNbr]
if exp and nowSec >= exp :
del self._centralHTTPRequests[trackingNbr]
httpReq.SendResponseErrTimeout()
self.Log('HTTPS REQUEST TIMEOUT (#%s)' % trackingNbr)
if self._telemetryTokens :
for token in list(self._telemetryTokens) :
uid, exp = self._telemetryTokens[token]
if exp and nowSec >= exp :
del self._telemetryTokens[token]
self.Log( 'TELEMETRY TOKEN EXPIRED (%s)' %
self.TelemetryTokenToStr(token) )
for uid in self._objectsSessions :
self._objectsSessions[uid].CheckRequestsTimeout(nowSec)
if self._processing :
self._startTimerCheck()
def Stop(self) :
self._processing = False
def Log(self, line) :
dt = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
print('[%s] %s' % (dt, str(line)))
def AddGroup(self, groupName, options={ }) :
if groupName and type(options) is dict :
groupID = IoTSocketStruct.GroupNameToBin128(groupName)
if groupID :
self._groups[groupID] = options
return True
return False
def GetGroupOption(self, groupID, optName) :
options = self._groups.get(groupID, None)
if options :
return options.get(optName, None)
return None
def ClearACL(self) :
with self._lock :
self._acl.clear()
def AddACLAccess(self, groupID, uid, authKey) :
with self._lock :
if groupID in self._groups :
self._acl[uid] = (groupID, authKey)
return True
return False
def SaveACL(self) :
try :
o = { }
with self._lock :
for uid in self._acl :
o[IoTSocketStruct.UIDFromBin128(uid)] = {
"GroupName" : IoTSocketStruct.GroupNameFromBin128(self._acl[uid][0]),
"AuthKey" : hexlify(self._acl[uid][1]).decode()
}
with open(self._aclFilename, 'wb') as file :
file.write(json.dumps(o).encode('UTF-8'))
return True
except :
return False
def LoadACL(self) :
try :
with open(self._aclFilename, 'r') as file :
o = json.load(file)
acl = { }
for strUID in o :
uid = IoTSocketStruct.UIDToBin128(strUID)
groupID = IoTSocketStruct.GroupNameToBin128(o[strUID]["GroupName"])
authKey = unhexlify(o[strUID]["AuthKey"])
if not uid or not groupID or len(authKey) != 16 or \
not groupID in self._groups :
return False
acl[uid] = (groupID, authKey)
with self._lock :
self._acl = acl
return True
except :
return False
def GetACLAccess(self, uid) :
return self._acl.get(uid, (None, None))
def CheckCentralAuthKey(self, authKey) :
return (authKey == self._centralAuthKey)
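    # Challenge/response authentication: the peer proves knowledge of its 128-bit
    # authKey by returning HMAC-SHA256(authKey, token128) over the token issued by
    # the server; hmac.compare_digest() avoids leaking timing information.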
def AuthenticateSession(self, session, token128, hmac256) :
if session.UID == IoTSocketStruct.CENTRAL_EMPTY_UID :
central = True
authKey = self._centralAuthKey
else :
groupID, authKey = self.GetACLAccess(session.UID)
central = False
if authKey :
hmac256srv = hmac.new(authKey, token128, hashlib.sha256).digest()
if hmac.compare_digest(hmac256, hmac256srv) :
if central :
if self._centralSession :
self._centralSession.Close()
self._centralSession = session
else :
existingSession = self._objectsSessions.get(session.UID, None)
if existingSession :
existingSession.Close()
self._objectsSessions[session.UID] = session
session.Send(IoTSocketStruct.MakeAuthValidation(True))
with self._lock :
sessionData, exp = self._keepSessionsData.get(session.UID, (None, None))
if sessionData is not None :
for data in sessionData :
session.Send(data)
del self._keepSessionsData[session.UID]
return True
session.Send(IoTSocketStruct.MakeAuthValidation(False))
session.Close()
return False
def RemoveSession(self, session, keepSessionData) :
with self._lock :
removed = False
if session.UID == IoTSocketStruct.CENTRAL_EMPTY_UID :
if session == self._centralSession :
self._centralSession = None
removed = True
elif session.UID in self._objectsSessions :
if session == self._objectsSessions[session.UID] :
del self._objectsSessions[session.UID]
removed = True
if removed and keepSessionData :
exp = time() + self._keepSessionSec
self._keepSessionsData[session.UID] = ([ ], exp)
def CentralSessionExists(self) :
return ( self._centralSession is not None or \
IoTSocketStruct.CENTRAL_EMPTY_UID in self._keepSessionsData )
def AddCentralHTTPRequest(self, httpReq, exp) :
with self._lock :
while True :
trackingNbr = randbelow(2**16)
if not trackingNbr in self._centralHTTPRequests :
self._centralHTTPRequests[trackingNbr] = (httpReq, exp)
break
return trackingNbr
def RemoveCentralHTTPRequest(self, httpReq) :
with self._lock :
if httpReq.TrackingNbr in self._centralHTTPRequests :
if httpReq == self._centralHTTPRequests[httpReq.TrackingNbr][0] :
del self._centralHTTPRequests[httpReq.TrackingNbr]
def GetNewTelemetryToken(self, uid, expirationMin) :
with self._lock :
while True :
token = token_bytes(8)
if not token in self._telemetryTokens :
if isinstance(expirationMin, int) and expirationMin > 0 :
exp = time() + (expirationMin * 60)
else :
exp = None
self._telemetryTokens[token] = (uid, exp)
break
self.Log( 'NEW TELEMETRY TOKEN FOR {%s} EXPIRING IN %s MIN (%s)' %
( IoTSocketStruct.UIDFromBin128(uid),
expirationMin,
self.TelemetryTokenToStr(token) ) )
return token
def TelemetryTokenToStr(self, token) :
if isinstance(token, bytes) and len(token) == 8 :
return hexlify(token).decode().upper()
return 'TOKEN-ERROR'
def RouteRequest(self, fromUID, toUID, trackingNbr, dataFormat, formatOpt, data) :
if toUID or self.CentralSessionExists() :
if toUID :
session = self._objectsSessions.get(toUID, None)
else :
session = self._centralSession
data = IoTSocketStruct.MakeRequestTRHdr( fromUID,
trackingNbr,
dataFormat,
formatOpt,
len(data) ) \
+ data
if session and session.Send(data) :
return True
if not toUID :
toUID = IoTSocketStruct.CENTRAL_EMPTY_UID
sessionData, exp = self._keepSessionsData.get(toUID, (None, None))
if sessionData is not None :
sessionData.append(data)
self.Log('ROUTER > REQUEST KEPT (#%s)' % trackingNbr)
return True
else :
if self._onGetWebHookRequest :
plFormat, plObject = IoTSocketStruct.DecodeJSONPayload(data, dataFormat)
if plFormat is not None and plObject is not None :
webHook = self._onGetWebHookRequest(self)
if webHook :
webHook.ObjRef = (fromUID, trackingNbr)
webHook.OnResponseOk = self._onWebHookResponseOk
webHook.OnClosed = self._onWebHookClosed
webHook.Post(self._centralAuthKeyHex, fromUID, plObject, plFormat)
return True
self.Log('ROUTER > ERROR TO OPEN WEBHOOK OF REQUEST')
self.Log('ROUTER > NO DESTINATION FOR REQUEST (#%s)' % trackingNbr)
return False
def _onWebHookResponseOk(self, centralHTTPWebHook, o) :
if o :
uid, trackingNbr = centralHTTPWebHook.ObjRef
try :
code = int(o['Code'])
fmt, data = IoTSocketStruct.EncodeJSONPayload(o['Payload'], o['Format'])
if fmt is not None and data is not None :
centralHTTPWebHook.ObjRef = (None, None)
session = self._objectsSessions.get(uid, None)
if session :
session.EndTrackingRequest(trackingNbr)
data = IoTSocketStruct.MakeResponseTRHdr( None,
trackingNbr,
code,
fmt,
IoTSocketStruct.PLDATA_FMT_OPT_NONE,
len(data) ) \
+ data
session.Send(data)
except :
pass
def _onWebHookClosed(self, centralHTTPWebHook) :
uid, trackingNbr = centralHTTPWebHook.ObjRef
if uid and trackingNbr :
session = self._objectsSessions.get(uid, None)
if session :
session.EndTrackingRequest(trackingNbr)
data = IoTSocketStruct.MakeResponseErrTR( None,
trackingNbr,
IoTSocketStruct.RESP_CODE_REQ_NOK )
session.Send(data)
def RouteResponse(self, fromUID, toUID, trackingNbr, code, dataFormat, formatOpt, data) :
if toUID or self.CentralSessionExists() :
if toUID :
session = self._objectsSessions.get(toUID, None)
else :
session = self._centralSession
if session :
session.EndTrackingRequest(trackingNbr)
data = IoTSocketStruct.MakeResponseTRHdr( fromUID,
trackingNbr,
code,
dataFormat,
formatOpt,
len(data) ) \
+ data
return session.Send(data)
else :
httpReq, exp = self._centralHTTPRequests.get(trackingNbr, (None, None))
if httpReq :
plFormat, plObject = IoTSocketStruct.DecodeJSONPayload(data, dataFormat)
if plFormat is not None and plObject is not None :
self.RemoveCentralHTTPRequest(httpReq)
return httpReq.SendResponse(code, plObject, plFormat)
self.Log('ROUTER > NO DESTINATION FOR RESPONSE (#%s)' % trackingNbr)
return False
def RouteTelemetry(self, token, dataFormat, formatOpt, data) :
if token and data :
uid, exp = self._telemetryTokens.get(token, (None, None))
if uid :
self.Log( 'ROUTER > TELEMETRY RECEIVED FROM {%s} WITH TOKEN %s' %
( IoTSocketStruct.UIDFromBin128(uid),
self.TelemetryTokenToStr(token) ) )
if self.CentralSessionExists() :
session = self._centralSession
if session :
data = IoTSocketStruct.MakeIdentTelemetryTRHdr( uid,
dataFormat,
formatOpt,
len(data) ) \
+ data
if session.Send(data) :
return True
elif self._onGetWebHookTelemetry :
plFormat, plObject = IoTSocketStruct.DecodeJSONPayload(data, dataFormat)
if plFormat is not None and plObject is not None :
webHook = self._onGetWebHookTelemetry(self)
if webHook :
webHook.Post(self._centralAuthKeyHex, uid, plObject, plFormat)
return True
self.Log('ROUTER > ERROR TO OPEN WEBHOOK OF TELEMETRY')
self.Log('ROUTER > NO DESTINATION FOR TELEMETRY')
return False
@property
def OnGetWebHookRequest(self) :
return self._onGetWebHookRequest
@OnGetWebHookRequest.setter
def OnGetWebHookRequest(self, value) :
self._onGetWebHookRequest = value
@property
def OnGetWebHookTelemetry(self) :
return self._onGetWebHookTelemetry
@OnGetWebHookTelemetry.setter
def OnGetWebHookTelemetry(self, value) :
self._onGetWebHookTelemetry = value
```
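The webhook factories used by `RouteRequest` and `RouteTelemetry` are plain properties, so the hosting application wires them in after the router is constructed. Below is a minimal wiring sketch: `router` is assumed to be an already-constructed router instance and `DummyWebHook` is a hypothetical stand-in exposing the `ObjRef`/`OnResponseOk`/`OnClosed`/`Post` interface the router expects; only the property names, callback signatures and `GetNewTelemetryToken()` come from the code above.
```python
# Hedged sketch: `router`, `objectUID` and DummyWebHook are assumptions.
class DummyWebHook:
    ObjRef       = None
    OnResponseOk = None
    OnClosed     = None
    def Post(self, authKeyHex, uid, plObject, plFormat):
        print('WEBHOOK POST', uid, plFormat, plObject)

router.OnGetWebHookRequest   = lambda r: DummyWebHook()
router.OnGetWebHookTelemetry = lambda r: DummyWebHook()

# Issue a telemetry token for an object UID (16 raw bytes), valid for 30 minutes:
token = router.GetNewTelemetryToken(uid=objectUID, expirationMin=30)
print(router.TelemetryTokenToStr(token))
```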
#### File: jczic/IoTSocket-Concentrator/iotSocketSession.py
```python
from iotSocketStruct import IoTSocketStruct
from XAsyncSockets import XClosedReason
from struct import unpack
from secrets import token_bytes
from _thread import allocate_lock
from time import time
class IoTSocketSession :
IOTSOCKET_VER = 0x01
RECV_TIMEOUT = 10
def __init__(self, xAsyncTCPClient, router, sslContext, reqTimeout) :
self._xasTCPCli = xAsyncTCPClient
self._router = router
self._sslContext = sslContext
self._reqTimeout = reqTimeout
self._uid = None
self._telemetryToken = None
self._authenticated = False
self._isCentral = False
self._strUID = None
self._groupID = None
self._closedCode = None
self._requests = { }
self._requestsLock = allocate_lock()
self._xasTCPCli.OnClosed = self._onTCPConnClosed
self._waitInitiationReq()
def Close(self) :
self._xasTCPCli.Close()
def Send(self, data, onDataSent=None, onDataSentArg=None) :
return self._xasTCPCli.AsyncSendData(data, onDataSent, onDataSentArg)
def _recv(self, size, onDataRecv, onDataRecvArg=None) :
self._xasTCPCli.AsyncRecvData(size, onDataRecv, onDataRecvArg, self.RECV_TIMEOUT)
def _onTCPConnClosed(self, xAsyncTCPClient, closedReason) :
reason = {
XClosedReason.ClosedByHost : 'BY HOST',
XClosedReason.ClosedByPeer : 'BY PEER',
XClosedReason.Timeout : 'AFTER TIMEOUT'
}.get(closedReason, '(ERROR)')
if self._authenticated :
keepSessionData = self._closedCode in [ None,
IoTSocketStruct.CLOSE_CODE_SLEEP_MODE,
IoTSocketStruct.CLOSE_CODE_FLUSH_RESS ]
self._router.RemoveSession(self, keepSessionData)
self._router.Log( 'SESSION %s CLOSED %s' %
(self._getSessionName(), reason) )
else :
self._router.Log( 'CONNECTION %s REFUSED (%s)' %
(self._xasTCPCli.CliAddr[0], reason) )
def _waitInitiationReq(self) :
self._recv(4, self._onInitiationReqRecv)
def _onInitiationReqRecv(self, xAsyncTCPClient, data, arg) :
tls, ver, opt, maxTrLen = IoTSocketStruct.DecodeInitiationReq(data)
ok = ( ver == self.IOTSOCKET_VER and
( not tls or self._sslContext is not None ) )
data = IoTSocketStruct.MakeInitiationResp( ok = ok,
ruleType = IoTSocketStruct.INIT_NO_RULE )
if ok :
self.Send(data, self._onInitiationRespSent, tls)
else :
self.Send(data)
self.Close()
def _onInitiationRespSent(self, xAsyncTCPClient, arg) :
if arg :
try :
self._xasTCPCli.StartSSLContext(self._sslContext, True)
except :
self.Close()
return
self._token128 = token_bytes(16)
self.Send(self._token128)
self._recv(48, self._onChallengeRecv)
def _onChallengeRecv(self, xAsyncTCPClient, data, arg) :
self._uid = data[:16].tobytes()
hmac256 = data[16:].tobytes()
if self._router.AuthenticateSession( self,
self._token128,
hmac256 ) :
self._startSession()
else :
self.Close()
def _startSession(self) :
self._authenticated = True
self._isCentral = (self._uid == IoTSocketStruct.CENTRAL_EMPTY_UID)
if not self._isCentral :
self._strUID = IoTSocketStruct.UIDFromBin128(self._uid)
self._router.Log( 'SESSION %s STARTED FROM %s' %
(self._getSessionName(), self._xasTCPCli.CliAddr[0]) )
if not self._isCentral :
self._groupID = self._router.GetACLAccess(self._uid)[0]
if self._router.GetGroupOption(self._groupID, 'Telemetry') :
expMin = self._router.GetGroupOption(self._groupID, 'TelemetryTokenExpMin')
self._telemetryToken = self._router.GetNewTelemetryToken(self._uid, expMin)
tr = IoTSocketStruct.MakeTelemetryTokenTR(self._telemetryToken)
self.Send(tr)
self._waitDataTransmission()
def _waitDataTransmission(self) :
self._xasTCPCli.AsyncRecvData(1, self._onDataTransmissionRecv)
def _onDataTransmissionRecv(self, xAsyncTCPClient, data, arg) :
if arg :
tot = arg
uid = data.tobytes()
else :
tot, rte = IoTSocketStruct.DecodeDataTRHdr(data)
if rte :
self._recv(16, self._onDataTransmissionRecv, tot)
return
uid = None
if tot == IoTSocketStruct.TOT_ACL and self._isCentral :
self._recv(4, self._onACLItemsCountRecv)
elif tot == IoTSocketStruct.TOT_PING :
self._router.Log('SESSION %s > PING RECEIVED' % self._getSessionName())
self.Send(IoTSocketStruct.MakePongTR())
self._waitDataTransmission()
elif tot == IoTSocketStruct.TOT_PONG :
self._router.Log('SESSION %s > PONG RECEIVED' % self._getSessionName())
self._waitDataTransmission()
elif tot == IoTSocketStruct.TOT_REQUEST :
self._recv(5, self._onRequestRecv, (uid, ))
elif tot == IoTSocketStruct.TOT_RESPONSE :
self._recv(6, self._onResponseRecv, (uid, ))
elif tot == IoTSocketStruct.TOT_CLOSE_CONN :
self._recv(1, self._onCloseConnCodeRecv)
else :
self.Send(IoTSocketStruct.MakeCloseConnTR(IoTSocketStruct.CLOSE_CODE_PROTO_ERR))
self.Close()
def _onACLItemsCountRecv(self, xAsyncTCPClient, data, arg) :
count = unpack('>I', data)[0]
self._router.Log( 'SESSION %s > %s ACL SETUP RECEIVED' %
(self._getSessionName(), count) )
self._router.ClearACL()
if count > 0 :
self._recv(48, self._onACLItemRecv, count)
else :
self._waitDataTransmission()
def _onACLItemRecv(self, xAsyncTCPClient, data, arg) :
groupID, uid, authKey = IoTSocketStruct.DecodeACLItem(data.tobytes())
self._router.AddACLAccess(groupID, uid, authKey)
if arg > 1 :
self._recv(48, self._onACLItemRecv, arg-1)
else :
self._router.SaveACL()
self._waitDataTransmission()
def _onRequestRecv(self, xAsyncTCPClient, data, arg) :
uid = arg[0]
if len(arg) == 2 :
trackingNbr, dataFormat, formatOpt, dataLen = arg[1]
data = data.tobytes()
else :
hdr = IoTSocketStruct.DecodeRequestHdr(data.tobytes())
trackingNbr, dataFormat, formatOpt, dataLen = hdr
if dataLen > 0 :
self._recv(dataLen, self._onRequestRecv, (uid, hdr))
return
data = b''
if uid :
strUID = ('{%s}' % IoTSocketStruct.UIDFromBin128(uid))
else :
strUID = 'CENTRAL'
errCode = None
with self._requestsLock :
self._router.Log( 'SESSION %s > REQUEST TO %s RECEIVED (#%s)' %
(self._getSessionName(), strUID, trackingNbr) )
if not trackingNbr in self._requests :
if self._router.RouteRequest( fromUID = None if self._isCentral else self._uid,
toUID = uid,
trackingNbr = trackingNbr,
dataFormat = dataFormat,
formatOpt = formatOpt,
data = data ) :
exp = time() + self._reqTimeout
self._requests[trackingNbr] = (uid, exp)
else :
errCode = IoTSocketStruct.RESP_CODE_ERR_NO_DEST
else :
self._router.Log( 'SESSION %s > TRACKING NUMBER #%s ALREADY EXISTS' %
(self._getSessionName(), trackingNbr) )
errCode = IoTSocketStruct.RESP_CODE_ERR_SAME_TRK_NBR
if errCode :
self.Send(IoTSocketStruct.MakeResponseErrTR(uid, trackingNbr, errCode))
self._waitDataTransmission()
def _onResponseRecv(self, xAsyncTCPClient, data, arg) :
uid = arg[0]
if len(arg) == 2 :
trackingNbr, code, dataFormat, formatOpt, dataLen = arg[1]
data = data.tobytes()
else :
hdr = IoTSocketStruct.DecodeResponseHdr(data.tobytes())
trackingNbr, code, dataFormat, formatOpt, dataLen = hdr
if dataLen > 0 :
self._recv(dataLen, self._onResponseRecv, (uid, hdr))
return
data = b''
if uid :
strUID = ('{%s}' % IoTSocketStruct.UIDFromBin128(uid))
else :
strUID = 'CENTRAL'
self._router.Log( 'SESSION %s > RESPONSE TO %s RECEIVED (#%s)' %
(self._getSessionName(), strUID, trackingNbr) )
self._router.RouteResponse( fromUID = None if self._isCentral else self._uid,
toUID = uid,
trackingNbr = trackingNbr,
code = code,
dataFormat = dataFormat,
formatOpt = formatOpt,
data = data )
self._waitDataTransmission()
def _onCloseConnCodeRecv(self, xAsyncTCPClient, data, arg) :
self._router.Log('SESSION %s > CLOSE CONNECTION CODE RECEIVED' % self._getSessionName())
self._closedCode = data[0]
self.Close()
def _getSessionName(self) :
if self._isCentral :
return 'CENTRAL'
else :
return ('{%s}' % self._strUID)
def EndTrackingRequest(self, trackingNbr) :
with self._requestsLock :
if trackingNbr in self._requests :
del self._requests[trackingNbr]
def CheckRequestsTimeout(self, nowSec) :
if self._requests :
with self._requestsLock :
for trackingNbr in list(self._requests) :
uid, exp = self._requests[trackingNbr]
if nowSec >= exp :
del self._requests[trackingNbr]
self.Send( IoTSocketStruct.MakeResponseErrTR( uid,
trackingNbr,
IoTSocketStruct.RESP_CODE_ERR_TIMEOUT ) )
self._router.Log( 'SESSION %s > REQUEST TIMEOUT (#%s)' %
(self._getSessionName(), trackingNbr) )
@property
def UID(self) :
return self._uid
@property
def IsCentral(self) :
return self._isCentral
@property
def TelemetryToken(self) :
return self._telemetryToken
```
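For reference, the handshake handled above works like this: the server sends a random 16-byte token (`_onInitiationRespSent`), and the client must answer with 48 bytes that `_onChallengeRecv` splits into a 128-bit UID plus a 32-byte `hmac256`. The sketch below assumes that `hmac256` is an HMAC-SHA256 of the server token keyed with the object's auth key; the actual verification lives in the router's `AuthenticateSession`, which is not part of this file, so treat this purely as an illustration.
```python
# Assumption: hmac256 = HMAC-SHA256(authKey, token128). Verify against the
# router's AuthenticateSession implementation before relying on this.
import hmac
import hashlib

def build_challenge_response(uid128: bytes, auth_key: bytes, token128: bytes) -> bytes:
    assert len(uid128) == 16 and len(token128) == 16
    mac = hmac.new(auth_key, token128, hashlib.sha256).digest()  # 32 bytes
    return uid128 + mac                                          # 48 bytes total
```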
|
{
"source": "jczic/MicroNN",
"score": 3
}
|
#### File: jczic/MicroNN/test colors.py
```python
# -*- coding: utf-8 -*-
from microNN import MicroNN
from tkinter import *
from PIL import Image, ImageDraw, ImageTk
from random import random
# ----------------------------------------------------------------
etaLR = 0.5 # Learning rate
width = 800 # Window/Canvas width
height = 500 # Window/Canvas height
examples = [ ]
# ----------------------------------------------------------------
def rgb2hex(rgb):
return '#%02x%02x%02x' % rgb
# ----------------------------------------------------------------
def addExample(x, y) :
col = ( round(random()*255),
round(random()*255),
round(random()*255) )
examples.append( ( [x/width, y/height], [col] ) )
can.create_oval( x-15, y-15, x+15, y+15,
fill = rgb2hex(col),
width = 0 )
# ----------------------------------------------------------------
def process() :
global photoBuffer
if len(examples) :
for i in range(10) :
for ex in examples :
microNN.Learn(ex[0], ex[1])
for i in range(70) :
x = random()
y = random()
col = microNN.Predict([x, y])[0]
x *= width
y *= height
r = round(random()*6) + 1
drawBuffer.ellipse((x-r, y-r, x+r, y+r), fill=col)
photoBuffer = ImageTk.PhotoImage(imgBuffer)
can.create_image(0, 0, anchor=NW, image=photoBuffer)
mainWindow.after(10, process)
# ----------------------------------------------------------------
def onCanvasClick(evt) :
addExample(evt.x, evt.y)
# ----------------------------------------------------------------
microNN = MicroNN()
microNN.LearningRate = etaLR
microNN.AddInputLayer ( dimensions = MicroNN.Init1D(2),
shape = MicroNN.Shape.Neuron )
microNN.AddLayer ( dimensions = MicroNN.Init1D(10),
shape = MicroNN.Shape.Neuron,
activation = MicroNN.Activation.Gaussian,
initializer = MicroNN.LogisticInitializer(MicroNN.Initializer.HeUniform),
connStruct = MicroNN.FullyConnected )
microNN.AddLayer ( dimensions = MicroNN.Init1D(5),
shape = MicroNN.Shape.Neuron,
activation = MicroNN.Activation.Sigmoid,
initializer = MicroNN.LogisticInitializer(MicroNN.Initializer.XavierUniform),
connStruct = MicroNN.FullyConnected )
microNN.AddLayer ( dimensions = MicroNN.Init1D(5),
shape = MicroNN.Shape.Neuron,
activation = MicroNN.Activation.Sigmoid,
initializer = MicroNN.LogisticInitializer(MicroNN.Initializer.XavierUniform),
connStruct = MicroNN.FullyConnected )
microNN.AddLayer ( dimensions = MicroNN.Init1D(1),
shape = MicroNN.Shape.Color,
activation = MicroNN.Activation.Sigmoid,
initializer = MicroNN.LogisticInitializer(MicroNN.Initializer.XavierUniform),
connStruct = MicroNN.FullyConnected )
microNN.InitWeights()
mainWindow = Tk()
mainWindow.title('microNN - test colors')
mainWindow.geometry('%sx%s' % (width, height))
mainWindow.resizable(False, False)
can = Canvas( mainWindow,
width = width,
height = height,
bg = 'white',
borderwidth = 0 )
can.bind('<Button-1>', onCanvasClick)
can.pack()
imgBuffer = Image.new('RGB', (width, height), (255, 255, 255))
drawBuffer = ImageDraw.Draw(imgBuffer)
photoBuffer = None
process()
mainWindow.mainloop()
```
|
{
"source": "jczic/MicroWifi",
"score": 2
}
|
#### File: jczic/MicroWifi/microWifi.py
```python
from network import WLAN
from socket import getaddrinfo
from time import sleep, ticks_ms, ticks_diff
from binascii import hexlify
from os import mkdir
from json import load, dumps
class MicroWifi :
# ============================================================================
# ===( Constants )============================================================
# ============================================================================
_ETH_AP = 1
_ETH_STA = 0
_IP_NONE = '0.0.0.0'
_DEFAULT_AUTH_TYPE = WLAN.WPA2
_AP_MASK = '255.255.255.0'
_DEFAULT_TIMEOUT_SEC = 10
# ============================================================================
# ===( Utils )===============================================================
# ============================================================================
@staticmethod
def _mac2Str(binMac) :
return hexlify(binMac, ':').decode().upper()
# ----------------------------------------------------------------------------
def _setAPInfos(self, ssid=None, key=None, ip=None, mask=None, gateway=None, dns=None) :
self._apInfos = {
'ssid' : ssid,
'key' : key,
'ip' : ip,
'mask' : mask,
'gateway' : gateway,
'dns' : dns
}
# ----------------------------------------------------------------------------
def _setConnectionInfos(self, bssid=None, ssid=None, key=None, ip=None, mask=None, gateway=None, dns=None) :
self._connInfos = {
'macBssid' : bssid,
'ssid' : ssid,
'key' : key,
'ip' : ip,
'mask' : mask,
'gateway' : gateway,
'dns' : dns
}
# ----------------------------------------------------------------------------
def _openConf(self) :
try :
with open(self._filePath, 'r') as jsonFile :
self._confObj = load(jsonFile)
except :
self._confObj = { }
if self._confObj.get('STA', None) is None :
self._confObj['STA'] = { }
# ----------------------------------------------------------------------------
def _writeConf(self) :
try :
jsonStr = dumps(self._confObj)
try :
mkdir(self._confPath)
except :
pass
jsonFile = open(self._filePath, 'wb')
jsonFile.write(jsonStr)
jsonFile.close()
return True
except :
return False
# ============================================================================
# ===( Constructor )==========================================================
# ============================================================================
def __init__(self, confName="wifi", confPath="/flash/conf", useExtAntenna=False) :
self._confPath = confPath
self._filePath = '%s/%s.json' % (confPath, confName)
self._wlan = WLAN()
self._antenna = WLAN.EXT_ANT if useExtAntenna else WLAN.INT_ANT
self._openConf()
self._setAPInfos()
self._setConnectionInfos()
self._wlan.init(antenna=self._antenna)
self.DisableRadio()
# ============================================================================
# ===( Functions )============================================================
# ============================================================================
def DisableRadio(self) :
self.CloseAccessPoint()
self.CloseConnectionToAP()
self._wlan.deinit()
# ----------------------------------------------------------------------------
def GetMACAddr(self) :
return self._mac2Str(self._wlan.mac())
# ----------------------------------------------------------------------------
def GetAPInfos(self) :
if not self.IsAccessPointOpened() :
self._setAPInfos()
return self._apInfos
# ----------------------------------------------------------------------------
def GetConnectionInfos(self) :
if not self.IsConnectedToAP() :
self._setConnectionInfos()
return self._connInfos
# ----------------------------------------------------------------------------
def ScanAP(self) :
try :
if self._wlan.mode() == WLAN.STA :
self._wlan.init(antenna=self._antenna)
return self._wlan.scan()
except :
return ()
# ----------------------------------------------------------------------------
def OpenAccessPoint(self, ssid, key=None, ip='192.168.0.254', autoSave=True) :
if ssid and ip :
try :
self._wlan.ifconfig( id = self._ETH_AP,
config = (ip, self._AP_MASK, ip, ip) )
auth = (self._DEFAULT_AUTH_TYPE, key) if key else None
self._wlan.init( mode = WLAN.STA_AP,
ssid = ssid,
auth = auth,
antenna = self._antenna )
print("WIFI ACCESS POINT OPENED :")
print(" - MAC address : %s" % self.GetMACAddr())
print(" - Network SSID : %s" % ssid)
print(" - IP address : %s" % ip)
print(" - Mask : %s" % self._AP_MASK)
print(" - Gateway IP : %s" % ip)
print(" - DNS server : %s" % ip)
if autoSave :
self._confObj['AP'] = {
'ssid' : ssid,
'key' : key,
'ip' : ip
}
self._writeConf()
self._setAPInfos(ssid, key, ip, self._AP_MASK, ip, ip)
return True
except :
self.CloseAccessPoint()
return False
# ----------------------------------------------------------------------------
def OpenAccessPointFromConf(self) :
try :
ssid = self._confObj['AP']['ssid']
key = self._confObj['AP']['key']
ip = self._confObj['AP']['ip']
return self.OpenAccessPoint(ssid, key, ip, False)
except :
return False
# ----------------------------------------------------------------------------
def RemoveAccessPointFromConf(self) :
try :
del self._confObj['AP']
return self._writeConf()
except :
return False
# ----------------------------------------------------------------------------
def CloseAccessPoint(self) :
try :
ip = self._IP_NONE
self._wlan.mode(WLAN.STA)
self._wlan.ifconfig( id = self._ETH_AP,
config = (ip, ip, ip, ip) )
return True
except :
return False
# ----------------------------------------------------------------------------
def IsAccessPointOpened(self) :
return self._wlan.ifconfig(self._ETH_AP)[0] != self._IP_NONE
# ----------------------------------------------------------------------------
def ConnectToAP(self, ssid, key=None, macBssid=None, timeoutSec=None, autoSave=True) :
if ssid :
if not key :
key = ''
if not timeoutSec :
timeoutSec = self._DEFAULT_TIMEOUT_SEC
timeout = timeoutSec * 1000
if self._wlan.mode() == WLAN.STA :
self._wlan.init(antenna=self._antenna)
print("TRYING TO CONNECT WIFI TO AP %s..." % ssid)
for ap in self.ScanAP() :
if ap.ssid == ssid and \
( not macBssid or self._mac2Str(ap.bssid) == macBssid ) :
self._wlan.connect( ssid = ap.ssid,
bssid = ap.bssid,
auth = (self._DEFAULT_AUTH_TYPE, key),
timeout = timeout )
t = ticks_ms()
while ticks_diff(t, ticks_ms()) < timeout :
sleep(0.100)
if self.IsConnectedToAP() :
bssid = self._mac2Str(ap.bssid)
staCfg = self._wlan.ifconfig(id=self._ETH_STA)
ip = staCfg[0]
mask = staCfg[1]
gateway = staCfg[2]
dns = staCfg[3]
print("WIFI CONNECTED TO AP :")
print(" - MAC address : %s" % self.GetMACAddr())
print(" - Network BSSID : %s" % bssid)
print(" - Network SSID : %s" % ssid)
print(" - IP address : %s" % ip)
print(" - Mask : %s" % mask)
print(" - Gateway IP : %s" % gateway)
print(" - DNS server : %s" % dns)
if autoSave :
sta = {
'ssid' : ssid,
'key' : key,
}
self._confObj['STA'][bssid] = sta
self._writeConf()
self._setConnectionInfos(bssid, ssid, key, ip, mask, gateway, dns)
return True
self.CloseConnectionToAP()
break
print("FAILED TO CONNECT WIFI TO AP %s" % ssid)
return False
# ----------------------------------------------------------------------------
def ConnectToAPFromConf(self, bssidMustBeSame=False, timeoutSec=None) :
if self._wlan.mode() == WLAN.STA :
self._wlan.init(antenna=self._antenna)
for ap in self.ScanAP() :
for bssid in self._confObj['STA'] :
macBssid = self._mac2Str(ap.bssid) if bssidMustBeSame else None
if self._confObj['STA'][bssid]['ssid'] == ap.ssid and \
( not macBssid or bssid == macBssid ) :
if self.ConnectToAP( ap.ssid,
self._confObj['STA'][bssid]['key'],
macBssid,
timeoutSec,
False ) :
return True
break
return False
# ----------------------------------------------------------------------------
def RemoveConnectionToAPFromConf(self, ssid, macBssid=None) :
try :
changed = False
for bssid in list(self._confObj['STA']) :
if self._confObj['STA'][bssid]['ssid'] == ssid and \
( not macBssid or bssid == macBssid ) :
del self._confObj['STA'][bssid]
changed = True
if changed :
return self._writeConf()
except :
pass
return False
# ----------------------------------------------------------------------------
def CloseConnectionToAP(self) :
try :
self._wlan.disconnect()
self._wlan.ifconfig( id = self._ETH_STA,
config = 'dhcp' )
return True
except :
return False
# ----------------------------------------------------------------------------
def IsConnectedToAP(self) :
return self._wlan.ifconfig(self._ETH_STA)[0] != self._IP_NONE
# ----------------------------------------------------------------------------
def ResolveIPFromHostname(self, hostname) :
originalMode = self._wlan.mode()
if originalMode == WLAN.STA_AP :
self._wlan.mode(WLAN.STA)
try :
ipResolved = getaddrinfo(hostname, 0)[0][-1][0]
except :
ipResolved = None
if originalMode == WLAN.STA_AP :
self._wlan.mode(WLAN.STA_AP)
return ipResolved if ipResolved != self._IP_NONE else None
# ----------------------------------------------------------------------------
def InternetAccessIsPresent(self) :
return ( self.ResolveIPFromHostname('iana.org') is not None )
# ----------------------------------------------------------------------------
def WaitForInternetAccess(self, timeoutSec=None) :
if not timeoutSec :
timeoutSec = self._DEFAULT_TIMEOUT_SEC
timeout = timeoutSec * 1000
t = ticks_ms()
while ticks_diff(t, ticks_ms()) < timeout :
sleep(0.100)
if self.InternetAccessIsPresent() :
return True
return False
# ============================================================================
# ============================================================================
# ============================================================================
```
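A short usage sketch built only from the methods above (MicroPython on a Pycom board; the SSID and key values are placeholders):
```python
# Usage sketch for a Pycom/MicroPython board; SSID and key are placeholders.
from microWifi import MicroWifi

wifi = MicroWifi(confName='wifi', confPath='/flash/conf')

# Try the saved access points first, then fall back to explicit credentials.
if not wifi.ConnectToAPFromConf(timeoutSec=15):
    wifi.ConnectToAP('MyNetwork', 'MyPassword', timeoutSec=15)

if wifi.IsConnectedToAP() and wifi.WaitForInternetAccess(timeoutSec=10):
    print(wifi.GetConnectionInfos())
    print(wifi.ResolveIPFromHostname('iana.org'))
```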
|
{
"source": "jczimmerman/flaskoop",
"score": 3
}
|
#### File: flaskoop/flaskoop/scenes.py
```python
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from werkzeug.exceptions import abort
bp = Blueprint('scenes', __name__)
@bp.route('/')
def index():
return render_template('base.html')
@bp.route('/death')
def death():
return render_template('scenes/death.html')
@bp.route('/tavern', methods=('GET', 'POST'))
def tavern():
current_scene = {
"title": "The Blue Moon Tavern",
"intro": """You're in the tavern. Straight ahead there is the bar.
To your right there is a strange man, staring at you. Do you:
1) Go to the bar. 2) Go talk to the strange man. 3.) Leave the tavern."""
}
if request.method == 'POST':
choice = request.form['action']
if choice == "1":
return redirect(url_for('scenes.bar'))
elif choice == "2":
return redirect(url_for('scenes.stranger'))
elif choice == "3":
return redirect(url_for('scenes.town_center'))
else:
return redirect(url_for('scenes.death'))
return render_template('scenes/index.html', scene=current_scene)
@bp.route('/bar', methods=('GET', 'POST'))
def bar():
current_scene = {
"title": "The bar in the tavern",
"intro": """You approach the bar in the tavern, the bartender sees you
coming and offers you some ale... and some advice. 'Avoid that strange
man over there!' Do you: 1) Buy an ale. 2) Go back to the tavern."""
}
if request.method == 'POST':
choice = request.form['action']
if choice == "1":
return redirect(url_for('scenes.death'))
elif choice == "2":
return redirect(url_for('scenes.tavern'))
else:
return redirect(url_for('scenes.death'))
return render_template('scenes/index.html', scene=current_scene)
@bp.route('/stranger', methods=('GET', 'POST'))
def stranger():
current_scene = {
"title": "Approaching the stranger",
"intro": """You approach the stranger who was gazing at you with curious
eyes. Before you have a chance to say anything, he starts speaking. 'You
look like just the person I was looking for. If you're interested, I know
of a spot with enough gold to buy the King's castle and have some to spare.
I'll tell you all about it on one condition. If you manage to make it to the
gold, you must share it with me.' Do you: 1) Accept the stranger's proposition.
2) Go back to the tavern."""
}
if request.method == 'POST':
choice = request.form['action']
if choice == "1":
return redirect(url_for('scenes.stranger_advice'))
elif choice == "2":
return redirect(url_for('scenes.tavern'))
else:
return redirect(url_for('scenes.death'))
return render_template('scenes/index.html', scene=current_scene)
@bp.route('/stranger/advice', methods=('GET', 'POST'))
def stranger_advice():
current_scene = {
"title": "Approaching the stranger - advice",
"intro": """You accept the stranger's offer. He replies, 'Good, I will
give you some advice then. When you come across a lock, the way in is as easy
as ABC.' The stranger is done talking now, and you decide to go on your way.
Do you: 1) Go back to the tavern 2) Leave the tavern"""
}
return render_template('scenes/index.html', scene=current_scene)
@bp.route('/town-center')
def town_center():
current_scene = {
"title": "The town center",
"intro": """You wander into the town center and look around. To the north there
is the Gloomy Forest, which makes you feel uneasy just to look at.
To the south are the Somber Mountains, once home to the dwarves, now home to
wolves and bears. Do you: """
}
return render_template('scenes/index.html', scene=current_scene)
@bp.route('/road')
def trader_wagon():
current_scene = {
"title": "The trader's wagon on the edge of town",
"intro": "You approach the trader's wagon"
}
return render_template('scenes/index.html', scene=current_scene)
```
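The module only defines the blueprint; to serve these routes it still has to be registered on the application. Below is a minimal sketch assuming a standard Flask app factory (the actual `flaskoop/__init__.py` is not shown here).
```python
# Hypothetical app factory; only `scenes.bp` comes from the module above.
from flask import Flask

def create_app():
    app = Flask(__name__)
    from flaskoop import scenes
    app.register_blueprint(scenes.bp)
    return app
```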
|
{
"source": "jczimmerman/tsct-portal",
"score": 3
}
|
#### File: tsct-portal/portal/assignment.py
```python
from flask import Flask, render_template, g, redirect, url_for, Blueprint, request, session, abort
from . import db
from portal.auth import login_required, teacher_required
from . import course
bp = Blueprint("assignment", __name__)
def get_assignment(id):
user_id = session.get('user_id')
con = db.get_db()
cur = con.cursor()
cur.execute("""SELECT assignments.assignment_id, assignments.session_id,
sessions.course_id, courses.teacherid
FROM assignments JOIN sessions ON assignments.session_id = sessions.id
JOIN courses ON sessions.course_id = courses.course_id
WHERE assignments.assignment_id = %s
AND courses.teacherid = %s""",
(id, user_id))
check_teacher = cur.fetchone()
cur.close()
if check_teacher is None:
con.close()
abort(400, """You cannot modify this assignment""")
else:
cur = con.cursor()
cur.execute("""SELECT assignments.assignment_id, assignments.name, assignments.description,
assignments.due_date
FROM assignments
WHERE assignment_id = %s""",
(id,))
assignment = cur.fetchone()
cur.close()
return assignment
@bp.route('/course/<int:course_id>/session/<int:id>/create_assignment', methods=('GET', 'POST'))
@login_required
@teacher_required
def create_assignment(id, course_id):
"""Single page view to create an assignment."""
con = db.get_db()
cur = con.cursor()
cur.execute("""SELECT sessions.course_id, courses.course_id, courses.name
AS class_name FROM sessions JOIN courses
ON sessions.course_id = courses.course_id
WHERE sessions.id=%s""",
(id,))
course = cur.fetchone()
cur.close()
if request.method == 'POST':
# Getting all information necessary for creating an assignment
name = request.form['name']
description = request.form['description']
due_date = request.form['date']
total_points = request.form['total_points']
con = db.get_db()
cur = con.cursor()
# Query to actually insert assignment into the database
cur.execute("""
INSERT INTO assignments(session_id, name, description, due_date)
VALUES (%s, %s, %s, %s)""",
(id, name, description, due_date))
g.db.commit()
cur.execute("""SELECT assignment_id from assignments
WHERE session_id = %s
AND name =%s
AND description = %s
AND due_date = %s""",
(id, name, description, due_date))
assignment = cur.fetchone()
cur.execute("""SELECT roster.student_id FROM roster WHERE session_id = %s""",
(id,))
students = cur.fetchall()
for student in students:
cur.execute("""INSERT INTO grades (student_id, assignment_id, total_points)
VALUES (%s, %s, %s) """,
(student['student_id'], assignment['assignment_id'], total_points))
g.db.commit()
cur.close()
con.close()
return redirect(url_for('assignment.view_assignments', id=id, course_id=course_id))
con.close()
return render_template('layouts/assignments/create_assignments.html', course=course)
@bp.route('/course/<int:course_id>/session/<int:id>/assignments', methods=('GET',))
@login_required
def view_assignments(id, course_id):
"""Single page view of all the assignments in a session."""
con = db.get_db()
cur = con.cursor()
cur.execute("""SELECT sessions.id, sessions.course_id, courses.course_id,
courses.teacherid, courses.name
AS class_name FROM sessions JOIN courses
ON sessions.course_id = courses.course_id
WHERE sessions.id=%s AND courses.course_id= %s""",
(id, course_id,))
course = cur.fetchone()
# Query to get all of the assignments in a session
cur.execute("""
SELECT * FROM assignments
WHERE session_id = %s
ORDER BY assignment_id ASC
""", (id,))
assignments = cur.fetchall()
cur.close()
con.close()
return render_template('layouts/assignments/view_assignments.html', course=course, id=id, assignments=assignments)
@bp.route('/course/<int:course_id>/session/<int:session_id>/assignment/<int:id>/edit-assignment', methods=('GET', 'POST'))
@login_required
@teacher_required
def edit_assignments(course_id, session_id, id):
"""Singe page view to edit an assignment."""
assignment = get_assignment(id)
if request.method == 'POST':
# getting all info required to update assignment information
name = request.form['name']
description = request.form['description']
due_date = request.form['date']
con = db.get_db()
cur = con.cursor()
# Query to update the information for an assignment
cur.execute("""
UPDATE assignments SET name = %s, description = %s, due_date= %s
WHERE assignment_id = %s
""", (name, description, due_date, id))
# Query to return directly to whichever session the assignment was from
cur.execute("""
SELECT * FROM assignments
WHERE assignment_id = %s""", (id,))
session_id = cur.fetchone()
g.db.commit()
cur.close()
con.close()
return redirect(url_for('assignment.view_assignments', id=session_id['session_id'], course_id=course_id))
return render_template('layouts/assignments/edit_assignments.html', assignment=assignment)
@bp.route('/course/<int:course_id>/session/<int:session_id>/assignment/<int:id>/delete', methods=['POST'])
@login_required
@teacher_required
def delete_assignments(id, course_id, session_id):
"""Deletes any unwanted assignments."""
assignment = get_assignment(id)
assignment_id = assignment['assignment_id']
# Query to delete an assignment from the database
con = db.get_db()
cur = con.cursor()
cur.execute("""DELETE FROM assignments WHERE assignment_id = %s
""", (assignment_id,))
g.db.commit()
cur.close()
con.close()
return redirect(url_for('assignment.view_assignments', id=session_id, course_id=course_id))
```
#### File: tsct-portal/portal/auth.py
```python
import functools
import bcrypt
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from . import db
bp = Blueprint("auth", __name__)
def hash_pass(password):
hashed = bcrypt.hashpw(password, bcrypt.gensalt())
return hashed
@bp.route('/', methods=('GET', 'POST'))
def login():
if request.method == 'POST':
email = request.form['email']
password = request.form['password']
cur = db.get_db().cursor()
error = None
cur.execute(
'SELECT * FROM users WHERE email = %s', (email,)
)
user = cur.fetchone()
cur.close()
if user is None or not bcrypt.checkpw(password.encode('utf8'), user['password'].tobytes()):
error = 'Incorrect email or password!'
if error is None:
session.clear()
session['user_id'] = user['id']
if session['user_id'] == user['id'] and user['role'] == 'student':
return redirect(url_for('student.student_view'))
elif session['user_id'] == user['id'] and user['role'] == 'teacher':
return redirect(url_for('main.home'))
flash(error)
return render_template('layouts/index.html')
@bp.route('/logout')
def logout():
session.clear()
return redirect(url_for('main.index'))
@bp.before_app_request
def load_logged_in_user():
user_id = session.get('user_id')
cur = db.get_db().cursor()
if user_id is None:
g.user = None
else:
cur.execute(
'SELECT * FROM users WHERE id = %s', (user_id,)
)
g.user = cur.fetchone()
cur.close()
cur.close()
def login_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if g.user is None:
return redirect(url_for('auth.login'))
return view(**kwargs)
return wrapped_view
def teacher_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if g.user['role'] != 'teacher':
return redirect(url_for('student.student_view'))
return view(**kwargs)
return wrapped_view
def student_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
if g.user['role'] != 'student':
return redirect(url_for('main.index'))
return view(**kwargs)
return wrapped_view
```
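Because `login()` verifies credentials with `bcrypt.checkpw`, any seeded password must be stored as a bcrypt hash produced by `hash_pass`. A minimal seeding sketch follows; note that `hash_pass` expects bytes, and that the INSERT statement and table layout here are assumptions since the schema is not shown in this file.
```python
# Hypothetical seeding sketch; only hash_pass/bcrypt.checkpw come from the module above.
from portal.auth import hash_pass

hashed = hash_pass('s3cret'.encode('utf8'))   # bcrypt.hashpw needs bytes
# cur.execute("INSERT INTO users (email, password, role) VALUES (%s, %s, %s)",
#             ('teacher@example.com', hashed, 'teacher'))
```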
#### File: tsct-portal/portal/main.py
```python
from flask import Flask, render_template, g, redirect, url_for, Blueprint, request, session
from . import db
from portal.auth import login_required, teacher_required
bp = Blueprint("main", __name__)
# route for index template
@bp.route('/')
def index():
return render_template('layouts/index.html')
# route for showing the home for teachers
@bp.route("/home", methods=['GET'])
@login_required
@teacher_required
def home():
# user_id = session['user_id']
cur = db.get_db().cursor()
cur.execute(
"""SELECT courses.course_id, courses.name, courses.major, users.name AS teacher_name FROM courses INNER JOIN users ON courses.teacherid = users.id""")
courses = cur.fetchall()
cur.close()
return render_template("layouts/home.html", courses=courses)
@bp.route("/home/mycourses", methods=['GET'])
@login_required
@teacher_required
def my_courses():
user_id = session.get('user_id')
cur = db.get_db().cursor()
cur.execute(
"""SELECT courses.course_id, courses.name, courses.major,
users.name AS teacher_name FROM courses
INNER JOIN users ON courses.teacherid = users.id
WHERE users.id = %s """,
(user_id,))
courses = cur.fetchall()
cur.close()
return render_template("layouts/home.html", courses=courses)
@bp.route("/course/<int:course_id>/session/<int:session_id>/assignments/<int:assignment_id>/grades")
@login_required
@teacher_required
def teacher_assignment_grades(course_id, session_id, assignment_id):
"""View for the teacher to view assignments in a course."""
con = db.get_db()
cur = con.cursor()
cur.execute("""
SELECT (ROUND(grades.points_received/grades.total_points, 2 )*100) AS assignment_grade,
grades.total_points, grades.points_received, grades.grade_id, users.name
FROM grades INNER JOIN users
ON (grades.student_id = users.id)
WHERE assignment_id = %s;
""",
(assignment_id,))
grades = cur.fetchall()
return render_template('layouts/gradebook/teacher_view.html', grades=grades,
course_id=course_id, session_id=session_id, assignment_id=assignment_id)
@bp.route("/course/<int:course_id>/session/<int:session_id>/assignments/<int:assignment_id>/input-grade/<int:grade_id>", methods=('GET', 'POST'))
@login_required
@teacher_required
def input_grade(course_id, session_id, assignment_id, grade_id):
"""View for teacher to update assignment grade."""
con = db.get_db()
cur = con.cursor()
cur.execute("""
SELECT grades.total_points, grades.assignment_id AS assign,
users.name AS student
FROM grades JOIN users ON users.id = grades.student_id
WHERE grade_id = %s
""",
(grade_id,))
max_points = cur.fetchone()
if request.method == 'POST':
grade_input = request.form['grade_input']
feedback = request.form['feedback']
cur.execute("""
UPDATE grades SET points_received = %s, feedback = %s
WHERE grade_id = %s
""", (grade_input, feedback, grade_id))
g.db.commit()
cur.close()
con.close()
return redirect(url_for('main.teacher_assignment_grades', course_id=course_id,
session_id=session_id, assignment_id=assignment_id))
cur.close()
con.close()
return render_template('layouts/gradebook/input_grade.html', max_points=max_points)
```
#### File: tsct-portal/tests/test_gpa.py
```python
import pytest
def test_view(client, auth):
#this test if the user gets the right data
auth.login()
response = client.get('/course/2/session/2/gpa')
assert b'Test Student' in response.data
```
|
{
"source": "JcZou/model-prediction",
"score": 2
}
|
#### File: JcZou/model-prediction/evaluate.py
```python
def evaluate(df):
return "This is a stub for the evaluation function. The real implementation will compare your predictions with the held out production measurements."
```
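Since the real metric is not published, here is one hedged sketch of what such an evaluation could look like: it assumes hypothetical `prediction` and `measurement` columns in the dataframe and reports mean absolute error; the comparison against held-out production measurements described in the stub would replace this.
```python
# Hypothetical sketch only: the column names `prediction` and `measurement`
# are assumptions, not part of the original stub.
import pandas as pd

def evaluate_sketch(df: pd.DataFrame) -> float:
    """Return the mean absolute error between predictions and measurements."""
    return (df["prediction"] - df["measurement"]).abs().mean()

# Example:
# evaluate_sketch(pd.DataFrame({"prediction": [1.0, 2.0], "measurement": [1.1, 1.9]}))
```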
|
{
"source": "JcZou/rotors_simulator",
"score": 3
}
|
#### File: src/mavros/param.py
```python
import csv
import time
import rospy
import mavros
from mavros_msgs.msg import ParamValue
from mavros_msgs.srv import ParamPull, ParamPush, ParamGet, ParamSet
class Parameter(object):
"""Class representing one parameter"""
def __init__(self, param_id, param_value=0):
self.param_id = param_id
self.param_value = param_value
def __repr__(self):
return "<Parameter '{}': {}>".format(self.param_id, self.param_value)
class ParamFile(object):
"""Base class for param file parsers"""
def __init__(self, args):
pass
def read(self, file_):
"""Returns a iterable of Parameters"""
raise NotImplementedError
def write(self, file_, parameters):
"""Writes Parameters to file"""
raise NotImplementedError
class MissionPlannerParam(ParamFile):
"""Parse MissionPlanner param files"""
class CSVDialect(csv.Dialect):
delimiter = ','
doublequote = False
skipinitialspace = True
lineterminator = '\r\n'
quoting = csv.QUOTE_NONE
def read(self, file_):
to_numeric = lambda x: float(x) if '.' in x else int(x)
for data in csv.reader(file_, self.CSVDialect):
if data[0].startswith('#'):
continue # skip comments
if len(data) != 2:
raise ValueError("wrong field count")
yield Parameter(data[0].strip(), to_numeric(data[1]));
def write(self, file_, parameters):
writer = csv.writer(file_, self.CSVDialect)
writer.writerow(("#NOTE: " + time.strftime("%d.%m.%Y %T") ,))
for p in parameters:
writer.writerow((p.param_id, p.param_value))
class QGroundControlParam(ParamFile):
"""Parse QGC param files"""
class CSVDialect(csv.Dialect):
delimiter = '\t'
doublequote = False
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_NONE
def read(self, file_):
to_numeric = lambda x: float(x) if '.' in x else int(x)
for data in csv.reader(file_, self.CSVDialect):
if data[0].startswith('#'):
continue # skip comments
if len(data) != 5:
raise ValueError("wrong field count")
yield Parameter(data[2].strip(), to_numeric(data[3]));
def write(self, file_, parameters):
def to_type(x):
if isinstance(x, float):
return 9 # REAL32
elif isinstance(x, int):
return 6 # INT32
else:
raise ValueError("unknown type: " + repr(type(x)))
sysid = rospy.get_param(mavros.get_topic('target_system_id'), 1)
compid = rospy.get_param(mavros.get_topic('target_component_id'), 1)
writer = csv.writer(file_, self.CSVDialect)
writer.writerow(("# NOTE: " + time.strftime("%d.%m.%Y %T"), ))
writer.writerow(("# Onboard parameters saved by mavparam for ({}, {})".format(sysid, compid), ))
writer.writerow(("# MAV ID" , "COMPONENT ID", "PARAM NAME", "VALUE", "(TYPE)"))
for p in parameters:
writer.writerow((sysid, compid, p.param_id, p.param_value, to_type(p.param_value), )) # XXX
def param_ret_value(ret):
if ret.value.integer != 0:
return ret.value.integer
elif ret.value.real != 0.0:
return ret.value.real
else:
return 0
def param_get(param_id):
try:
get = rospy.ServiceProxy(mavros.get_topic('param', 'get'), ParamGet)
ret = get(param_id=param_id)
except rospy.ServiceException as ex:
raise IOError(str(ex))
if not ret.success:
raise IOError("Request failed.")
return param_ret_value(ret)
def param_set(param_id, value):
if isinstance(value, float):
val = ParamValue(integer=0, real=value)
else:
val = ParamValue(integer=value, real=0.0)
try:
set = rospy.ServiceProxy(mavros.get_topic('param', 'set'), ParamSet)
ret = set(param_id=param_id, value=val)
except rospy.ServiceException as ex:
raise IOError(str(ex))
if not ret.success:
raise IOError("Request failed.")
return param_ret_value(ret)
def param_get_all(force_pull=False):
try:
pull = rospy.ServiceProxy(mavros.get_topic('param', 'pull'), ParamPull)
ret = pull(force_pull=force_pull)
except rospy.ServiceException as ex:
raise IOError(str(ex))
if not ret.success:
raise IOError("Request failed.")
params = rospy.get_param(mavros.get_topic('param'))
return (ret.param_received,
sorted((Parameter(k, v) for k, v in params.iteritems()),
cmp=lambda x, y: cmp(x.param_id, y.param_id))
)
def param_set_list(param_list):
# 1. load parameters to parameter server
for p in param_list:
rospy.set_param(mavros.get_topic('param', p.param_id), p.param_value)
# 2. request push all
try:
push = rospy.ServiceProxy(mavros.get_topic('param', 'push'), ParamPush)
ret = push()
except rospy.ServiceException as ex:
raise IOError(str(ex))
if not ret.success:
raise IOError("Request failed.")
return ret.param_transfered
```
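A short usage sketch of the helpers above from a ROS node; the parameter name `SYSID_MYGCS` and the output path are placeholders, and `mavros.set_namespace()` is assumed to select the default `/mavros` namespace the way the stock mavros command-line tools do.
```python
# Usage sketch; parameter name and file path are placeholders.
import rospy
import mavros
from mavros import param

rospy.init_node('param_example')
mavros.set_namespace()                     # assumed default: the /mavros namespace

value = param.param_get('SYSID_MYGCS')     # read one onboard parameter
param.param_set('SYSID_MYGCS', 1)          # write it back

# Dump all onboard parameters to a QGroundControl-style file:
received, params = param.param_get_all(force_pull=True)
with open('/tmp/onboard.params', 'w') as f:
    param.QGroundControlParam(None).write(f, params)
```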
#### File: src/rqt_rotors/hil_plugin.py
```python
import os
import rospy
import rospkg
from mavros_msgs.msg import State
from mavros_msgs.srv import CommandBool
from mavros_msgs.srv import CommandLong
from mavros_msgs.srv import SetMode
from qt_gui.plugin import Plugin
from python_qt_binding import loadUi
from python_qt_binding import QtCore
from python_qt_binding.QtCore import QTimer, Slot
#from python_qt_binding.QtGui import QWidget, QFormLayout
# JC: QWidget and QFormLayout live in QtWidgets, not QtGui, under Qt5
from python_qt_binding.QtWidgets import QWidget, QFormLayout
import time
class HilPlugin(Plugin):
# MAV mode flags
MAV_MODE_FLAG_SAFETY_ARMED = 128
MAV_MODE_FLAG_MANUAL_INPUT_ENABLED = 64
MAV_MODE_FLAG_HIL_ENABLED = 32
MAV_MODE_FLAG_STABILIZE_ENABLED = 16
MAV_MODE_FLAG_GUIDED_ENABLED = 8
MAV_MODE_FLAG_AUTO_ENABLED = 4
MAV_MODE_FLAG_TEST_ENABLED = 2
MAV_MODE_FLAG_CUSTOM_MODE_ENABLED = 1
# MAV state dictionary
mav_state = {0: 'Uninitialized',
1: 'Booting up',
2: 'Calibrating',
3: 'Standby',
4: 'Active',
5: 'Critical',
6: 'Emergency',
7: 'Poweroff'}
# Constants
STR_ON = 'ON'
STR_OFF = 'OFF'
STR_UNKNOWN = 'N/A'
STR_MAVROS_ARM_SERVICE_NAME = '/mavros/cmd/arming'
STR_MAVROS_COMMAND_LONG_SERVICE_NAME = '/mavros/cmd/command'
STR_MAVROS_SET_MODE_SERVICE_NAME = '/mavros/set_mode'
STR_SYS_STATUS_SUB_TOPIC = '/mavros/state'
TIMEOUT_HIL_HEARTBEAT = 2.0
def __init__(self, context):
super(HilPlugin, self).__init__(context)
self.setObjectName('HilPlugin')
self._widget = QWidget()
rp = rospkg.RosPack()
ui_file = os.path.join(rp.get_path('rqt_rotors'), 'resource', 'HilPlugin.ui')
loadUi(ui_file, self._widget)
self._widget.setObjectName('HilPluginUi')
if context.serial_number() > 1:
self._widget.setWindowTitle(self._widget.windowTitle() + (' (%d)' % context.serial_number()))
context.add_widget(self._widget)
# Set the initial parameters of UI elements
self._widget.button_set_hil_mode.setEnabled(False)
self._widget.button_arm.setEnabled(False)
self._widget.button_reboot_autopilot.setEnabled(False)
self._widget.text_state.setText(self.STR_UNKNOWN)
self.clear_mav_mode()
# Initialize class variables
self.last_heartbeat_time = time.time()
self.mav_mode = 65
self.mav_status = 255
self.armed = False
self.connected = False
self.guided = False
self.hil_enabled = False
# Set the functions that are called when signals are emitted
self._widget.button_set_hil_mode.pressed.connect(self.on_set_hil_mode_button_pressed)
self._widget.button_arm.pressed.connect(self.on_arm_button_pressed)
self._widget.button_reboot_autopilot.pressed.connect(self.on_reboot_autopilot_button_pressed)
# Create ROS service proxies
self.arm = rospy.ServiceProxy(self.STR_MAVROS_ARM_SERVICE_NAME, CommandBool)
self.send_command_long = rospy.ServiceProxy(self.STR_MAVROS_COMMAND_LONG_SERVICE_NAME, CommandLong)
self.set_mode = rospy.ServiceProxy(self.STR_MAVROS_SET_MODE_SERVICE_NAME, SetMode)
# Initialize ROS subscribers and publishers
self.sys_status_sub = rospy.Subscriber(self.STR_SYS_STATUS_SUB_TOPIC, State, self.sys_status_callback, queue_size=1)
def on_set_hil_mode_button_pressed(self):
new_mode = self.mav_mode | self.MAV_MODE_FLAG_HIL_ENABLED
self.hil_enabled = True
self.mav_mode = new_mode
self.set_mode(new_mode, '')
self._widget.text_mode_hil.setText(self.mav_mode_text(self.hil_enabled))
def on_arm_button_pressed(self):
self.arm(True)
def on_reboot_autopilot_button_pressed(self):
self.send_command_long(False, 246, 1, 1, 0, 0, 0, 0, 0, 0)
def sys_status_callback(self, msg):
if (not self.connected and msg.connected):
self._widget.button_set_hil_mode.setEnabled(True)
self._widget.button_arm.setEnabled(True)
self._widget.button_reboot_autopilot.setEnabled(True)
self.connected = True
self.last_heartbeat_time = time.time()
self._widget.text_mode_safety_armed.setText(self.mav_mode_text(msg.armed))
self._widget.text_mode_guided.setText(self.mav_mode_text(msg.guided))
return
if (((time.time() - self.last_heartbeat_time) >= self.TIMEOUT_HIL_HEARTBEAT) and self.hil_enabled):
new_mode = self.mav_mode | self.MAV_MODE_FLAG_HIL_ENABLED
self.set_mode(new_mode, '')
if (self.armed != msg.armed):
self.armed = msg.armed
self._widget.text_mode_safety_armed.setText(self.mav_mode_text(self.armed))
self._widget.button_arm.setEnabled(not(self.armed))
self.mav_mode = self.mav_mode | self.MAV_MODE_FLAG_SAFETY_ARMED
if (self.guided != msg.guided):
self.guided = msg.guided
self._widget.text_mode_guided.setText(self.mav_mode_text(self.guided))
self.last_heartbeat_time = time.time()
def clear_mav_mode(self):
count = self._widget.mav_mode_layout.rowCount()
for i in range(count):
self._widget.mav_mode_layout.itemAt(i, QFormLayout.FieldRole).widget().setText(self.STR_UNKNOWN)
def mav_mode_text(self, mode_enabled):
return self.STR_ON if mode_enabled else self.STR_OFF
def shutdown_plugin(self):
if self.sys_status_sub is not None:
self.sys_status_sub.unregister()
```
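The magic number 65 assigned in `__init__` and the OR operations in the button handlers are just MAVLink base-mode flag arithmetic; spelled out with the flag values defined at the top of the class:
```python
# The mode bitmask arithmetic used above, written out with plain integers.
MAV_MODE_FLAG_CUSTOM_MODE_ENABLED = 1
MAV_MODE_FLAG_MANUAL_INPUT_ENABLED = 64
MAV_MODE_FLAG_HIL_ENABLED = 32
MAV_MODE_FLAG_SAFETY_ARMED = 128

initial_mode = MAV_MODE_FLAG_MANUAL_INPUT_ENABLED | MAV_MODE_FLAG_CUSTOM_MODE_ENABLED
assert initial_mode == 65                 # the value assigned in __init__
hil_mode = initial_mode | MAV_MODE_FLAG_HIL_ENABLED
assert hil_mode == 97                     # what on_set_hil_mode_button_pressed requests
assert hil_mode | MAV_MODE_FLAG_SAFETY_ARMED == 225   # once the vehicle reports armed
```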
|
{
"source": "JCZuurmond/dbt-synapse",
"score": 3
}
|
#### File: adapters/sqlserver/impl.py
```python
from dbt.adapters.sql import SQLAdapter
from dbt.adapters.sqlserver import SQLServerConnectionManager
from dbt.adapters.base.relation import BaseRelation
from dbt.exceptions import RuntimeException
import agate
from typing import (
Optional, Tuple, Callable, Iterable, Type, Dict, Any, List, Mapping,
Iterator, Union, Set
)
class SQLServerAdapter(SQLAdapter):
ConnectionManager = SQLServerConnectionManager
@classmethod
def date_function(cls):
return "getdate()"
@classmethod
def convert_text_type(cls, agate_table, col_idx):
column = agate_table.columns[col_idx]
lens = (len(d.encode("utf-8")) for d in column.values_without_nulls())
max_len = max(lens) if lens else 64
length = max_len if max_len > 16 else 16
return "varchar({})".format(length)
@classmethod
def convert_datetime_type(cls, agate_table, col_idx):
return "datetime"
@classmethod
def convert_boolean_type(cls, agate_table, col_idx):
return "bit"
@classmethod
def convert_number_type(cls, agate_table, col_idx):
decimals = agate_table.aggregate(agate.MaxPrecision(col_idx))
return "float" if decimals else "int"
@classmethod
def convert_time_type(cls, agate_table, col_idx):
return "datetime"
# Methods used in adapter tests
def timestamp_add_sql(
self, add_to: str, number: int = 1, interval: str = "hour"
) -> str:
# note: 'interval' is not supported for T-SQL
# for backwards compatibility, we're compelled to set some sort of
# default. A lot of searching has lead me to believe that the
# '+ interval' syntax used in postgres/redshift is relatively common
# and might even be the SQL standard's intention.
return f"DATEADD({interval},{number},{add_to})"
def string_add_sql(
self, add_to: str, value: str, location='append',
) -> str:
"""
`+` is T-SQL's string concatenation operator
"""
if location == 'append':
return f"{add_to} + '{value}'"
elif location == 'prepend':
return f"'{value}' + {add_to}"
else:
raise RuntimeException(
f'Got an unexpected location value of "{location}"'
)
def get_rows_different_sql(
self,
relation_a: BaseRelation,
relation_b: BaseRelation,
column_names: Optional[List[str]] = None,
except_operator: str = "EXCEPT",
) -> str:
"""
note: USING is not supported on Synapse, so COLUMNS_EQUAL_SQL is adjusted
Generate SQL for a query that returns a single row with a two
columns: the number of rows that are different between the two
relations and the number of mismatched rows.
"""
# This method only really exists for test reasons.
names: List[str]
if column_names is None:
columns = self.get_columns_in_relation(relation_a)
names = sorted((self.quote(c.name) for c in columns))
else:
names = sorted((self.quote(n) for n in column_names))
columns_csv = ", ".join(names)
sql = COLUMNS_EQUAL_SQL.format(
columns=columns_csv,
relation_a=str(relation_a),
relation_b=str(relation_b),
except_op=except_operator,
)
return sql
COLUMNS_EQUAL_SQL = """
with diff_count as (
SELECT
1 as id,
COUNT(*) as num_missing FROM (
(SELECT {columns} FROM {relation_a} {except_op}
SELECT {columns} FROM {relation_b})
UNION ALL
(SELECT {columns} FROM {relation_b} {except_op}
SELECT {columns} FROM {relation_a})
) as a
), table_a as (
SELECT COUNT(*) as num_rows FROM {relation_a}
), table_b as (
SELECT COUNT(*) as num_rows FROM {relation_b}
), row_count_diff as (
select
1 as id,
table_a.num_rows - table_b.num_rows as difference
from table_a, table_b
)
select
row_count_diff.difference as row_count_difference,
diff_count.num_missing as num_mismatched
from row_count_diff
join diff_count on row_count_diff.id = diff_count.id
""".strip()
```
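For illustration, the two helper methods above render plain T-SQL strings; given the f-strings in the implementation, the calls below would produce the SQL shown in the comments (adapter construction is omitted since the rendering does not depend on a connection).
```python
# Expected renderings of the helper methods above:
# adapter.timestamp_add_sql("getdate()", number=3, interval="minute")
#   -> "DATEADD(minute,3,getdate())"
# adapter.string_add_sql("first_name", "!", location="append")
#   -> "first_name + '!'"
# adapter.string_add_sql("first_name", "Dr. ", location="prepend")
#   -> "'Dr. ' + first_name"
```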
|
{
"source": "JCZuurmond/scikit-lego",
"score": 3
}
|
#### File: sklego/preprocessing/patsytransformer.py
```python
import numpy as np
from patsy import dmatrix, build_design_matrices, PatsyError
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
class PatsyTransformer(TransformerMixin, BaseEstimator):
"""
The patsy transformer offers a method to select the right columns
from a dataframe as well as a DSL for transformations. It is inspired
by R formulas.
This can be useful as a first step in the pipeline.
:param formula: a patsy-compatible formula
"""
def __init__(self, formula):
self.formula = formula
def fit(self, X, y=None):
"""Fits the estimator"""
X_ = dmatrix(self.formula, X)
assert np.array(X_).shape[0] == np.array(X).shape[0]
self.design_info_ = X_.design_info
return self
def transform(self, X):
"""
Applies the formula to the matrix/dataframe X.
Returns an design array that can be used in sklearn pipelines.
"""
check_is_fitted(self, "design_info_")
try:
return build_design_matrices([self.design_info_], X)[0]
except PatsyError as e:
raise RuntimeError from e
```
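A small usage sketch of the transformer above; the dataframe columns and the formula are made up for illustration.
```python
# Illustrative only; the dataframe and formula are made-up examples.
import pandas as pd
from sklego.preprocessing.patsytransformer import PatsyTransformer

df = pd.DataFrame({
    "x1": [1.0, 2.0, 3.0, 4.0],
    "x2": [0.5, 0.1, 0.9, 0.3],
    "group": ["a", "b", "a", "b"],
})

pt = PatsyTransformer("x1 + x2 + C(group)")
design_matrix = pt.fit(df).transform(df)
print(design_matrix)   # intercept column plus the encoded terms
```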
|
{
"source": "jd10ne/swtk",
"score": 3
}
|
#### File: jd10ne/swtk/sts-token.py
```python
import argparse
import boto_wp
import os
def main(profile="default", *, token=None, duration=900):
credentials = boto_wp.get_session_token(profile=profile, token=token, duration=int(duration))
# valid credential is there
if credentials is None:
return
tmp_prof = profile + "-tmp"
boto_wp.add_tmp_profile(tmp_prof, credentials)
def arg_parse():
"""引数パーサ
"""
parser = argparse.ArgumentParser(prog="SWTK - AWS SESSION TOKEN SWITCHER -")
# AWS Profile
parser.add_argument("-p", "--profile", help="AWS profile", required=True)
# MFA Token
parser.add_argument("-t", "--token", help="MFA token")
# Duration Time
parser.add_argument("-d", "--duration", help="Duration seconds of session token", default=900)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = arg_parse()
main(profile=args.profile, token=args.token, duration=args.duration)
```
|
{
"source": "jd-13/ElectroMap",
"score": 3
}
|
#### File: Source Code/python/activationmapoff.py
```python
import numpy as np
import scipy.signal
def activationmapoff(pixelSize,
framerate,
images,
mask,
velalgo,
before,
tfilt,
usespline,
splineN,
repolap,
inoff):
"""
Function for creating activation map from an image stack.
This version also outputs the offset
Parameters
----------
pixelSize : float
Image pixel size in micrometers
framerate : float
Framerate in kHz
images : 3 dimensional numpy array
mask : 2 dimensional numpy array
uint16 image mask
velalgo :
before :
tfilt : int
usespline :
splineN :
repolap :
inoff :
Returns
-------
"""
# The GUI input is in kHz, so 1 = 1000 Hz, which corresponds to an exposure of 1 ms
exposure = 1 / framerate
# Pixel size is given in micrometers; convert it to cm
pixelSize = pixelSize / 10000
scalelength = 1
before = round(before / exposure)
# Save an untouched imagestack for repol map
checkimage = images
usespline = usespline - 1
splineN = 1 / splineN
count = 0
order = 3
framesize = 11
if usespline == 0:
splineN = 1
rows = images.shape[0]
cols = images.shape[1]
if (velalgo == 1) or (velalgo == 2) or (velalgo == 4) or (velalgo == 6):
for row in range(rows):
for col in range(cols):
if not mask[row, col] == 0:
count += 1
maxInd = []
dpol = []
# Diff signal, find upstroke
signalav = np.squeeze(images[row, col, :])
if tfilt == 2:
signalav = scipy.signal.savgol_filter(signalav,
window_length=framesize,
polyorder=order)
signalav = imcomplement(signalav);
time=1:length(signalav);
fittime=min(time):splineN:max(time);
if usespline == 1
signalav=spline(time,signalav,fittime);
end
ads=diff(signalav);
[~,maxInd]=max(signalav);
if length(ads) == maxInd-1
maxInd=maxInd -1
end
ds_up=ads(1:maxInd);
[~, upstroke] = max(ads);
ds=diff(signalav);
[~,downstroke] = min(ds)
%find dpol, calc max d2F/dt2 up
for i =1:upstroke
dpol(i) = signalav(i);
end
ds=smooth(diff(dpol));
d2s=diff(ds);
[~,sdstart] = max(d2s);
if velalgo == 1
rawmap(row, col)=maxInd;
end
if velalgo == 2
rawmap(row, col)=upstroke;
end
if velalgo == 4
rawmap(row, col)=sdstart;
end
if velalgo == 6
rawmap(row, col)=downstroke;
end
else
rawmap(row,col)=0;
end
end
end
end
# %% Repolarisation map (velalgo == 5): for every pixel, find the time at which
# the signal has recovered by repolap percent of its amplitude after the peak
if velalgo == 5:
    # Work on an inverted copy of the image stack so the upstroke points upwards
    checkimage = np.max(images) - images.astype(float)
    for row in range(rows):
        for col in range(cols):
            highVal = None
            lowVal = None
            count += 1
            trace = np.squeeze(checkimage[row, col, :])
            if tfilt == 2:
                trace = scipy.signal.savgol_filter(trace,
                                                   window_length=framesize,
                                                   polyorder=order)
                checkimage[row, col, :] = trace
            maxi = trace.max()
            mini = trace.min()
            amp = maxi - mini
            # Peaks that rise above half of the signal amplitude
            locs, _ = scipy.signal.find_peaks(trace, height=(amp / 2) + mini)
            if locs.size > 0:
                maxInd = int(locs[0])
            else:
                maxInd = int(np.argmax(trace))
            maxi = trace[maxInd]
            mini = trace[maxInd:].min()      # want the minimum after the peak
            # Repolarisation threshold (e.g. repolap = 80 -> 80% recovery level)
            midi = (maxi - mini) * (1 - (repolap / 100)) + mini
            # First downward crossing of the threshold after the peak
            countr = 0
            for i in range(maxInd, images.shape[2] - 1):
                if trace[i] > midi and trace[i + 1] < midi:
                    countr += 1
                    if countr == 1:
                        highVal = trace[i]
                        lowVal = trace[i + 1]
            # Interpolate linearly between the two samples around the crossing
            if highVal is not None and lowVal is not None:
                y1, y2 = highVal, lowVal
                x1 = int((np.where(trace[maxInd:] == highVal)[0] + maxInd)[0])
                x2 = int((np.where(trace[maxInd:] == lowVal)[0] + maxInd)[0])
                m = (y2 - y1) / (x2 - x1)
                c1 = y1 - (m * x1)
                Ti = (midi - c1) / m
                npeaks[count - 1] = Ti
                rawmap[row, col] = Ti
            else:
                npeaks[count - 1] = 0
                rawmap[row, col] = 0
# %% Upstroke midpoint (velalgo == 3): activation time taken where the upstroke
# crosses 50% of the local amplitude, searched within the 'before' window (like APD)
if velalgo == 3:
    for row in range(rows):
        for col in range(cols):
            if mask[row, col] != 0:
                count += 1
                highVal = None
                lowVal = None
                trace = scipy.signal.savgol_filter(
                    np.squeeze(images[row, col, :]).astype(float),
                    window_length=framesize, polyorder=order)
                inv = np.max(trace) - trace      # inverted trace (MATLAB imcomplement)
                # Depolarisation point: steepest smoothed upstroke within the
                # first 'before' frames plus ~40 ms
                s_dsigav = np.convolve(np.diff(inv), np.ones(5) / 5, mode='same')
                dsigav_up = s_dsigav[:before + round(40 / exposure)]
                s_upstroke = int(np.argmax(dsigav_up)) if dsigav_up.size else None
                maxInd = int(np.argmax(inv))
                maxi = inv[maxInd]
                if s_upstroke is not None:
                    mini = inv[:maxInd + 1].min()
                    midi = (maxi - mini) * 0.5 + mini    # 50% amplitude level
                    # Find the first time 50% amplitude is reached (i.e. in the upstroke, not repol)
                    count50 = 0
                    stop = min(before + round(50 / exposure), len(inv) - 1)
                    for i in range(stop):
                        if inv[i] < midi and inv[i + 1] > midi and count50 == 0:
                            lowVal = inv[i]
                            highVal = inv[i + 1]
                            count50 += 1
                    # Interpolate linearly between the two samples around the crossing
                    if highVal is not None and lowVal is not None:
                        y1, y2 = highVal, lowVal
                        x1 = int(np.where(inv == highVal)[0][0])
                        x2 = int(np.where(inv == lowVal)[0][-1])
                        if x1 == x2:
                            Ti = float('nan')
                        else:
                            m = (y2 - y1) / (x2 - x1)
                            c1 = y1 - (m * x1)
                            Ti = (midi - c1) / m
                        npeaks[count - 1] = Ti
                        rawmap[row, col] = Ti
                    else:
                        npeaks[count - 1] = 0
                        rawmap[row, col] = 0
                else:
                    npeaks[count - 1] = 0
                    rawmap[row, col] = 0
            else:
                rawmap[row, col] = 0
# %% Normalise the raw activation map and convert it to time units
import scipy.ndimage  # local import: used for the 2-D median filter below
A = np.unique(rawmap)
dif = np.diff(A, n=2)                 # second difference of the sorted unique values
# +1 marks the start and -1 the end of a run of consecutive activation values;
# the longest run is used to pick a robust offset (works for 2-D maps and line stacks)
train = np.diff(np.concatenate(([0], (np.round(dif) == 0).astype(int), [0])))
p = np.where(train == 1)[0]
q = np.where(train == -1)[0]
offset = None
if p.size and q.size:
    ind = int(np.argmax(q - p))
    first = p[ind]
    if A[first] == 0:
        A[first] = 1
    offset = A[first] - 1
# If no offset can be found the map is left un-shifted; the original code only warned
# that the differences in activation time may be too short at this framerate
map_ = rawmap.astype(float)
if offset is not None:
    if inoff is None:                 # inoff: externally supplied offset, if any
        map_ = map_ - offset
        aoff = offset
    else:
        map_ = map_ - inoff
map_ = map_ * mask.astype(float)
# 3x3 median filter with reflected borders, as in MATLAB medfilt2(..., 'symmetric')
isomap = scipy.ndimage.median_filter(map_, size=3, mode='reflect')
actmap = isomap * exposure            # activation map in the same time units as 'exposure'
```
|
{
"source": "jd1/EsphoMaTrix",
"score": 2
}
|
#### File: components/ehmtx/__init__.py
```python
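"""Code generation for the EHMTX 8x32 matrix display component.

Validates the ehmtx YAML configuration (display, font, time source, icon list
and timing options), registers the ehmtx.* automation actions and converts the
configured icon files into PROGMEM animation data.
"""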
from argparse import Namespace
import logging
from esphome import core, automation
from esphome.components import display, font, time, text_sensor
import esphome.components.image as espImage
import esphome.config_validation as cv
import esphome.codegen as cg
from esphome.const import CONF_BLUE, CONF_GREEN, CONF_RED, CONF_FILE, CONF_ID, CONF_BRIGHTNESS, CONF_RAW_DATA_ID, CONF_TYPE, CONF_TIME, CONF_DURATION, CONF_TRIGGER_ID
from esphome.core import CORE, HexInt
from esphome.cpp_generator import RawExpression
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ["display", "light", "api"]
AUTO_LOAD = ["ehmtx"]
MAXFRAMES = 8
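# Animated icons are truncated to at most MAXFRAMES frames (see to_code below)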
Icons_ = display.display_ns.class_("Animation")
ehmtx_ns = cg.esphome_ns.namespace("esphome")
EHMTX_ = ehmtx_ns.class_("EHMTX", cg.Component)
# Triggers
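# Fired each time the display advances to the next screen (used by on_next_screen)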
NextScreenTrigger = ehmtx_ns.class_(
"EHMTXNextScreenTrigger", automation.Trigger.template(cg.std_string)
)
CONF_SHOWCLOCK = "show_clock"
CONF_SHOWSCREEN = "show_screen"
CONF_EHMTX = "ehmtx"
CONF_ICONS = "icons"
CONF_DISPLAY = "display8x32"
CONF_ICONID = "id"
CONF_SCROLLINTERVALL = "scroll_intervall"
CONF_ANIMINTERVALL = "anim_intervall"
CONF_FONT_ID = "font_id"
CONF_YOFFSET = "yoffset"
CONF_XOFFSET = "xoffset"
CONF_ON_NEXT_SCREEN = "on_next_screen"
CONF_ICON = "icon_name"
CONF_TEXT = "text"
CONF_ALARM = "alarm"
EHMTX_SCHEMA = cv.Schema({
cv.Required(CONF_ID): cv.declare_id(EHMTX_),
cv.Required(CONF_TIME): cv.use_id(time),
cv.Required(CONF_DISPLAY): cv.use_id(display),
cv.Required(CONF_FONT_ID): cv.use_id(font),
cv.Optional(
CONF_SHOWCLOCK, default="5"
): cv.templatable(cv.positive_int),
cv.Optional(
CONF_YOFFSET, default="-5"
): cv.templatable(cv.int_range(min=-32, max=32)),
cv.Optional(
CONF_XOFFSET, default="0"
): cv.templatable(cv.int_range(min=-32, max=32)),
cv.Optional(CONF_SCROLLINTERVALL, default="80"
): cv.templatable(cv.positive_int),
cv.Optional(
CONF_ANIMINTERVALL, default="192"
): cv.templatable(cv.positive_int),
cv.Optional(
CONF_SHOWSCREEN, default="8"
): cv.templatable(cv.positive_int),
cv.Optional(CONF_BRIGHTNESS, default=80): cv.templatable(cv.int_range(min=0, max=255)),
cv.Optional(
CONF_DURATION, default="5"
): cv.templatable(cv.positive_int),
cv.Optional(CONF_ON_NEXT_SCREEN): automation.validate_automation(
{
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_id(NextScreenTrigger),
}
),
cv.Required(CONF_ICONS): cv.All(
cv.ensure_list(
{
cv.Required(CONF_ICONID): cv.declare_id(Icons_),
cv.Required(CONF_FILE): cv.file_,
cv.Optional(CONF_TYPE, default="RGB24"): cv.enum(
espImage.IMAGE_TYPE, upper=True
),
cv.GenerateID(CONF_RAW_DATA_ID): cv.declare_id(cg.uint8),
}
),
cv.Length(max=64),
)})
CONFIG_SCHEMA = cv.All(font.validate_pillow_installed, EHMTX_SCHEMA)
ADD_SCREEN_ACTION_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.use_id(EHMTX_),
cv.Required(CONF_ICON): cv.templatable(cv.string),
cv.Required(CONF_TEXT): cv.templatable(cv.string),
cv.Optional(CONF_DURATION): cv.templatable(cv.positive_int),
cv.Optional(CONF_ALARM, default=False): cv.templatable(cv.boolean),
}
)
AddScreenAction = ehmtx_ns.class_("AddScreenAction", automation.Action)
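# ehmtx.add.screen: queue a screen with the given icon and text; duration and alarm flag are optional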
@automation.register_action(
"ehmtx.add.screen", AddScreenAction, ADD_SCREEN_ACTION_SCHEMA
)
async def ehmtx_add_screen_action_to_code(config, action_id, template_arg, args):
paren = await cg.get_variable(config[CONF_ID])
var = cg.new_Pvariable(action_id, template_arg, paren)
template_ = await cg.templatable(config[CONF_ICON], args, cg.std_string)
cg.add(var.set_icon(template_))
template_ = await cg.templatable(config[CONF_TEXT], args, cg.std_string)
cg.add(var.set_text(template_))
if CONF_DURATION in config:
template_ = await cg.templatable(config[CONF_DURATION], args, cg.uint8)
cg.add(var.set_duration(template_))
template_ = await cg.templatable(config[CONF_ALARM], args, bool)
cg.add(var.set_alarm(template_))
return var
SET_BRIGHTNESS_ACTION_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.use_id(EHMTX_),
cv.Optional(CONF_BRIGHTNESS, default=80): cv.templatable(cv.int_range(min=0, max=255)),
}
)
SetBrightnessAction = ehmtx_ns.class_("SetBrightnessAction", automation.Action)
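# ehmtx.set.brightness: set the display brightness (0-255, default 80)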
@automation.register_action(
"ehmtx.set.brightness", SetBrightnessAction, SET_BRIGHTNESS_ACTION_SCHEMA
)
async def ehmtx_set_brightness_action_to_code(config, action_id, template_arg, args):
paren = await cg.get_variable(config[CONF_ID])
var = cg.new_Pvariable(action_id, template_arg, paren)
template_ = await cg.templatable(config[CONF_BRIGHTNESS], args, cg.int32)
cg.add(var.set_brightness(template_))
return var
SET_COLOR_ACTION_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.use_id(EHMTX_),
cv.Required(CONF_RED): cv.templatable(cv.uint8_t,),
cv.Required(CONF_BLUE): cv.templatable(cv.uint8_t,),
cv.Required(CONF_GREEN): cv.templatable(cv.uint8_t,),
}
)
SetIndicatorOnAction = ehmtx_ns.class_("SetIndicatorOn", automation.Action)
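# ehmtx.indicator.on: enable the indicator with the given RGB colour (one byte per channel)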
@automation.register_action(
"ehmtx.indicator.on", SetIndicatorOnAction, SET_COLOR_ACTION_SCHEMA
)
async def ehmtx_set_indicator_on_action_to_code(config, action_id, template_arg, args):
paren = await cg.get_variable(config[CONF_ID])
var = cg.new_Pvariable(action_id, template_arg, paren)
template_ = await cg.templatable(config[CONF_RED], args, cg.int_)
cg.add(var.set_red(template_))
template_ = await cg.templatable(config[CONF_GREEN], args, cg.int_)
cg.add(var.set_green(template_))
template_ = await cg.templatable(config[CONF_BLUE], args, cg.int_)
cg.add(var.set_blue(template_))
return var
DELETE_SCREEN_ACTION_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.use_id(EHMTX_),
cv.Required(CONF_ICON): cv.templatable(cv.string),
}
)
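# This single-icon schema is shared by ehmtx.delete.screen and ehmtx.force.screen below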
DeleteScreenAction = ehmtx_ns.class_("DeleteScreen", automation.Action)
@automation.register_action(
"ehmtx.delete.screen", DeleteScreenAction, DELETE_SCREEN_ACTION_SCHEMA
)
async def ehmtx_delete_screen_action_to_code(config, action_id, template_arg, args):
paren = await cg.get_variable(config[CONF_ID])
var = cg.new_Pvariable(action_id, template_arg, paren)
template_ = await cg.templatable(config[CONF_ICON], args, cg.std_string)
cg.add(var.set_icon(template_))
return var
ForceScreenAction = ehmtx_ns.class_("ForceScreen", automation.Action)
@automation.register_action(
"ehmtx.force.screen", ForceScreenAction, DELETE_SCREEN_ACTION_SCHEMA
)
async def ehmtx_force_screen_action_to_code(config, action_id, template_arg, args):
paren = await cg.get_variable(config[CONF_ID])
var = cg.new_Pvariable(action_id, template_arg, paren)
template_ = await cg.templatable(config[CONF_ICON], args, cg.std_string)
cg.add(var.set_icon(template_))
return var
SetIndicatorOffAction = ehmtx_ns.class_("SetIndicatorOff", automation.Action)
INDICATOR_OFF_ACTION_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.use_id(EHMTX_),
}
)
@automation.register_action(
"ehmtx.indicator.off", SetIndicatorOffAction, INDICATOR_OFF_ACTION_SCHEMA
)
async def ehmtx_set_indicator_off_action_to_code(config, action_id, template_arg, args):
paren = await cg.get_variable(config[CONF_ID])
var = cg.new_Pvariable(action_id, template_arg, paren)
return var
CODEOWNERS = ["@lubeda"]
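# Main code generation: load every configured icon with Pillow, scale it to 8x8,
# encode its frames and register the resulting Animation with the EHMTX component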
async def to_code(config):
from PIL import Image
var = cg.new_Pvariable(config[CONF_ID])
for conf in config[CONF_ICONS]:
path = CORE.relative_config_path(conf[CONF_FILE])
try:
image = Image.open(path)
except Exception as e:
raise core.EsphomeError(f"Could not load image file {path}: {e}")
width, height = image.size
if (width != 8) or (height != 8):
image = image.resize([8, 8])
width, height = image.size
if hasattr(image, 'n_frames'):
frames = min(image.n_frames, MAXFRAMES)
else:
frames = 1
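        # Encode the frame data in the layout expected for the configured image type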
if conf[CONF_TYPE] == "GRAYSCALE":
data = [0 for _ in range(8 * 8 * frames)]
pos = 0
for frameIndex in range(frames):
image.seek(frameIndex)
frame = image.convert("L", dither=Image.NONE)
pixels = list(frame.getdata())
if len(pixels) != 8 * 8:
raise core.EsphomeError(
f"Unexpected number of pixels in {path} frame {frameIndex}: ({len(pixels)} != {height*width})"
)
for pix in pixels:
data[pos] = pix
pos += 1
elif conf[CONF_TYPE] == "RGB24":
data = [0 for _ in range(8 * 8 * 3 * frames)]
pos = 0
for frameIndex in range(frames):
image.seek(frameIndex)
frame = image.convert("RGB")
pixels = list(frame.getdata())
if len(pixels) != 8 * 8:
raise core.EsphomeError(
f"Unexpected number of pixels in {path} frame {frameIndex}: ({len(pixels)} != {height*width})"
)
for pix in pixels:
data[pos] = pix[0] & 248
pos += 1
data[pos] = pix[1] & 252
pos += 1
data[pos] = pix[2] & 248
pos += 1
elif conf[CONF_TYPE] == "BINARY":
width8 = ((width + 7) // 8) * 8
data = [0 for _ in range((height * width8 // 8) * frames)]
for frameIndex in range(frames):
image.seek(frameIndex)
frame = image.convert("1", dither=Image.NONE)
for y in range(height):
for x in range(width):
if frame.getpixel((x, y)):
continue
pos = x + y * width8 + (height * width8 * frameIndex)
data[pos // 8] |= 0x80 >> (pos % 8)
rhs = [HexInt(x) for x in data]
prog_arr = cg.progmem_array(conf[CONF_RAW_DATA_ID], rhs)
cg.new_Pvariable(
conf[CONF_ID],
prog_arr,
width,
height,
frames,
espImage.IMAGE_TYPE[conf[CONF_TYPE]],
)
cg.add(var.add_icon(RawExpression(
str(conf[CONF_ID])+",\""+str(conf[CONF_ID])+"\"")))
cg.add(var.set_clock_time(config[CONF_SHOWCLOCK]))
cg.add(var.set_default_brightness(config[CONF_BRIGHTNESS]))
cg.add(var.set_screen_time(config[CONF_SHOWSCREEN]))
cg.add(var.set_duration(config[CONF_DURATION]))
cg.add(var.set_scroll_intervall(config[CONF_SCROLLINTERVALL]))
cg.add(var.set_anim_intervall(config[CONF_ANIMINTERVALL]))
cg.add(var.set_font_offset(config[CONF_XOFFSET], config[CONF_YOFFSET]))
disp = await cg.get_variable(config[CONF_DISPLAY])
cg.add(var.set_display(disp))
f = await cg.get_variable(config[CONF_FONT_ID])
cg.add(var.set_font(f))
ehmtxtime = await cg.get_variable(config[CONF_TIME])
cg.add(var.set_clock(ehmtxtime))
for conf in config.get(CONF_ON_NEXT_SCREEN, []):
trigger = cg.new_Pvariable(conf[CONF_TRIGGER_ID], var)
await automation.build_automation(trigger, [(cg.std_string, "x"), (cg.std_string, "y")], conf)
await cg.register_component(var, config)
```
|