metadata (dict) | text (string, lengths 60 to 3.49M) |
---|---|
{
"source": "jm9e/django-adaptors",
"score": 2
} |
#### File: jm9e/django-adaptors/setup.py
```python
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='django-adaptors',
version='0.2.5',
description='Convert CSV/XML files into python object or django model',
author='<NAME>',
author_email='<EMAIL>',
long_description=read('README.txt'),
license="BSD",
keywords="CSV XML Django adaptor",
packages=['adaptor'],
install_requires=[
'Django>=1.4',
],
extras_require={
'XML': ['lxml>=2.3.4']
},
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
])
```
#### File: django-adaptors/tests/validators_tests.py
```python
from django.test import TestCase
from adaptor.validators import RegexValidator
class TestValidator(TestCase):
def test_regex_validator(self):
validator = RegexValidator("status", "^Z[0,2]")
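# "[0,2]" is a character class matching '0', ',' or '2', so the pattern accepts "Z0"/"Z2" but rejects "Z1"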
self.assertTrue(validator().validate("Z0"))
self.assertFalse(validator().validate("Z1"))
``` |
{
"source": "jm9e/FL_Pipeline",
"score": 2
} |
#### File: FL_Pipeline/pipeline/evaluation.py
```python
import json
import random
from sklearn.metrics import confusion_matrix, precision_recall_curve, auc, roc_auc_score
import numpy as np
from pipeline.pipeline import PipelineStep, PipelinePacket
class EvaluateStep(PipelineStep):
def __init__(self):
super().__init__(['classifier', 'test_data'])
self.global_model = None
self.local_models = None
def process(self, item):
if item.label == 'classifier':
self.global_model = item.data['global_model']
self.local_models = item.data['local_models']
yield item
elif item.label == 'test_data':
cm, pr, roc = evaluate_classifier(self.global_model, item.data[0], item.data[1], item.data[2])
yield PipelinePacket('global_cm', cm)
if pr:
yield PipelinePacket('global_pr', pr)
yield PipelinePacket('global_roc', roc)
if self.local_models is not None:
for local_model in self.local_models:
cm, pr, roc = evaluate_classifier(local_model, item.data[0], item.data[1], item.data[2])
yield PipelinePacket('local_cm', cm)
if pr:
yield PipelinePacket('local_pr', pr)
yield PipelinePacket('local_roc', roc)
yield PipelinePacket('end_evaluation', [item.data[0].shape[0]])
def setup(self):
pass
def cleanup(self):
pass
class AnalysisStep(PipelineStep):
def __init__(self, name, variable='sites', pr_samples=10):
super().__init__(['meta', 'end_evaluation', 'global_pr', 'global_cm', 'global_roc', 'local_pr', 'local_cm', 'local_roc'])
self.name = name
self.variable = variable
self.cm = None
self.pr = None
self.roc = None
self.cms = []
self.prs = []
self.rocs = []
self.meta = None
self.pr_samples = pr_samples
def process(self, item):
if item.label == 'end_evaluation':
cm = self.cm
pr = self.pr
roc = self.roc
cms = self.cms
prs = self.prs
rocs = self.rocs
self.cm = None
self.pr = None
self.roc = None
self.cms = []
self.prs = []
self.rocs = []
global_cm = [[int(cm[0, 0]), int(cm[0, 1])],
[int(cm[1, 0]), int(cm[1, 1])]]
global_pr = None
global_auc = None
global_roc = roc
if pr:
global_auc = auc(pr[1], pr[0])
global_pr = list(zip(pr[1], pr[0]))
if len(global_pr) > self.pr_samples:
global_pr = random.sample(global_pr, self.pr_samples)
local_cms = []
local_prs = []
local_aucs = []
local_rocs = rocs
for local_cm in cms:
local_cms.append([[int(local_cm[0, 0]), int(local_cm[0, 1])],
[int(local_cm[1, 0]), int(local_cm[1, 1])]])
for local_pr in prs:
local_aucs.append(auc(local_pr[1], local_pr[0]))
local_pr = list(zip(local_pr[1], local_pr[0]))
if len(local_pr) > self.pr_samples:
local_pr = random.sample(local_pr, self.pr_samples)
local_prs.append(local_pr)
yield PipelinePacket('analysis', {
'name': self.name,
self.variable: self.meta[0],
'weighted': '1' if self.meta[1] else '0',
'gauc': json.dumps(global_auc),
'groc': json.dumps(global_roc),
'gcm': json.dumps(global_cm),
'gpr': json.dumps(global_pr),
'lauc': json.dumps(local_aucs),
'lroc': json.dumps(local_rocs),
'lcm': json.dumps(local_cms),
'lpr': json.dumps(local_prs),
})
elif item.label == 'global_cm':
self.cm = item.data
elif item.label == 'local_cm':
self.cms.append(item.data)
elif item.label == 'global_pr':
self.pr = item.data
elif item.label == 'local_pr':
self.prs.append(item.data)
elif item.label == 'global_roc':
self.roc = item.data
elif item.label == 'local_roc':
self.rocs.append(item.data)
elif item.label == 'meta':
self.meta = item.data
yield item
def setup(self):
pass
def cleanup(self):
pass
class StatsStep(PipelineStep):
def __init__(self, labels):
super().__init__(labels + ['done'])
self.data = []
def process(self, item: PipelinePacket):
if item.label != 'done':
self.data.append(item.data)
# a = np.array(self.data)
# acc = ((a[:, 0, 0] + a[:, 1, 1]) / a.sum(axis=(1, 2)))
# print()
# print(f'Total: {a.shape[0]}')
# print(f'Acc mean: {np.mean(acc)}')
# print(f'Acc median: {np.median(acc)}')
# print(f'Acc std: {np.std(acc)}')
# print(f'Acc min: {np.min(acc)}')
# print(f'Acc max: {np.max(acc)}')
# print()
if item.label == 'done':
a = np.array(self.data)
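# per-run accuracy from each 2x2 confusion matrix: (TN + TP) / total count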
acc = ((a[:, 0, 0] + a[:, 1, 1]) / a.sum(axis=(1, 2)))
cm = (a.sum(axis=0) / a.sum(axis=(0, 1, 2)))
print()
# print(f'CM: {cm}')
print(f'Total: {a.shape[0]}')
print(f'Acc mean: {np.mean(acc)}')
print(f'Acc median: {np.median(acc)}')
print(f'Acc std: {np.std(acc)}')
print(f'Acc min: {np.min(acc)}')
print(f'Acc max: {np.max(acc)}')
print()
yield item
def setup(self):
pass
def cleanup(self):
pass
def evaluate_classifier(model, data, classes, target):
"""
:param model: needs to provide a predict and predict_proba method
:param data: test dataset
:param classes: labels in the dataset
:param target: target label to predict
:return: confusion matrix and precision/recall curve
"""
X = data.drop(columns=[target]).values
y_true = data[target].values
# Fixed threshold prediction
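# note: predict is expected to return per-class scores of shape (n_samples, n_classes),
# since the predicted class is taken as the argmax over axis 1 below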
y_preds = model.predict(X)
y_pred_idx = np.argmax(y_preds, axis=1)
y_pred = classes[y_pred_idx]
cm = confusion_matrix(y_true, y_pred)
pr = None
roc = None
if len(classes) == 2:
# Probability predictions
y_preds_prob = model.predict_proba(X)[:, 1]
p, r, t = precision_recall_curve(y_true, y_preds_prob)
roc = roc_auc_score(y_true, y_preds_prob)
pr = [p, r, t]
return cm, pr, roc
```
#### File: FL_Pipeline/pipeline/result.py
```python
import os
from datetime import datetime
from pipeline.pipeline import PipelineStep, PipelinePacket
class WriteResultsStep(PipelineStep):
def __init__(self, columns, labels=None):
super().__init__(labels)
self.__results = []
self.columns = columns
def process(self, item: PipelinePacket):
filename = item.data['name'] + '.csv'
if not os.path.isfile(filename):
with open(filename, "w") as f:
f.write(";".join(['time'] + self.columns))
f.write("\n")
with open(filename, "a") as f:
f.write(";".join([f'{datetime.now()}'] + [f'{item.data[col]}' for col in self.columns]))
f.write("\n")
yield None
def setup(self):
pass
def cleanup(self):
pass
```
#### File: FL_Pipeline/pipeline/training.py
```python
from typing import Generator
import numpy as np
from sklearn.base import ClassifierMixin
from sklearn.ensemble import RandomForestClassifier
from pipeline.pipeline import PipelineStep, PipelinePacket
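# Constant classifier that always predicts a single label; used by TrainRandomForestStep
# below when a site's training data contains only one class (degenerate case).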
class VoidClassifier(ClassifierMixin):
def __init__(self, classes, label):
self.cn = classes.shape[0]
self.idx = list(classes).index(label)
def predict(self, X):
preds = np.zeros((X.shape[0], 1), dtype=np.int)
preds[:] = self.idx
return preds.T
def predict_proba(self, X):
preds = np.zeros((X.shape[0], self.cn))
preds[:, self.idx] = 1
return preds
class TrainRandomForestStep(PipelineStep):
def __init__(self, estimators):
self.estimators = estimators
self.meta = None
super().__init__(['meta', 'train_data'])
def process(self, item: PipelinePacket) -> Generator[PipelinePacket, None, None]:
if item.label == 'meta':
self.meta = item.data
yield item
else:
data = item.data[0]
classes = item.data[1]
target = item.data[2]
X = data.drop(columns=[target]).values
y = data[target].values
estimators = self.estimators(self.meta[0])
if len(set(y)) > 1:
rf = RandomForestClassifier(n_estimators=estimators)
rf.fit(X, y)
yield PipelinePacket('classifier_list', (list(rf.estimators_), (X, y), classes, target))
else:
print(f'Degenerated tree ({y})')
classifiers = [VoidClassifier(classes, y[0]) for _ in range(estimators)]
yield PipelinePacket('classifier_list', (classifiers, (X, y), classes, target))
def setup(self):
pass
def cleanup(self):
pass
``` |
{
"source": "jma100/surreal",
"score": 2
} |
#### File: surreal/datageneration/main_part1.py
```python
import sys
import os
import random
import math
import bpy
import numpy as np
from os import getenv
from os import remove
from os.path import join, dirname, realpath, exists
from mathutils import Matrix, Vector, Quaternion, Euler
from glob import glob
from random import choice
from pickle import load
from bpy_extras.object_utils import world_to_camera_view as world2cam
sys.path.insert(0, ".")
def mkdir_safe(directory):
try:
os.makedirs(directory)
except FileExistsError:
pass
def setState0():
for ob in bpy.data.objects.values():
ob.select=False
bpy.context.scene.objects.active = None
sorted_parts = ['hips','leftUpLeg','rightUpLeg','spine','leftLeg','rightLeg',
'spine1','leftFoot','rightFoot','spine2','leftToeBase','rightToeBase',
'neck','leftShoulder','rightShoulder','head','leftArm','rightArm',
'leftForeArm','rightForeArm','leftHand','rightHand','leftHandIndex1' ,'rightHandIndex1']
# order
part_match = {'root':'root', 'bone_00':'Pelvis', 'bone_01':'L_Hip', 'bone_02':'R_Hip',
'bone_03':'Spine1', 'bone_04':'L_Knee', 'bone_05':'R_Knee', 'bone_06':'Spine2',
'bone_07':'L_Ankle', 'bone_08':'R_Ankle', 'bone_09':'Spine3', 'bone_10':'L_Foot',
'bone_11':'R_Foot', 'bone_12':'Neck', 'bone_13':'L_Collar', 'bone_14':'R_Collar',
'bone_15':'Head', 'bone_16':'L_Shoulder', 'bone_17':'R_Shoulder', 'bone_18':'L_Elbow',
'bone_19':'R_Elbow', 'bone_20':'L_Wrist', 'bone_21':'R_Wrist', 'bone_22':'L_Hand', 'bone_23':'R_Hand'}
part2num = {part:(ipart+1) for ipart,part in enumerate(sorted_parts)}
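# 1-based index per body part; used below as the material pass_index so the
# segmentation can be rendered as a separate material pass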
# create one material per part as defined in a pickle with the segmentation
# this is useful to render the segmentation in a material pass
def create_segmentation(ob, params):
materials = {}
vgroups = {}
with open('pkl/segm_per_v_overlap.pkl', 'rb') as f:
vsegm = load(f)
bpy.ops.object.material_slot_remove()
parts = sorted(vsegm.keys())
for part in parts:
vs = vsegm[part]
vgroups[part] = ob.vertex_groups.new(part)
vgroups[part].add(vs, 1.0, 'ADD')
bpy.ops.object.vertex_group_set_active(group=part)
materials[part] = bpy.data.materials['Material'].copy()
materials[part].pass_index = part2num[part]
bpy.ops.object.material_slot_add()
ob.material_slots[-1].material = materials[part]
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.vertex_group_select()
bpy.ops.object.material_slot_assign()
bpy.ops.object.mode_set(mode='OBJECT')
return(materials)
# create the different passes that we render
def create_composite_nodes(tree, params, img=None, idx=0):
res_paths = {k:join(params['tmp_path'], '%05d_%s'%(idx, k)) for k in params['output_types'] if params['output_types'][k]}
# clear default nodes
for n in tree.nodes:
tree.nodes.remove(n)
# create node for foreground image
layers = tree.nodes.new('CompositorNodeRLayers')
layers.location = -300, 400
# create node for background image
bg_im = tree.nodes.new('CompositorNodeImage')
bg_im.location = -300, 30
if img is not None:
bg_im.image = img
if(params['output_types']['vblur']):
# create node for computing vector blur (approximate motion blur)
vblur = tree.nodes.new('CompositorNodeVecBlur')
vblur.factor = params['vblur_factor']
vblur.location = 240, 400
# create node for saving output of vector blurred image
vblur_out = tree.nodes.new('CompositorNodeOutputFile')
vblur_out.format.file_format = 'PNG'
vblur_out.base_path = res_paths['vblur']
vblur_out.location = 460, 460
# create node for mixing foreground and background images
mix = tree.nodes.new('CompositorNodeMixRGB')
mix.location = 40, 30
mix.use_alpha = True
# create node for the final output
composite_out = tree.nodes.new('CompositorNodeComposite')
composite_out.location = 240, 30
# create node for saving depth
if(params['output_types']['depth']):
depth_out = tree.nodes.new('CompositorNodeOutputFile')
depth_out.location = 40, 700
depth_out.format.file_format = 'OPEN_EXR'
depth_out.base_path = res_paths['depth']
# create node for saving normals
if(params['output_types']['normal']):
normal_out = tree.nodes.new('CompositorNodeOutputFile')
normal_out.location = 40, 600
normal_out.format.file_format = 'OPEN_EXR'
normal_out.base_path = res_paths['normal']
# create node for saving foreground image
if(params['output_types']['fg']):
fg_out = tree.nodes.new('CompositorNodeOutputFile')
fg_out.location = 170, 600
fg_out.format.file_format = 'PNG'
fg_out.base_path = res_paths['fg']
# create node for saving ground truth flow
if(params['output_types']['gtflow']):
gtflow_out = tree.nodes.new('CompositorNodeOutputFile')
gtflow_out.location = 40, 500
gtflow_out.format.file_format = 'OPEN_EXR'
gtflow_out.base_path = res_paths['gtflow']
# create node for saving segmentation
if(params['output_types']['segm']):
segm_out = tree.nodes.new('CompositorNodeOutputFile')
segm_out.location = 40, 400
segm_out.format.file_format = 'OPEN_EXR'
segm_out.base_path = res_paths['segm']
# merge fg and bg images
tree.links.new(bg_im.outputs[0], mix.inputs[1])
tree.links.new(layers.outputs['Image'], mix.inputs[2])
if(params['output_types']['vblur']):
tree.links.new(mix.outputs[0], vblur.inputs[0]) # apply vector blur on the bg+fg image,
tree.links.new(layers.outputs['Z'], vblur.inputs[1]) # using depth,
tree.links.new(layers.outputs['Speed'], vblur.inputs[2]) # and flow.
tree.links.new(vblur.outputs[0], vblur_out.inputs[0]) # save vblurred output
tree.links.new(mix.outputs[0], composite_out.inputs[0]) # bg+fg image
if(params['output_types']['fg']):
tree.links.new(layers.outputs['Image'], fg_out.inputs[0]) # save fg
if(params['output_types']['depth']):
tree.links.new(layers.outputs['Z'], depth_out.inputs[0]) # save depth
if(params['output_types']['normal']):
tree.links.new(layers.outputs['Normal'], normal_out.inputs[0]) # save normal
if(params['output_types']['gtflow']):
tree.links.new(layers.outputs['Speed'], gtflow_out.inputs[0]) # save ground truth flow
if(params['output_types']['segm']):
tree.links.new(layers.outputs['IndexMA'], segm_out.inputs[0]) # save segmentation
return(res_paths)
# creation of the spherical harmonics material, using an OSL script
def create_sh_material(tree, sh_path, img=None):
# clear default nodes
for n in tree.nodes:
tree.nodes.remove(n)
uv = tree.nodes.new('ShaderNodeTexCoord')
uv.location = -800, 400
uv_xform = tree.nodes.new('ShaderNodeVectorMath')
uv_xform.location = -600, 400
uv_xform.inputs[1].default_value = (0, 0, 1)
uv_xform.operation = 'AVERAGE'
uv_im = tree.nodes.new('ShaderNodeTexImage')
uv_im.location = -400, 400
if img is not None:
uv_im.image = img
rgb = tree.nodes.new('ShaderNodeRGB')
rgb.location = -400, 200
script = tree.nodes.new('ShaderNodeScript')
script.location = -230, 400
script.mode = 'EXTERNAL'
script.filepath = sh_path #'spher_harm/sh.osl' #using the same file from multiple jobs causes white texture
script.update()
# the emission node makes it independent of the scene lighting
emission = tree.nodes.new('ShaderNodeEmission')
emission.location = -60, 400
mat_out = tree.nodes.new('ShaderNodeOutputMaterial')
mat_out.location = 110, 400
tree.links.new(uv.outputs[2], uv_im.inputs[0])
tree.links.new(uv_im.outputs[0], script.inputs[0])
tree.links.new(script.outputs[0], emission.inputs[0])
tree.links.new(emission.outputs[0], mat_out.inputs[0])
# computes rotation matrix through Rodrigues formula as in cv2.Rodrigues
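# R = cos(theta)*I + (1 - cos(theta))*r*r^T + sin(theta)*[r]_x,
# where theta = ||rotvec||, r is the unit rotation axis and [r]_x its skew-symmetric cross-product matrix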
def Rodrigues(rotvec):
theta = np.linalg.norm(rotvec)
r = (rotvec/theta).reshape(3, 1) if theta > 0. else rotvec
cost = np.cos(theta)
mat = np.asarray([[0, -r[2], r[1]],
[r[2], 0, -r[0]],
[-r[1], r[0], 0]])
return(cost*np.eye(3) + (1-cost)*r.dot(r.T) + np.sin(theta)*mat)
def init_scene(scene, params, gender='female'):
# load fbx model
bpy.ops.import_scene.fbx(filepath=join(params['smpl_data_folder'], 'basicModel_%s_lbs_10_207_0_v1.0.2.fbx' % gender[0]),
axis_forward='Y', axis_up='Z', global_scale=100)
obname = '%s_avg' % gender[0]
ob = bpy.data.objects[obname]
ob.data.use_auto_smooth = False # autosmooth creates artifacts
# assign the existing spherical harmonics material
ob.active_material = bpy.data.materials['Material']
# delete the default cube (which held the material)
bpy.ops.object.select_all(action='DESELECT')
bpy.data.objects['Cube'].select = True
bpy.ops.object.delete(use_global=False)
# set camera properties and initial position
bpy.ops.object.select_all(action='DESELECT')
cam_ob = bpy.data.objects['Camera']
scn = bpy.context.scene
scn.objects.active = cam_ob
cam_ob.matrix_world = Matrix(((0., 0., 1, params['camera_distance']),
(1., 0., 0., 0.),
(0., 1., 0., 1.),
(0.0, 0.0, 0.0, 1.0)))
cam_ob.data.angle = math.radians(40)
cam_ob.data.lens = 60
cam_ob.data.clip_start = 0.1
cam_ob.data.sensor_width = 32
# setup an empty object in the center which will be the parent of the Camera
# this allows to easily rotate an object around the origin
scn.cycles.film_transparent = True
scn.render.layers["RenderLayer"].use_pass_vector = True
scn.render.layers["RenderLayer"].use_pass_normal = True
scene.render.layers['RenderLayer'].use_pass_emit = True
scene.render.layers['RenderLayer'].use_pass_material_index = True
# set render size
scn.render.resolution_x = params['resy']
scn.render.resolution_y = params['resx']
scn.render.resolution_percentage = 100
scn.render.image_settings.file_format = 'PNG'
# clear existing animation data
ob.data.shape_keys.animation_data_clear()
arm_ob = bpy.data.objects['Armature']
arm_ob.animation_data_clear()
return(ob, obname, arm_ob, cam_ob)
# transformation between pose and blendshapes
def rodrigues2bshapes(pose):
rod_rots = np.asarray(pose).reshape(24, 3)
mat_rots = [Rodrigues(rod_rot) for rod_rot in rod_rots]
bshapes = np.concatenate([(mat_rot - np.eye(3)).ravel()
for mat_rot in mat_rots[1:]])
return(mat_rots, bshapes)
# apply trans pose and shape to character
def apply_trans_pose_shape(trans, pose, shape, ob, arm_ob, obname, scene, cam_ob, frame=None):
# transform pose into rotation matrices (for pose) and pose blendshapes
mrots, bsh = rodrigues2bshapes(pose)
# set the location of the first bone to the translation parameter
arm_ob.pose.bones[obname+'_Pelvis'].location = trans
if frame is not None:
arm_ob.pose.bones[obname+'_root'].keyframe_insert('location', frame=frame)
# set the pose of each bone to the quaternion specified by pose
for ibone, mrot in enumerate(mrots):
bone = arm_ob.pose.bones[obname+'_'+part_match['bone_%02d' % ibone]]
bone.rotation_quaternion = Matrix(mrot).to_quaternion()
if frame is not None:
bone.keyframe_insert('rotation_quaternion', frame=frame)
bone.keyframe_insert('location', frame=frame)
# apply pose blendshapes
for ibshape, bshape in enumerate(bsh):
ob.data.shape_keys.key_blocks['Pose%03d' % ibshape].value = bshape
if frame is not None:
ob.data.shape_keys.key_blocks['Pose%03d' % ibshape].keyframe_insert('value', index=-1, frame=frame)
# apply shape blendshapes
for ibshape, shape_elem in enumerate(shape):
ob.data.shape_keys.key_blocks['Shape%03d' % ibshape].value = shape_elem
if frame is not None:
ob.data.shape_keys.key_blocks['Shape%03d' % ibshape].keyframe_insert('value', index=-1, frame=frame)
def get_bone_locs(obname, arm_ob, scene, cam_ob):
n_bones = 24
render_scale = scene.render.resolution_percentage / 100
render_size = (int(scene.render.resolution_x * render_scale),
int(scene.render.resolution_y * render_scale))
bone_locations_2d = np.empty((n_bones, 2))
bone_locations_3d = np.empty((n_bones, 3), dtype='float32')
# obtain the coordinates of each bone head in image space
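# world2cam (world_to_camera_view) returns normalized view coordinates, so scale by the render size to get pixel positions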
for ibone in range(n_bones):
bone = arm_ob.pose.bones[obname+'_'+part_match['bone_%02d' % ibone]]
co_2d = world2cam(scene, cam_ob, arm_ob.matrix_world * bone.head)
co_3d = arm_ob.matrix_world * bone.head
bone_locations_3d[ibone] = (co_3d.x,
co_3d.y,
co_3d.z)
bone_locations_2d[ibone] = (round(co_2d.x * render_size[0]),
round(co_2d.y * render_size[1]))
return(bone_locations_2d, bone_locations_3d)
# reset the joint positions of the character according to its new shape
def reset_joint_positions(orig_trans, shape, ob, arm_ob, obname, scene, cam_ob, reg_ivs, joint_reg):
# since the regression is sparse, only the relevant vertex
# elements (joint_reg) and their indices (reg_ivs) are loaded
reg_vs = np.empty((len(reg_ivs), 3)) # empty array to hold vertices to regress from
# zero the pose and trans to obtain joint positions in zero pose
apply_trans_pose_shape(orig_trans, np.zeros(72), shape, ob, arm_ob, obname, scene, cam_ob)
# obtain a mesh after applying modifiers
bpy.ops.wm.memory_statistics()
# me holds the vertices after applying the shape blendshapes
me = ob.to_mesh(scene, True, 'PREVIEW')
# fill the regressor vertices matrix
for iiv, iv in enumerate(reg_ivs):
reg_vs[iiv] = me.vertices[iv].co
bpy.data.meshes.remove(me)
# regress joint positions in rest pose
joint_xyz = joint_reg.dot(reg_vs)
# adapt joint positions in rest pose
arm_ob.hide = False
bpy.ops.object.mode_set(mode='EDIT')
arm_ob.hide = True
for ibone in range(24):
bb = arm_ob.data.edit_bones[obname+'_'+part_match['bone_%02d' % ibone]]
bboffset = bb.tail - bb.head
bb.head = joint_xyz[ibone]
bb.tail = bb.head + bboffset
bpy.ops.object.mode_set(mode='OBJECT')
return(shape)
# load poses and shapes
def load_body_data(smpl_data, ob, obname, gender='female', idx=0):
# load MoSHed data from CMU Mocap (only the given idx is loaded)
# create a dictionary with key the sequence name and values the pose and trans
cmu_keys = []
for seq in smpl_data.files:
if seq.startswith('pose_'):
cmu_keys.append(seq.replace('pose_', ''))
name = sorted(cmu_keys)[idx % len(cmu_keys)]
cmu_parms = {}
for seq in smpl_data.files:
if seq == ('pose_' + name):
cmu_parms[seq.replace('pose_', '')] = {'poses':smpl_data[seq],
'trans':smpl_data[seq.replace('pose_','trans_')]}
# compute the number of shape blendshapes in the model
n_sh_bshapes = len([k for k in ob.data.shape_keys.key_blocks.keys()
if k.startswith('Shape')])
# load all SMPL shapes
fshapes = smpl_data['%sshapes' % gender][:, :n_sh_bshapes]
return(cmu_parms, fshapes, name)
import time
start_time = None
def log_message(message):
elapsed_time = time.time() - start_time
print("[%.2f s] %s" % (elapsed_time, message))
def main():
# time logging
global start_time
start_time = time.time()
import argparse
# parse commandline arguments
log_message(sys.argv)
parser = argparse.ArgumentParser(description='Generate synth dataset images.')
parser.add_argument('--idx', type=int,
help='idx of the requested sequence')
parser.add_argument('--ishape', type=int,
help='requested cut, according to the stride')
parser.add_argument('--stride', type=int,
help='stride amount, default 50')
args = parser.parse_args(sys.argv[sys.argv.index("--") + 1:])
idx = args.idx
ishape = args.ishape
stride = args.stride
log_message("input idx: %d" % idx)
log_message("input ishape: %d" % ishape)
log_message("input stride: %d" % stride)
if idx == None:
exit(1)
if ishape == None:
exit(1)
if stride == None:
log_message("WARNING: stride not specified, using default value 50")
stride = 50
# import idx info (name, split)
idx_info = load(open("pkl/idx_info.pickle", 'rb'))
idx_info = [x for x in idx_info if x['name'][:4] != 'h36m']
# get runpass
(runpass, idx) = divmod(idx, len(idx_info))
log_message("runpass: %d" % runpass)
log_message("output idx: %d" % idx)
idx_info = idx_info[idx]
log_message("sequence: %s" % idx_info['name'])
log_message("nb_frames: %f" % idx_info['nb_frames'])
log_message("use_split: %s" % idx_info['use_split'])
# import configuration
log_message("Importing configuration")
import config
params = config.load_file('config', 'SYNTH_DATA')
smpl_data_folder = params['smpl_data_folder']
smpl_data_filename = params['smpl_data_filename']
bg_path = params['bg_path']
resy = params['resy']
resx = params['resx']
clothing_option = params['clothing_option'] # grey, nongrey or all
tmp_path = params['tmp_path']
output_path = params['output_path']
output_types = params['output_types']
stepsize = params['stepsize']
clipsize = params['clipsize']
openexr_py2_path = params['openexr_py2_path']
# compute number of cuts
nb_ishape = max(1, int(np.ceil((idx_info['nb_frames'] - (clipsize - stride))/stride)))
log_message("Max ishape: %d" % (nb_ishape - 1))
if ishape == None:
exit(1)
assert(ishape < nb_ishape)
# name is set given idx
name = idx_info['name']
output_path = join(output_path, 'run%d' % runpass, name.replace(" ", ""))
params['output_path'] = output_path
tmp_path = join(tmp_path, 'run%d_%s_c%04d' % (runpass, name.replace(" ", ""), (ishape + 1)))
params['tmp_path'] = tmp_path
# check if already computed
# + clean up existing tmp folders if any
if exists(tmp_path) and tmp_path != "" and tmp_path != "/":
os.system('rm -rf %s' % tmp_path)
rgb_vid_filename = "%s_c%04d.mp4" % (join(output_path, name.replace(' ', '')), (ishape + 1))
#if os.path.isfile(rgb_vid_filename):
# log_message("ALREADY COMPUTED - existing: %s" % rgb_vid_filename)
# return 0
# create tmp directory
if not exists(tmp_path):
mkdir_safe(tmp_path)
# >> don't use random generator before this point <<
# initialize RNG with seeds from sequence id
import hashlib
s = "synth_data:%d:%d:%d" % (idx, runpass,ishape)
seed_number = int(hashlib.sha1(s.encode('utf-8')).hexdigest(), 16) % (10 ** 8)
log_message("GENERATED SEED %d from string '%s'" % (seed_number, s))
random.seed(seed_number)
np.random.seed(seed_number)
if(output_types['vblur']):
vblur_factor = np.random.normal(0.5, 0.5)
params['vblur_factor'] = vblur_factor
log_message("Setup Blender")
# create copy-spher.harm. directory if not exists
sh_dir = join(tmp_path, 'spher_harm')
if not exists(sh_dir):
mkdir_safe(sh_dir)
sh_dst = join(sh_dir, 'sh_%02d_%05d.osl' % (runpass, idx))
os.system('cp spher_harm/sh.osl %s' % sh_dst)
genders = {0: 'female', 1: 'male'}
# pick random gender
gender = choice(genders)
scene = bpy.data.scenes['Scene']
scene.render.engine = 'CYCLES'
bpy.data.materials['Material'].use_nodes = True
scene.cycles.shading_system = True
scene.use_nodes = True
log_message("Listing background images")
bg_names = join(bg_path, '%s_img.txt' % idx_info['use_split'])
nh_txt_paths = []
with open(bg_names) as f:
for line in f:
nh_txt_paths.append(join(bg_path, line))
# grab clothing names
log_message("clothing: %s" % clothing_option)
with open( join(smpl_data_folder, 'textures', '%s_%s.txt' % ( gender, idx_info['use_split'] ) ) ) as f:
txt_paths = f.read().splitlines()
# if using only one source of clothing
if clothing_option == 'nongrey':
txt_paths = [k for k in txt_paths if 'nongrey' in k]
elif clothing_option == 'grey':
txt_paths = [k for k in txt_paths if 'nongrey' not in k]
# random clothing texture
cloth_img_name = choice(txt_paths)
cloth_img_name = join(smpl_data_folder, cloth_img_name)
cloth_img = bpy.data.images.load(cloth_img_name)
# random background
bg_img_name = choice(nh_txt_paths)[:-1]
bg_img = bpy.data.images.load(bg_img_name)
log_message("Loading parts segmentation")
beta_stds = np.load(join(smpl_data_folder, ('%s_beta_stds.npy' % gender)))
log_message("Building materials tree")
mat_tree = bpy.data.materials['Material'].node_tree
create_sh_material(mat_tree, sh_dst, cloth_img)
res_paths = create_composite_nodes(scene.node_tree, params, img=bg_img, idx=idx)
log_message("Loading smpl data")
smpl_data = np.load(join(smpl_data_folder, smpl_data_filename))
log_message("Initializing scene")
camera_distance = np.random.normal(8.0, 1)
params['camera_distance'] = camera_distance
ob, obname, arm_ob, cam_ob = init_scene(scene, params, gender)
setState0()
ob.select = True
bpy.context.scene.objects.active = ob
segmented_materials = True #True: 0-24, False: expected to have 0-1 bg/fg
log_message("Creating materials segmentation")
# create material segmentation
if segmented_materials:
materials = create_segmentation(ob, params)
prob_dressed = {'leftLeg':.5, 'leftArm':.9, 'leftHandIndex1':.01,
'rightShoulder':.8, 'rightHand':.01, 'neck':.01,
'rightToeBase':.9, 'leftShoulder':.8, 'leftToeBase':.9,
'rightForeArm':.5, 'leftHand':.01, 'spine':.9,
'leftFoot':.9, 'leftUpLeg':.9, 'rightUpLeg':.9,
'rightFoot':.9, 'head':.01, 'leftForeArm':.5,
'rightArm':.5, 'spine1':.9, 'hips':.9,
'rightHandIndex1':.01, 'spine2':.9, 'rightLeg':.5}
else:
materials = {'FullBody': bpy.data.materials['Material']}
prob_dressed = {'FullBody': .6}
orig_pelvis_loc = (arm_ob.matrix_world.copy() * arm_ob.pose.bones[obname+'_Pelvis'].head.copy()) - Vector((-1., 1., 1.))
orig_cam_loc = cam_ob.location.copy()
# unblocking both the pose and the blendshape limits
for k in ob.data.shape_keys.key_blocks.keys():
bpy.data.shape_keys["Key"].key_blocks[k].slider_min = -10
bpy.data.shape_keys["Key"].key_blocks[k].slider_max = 10
log_message("Loading body data")
cmu_parms, fshapes, name = load_body_data(smpl_data, ob, obname, idx=idx, gender=gender)
log_message("Loaded body data for %s" % name)
nb_fshapes = len(fshapes)
if idx_info['use_split'] == 'train':
fshapes = fshapes[:int(nb_fshapes*0.8)]
elif idx_info['use_split'] == 'test':
fshapes = fshapes[int(nb_fshapes*0.8):]
# pick random real body shape
shape = choice(fshapes) #+random_shape(.5) can add noise
#shape = random_shape(3.) # random body shape
# example shapes
#shape = np.zeros(10) #average
#shape = np.array([ 2.25176191, -3.7883464 , 0.46747496, 3.89178988, 2.20098416, 0.26102114, -3.07428093, 0.55708514, -3.94442258, -2.88552087]) #fat
#shape = np.array([-2.26781107, 0.88158132, -0.93788176, -0.23480508, 1.17088298, 1.55550789, 0.44383225, 0.37688275, -0.27983086, 1.77102953]) #thin
#shape = np.array([ 0.00404852, 0.8084637 , 0.32332591, -1.33163664, 1.05008727, 1.60955275, 0.22372946, -0.10738459, 0.89456312, -1.22231216]) #short
#shape = np.array([ 3.63453289, 1.20836171, 3.15674431, -0.78646793, -1.93847355, -0.32129994, -0.97771656, 0.94531640, 0.52825811, -0.99324327]) #tall
ndofs = 10
scene.objects.active = arm_ob
orig_trans = np.asarray(arm_ob.pose.bones[obname+'_Pelvis'].location).copy()
# create output directory
if not exists(output_path):
mkdir_safe(output_path)
# spherical harmonics material needs a script to be loaded and compiled
scs = []
for mname, material in materials.items():
scs.append(material.node_tree.nodes['Script'])
scs[-1].filepath = sh_dst
scs[-1].update()
rgb_dirname = name.replace(" ", "") + '_c%04d.mp4' % (ishape + 1)
rgb_path = join(tmp_path, rgb_dirname)
data = cmu_parms[name]
fbegin = ishape*stepsize*stride
fend = min(ishape*stepsize*stride + stepsize*clipsize, len(data['poses']))
log_message("Computing how many frames to allocate")
N = len(data['poses'][fbegin:fend:stepsize])
log_message("Allocating %d frames in mat file" % N)
# force recomputation of joint angles unless shape is all zeros
curr_shape = np.zeros_like(shape)
nframes = len(data['poses'][::stepsize])
matfile_info = join(output_path, name.replace(" ", "") + "_c%04d_info.mat" % (ishape+1))
log_message('Working on %s' % matfile_info)
# allocate
dict_info = {}
dict_info['bg'] = np.zeros((N,), dtype=np.object) # background image path
dict_info['camLoc'] = np.empty(3) # (1, 3)
dict_info['clipNo'] = ishape +1
dict_info['cloth'] = np.zeros((N,), dtype=np.object) # clothing texture image path
dict_info['gender'] = np.empty(N, dtype='uint8') # 0 for male, 1 for female
dict_info['joints2D'] = np.empty((2, 24, N), dtype='float32') # 2D joint positions in pixel space
dict_info['joints3D'] = np.empty((3, 24, N), dtype='float32') # 3D joint positions in world coordinates
dict_info['light'] = np.empty((9, N), dtype='float32')
dict_info['pose'] = np.empty((data['poses'][0].size, N), dtype='float32') # joint angles from SMPL (CMU)
dict_info['sequence'] = name.replace(" ", "") + "_c%04d" % (ishape + 1)
dict_info['shape'] = np.empty((ndofs, N), dtype='float32')
dict_info['zrot'] = np.empty(N, dtype='float32')
dict_info['camDist'] = camera_distance
dict_info['stride'] = stride
if name.replace(" ", "").startswith('h36m'):
dict_info['source'] = 'h36m'
else:
dict_info['source'] = 'cmu'
if(output_types['vblur']):
dict_info['vblur_factor'] = np.empty(N, dtype='float32')
# for each clipsize'th frame in the sequence
get_real_frame = lambda ifr: ifr
random_zrot = 0
reset_loc = False
batch_it = 0
curr_shape = reset_joint_positions(orig_trans, shape, ob, arm_ob, obname, scene,
cam_ob, smpl_data['regression_verts'], smpl_data['joint_regressor'])
random_zrot = 2*np.pi*np.random.rand()
arm_ob.animation_data_clear()
cam_ob.animation_data_clear()
arm_ob.rotation_euler.x -= math.pi / 2
# create a keyframe animation with pose, translation, blendshapes and camera motion
# LOOP TO CREATE 3D ANIMATION
for seq_frame, (pose, trans) in enumerate(zip(data['poses'][fbegin:fend:stepsize], data['trans'][fbegin:fend:stepsize])):
iframe = seq_frame
scene.frame_set(get_real_frame(seq_frame))
# apply the translation, pose and shape to the character
apply_trans_pose_shape(Vector(trans), pose, shape, ob, arm_ob, obname, scene, cam_ob, get_real_frame(seq_frame))
dict_info['shape'][:, iframe] = shape[:ndofs]
dict_info['pose'][:, iframe] = pose
dict_info['gender'][iframe] = list(genders)[list(genders.values()).index(gender)]
if(output_types['vblur']):
dict_info['vblur_factor'][iframe] = vblur_factor
arm_ob.pose.bones[obname+'_root'].rotation_quaternion = Quaternion(Euler((0, 0, random_zrot), 'XYZ'))
arm_ob.pose.bones[obname+'_root'].keyframe_insert('rotation_quaternion', frame=get_real_frame(seq_frame))
dict_info['zrot'][iframe] = random_zrot
scene.update()
# Bodies centered only in each minibatch of clipsize frames
if seq_frame == 0 or reset_loc:
reset_loc = False
new_pelvis_loc = arm_ob.matrix_world.copy() * arm_ob.pose.bones[obname+'_Pelvis'].head.copy()
rotated_orig = Vector([orig_pelvis_loc.copy()[0], orig_pelvis_loc.copy()[2], -orig_pelvis_loc.copy()[1]])
cam_ob.location = orig_cam_loc.copy() + (new_pelvis_loc.copy() - rotated_orig.copy())
cam_ob.keyframe_insert('location', frame=get_real_frame(seq_frame))
dict_info['camLoc'] = np.array(cam_ob.location)
scene.node_tree.nodes['Image'].image = bg_img
for part, material in materials.items():
material.node_tree.nodes['Vector Math'].inputs[1].default_value[:2] = (0, 0)
# random light
sh_coeffs = .7 * (2 * np.random.rand(9) - 1)
sh_coeffs[0] = .5 + .9 * np.random.rand() # Ambient light (first coeff) needs a minimum; the rest are uniformly distributed, higher means brighter.
sh_coeffs[1] = -.7 * np.random.rand()
for ish, coeff in enumerate(sh_coeffs):
for sc in scs:
sc.inputs[ish+1].default_value = coeff
# iterate over the keyframes and render
# LOOP TO RENDER
for seq_frame, (pose, trans) in enumerate(zip(data['poses'][fbegin:fend:stepsize], data['trans'][fbegin:fend:stepsize])):
scene.frame_set(get_real_frame(seq_frame))
iframe = seq_frame
dict_info['bg'][iframe] = bg_img_name
dict_info['cloth'][iframe] = cloth_img_name
dict_info['light'][:, iframe] = sh_coeffs
scene.render.use_antialiasing = False
scene.render.filepath = join(rgb_path, 'Image%04d.png' % get_real_frame(seq_frame))
log_message("Rendering frame %d" % seq_frame)
# disable render output
logfile = '/dev/null'
open(logfile, 'a').close()
old = os.dup(1)
sys.stdout.flush()
os.close(1)
os.open(logfile, os.O_WRONLY)
# Render
bpy.ops.render.render(write_still=True)
# disable output redirection
os.close(1)
os.dup(old)
os.close(old)
# NOTE:
# ideally, pixels should be readable from a viewer node, but I get only zeros
# --> https://ammous88.wordpress.com/2015/01/16/blender-access-render-results-pixels-directly-from-python-2/
# len(np.asarray(bpy.data.images['Render Result'].pixels) is 0
# Therefore we write them to temporary files and read with OpenEXR library (available for python2) in main_part2.py
# Alternatively, if you don't want to use OpenEXR library, the following commented code does loading with Blender functions, but it can cause memory leak.
# If you want to use it, copy necessary lines from main_part2.py such as definitions of dict_normal, matfile_normal...
#for k, folder in res_paths.items():
# if not k== 'vblur' and not k=='fg':
# path = join(folder, 'Image%04d.exr' % get_real_frame(seq_frame))
# render_img = bpy.data.images.load(path)
# # render_img.pixels size is width * height * 4 (rgba)
# arr = np.array(render_img.pixels[:]).reshape(resx, resy, 4)[::-1,:, :] # images are vertically flipped
# if k == 'normal':# 3 channels, original order
# mat = arr[:,:, :3]
# dict_normal['normal_%d' % (iframe + 1)] = mat.astype(np.float32, copy=False)
# elif k == 'gtflow':
# mat = arr[:,:, 1:3]
# dict_gtflow['gtflow_%d' % (iframe + 1)] = mat.astype(np.float32, copy=False)
# elif k == 'depth':
# mat = arr[:,:, 0]
# dict_depth['depth_%d' % (iframe + 1)] = mat.astype(np.float32, copy=False)
# elif k == 'segm':
# mat = arr[:,:,0]
# dict_segm['segm_%d' % (iframe + 1)] = mat.astype(np.uint8, copy=False)
#
# # remove the image to release memory, object handles, etc.
# render_img.user_clear()
# bpy.data.images.remove(render_img)
# bone locations should be saved after rendering so that the bones are updated
bone_locs_2D, bone_locs_3D = get_bone_locs(obname, arm_ob, scene, cam_ob)
dict_info['joints2D'][:, :, iframe] = np.transpose(bone_locs_2D)
dict_info['joints3D'][:, :, iframe] = np.transpose(bone_locs_3D)
reset_loc = (bone_locs_2D.max(axis=-1) > 256).any() or (bone_locs_2D.min(axis=0) < 0).any()
arm_ob.pose.bones[obname+'_root'].rotation_quaternion = Quaternion((1, 0, 0, 0))
# save a .blend file for debugging:
# bpy.ops.wm.save_as_mainfile(filepath=join(tmp_path, 'pre.blend'))
# save RGB data with ffmpeg (if you don't have h264 codec, you can replace with another one and control the quality with something like -q:v 3)
cmd_ffmpeg = 'ffmpeg -y -r 30 -i ''%s'' -c:v h264 -pix_fmt yuv420p -crf 23 ''%s_c%04d.mp4''' % (join(rgb_path, 'Image%04d.png'), join(output_path, name.replace(' ', '')), (ishape + 1))
log_message("Generating RGB video (%s)" % cmd_ffmpeg)
os.system(cmd_ffmpeg)
if(output_types['vblur']):
cmd_ffmpeg_vblur = 'ffmpeg -y -r 30 -i ''%s'' -c:v h264 -pix_fmt yuv420p -crf 23 -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" ''%s_c%04d.mp4''' % (join(res_paths['vblur'], 'Image%04d.png'), join(output_path, name.replace(' ', '')+'_vblur'), (ishape + 1))
log_message("Generating vblur video (%s)" % cmd_ffmpeg_vblur)
os.system(cmd_ffmpeg_vblur)
if(output_types['fg']):
cmd_ffmpeg_fg = 'ffmpeg -y -r 30 -i ''%s'' -c:v h264 -pix_fmt yuv420p -crf 23 ''%s_c%04d.mp4''' % (join(res_paths['fg'], 'Image%04d.png'), join(output_path, name.replace(' ', '')+'_fg'), (ishape + 1))
log_message("Generating fg video (%s)" % cmd_ffmpeg_fg)
os.system(cmd_ffmpeg_fg)
cmd_tar = 'tar -czvf %s/%s.tar.gz -C %s %s' % (output_path, rgb_dirname, tmp_path, rgb_dirname)
log_message("Tarballing the images (%s)" % cmd_tar)
os.system(cmd_tar)
# save annotation excluding png/exr data to _info.mat file
import scipy.io
scipy.io.savemat(matfile_info, dict_info, do_compression=True)
if __name__ == '__main__':
main()
``` |
{
"source": "jma712/DIRECT",
"score": 2
} |
#### File: DIRECT/src/main_disent.py
```python
import time
import numpy as np
import torch
from torch import optim
from torch import nn
from torch.nn import functional as F
from torchvision.utils import save_image
from torch.utils.data import Dataset, DataLoader
import math
import argparse
import os
import sys
import scipy.io as scio
import matplotlib
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE as tsn
from scipy.stats import pearsonr
from data_synthetic import plot_cluster, generate_y_final, get_y_final
from model_disent import MTvae
sys.path.append('../')
font_sz = 28
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = 'NSimSun,Times New Roman'
matplotlib.rcParams.update({'font.size': font_sz})
parser = argparse.ArgumentParser(description='Disentangled multiple cause VAE')
parser.add_argument('--nocuda', type=int, default=0, help='Disables CUDA training.')
parser.add_argument('--batch-size', type=int, default=1500, metavar='N',
help='input batch size for training (default: 10000)')
parser.add_argument('--epochs', type=int, default=150, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--K', type=int, default=4, metavar='N',
help='number of clusters')
parser.add_argument('--trn_rate', type=float, default=0.6, help='training data ratio')
parser.add_argument('--tst_rate', type=float, default=0.2, help='test data ratio')
parser.add_argument('--mu_p_wt', type=float, default=1.0, help='weight for mu_p_t')
parser.add_argument('--dim_zt', type=int, default=32, metavar='N',
help='dimension of zt')
parser.add_argument('--dim_zi', type=int, default=32, metavar='N',
help='dimension of zi')
parser.add_argument('--nogb', action='store_true', default=False,
help='Disable Gumbel-Softmax sampling.')
parser.add_argument('--beta', type=float, default=20, help='weight for loss balance')
parser.add_argument('--dataset', default='synthetic', help='dataset to use') # synthetic, amazon, amazon-6c
parser.add_argument('--lr', type=float, default=1e-3,
help='learning rate for optimizer')
parser.add_argument('--weight_decay', type=float, default=1e-5,
help='weight decay')
args = parser.parse_args()
# select gpu if available
args.cuda = not args.nocuda and torch.cuda.is_available()
device = torch.device("cuda:0" if args.cuda else "cpu")
args.device = device
print('using device: ', device)
# seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
def loss_function(input_ins_batch, mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list, logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, target, a_reconstby_zt, input_treat_trn):
# 1. recontrust loss
loss_bce = nn.BCELoss(reduction='mean').to(device)
loss_reconst = loss_bce(a_pred.view(-1), input_ins_batch.view(-1))
# 2. KLD_C
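# KL between q(c) and a uniform prior over the K clusters: sum_k q_k * log(K * q_k); 1e-10 avoids log(0)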
KLD_C = torch.mean(torch.sum(qc * torch.log(args.K * qc + 1e-10), dim=1), dim=0)
# 3. E_KLD_QT_PT
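# per-dimension KL between diagonal Gaussians,
# KL(q||p) = 0.5 * (log var_p - log var_q + (var_q + (mu_q - mu_p)^2) / var_p - 1);
# here mu_p is scaled by args.mu_p_wt, and the result is weighted by q(c) further below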
mu_zt = mu_zt.unsqueeze(-1)
logvar_zt = logvar_zt.unsqueeze(-1)
mu_p_zt = mu_p_zt.T
logvar_p_zt = logvar_p_zt.T
mu_p_zt = mu_p_zt.unsqueeze(0)
logvar_p_zt = logvar_p_zt.unsqueeze(0)
KLD_QT_PT = 0.5 * (((logvar_p_zt - logvar_zt) + ((logvar_zt.exp() + (mu_zt - args.mu_p_wt * mu_p_zt).pow(2)) / logvar_p_zt.exp())) - 1)
# zt
loss_bce2 = nn.BCELoss(reduction='mean').to(device)
loss_reconst_zt = loss_bce2(a_reconstby_zt.reshape(-1), input_treat_trn.reshape(-1))
qc = qc.unsqueeze(-1) # m x k x 1
qc = qc.expand(-1, args.K, 1) # m x k x 1
E_KLD_QT_PT = torch.mean(torch.sum(torch.bmm(KLD_QT_PT, qc), dim=1), dim=0)
# 4. KL_ZI
# KL_ZI = None
# for k in range(args.K):
# mu_zi_k = mu_zi_list[k] # batch_size x d
# logvar_zi_k = logvar_zi_list[k]
# kl_zi_k = -0.5 * torch.sum(1 + logvar_zi_k - mu_zi_k.pow(2) - logvar_zi_k.exp(), dim=1) # n
# KL_ZI = (kl_zi_k if (KL_ZI is None) else (KL_ZI + kl_zi_k)) #
# KL_ZI = torch.mean(KL_ZI, dim=0)
#
mu_zi_all = None
log_zi_all = None
for k in range(args.K):
mu_zi_k = mu_zi_list[k]
logvar_zi_k = logvar_zi_list[k]
mu_zi_all = mu_zi_k if mu_zi_all is None else torch.cat([mu_zi_all, mu_zi_k], dim=1)
log_zi_all = logvar_zi_k if log_zi_all is None else torch.cat([log_zi_all, logvar_zi_k], dim=1)
KL_ZI = -0.5 * torch.sum(1 + log_zi_all - mu_zi_all.pow(2) - log_zi_all.exp(), dim=1) # n
KL_ZI = torch.mean(KL_ZI, dim=0)
# 5. loss_y
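# negative Gaussian log-likelihood of the outcome:
# -log N(y; mu_y, var_y) = 0.5 * ((y - mu_y)^2 / var_y + log var_y + log(2*pi))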
temp = 0.5 * math.log(2 * math.pi)
target = target.view(-1, 1)
bb = - 0.5 * ((target - mu_y).pow(2)) / logvar_y.exp() - 0.5 * logvar_y - temp
loss_y = - torch.mean(torch.sum(- 0.5 * ((target - mu_y).pow(2)) / logvar_y.exp() - 0.5 * logvar_y - temp, dim=1), dim=0)
# MSE_Y
loss_mse = nn.MSELoss(reduction='mean')
loss_y_mse = loss_mse(mu_y, target)
# 6. loss balance
loss_balance = 0.0
loss = loss_reconst + KL_ZI + KLD_C + E_KLD_QT_PT + loss_y
eval_result = {
'loss': loss, 'loss_reconst': loss_reconst, 'KLD_C': KLD_C, 'E_KLD_QT_PT': E_KLD_QT_PT, 'loss_reconst_zt':loss_reconst_zt,
'KL_ZI': KL_ZI, 'loss_y': loss_y, 'loss_y_mse': loss_y_mse, 'loss_balance': loss_balance,
}
return eval_result
def test(model, data_loader, input_treat_trn, adj_assign, Z_i_list, Zt, params, C_true, inx_spec_treat=None, show_cluster=False, show_disent=True, show_y=False):
model.eval()
num_cluster = args.K
m = input_treat_trn.shape[0]
num_assign = len(adj_assign)
pehe = torch.zeros(num_assign, dtype = torch.float)
ite_true_sum = torch.zeros(num_assign, dtype = torch.float)
ite_pred_sum = torch.zeros(num_assign, dtype = torch.float)
adj_pred_correctNum = 0.0 # m
data_size = 0
for batch_idx, (adj_batch, target, orin_index) in enumerate(data_loader):
data_size += adj_batch.shape[0]
batch_size = adj_batch.shape[0]
if args.cuda:
adj_batch = adj_batch.to(device)
orin_index = orin_index.to(device)
mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list, logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, a_reconstby_zt = model(
adj_batch, input_treat_trn)
# accuracy of treatment assignment prediction
a_pred[a_pred >= 0.5] = 1.0
a_pred[a_pred < 0.5] = 0.0
if inx_spec_treat is None:
adj_pred_correctNum += (a_pred == adj_batch).sum()
else:
a_pred_spec = a_pred[:, inx_spec_treat]
a_true_spec = adj_batch[:, inx_spec_treat]
adj_pred_correctNum += (a_pred_spec == a_true_spec).sum()
# get true y
if Z_i_list is None:
y_true = torch.zeros((batch_size, len(adj_assign), 1), device=args.device)
y_true_0 = torch.zeros((batch_size, 1), device=args.device)
else:
y_true, y_true_0 = get_y_true_final(orin_index, adj_assign, Z_i_list, Zt, params)
# pehe, ate
adj_batch_0 = torch.zeros([adj_batch.shape[0], m], dtype=torch.float) # batch size x m
if args.cuda:
adj_batch_0 = adj_batch_0.to(device)
y_pred_0, _ = model.predictY(mu_zt, zi_sample_list, qc, adj_batch_0)
for j in range(len(adj_assign)):
adj_assign_j = adj_assign[j] # m
adj_assign_j = adj_assign_j.unsqueeze(0)
adj_assign_j = adj_assign_j.expand(adj_batch.shape[0], m)
if args.cuda:
adj_assign_j = adj_assign_j.to(device)
y_pred_j, _ = model.predictY(mu_zt, zi_sample_list, qc, adj_assign_j)
y_true_j = y_true[:, j, :]
ite_pred_j = y_pred_j - y_pred_0
ite_true_j = y_true_j - y_true_0
pehe[j] = pehe[j] + torch.sum((ite_pred_j - ite_true_j).pow(2))
ite_true_sum[j] = ite_true_sum[j] + torch.sum(ite_true_j)
ite_pred_sum[j] = ite_pred_sum[j] + torch.sum(ite_pred_j)
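# PEHE = sqrt(mean squared error between predicted and true ITEs);
# ATE error = |mean(true ITE) - mean(predicted ITE)|, both averaged over the sampled treatment assignments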
pehe = torch.sqrt(pehe / data_size)
pehe_ave = torch.sum(pehe) / num_assign
ate = torch.abs(ite_true_sum / data_size - ite_pred_sum / data_size)
ate_ave = torch.sum(ate) / num_assign
if inx_spec_treat is None:
acc_apred = adj_pred_correctNum / (data_size * m)
else:
m_new = len(inx_spec_treat)
acc_apred = adj_pred_correctNum / (data_size * m_new)
# acc of zt
a_reconstby_zt[a_reconstby_zt >= 0.5] = 1.0
a_reconstby_zt[a_reconstby_zt < 0.5] = 0.0
adj_pred_correctNum_zt = 0.0
adj_pred_correctNum_zt += (a_reconstby_zt == input_treat_trn).sum()
acc_apred_zt = adj_pred_correctNum_zt / (input_treat_trn.shape[0] * input_treat_trn.shape[1])
if show_cluster:
C = torch.argmax(qc, dim=1).cpu().detach().numpy() # m
mu_zt = mu_zt.cpu().detach().numpy()
mu_p_zt = args.mu_p_wt * mu_p_zt.cpu().detach().numpy()
Zt_tsn = plot_cluster(mu_zt, C, num_cluster, mu_zt_all=mu_p_zt, saving=False)
# true clusters
plot_cluster(mu_zt, C_true, num_cluster, mu_zt_all=mu_p_zt, saving=False, Zt_tsn=Zt_tsn)
eval_result = {
'pehe': pehe_ave, 'ate': ate_ave, 'acc_apred': acc_apred,
'acc_apred_zt': acc_apred_zt
}
return eval_result
def train(epochs, model, trn_loader, val_loader, tst_loader, input_treat_trn, adj_assign, Z_i_list, Zt, params, C_true, optimizer, with_test=True, active_opt=[True, True, True, True]):
time_begin = time.time()
model.train()
print("start training!")
optimizer_1 = optimizer[0]
optimizer_2 = optimizer[1]
optimizer_3 = optimizer[2]
optimizer_4 = optimizer[3]
for epoch in range(epochs):
for batch_idx, (adj_batch, target, orin_index) in enumerate(trn_loader):
if args.cuda:
adj_batch = adj_batch.to(device)
target = target.to(device)
optimizer_1.zero_grad()
optimizer_2.zero_grad()
optimizer_3.zero_grad()
optimizer_4.zero_grad()
# forward pass
if active_opt[0]:
for i in range(5):
optimizer_1.zero_grad()
mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list, logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, a_reconstby_zt = model(adj_batch, input_treat_trn)
eval_result = loss_function(adj_batch, mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list, logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, target, a_reconstby_zt,input_treat_trn)
loss, KLD_C, E_KLD_QT_PT, loss_a_reconst_zt, loss_reconst, KL_ZI, KLD_C, loss_y, loss_y_mse = \
eval_result['loss'], eval_result['KLD_C'], eval_result['E_KLD_QT_PT'], eval_result['loss_reconst_zt'], eval_result['loss_reconst'], eval_result['KL_ZI'], eval_result['KLD_C'], eval_result['loss_y'], eval_result['loss_y_mse']
# backward propagation
(loss_a_reconst_zt).backward()
optimizer_1.step()
if active_opt[2]:
for i in range(3):
optimizer_3.zero_grad()
mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list, logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, a_reconstby_zt = model(
adj_batch, input_treat_trn)
eval_result = loss_function(adj_batch, mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list,
logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, target,
a_reconstby_zt, input_treat_trn)
loss, KLD_C, E_KLD_QT_PT, loss_a_reconst_zt, loss_reconst, KL_ZI, KLD_C, loss_y, loss_y_mse = \
eval_result['loss'], eval_result['KLD_C'], eval_result['E_KLD_QT_PT'], eval_result[
'loss_reconst_zt'], eval_result['loss_reconst'], eval_result['KL_ZI'], eval_result['KLD_C'], \
eval_result['loss_y'], eval_result['loss_y_mse']
# backward propagation
pm_beta = 1.0 if epoch < 100 else args.beta
(loss_reconst + pm_beta * KL_ZI).backward()
optimizer_3.step()
if active_opt[3]:
for i in range(20):
optimizer_4.zero_grad()
mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list, logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, a_reconstby_zt = model(
adj_batch, input_treat_trn)
eval_result = loss_function(adj_batch, mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list,
logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, target,
a_reconstby_zt, input_treat_trn)
loss, KLD_C, E_KLD_QT_PT, loss_a_reconst_zt, loss_reconst, KL_ZI, KLD_C, loss_y, loss_y_mse = \
eval_result['loss'], eval_result['KLD_C'], eval_result['E_KLD_QT_PT'], eval_result[
'loss_reconst_zt'], eval_result['loss_reconst'], eval_result['KL_ZI'], eval_result['KLD_C'], \
eval_result['loss_y'], eval_result['loss_y_mse']
# backward propagation
loss_y.backward()
optimizer_4.step()
# optimize for the centroid
if active_opt[1]:
for i in range(20):
optimizer_2.zero_grad()
# forward pass
mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list, logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, a_reconstby_zt = model(
adj_batch, input_treat_trn)
eval_result = loss_function(adj_batch, mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list,
logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, target,
a_reconstby_zt, input_treat_trn)
loss, KLD_C, E_KLD_QT_PT, loss_a_reconst_zt, loss_reconst, KL_ZI, KLD_C, loss_y, loss_y_mse = \
eval_result['loss'], eval_result['KLD_C'], eval_result['E_KLD_QT_PT'], eval_result[
'loss_reconst_zt'], eval_result['loss_reconst'], eval_result['KL_ZI'], eval_result['KLD_C'], \
eval_result['loss_y'], eval_result['loss_y_mse']
# backward propagation
(5*KLD_C+E_KLD_QT_PT).backward()
optimizer_2.step()
# evaluate
if epoch % 100 == 0:
show_disent = True
model.eval()
# eval_result_val = test(model, val_loader, input_treat_trn, adj_assign, Z_i_list, Zt, params, C_true)
eval_result_tst = test(model, tst_loader, input_treat_trn, adj_assign, Z_i_list, Zt, params, C_true, show_disent=show_disent)
pehe_tst, mae_ate_tst = eval_result_tst['pehe'], eval_result_tst['ate']
print('Epoch: {:04d}'.format(epoch + 1),
'pehe_tst: {:.4f}'.format(pehe_tst.item()),
'mae_ate_tst: {:.4f}'.format(mae_ate_tst.item()),
'time: {:.4f}s'.format(time.time() - time_begin))
model.train()
return
class Synthetic_dataset(Dataset):
def __init__(self, adj, y, trn_idx=None, val_idx=None, tst_idx=None, type='train'):
n = adj.shape[0]
if trn_idx is None:
size_trn = int(args.trn_rate * n)
size_tst = int(args.tst_rate * n)
size_val = n - size_trn - size_tst
if type == 'train':
self.adj_ins = adj[:size_trn]
self.target = y[:size_trn]
self.orin_index = np.array(range(size_trn))
elif type == 'val':
self.adj_ins = adj[size_trn: size_trn + size_val]
self.target = y[size_trn: size_trn + size_val]
self.orin_index = np.array(range(size_trn, size_trn + size_val))
elif type == 'test':
self.adj_ins = adj[size_trn + size_val:]
self.target = y[size_trn + size_val:]
self.orin_index = np.array(range(size_trn + size_val, n))
else:
if type == 'train':
self.adj_ins = adj[trn_idx]
self.target = y[trn_idx]
self.orin_index = trn_idx
elif type == 'val':
self.adj_ins = adj[val_idx]
self.target = y[val_idx]
self.orin_index = val_idx
elif type == 'test':
self.adj_ins = adj[tst_idx]
self.target = y[tst_idx]
self.orin_index = tst_idx
def __getitem__(self, index):
adj = self.adj_ins[index]
target = self.target[index]
orin_index = self.orin_index[index]
return adj, target, orin_index
def __len__(self):
return len(self.adj_ins)
def Sythetic_loader(batchSize, adj, y, trn_idx=None, val_idx=None, tst_idx=None):
adj = torch.FloatTensor(adj)
y = torch.FloatTensor(y)
train_loader = torch.utils.data.DataLoader(
Synthetic_dataset(adj, y, trn_idx, val_idx, tst_idx, type='train'),
batch_size=batchSize,
shuffle=False
)
val_loader = torch.utils.data.DataLoader(
Synthetic_dataset(adj, y, trn_idx, val_idx, tst_idx, type='val'),
batch_size=batchSize,
shuffle=False
)
test_loader = torch.utils.data.DataLoader(
Synthetic_dataset(adj, y, trn_idx, val_idx, tst_idx, type='test'),
batch_size=batchSize,
shuffle=False
)
return train_loader, val_loader, test_loader
def get_y_true_final(orin_index, adj_assign, Z_i_list, Zt, params):
batch_size = orin_index.shape[0]
num_assign = adj_assign.shape[0]
m = adj_assign.shape[1]
zi_all = None # n x (K x d)
for k in range(len(Z_i_list)): # every cluster
Z_i_k = Z_i_list[k]
zi_all = Z_i_k if zi_all is None else torch.cat([zi_all, Z_i_k], dim=1)
zi_batch = zi_all[orin_index] # batch size x (K x d)
# y0
y_true_0 = torch.zeros((batch_size,1))
if args.cuda:
y_true_0 = y_true_0.to(device)
if params['type'] == '0':
W1 = params['W1'][0][0]
W1 = torch.FloatTensor(W1)
if args.cuda:
W1 = W1.to(device)
y_true = torch.empty([batch_size, num_assign, 1], dtype=torch.float, device=args.device) # batch size x R x 1
for j in range(adj_assign.shape[0]): # each assignment
adj_assign_j = adj_assign[j] # size = m
adj_assign_j = adj_assign_j.unsqueeze(0) # 1 x m
adj_assign_j = adj_assign_j.expand(batch_size, m) # batch size x m
y_true_j = torch.diag(torch.matmul(torch.matmul(torch.matmul(adj_assign_j, Zt), W1), zi_batch.T)).reshape(-1,1)
y_true[:, j, :] = y_true_j
elif params['type'] == '1':
W1 = params['W1'][0][0]
W1 = torch.FloatTensor(W1)
W2 = params['W2'][0][0]
W2 = torch.FloatTensor(W2)
C1 = params['C1'][0][0][0][0]
C = params['C'][0][0][0][0]
if args.cuda:
W1 = W1.to(device)
W2 = W2.to(device)
y_true = torch.empty([batch_size, num_assign, 1], dtype=torch.float, device=args.device) # batch size x R x 1
for j in range(adj_assign.shape[0]): # each assignment
adj_assign_j = adj_assign[j]
adj_assign_j = adj_assign_j.unsqueeze(0)
adj_assign_j = adj_assign_j.expand(batch_size, m)
y_true_j = C * (C1 * torch.diag(torch.matmul(torch.matmul(torch.matmul(adj_assign_j, Zt), W1), zi_batch.T)).reshape(
-1, 1) + torch.matmul(zi_batch, W2))
y_true[:, j, :] = y_true_j
return y_true, y_true_0
def loadFromFile(path):
data = scio.loadmat(path)
Z_i_list = data['Z_i_list']
Zt = data['Zt']
adj = data['adj']
y = data['y'][0]
C = data['C'][0]
idx_trn_list = data['trn_idx_list']
idx_val_list = data['val_idx_list']
idx_tst_list = data['tst_idx_list']
params = data['params']
return Z_i_list, Zt, adj, y, C, idx_trn_list, idx_val_list, idx_tst_list, params
def load_data(dataset):
if dataset == 'synthetic':
Z_i_list, Zt, adj, YF, C, idx_trn_list, idx_val_list, idx_tst_list, params = loadFromFile('../../dataset/synthetic/synthetic_final.mat')
elif dataset == 'amazon':
Z_i_list, Zt, adj, YF, C, idx_trn_list, idx_val_list, idx_tst_list, params = loadFromFile(
'../../dataset/amazon/amazon_3C.mat')
elif dataset == 'amazon-6c':
Z_i_list, Zt, adj, YF, C, idx_trn_list, idx_val_list, idx_tst_list, params = loadFromFile(
'../../dataset/amazon/amazon_6C.mat')
print("True C: ", C)
cluster_size = [(C == i).sum() for i in range(args.K)]
print('cluster size: ', cluster_size)
return adj, Z_i_list, Zt, YF, idx_trn_list, idx_val_list, idx_tst_list, params, C
def experiment_ite(args):
adj, Z_i_list, Zt, YF, idx_trn_list, idx_val_list, idx_tst_list, params, C = load_data(args.dataset)
results_all = {'pehe': [], 'ate': []}
for i_exp in range(0, 10): # 10 runs of experiments
print("============== Experiment ", str(i_exp), " =========================")
trn_idx = idx_trn_list[i_exp]
val_idx = idx_val_list[i_exp]
tst_idx = idx_tst_list[i_exp]
trn_loader, val_loader, tst_loader = Sythetic_loader(args.batch_size, adj, YF, trn_idx, val_idx, tst_idx)
n = adj.shape[0]
m = adj.shape[1]
treated_rate = adj.sum() / adj.size
print('data: ', args.dataset, ' n=', n, ' m=', m, ' K=', args.K, 'treated rate: ', treated_rate)
#adj_assign = torch.eye(m)
num_R = m
adj_assign = np.random.binomial(1, 0.5, (num_R, m)) # R x m
adj_assign = torch.FloatTensor(adj_assign)
size_trn = len(trn_idx)
input_treat_trn = adj[trn_idx].T
input_treat_trn = torch.FloatTensor(input_treat_trn)
if i_exp == 0:
Z_i_list = [torch.FloatTensor(zi) for zi in Z_i_list]
Zt = torch.FloatTensor(Zt)
dim_t = size_trn
dim_x = m
model = MTvae(args, dim_t, dim_x)
# cuda
if args.cuda:
model = model.to(device)
input_treat_trn = input_treat_trn.to(device)
adj_assign = adj_assign.to(device)
Z_i_list = [zi.to(device) for zi in Z_i_list]
Zt = Zt.to(device)
par_t = list(model.mu_zt.parameters()) + list(model.logvar_zt.parameters()) + list(
model.a_reconstby_zt.parameters())
par_z = list(model.mu_zi_k.parameters()) + list(model.logvar_zi_k.parameters())
par_y = list(model.y_pred_1.parameters()) + list(model.y_pred_2.parameters()) + par_z
optimizer_1 = optim.Adam([{'params': par_t, 'lr': args.lr}], weight_decay=args.weight_decay) # zt
optimizer_2 = optim.Adam([{'params': [model.mu_p_zt, model.logvar_p_zt], 'lr': 0.01}],
weight_decay=args.weight_decay) # centroid
optimizer_3 = optim.Adam([{'params': par_z, 'lr': args.lr}], weight_decay=args.weight_decay) # zi
optimizer_4 = optim.Adam([{'params': par_y, 'lr': args.lr}], weight_decay=args.weight_decay) # y
optimizer = [optimizer_1, optimizer_2, optimizer_3, optimizer_4]
train(args.epochs, model, trn_loader, val_loader, tst_loader, input_treat_trn, adj_assign, Z_i_list, Zt, params,
C, optimizer)
eval_result_tst = test(model, tst_loader, input_treat_trn, adj_assign, Z_i_list, Zt, params, C,
show_cluster=True)
results_all['pehe'].append(eval_result_tst['pehe'])
results_all['ate'].append(eval_result_tst['ate'])
break # remove if more experiments are needed
results_all['average_pehe'] = np.mean(np.array(results_all['pehe'], dtype=float))  # builtin float (np.float is removed in newer NumPy)
results_all['std_pehe'] = np.std(np.array(results_all['pehe'], dtype=float))
results_all['average_ate'] = np.mean(np.array(results_all['ate'], dtype=float))
results_all['std_ate'] = np.std(np.array(results_all['ate'], dtype=float))
print("============== Overall experiment results =========================")
for k in results_all:
if isinstance(results_all[k], list):
print(k, ": ", results_all[k])
else:
print(k, f": {results_all[k]:.4f}")
print("Total time elapsed: {:.4f}s".format(time.time() - t_begin))
if __name__ == '__main__':
t_begin = time.time()
if args.dataset == 'synthetic':
args.K = 4
elif args.dataset == 'amazon':
args.K = 3
elif args.dataset == 'amazon-6c':
args.K = 6
experiment_ite(args)
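# Hedged note: a typical invocation would be something like
# `python <this_file>.py --dataset synthetic`, assuming `args` is built with
# argparse earlier in the file; the exact flags are not shown here, so treat
# this as illustrative only.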
``` |
{
"source": "jmaack24/powerscenarios",
"score": 2
} |
#### File: powerscenarios/powerscenarios/grid.py
```python
from __future__ import print_function
import logging
import pandas as pd
import numpy as np
import sys
# should this be imported only as needed (in retrieve_wind_sites, retrieve_wtk_data,)
import os
logging.basicConfig()
# do this before importing pywtk, so that WIND_MET_NC_DIR and WIND_FCST_DIR are set correctly
# if cache dir is set, will use AWS (as opposed to local data)
#os.environ["PYWTK_CACHE_DIR"] = os.path.join(os.environ["HOME"], "pywtk-data")
from pywtk.wtk_api import get_nc_data, site_from_cache, WIND_FCST_DIR, WIND_MET_NC_DIR
import pywtk
class Grid(object):
""" docstring TBD
"""
def __init__(
self,
name,
buses,
generators,
wind_generators,
wind_sites=pd.DataFrame(),
actuals=None,
scenarios=None,
):
self.name = name
self.buses = buses
self.generators = generators
self.wind_generators = wind_generators
self.wind_sites = wind_sites
self.actuals = actuals
self.scenarios = scenarios
# attributes
blet = "ble"
WTK_DATA_PRECISION = 6
# if __repr__ is defined, then __str__ = __repr__ (converse is not true)
# def __str__(self):
# return 'Grid(name='+self.name+', buses='+str(len(self.buses)) + ', generators='+str(len(self.generators)) + ', wind_generators='+str(len(self.wind_generators)) + ')'
def __repr__(self):
# return self.name + ' grid object: buses='+str(len(self.buses)) + ' generators='+str(len(self.generators)) + ' wind generators='+str(len(self.wind_generators))
if self.wind_sites is None:
n_wind_sites = "None"
else:
n_wind_sites = str(len(self.wind_sites))
return (
"Grid(name="
+ self.name
+ ", buses="
+ str(len(self.buses))
+ ", generators="
+ str(len(self.generators))
+ ", wind_generators="
+ str(len(self.wind_generators))
+ ", wind_sites="
+ n_wind_sites
+ ")"
)
def info(self):
""" Method for displaying grid statistics
"""
info_str = ""
info_str = info_str + "\n{} grid info: \n".format(self.name)
# number of busses:
n_buses = len(self.buses)
info_str = info_str + "\n number of buses: {}".format(n_buses)
# number of generators
n_generators = len(self.generators)
info_str = info_str + "\n number of generators: {}".format(n_generators)
# number of wind generators
n_wind_generators = len(self.wind_generators)
info_str = info_str + "\n number of wind generators: {}".format(
n_wind_generators
)
# number of solar generators
n_solar_generators = len(
self.generators[self.generators["GenFuelType"] == "Solar"]
)
info_str = info_str + "\n number of solar generators: {}".format(
n_solar_generators
)
# total capacity
total_capacity = self.generators["GenMWMax"].sum()
info_str = info_str + "\n total generator capacity: {:.2f} MW".format(
total_capacity
)
# wind capacity
wind_capacity = self.generators[self.generators["GenFuelType"] == "Wind"][
"GenMWMax"
].sum()
wind_penetration = 100 * wind_capacity / total_capacity
info_str = (
info_str
+ "\n wind capacity/penetration: {:.2f} MW / {:.2f}%".format(
wind_capacity, wind_penetration
)
)
# solar capacity
solar_capacity = self.generators[self.generators["GenFuelType"] == "Solar"][
"GenMWMax"
].sum()
solar_penetration = 100 * solar_capacity / total_capacity
info_str = (
info_str
+ "\n solar capacity/penetration: {:.2f} MW / {:.2f}%".format(
solar_capacity, solar_penetration
)
)
return info_str
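# Hedged usage sketch (illustrative only): `bus_df`, `gen_df` and `wind_gen_df`
# are placeholder DataFrames with the columns referenced above (GenMWMax,
# GenFuelType, Latitude, Longitude, ...).
# grid = Grid("MyGrid", bus_df, gen_df, wind_gen_df)
# print(grid.info())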
def change_wind_penetration(
self, new_wind_penetration, **kwargs,
):
""" Method to change wind penetration (multiplying existing wind generator capacities by a factor)
Changes generators and wind_generators.
Required Arg:
new_wind_penetration - (float), new wind penetration as percentage of total capacity, [0,100)
"""
# print('OLD:')
# total capacity
total_capacity = self.generators["GenMWMax"].sum()
# print('total gen capacity: {}'.format(total_capacity))
# wind capacity
wind_capacity = self.generators[self.generators["GenFuelType"] == "Wind"][
"GenMWMax"
].sum()
# print('total wind capacity: {}'.format(wind_capacity))
# wind penetration
wind_penetration = 100 * wind_capacity / total_capacity
# print('curent wind penetration: {:.2f}%'.format(wind_penetration))
# now find a factor by which to multiply wind capacity to get new_wind_penetration
print("\n\nChanging wind penetration to: {:.2f}%".format(new_wind_penetration))
factor = (
new_wind_penetration
* (wind_capacity - total_capacity)
/ (wind_capacity * (new_wind_penetration - 100))
)
print("using factor of {}".format(factor))
# modify GenMWMax column of wind_gen_df to achieve desired penetration
self.wind_generators["GenMWMax"] = self.wind_generators["GenMWMax"] * factor
# also need to modify gen_df to be consistent
self.generators.loc[self.generators["GenFuelType"] == "Wind", "GenMWMax"] = (
self.generators[self.generators["GenFuelType"] == "Wind"]["GenMWMax"]
* factor
)
# print('NEW:')
# total_capacity = self.generators['GenMWMax'].sum()
# print('total gen capacity: {}'.format(total_capacity))
# wind_capacity = self.generators[self.generators['GenFuelType']=='Wind']['GenMWMax'].sum()
# print('total wind capacity: {}'.format(wind_capacity))
# wind_penetration = 100*wind_capacity/total_capacity
# print('curent wind penetration: {:.2f}%'.format(wind_penetration))
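# Hedged usage sketch (illustrative only): rescale wind generator capacities so
# wind makes up 30% of total capacity, assuming `grid` is a constructed Grid.
# grid.change_wind_penetration(30.0)
# print(grid.info())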
def retrieve_wind_sites(self, method = "simple proximity", **kwargs):
""" Method to retrieve wind sites (SiteID) nearest to wind generators (up to their capacity, GenMWMax).
Requires pywtk_api.
Required Args:
method='simple proximity', TODO: 'capacity factor'
"""
# add Point (wkt) column to wind_gen_df
# i.e. turn Latitude and Longitude of wind generators into wkt POINT format (add it as a new column)
# both, astype and apply work
# wind_gen_df['wkt'] = 'POINT(' + wind_gen_df['Longitude'].astype(str) + ' ' + wind_gen_df['Latitude'].astype(str) + ')'
if method == "simple proximity":
wind_gen_df = self.wind_generators
wind_gen_df["Point"] = (
"POINT("
+ wind_gen_df["Longitude"].apply(str)
+ " "
+ wind_gen_df["Latitude"].apply(str)
+ ")"
)
print("Retrieving wind sites ...")
# will create a DataFrame out of this list of dicts (rows)
wind_sites_list = []
site_ids_list = [] # for keeping track of used sites (don't want repeats)
for row in wind_gen_df.itertuples():
gen_capacity = row.GenMWMax
gen_wkt_point = row.Point
# retrieve wind sites sorted by proximity to gen location
sorted_sites = pywtk.site_lookup.get_3tiersites_from_wkt(gen_wkt_point)
# keep adding sites to the list until gen capacity is exceeded
total_sites_capacity = 0.0
for site in sorted_sites.itertuples():
wind_site = {
"SiteID": site.Index,
"Capacity": site.capacity,
"Point": str(site.point),
"Latitude": site.lat,
"Longitude": site.lon,
"BusNum": row.BusNum, # add BusNum this site belongs to (maybe it'll be usefull)
"GenUID": row.GenUID, # add GenUID (generator unique ID) this site belongs to
}
# note that site.point is of type : shapely.geometry.point.Point
# hence, turn it into str, since get_wind_data_by_wkt() wants a str (stupid, isn't it?)
# if wind site is not in the list already, add it to the list (don't want repeats)
if not (site.Index in site_ids_list):
wind_sites_list.append(wind_site)
site_ids_list.append(site.Index)
total_sites_capacity += site.capacity
if total_sites_capacity > gen_capacity:
break
wind_sites_df = pd.DataFrame(wind_sites_list)
self.wind_sites = wind_sites_df
print("Done")
# return wind_sites_df
# internal, used for make_tables
def retrieve_wtk_data(
self, start_of_data, end_of_data, nc_dir="met", attributes=["power"], **kwargs,
):
""" Function to retrieve wind power data using self.wind_sites
Used to create initial actuals_df and scenarios_df
Required Args:
start_of_data - (pd.Timestamp) start of required power data
end_of_data - (pd.Timestamp) end of required power data
Optional Args:
nc_dir - (string) either 'met' for meteorological (WIND_MET_NC_DIR) or 'fcst' for forecast (WIND_FCST_DIR)
Returns:
pd.DataFrame with columns as BusNum from wind_site_df and
rows of power values indexed by Timestamp
i.e. power data from wind sites is aggregated by the bus they belong to
"""
if nc_dir == "met":
nc_dir = WIND_MET_NC_DIR
elif nc_dir == "fcst":
nc_dir = WIND_FCST_DIR
wind_sites_df = self.wind_sites
if wind_sites_df.empty:
raise Exception(
"No wind sites, retrieve wind sites before retrieving data."
)
# print('No wind sites, retrieve wind sites before retrieving data.')
# return
# what do we want? - power!
# attributes = ['power',]
print("Retrieving WTK data ...")
# initialize DataFrame by pulling one data point
site_id = wind_sites_df["SiteID"].loc[0]
wind_data_df_index = pywtk.wtk_api.get_nc_data(
site_id,
start_of_data,
end_of_data,
attributes=attributes,
leap_day=True,
utc=True,
nc_dir=nc_dir,
).index
wind_data_df = pd.DataFrame(index=wind_data_df_index)
# we need unique GenUID because BusNum is not unique (multiple wind generators attached to one bus can happen)
# bus_numbers = wind_sites_df['BusNum'].unique()
gen_uids = wind_sites_df["GenUID"].unique()
# for bus_number in bus_numbers:
for gen_uid in gen_uids:
# actuals_df['Bus'+str(bus_number)] = 0.
# initialize column
# wind_data_df[bus_number] = 0.
wind_data_df[gen_uid] = 0.0
# site_ids = wind_sites_df[wind_sites_df['BusNum']== bus_number]['SiteID'].values
# take site_ids that belong to the same gen_uid
site_ids = wind_sites_df[wind_sites_df["GenUID"] == gen_uid][
"SiteID"
].values
for site_id in site_ids:
# retrieve by site_id and keep adding
wind_data_df_temp = pywtk.wtk_api.get_nc_data(
site_id,
start_of_data,
end_of_data,
attributes=attributes,
leap_day=True,
utc=True,
nc_dir=nc_dir,
)
# wind_data_df[bus_number]+=wind_data_df_temp[attributes[0]].values
wind_data_df[gen_uid] += wind_data_df_temp[attributes[0]].values
# add name for column index?
# wind_data_df.columns.rename('BusNum',inplace=True)
# rename row index
wind_data_df.index.rename("IssueTime", inplace=True)
print("Done")
return wind_data_df
# new method, this one does not power condition, makes actuals_df and scenarios_df
def make_tables(
self,
actuals_start=pd.Timestamp("2007-01-01 00:00:00", tz="utc"),
actuals_end=pd.Timestamp("2007-12-31 23:55:00", tz="utc"),
scenarios_start=pd.Timestamp("2008-01-01 00:00:00", tz="utc"),
scenarios_end=pd.Timestamp("2013-12-31 23:55:00", tz="utc"),
**kwargs,
):
""" Method retrieves data from wtk and makes actuals(DataFrame) and scenarios(DataFrame)
Timestamps for actuals and scenarios: pick dates from 2007-01-01 to 2013-12-31
Required args:
actuals_start - (pd.Timestamp)
actuals_end - (pd.Timestamp)
scenarios_start - (pd.Timestamp)
scenarios_end - (pd.Timestamp)
source - (str) placeholder
"""
# time window selection:
# one year, e.g. 2007, for the actuals
# start_of_data = pd.Timestamp('2007-01-01 00:00:00', tz='utc')
# end_of_data = pd.Timestamp('2007-12-31 23:55:00', tz='utc')
# start_of_data = pd.Timestamp('2007-01-01 00:00:00').tz_localize('US/Pacific')
# end_of_data = pd.Timestamp('2007-12-31 23:55:00').tz_localize('US/Pacific')
# one year, 2013, for the actuals
# start_of_data = pd.Timestamp('2013-01-01 00:00:00', tz='utc')
# end_of_data = pd.Timestamp('2013-12-31 23:55:00', tz='utc')
wind_sites_df = self.wind_sites
actuals_df = self.retrieve_wtk_data(actuals_start, actuals_end)
# index does not have timezone in Devon's code, but it should
# actuals_df.index = actuals_df.index.tz_localize(None)
###### fix "over" problem
# max actual power can not go above GenMWMax
# note that, it does not go "under" - WTK takes care of that
max_gen_capacity = self.wind_generators.set_index("GenUID")["GenMWMax"]
actuals_df = actuals_df[actuals_df < max_gen_capacity].fillna(max_gen_capacity)
# grid.actuals = new_df
# add total power column (across all buses)
actuals_df["TotalPower"] = actuals_df.sum(axis=1).values
self.actuals = actuals_df
# for scenarios_df, just change time window, last 6 years
# start_of_data = pd.Timestamp('2008-01-01 00:00:00', tz='utc')
# end_of_data = pd.Timestamp('2013-12-31 23:55:00', tz='utc')
# start_of_data = pd.Timestamp('2008-01-01 00:00:00').tz_localize('US/Pacific')
# end_of_data = pd.Timestamp('2013-12-31 23:55:00').tz_localize('US/Pacific')
# for scenarios_df, just change time window, first 6 years
# start_of_data = pd.Timestamp('2007-01-01 00:00:00', tz='utc')
# end_of_data = pd.Timestamp('2012-12-31 23:55:00', tz='utc')
scenarios_df = self.retrieve_wtk_data(scenarios_start, scenarios_end)
# fix "over". same as for actuals_df, but different way
# for GenMWMax info we take wind generators and reindex by GenUID
wind_generators_df = self.wind_generators.set_index("GenUID")
scenarios_df=scenarios_df.where(
scenarios_df <= wind_generators_df["GenMWMax"],
other=wind_generators_df["GenMWMax"],
axis=1,
#inplace=True,
)
# index does not have timezone in Devon's code, but it should
# scenarios_df.index = scenarios_df.index.tz_localize(None)
# add total power column (across all generators)
scenarios_df["TotalPower"] = scenarios_df.sum(axis=1).values
# compute deviations
# i.e. make error calculations (instead of full power at each bus, have deviations from persistence)
# deviations from persistence at generators
gen_deviations_array = (
scenarios_df.iloc[1:, :-1].values - scenarios_df.iloc[:-1, :-1].values
)
# total power deviations
total_power_deviations_array = (
scenarios_df["TotalPower"].values[1:]
- scenarios_df["TotalPower"].values[:-1]
)
# drop last row
scenarios_df.drop(scenarios_df.tail(1).index, inplace=True)
# record deviations
scenarios_df.iloc[:, :-1] = gen_deviations_array
scenarios_df["Deviation"] = total_power_deviations_array
self.scenarios = scenarios_df
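# Hedged sketch of the typical workflow around make_tables (illustrative only);
# it assumes pywtk data is available and `grid` already has its wind generators set.
# grid.retrieve_wind_sites()
# grid.make_tables()        # defaults: 2007 for actuals, 2008-2013 for scenarios
# grid.actuals.head()       # per-generator actual power plus a TotalPower column
# grid.scenarios.head()     # per-generator deviations plus TotalPower and Deviation columns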
# old make tables method
def make_tables2(
self,
percentiles=(10, 90),
actuals_start=pd.Timestamp("2007-01-01 00:00:00", tz="utc"),
actuals_end=pd.Timestamp("2007-12-31 23:55:00", tz="utc"),
scenarios_start=pd.Timestamp("2008-01-01 00:00:00", tz="utc"),
scenarios_end=pd.Timestamp("2013-12-31 23:55:00", tz="utc"),
source="AWS",
**kwargs,
):
""" Method retrieves data from wtk and makes actuals(DataFrame) and scenarios(dictionary) containing power conditioned (low, medium, and high) tables(DataFrame)
and quantiles(ndarray) corresponding to input percentiles(tuple), default is (10,90).
e.g. rts.make_tables(percentiles=(20,80))
Timestamps for actuals and scenarios: pick dates from 2007-01-01 to 2013-12-31
Required args:
percentiles - (tuple)
actuals_start - (pd.Timestamp)
actuals_end - (pd.Timestamp)
scenarios_start - (pd.Timestamp)
scenarios_end - (pd.Timestamp)
source - (str) placeholder
"""
# time window selection:
# one year, e.g. 2007, for the actuals
# start_of_data = pd.Timestamp('2007-01-01 00:00:00', tz='utc')
# end_of_data = pd.Timestamp('2007-12-31 23:55:00', tz='utc')
# start_of_data = pd.Timestamp('2007-01-01 00:00:00').tz_localize('US/Pacific')
# end_of_data = pd.Timestamp('2007-12-31 23:55:00').tz_localize('US/Pacific')
# one year, 2013, for the actuals
# start_of_data = pd.Timestamp('2013-01-01 00:00:00', tz='utc')
# end_of_data = pd.Timestamp('2013-12-31 23:55:00', tz='utc')
wind_sites_df = self.wind_sites
actuals_df = self.retrieve_wtk_data(actuals_start, actuals_end)
# index does not have timezone in Devon's code, but it should
# actuals_df.index = actuals_df.index.tz_localize(None)
###### fix "over" problem
# max actual power can not go above GenMWMax
# note that, it does not go "under" - WTK takes care of that
max_gen_capacity = self.wind_generators.set_index("GenUID")["GenMWMax"]
actuals_df = actuals_df[actuals_df < max_gen_capacity].fillna(max_gen_capacity)
# grid.actuals = new_df
# add total power column (across all buses)
actuals_df["TotalPower"] = actuals_df.sum(axis=1).values
self.actuals = actuals_df
# for scenarios_df, just change time window, last 6 years
# start_of_data = pd.Timestamp('2008-01-01 00:00:00', tz='utc')
# end_of_data = pd.Timestamp('2013-12-31 23:55:00', tz='utc')
# start_of_data = pd.Timestamp('2008-01-01 00:00:00').tz_localize('US/Pacific')
# end_of_data = pd.Timestamp('2013-12-31 23:55:00').tz_localize('US/Pacific')
# for scenarios_df, just change time window, first 6 years
# start_of_data = pd.Timestamp('2007-01-01 00:00:00', tz='utc')
# end_of_data = pd.Timestamp('2012-12-31 23:55:00', tz='utc')
scenarios_df = self.retrieve_wtk_data(scenarios_start, scenarios_end)
# index does not have timezone in Devon's code, but it should
# scenarios_df.index = scenarios_df.index.tz_localize(None)
# add total power column (across all generators)
scenarios_df["TotalPower"] = scenarios_df.sum(axis=1).values
# compute deviations
# i.e. make error calculations (instead of full power at each bus, have deviations from persistence)
# deviations from persistence at generators
gen_deviations_array = (
scenarios_df.iloc[1:, :-1].values - scenarios_df.iloc[:-1, :-1].values
)
# total power deviations
total_power_deviations_array = (
scenarios_df["TotalPower"].values[1:]
- scenarios_df["TotalPower"].values[:-1]
)
# drop last row
scenarios_df.drop(scenarios_df.tail(1).index, inplace=True)
# record deviations
scenarios_df.iloc[:, :-1] = gen_deviations_array
scenarios_df["Deviation"] = total_power_deviations_array
# power conditioning:
# create total power conditioning tables based on chosen percentiles
# i.e. splits scenarios_df into three subsets (low, medium, high)
# should be generalized
scenarios = {}
# get quantiles
quantiles = np.percentile(scenarios_df["TotalPower"].values, percentiles)
scenarios["quantiles"] = quantiles
# print('quantiles = {}'.format(quantiles))
# create tables
scenarios["low"] = scenarios_df.loc[
scenarios_df["TotalPower"] < quantiles[0]
].copy()
scenarios["medium"] = scenarios_df.loc[
(scenarios_df["TotalPower"] > quantiles[0])
& (scenarios_df["TotalPower"] < quantiles[1])
].copy()
scenarios["high"] = scenarios_df.loc[
scenarios_df["TotalPower"] > quantiles[1]
].copy()
self.scenarios = scenarios
# new method
def generate_wind_scenarios(
self,
timestamp,
power_quantiles=[0.0, 0.1, 0.9, 1.0],
sampling_method="monte carlo",
n_scenarios=5,
n_periods=1,
random_seed=25,
output_format=0,
**kwargs,
):
"""Method to generate scenarios
Required Args:
timestamp - (pd.Timestamp)
power_quantiles - (list) quantiles for power conditioning
sampling_method - (string) either "importance" or "monte carlo"
n_scenarios - (integer) number of scenarios to draw
n_periods - (integer) number of periods for each scenario
Returns:
scenarios_df - (pd.DataFrame) multi-indexed DataFrame with all scenarios
"""
actuals_df = self.actuals
scenarios_df = self.scenarios
# # TODO: basic checks:
# # still needs work to Raise ValueExceptions
# for timestamp in timestamps:
# # type
# if type(timestamp) != pd._libs.tslibs.timestamps.Timestamp:
# print("timestamps must be pandas.Timestamp type")
# return
# # t0 and t1 must be in actuals.index
# if not (timestamps[0] in actuals_df.index) or not (
# timestamps[1] in actuals_df.index
# ):
# print(
# "timestamps[0] and timestamps[1] must be between {} and {}".format(
# actuals_df.index.min(), actuals_df.index.max()
# )
# )
# return
# # check if t0 < t1
# if not (timestamps[0] < timestamps[1]):
# print("timestamps[0] must be < timestamps[1]")
# return
# all needed timestamps
# all needed timestamps
timestamps = pd.date_range(
start=timestamp - pd.Timedelta("5min"), periods=n_periods + 1, freq="5min"
)
# print(timestamps)
# power at t0
total_power_t0 = actuals_df.loc[timestamps[0]]["TotalPower"]
# total_power_t0
# power conditioning
# .iloc[:-n_periods] is so that we can find consecutive timestamps for multiperiod scenarios
power_bins = pd.qcut(
scenarios_df["TotalPower"].iloc[:-n_periods], q=power_quantiles,
)
# power_bins
# which power bin does it belong to?
# for power_bin in power_bins.unique():
for power_bin in power_bins.cat.categories:
if total_power_t0 in power_bin:
break
# power_bin
# wanted power bin
p_bin = power_bins.loc[power_bins == power_bin]
if sampling_method == "monte carlo":
# sample of n_scenarios timestamps that are in wanted bin
sample_timestamps = p_bin.sample(n_scenarios, random_state=random_seed).index
# # # df of weights to return (all ones, redundant, but have to return something)
weights_df = pd.DataFrame(index=[timestamps[1]], columns=range(1, n_scenarios + 1))
weights_df.loc[timestamps[1]] = dict(
zip(range(1, n_scenarios + 1), np.ones(n_scenarios))
)
elif sampling_method == "importance":
# importance sample from an appropriate power bin
# cost of MW per 5-min period
loss_of_load_cost = 10000 / 12.0
spilled_wind_cost = 0.001
# compute costs of each 1-period scenario
cost_1 = scenarios_df["Deviation"].apply(
lambda dev: np.abs(loss_of_load_cost * dev)
if dev < 0
else np.abs(spilled_wind_cost * dev)
)
# print("any neg values in cost_1? {}".format((cost_1 < 0).any()))
# based on n_periods, compute cost_n, rolling window sum (if n_periods is 1, this will be the same as cost_1)
cost_n = cost_1.rolling(n_periods).sum().shift(-(n_periods - 1))
# rolling window operation loses digits, have to round (so we don't have negative values when adding zeroes)
cost_n = cost_n.round(self.WTK_DATA_PRECISION)
if (cost_n < 0).any():
print("any neg values in cost_n? {}".format((cost_n < 0).any()))
# IS
# probability mass function g(s) i.e. importance distribution
importance_probs = cost_n.loc[p_bin.index] / cost_n.loc[p_bin.index].sum()
# sample of n_scenarios timestamps that are in wanted bin with probabilities given by cost_n series
sample_timestamps = p_bin.sample(
n_scenarios, random_state=random_seed, weights=importance_probs
).index
# IS weights: f(s)/g(s), i.e. nominal/importance
importance_weights = (1 / p_bin.size) / importance_probs.loc[sample_timestamps]
# df of weights to return
weights_df = pd.DataFrame(index=[timestamps[1]], columns=range(1, n_scenarios + 1))
weights_df.loc[timestamps[1]] = dict(
zip(range(1, n_scenarios + 1), importance_weights.values)
)
# initialize multi-indexed df for all scenarios to return
iterables = [[timestamps[1]], range(1, n_scenarios + 1), timestamps[1:]]
# iterables
index = pd.MultiIndex.from_product(
iterables, names=["sim_timestamp", "scenario_nr", "period_timestamp"]
)
# index
multi_scenarios_df = pd.DataFrame(index=index, columns=actuals_df.columns[:-1])
# multi_scenarios_df
# now find wanted periods for each scenario i.e. consecutive timestamps in scenario_df
for sample_i, sample_timestamp in enumerate(sample_timestamps):
#print("sample_timestamp={}".format(sample_timestamp))
# needed timestamps
# pd.date_range(start=sample_timestamp, periods=n_periods, freq="5min")
# deviation will be a df even with 1 period because loc is used with pd.date_range
deviation = scenarios_df.loc[
pd.date_range(start=sample_timestamp, periods=n_periods, freq="5min")
]
# change deviation index to match the actuals we are adding it to
deviation.index = timestamps[1:]
# make scenario df for each sample_timestamp (could be done outside the loop)
scenario_df = pd.DataFrame(
index=timestamps[1:], columns=deviation.columns
).drop(["Deviation", "TotalPower"], axis=1)
# print("\nscenario_df:")
# scenario_df
# print("\nactual:")
# actuals_df.loc[timestamps[0]]
# first take actual
running_sum = actuals_df.loc[timestamps[0]].drop("TotalPower")
# now keep adding deviations to the above actual
for timestamp in timestamps[1:]:
running_sum += deviation.loc[timestamp].drop(
["TotalPower", "Deviation"],
)
scenario_df.loc[timestamp] = running_sum
# "under/over" problem
# for GenMWMax info we take wind generators and reindex by GenUID
wind_generators_df = self.wind_generators.set_index("GenUID")
# under
# .where replaces False
scenario_df.where(scenario_df >= 0.0, other=0.0, inplace=True)
# over
# scenario < wind_generators_df["GenMWMax"]
scenario_df=scenario_df.where(
scenario_df <= wind_generators_df["GenMWMax"],
other=wind_generators_df["GenMWMax"],
axis=1,
#inplace=True,
)
# scenario
# # add each scenario to the multi-indexed df to return
multi_scenarios_df.loc[(timestamps[1], sample_i + 1,)] = scenario_df.values
return multi_scenarios_df, weights_df
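# Hedged usage sketch for generate_wind_scenarios (illustrative only): assumes
# make_tables() has been run and that the chosen timestamp (minus 5 minutes)
# lies inside the actuals window; the timestamp below is a placeholder.
# sim_time = pd.Timestamp("2007-07-01 12:00:00", tz="utc")
# scenarios_df, weights_df = grid.generate_wind_scenarios(
#     sim_time, sampling_method="importance", n_scenarios=5, n_periods=2)
# scenarios_df is indexed by (sim_timestamp, scenario_nr, period_timestamp);
# weights_df holds the importance-sampling weights per scenario.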
def generate_wind_scenarios2(
self,
timestamps,
sampling_method="monte carlo",
n_scenarios=5,
random_seed=25,
output_format=0,
**kwargs,
):
"""Function to generate scenarios
Required Args:
timestamps - (iterable of pd.Timestamp) [t0,t1,...tn]
t0 - initial/current timestamp, power conditioning will be done based on the actual power at this timestamp
t1 - time for which we're making dispatch decisions,
t2,..,tn - required only if multiperiod scenarios are needed
sampling_method - (string) either "importance" or "monte carlo"
n_scenarios - (integer) number of scenarios to draw
if n_scenarios == 1 and sampling_method == monte carlo, will return "deterministic case" (zero deviations)
Returns:
actual_df - (pd.DataFrame) actual power for each GenUID (columns) at t1 (index)
forecast_df - (pd.DataFrame) actual power for each GenUID (columns) at t0 (index), at t1 values are the same because
of persistence forecast
scenarios_dict - (dictionary) of scenarios:
key - (integer) 1 through <n_scenarios>
value - (pd.DataFrame) one scenario: power deviations from persistence for each BusNum (columns)
at index t0, all zeros (no deviation)
at index t1, actual @ t1 - actual @ t0 (power deviation from t0)
at index t2, actual @ t2 - actual @ t0 (power deviation from t0)
etc...
"""
# print('time_start={}'.format(time_start))
# print('time_end={}'.format(time_end))
# these were made with make_tables method
actuals_df = self.actuals
quantiles = self.scenarios.get("quantiles")
scenarios_low_power_df = self.scenarios.get("low")
scenarios_med_power_df = self.scenarios.get("medium")
scenarios_high_power_df = self.scenarios.get("high")
# tables list
tables = [
scenarios_low_power_df,
scenarios_med_power_df,
scenarios_high_power_df,
]
# just concat them all (then multiperiod is much simpler)
all_df = pd.concat(tables)
all_df.sort_index(inplace=True)
# basic checks: still needs work for multiperiod
for timestamp in timestamps:
# type
if type(timestamp) != pd._libs.tslibs.timestamps.Timestamp:
print("timestamps must be pandas.Timestamp type")
return
# t0 and t1 must be in actuals.index
if not (timestamps[0] in actuals_df.index) or not (
timestamps[1] in actuals_df.index
):
print(
"timestamps[0] and timestamps[1] must be between {} and {}".format(
actuals_df.index.min(), actuals_df.index.max()
)
)
return
# check if t0 < t1
if not (timestamps[0] < timestamps[1]):
print("timestamps[0] must be < timestamps[1]")
return
# actual power at t1 (same thing whether it is single- or multi-period)
actual_df = actuals_df.loc[
timestamps[1] : timestamps[1]
].copy() # loc[t:t] returns frame, loc[t] returns series
# actual power at t0, same values at index t1 (persistence forecast)
forecast_df = pd.DataFrame(index=timestamps[:2], columns=actual_df.columns)
forecast_df.loc[timestamps[0]] = actuals_df.loc[timestamps[0]].copy()
forecast_df.loc[timestamps[1]] = actuals_df.loc[
timestamps[0]
].copy() # persistence
# deterministic case: if n_scenarios == 1 and sampling_method == `monte carlo`
if n_scenarios == 1 and sampling_method == "monte carlo":
# initialize scenario_df (same index as forcast_df)
scenario_df = forecast_df.copy()
# first row of zeros, because Devon says so
scenario_df.loc[timestamps[0]] = 0.0
# second row of scenario_df is also 0, because it is deterministic case
scenario_df.loc[timestamps[1]] = 0.0
# drop TotalPower column
scenario_df.drop("TotalPower", axis=1, inplace=True)
scenarios_dict = {}
scenarios_dict[1] = scenario_df
# drop Total column from actual_df and forecast_df
forecast_df.drop("TotalPower", axis=1, inplace=True)
actual_df.drop("TotalPower", axis=1, inplace=True)
return scenarios_dict, actual_df, forecast_df
# power conditioning on total power at t0
actual_total_power = actuals_df.loc[timestamps[0]]["TotalPower"]
# print('Actual total power = {}'.format(actual_total_power))
if actual_total_power < quantiles[0]:
# draw scenarios from low power table
# print('using low power table (total power < {:.2f})'.format(quantiles[0]))
scenarios_df = scenarios_low_power_df.copy()
elif actual_total_power > quantiles[1]:
# draw scenarios from high power table
# print('using high power table (total power > {:.2f})'.format(quantiles[1]))
scenarios_df = scenarios_high_power_df.copy()
else:
scenarios_df = scenarios_med_power_df.copy()
# draw scenarios from medium power table
# print('using medium power table ({:.2f} < total power < {:.2f})'.format(quantiles[0],quantiles[1]))
# add weights column (sampling probabilities) depending on the sampling method
if sampling_method == "importance":
# temporary solution, cost function should come in as one of the variables
# if importance sampling (using low fidelity loss function)
# parameters of linear loss function
loss_of_load_cost = 1000 / 3.0
spilled_wind_cost = 0.001
scenarios_df["Weight"] = scenarios_df.apply(
lambda row: row["Deviation"] * (-loss_of_load_cost)
if row["Deviation"] < 0
else row["Deviation"] * spilled_wind_cost,
axis=1,
)
# some 'duplicate axis' error shows up when doing the below two lines... so let's do it the slow way as above
# this should be prebuilt anyway
# apply linear loss function to deviation column to get weight column
# scenarios_df.loc[scenarios_df['Deviation']<0,'Weight'] = scenarios_df['Deviation']*(-loss_of_load_cost)
# scenarios_df.loc[scenarios_df['Deviation']>0,'Weight'] = scenarios_df['Deviation']*(spilled_wind_cost)
# normalize
scenarios_df["Weight"] = (
scenarios_df["Weight"] / scenarios_df["Weight"].sum()
)
elif sampling_method == "monte carlo":
# if monte carlo method, all weights are equal
scenarios_df["Weight"] = 1.0 / len(scenarios_df)
# now draw random sample using weight column (the larger the weight the more likely it is to draw that sample)
# draw n_scenarios, one by one, accept only if tn is before t_final (which is 2013-12-31 23:50:00, but it could be different)
# max_index_in_tables = max(scenarios_low_power_df.index.max(),scenarios_med_power_df.index.max(),scenarios_high_power_df.index.max())
# same as above in a more pythonic way
# max_index_in_tables = max([table.index.max() for table in tables])
# max_index_in_tables = all_df.index.max()
# time gaps between t1 and ti, i=2,3,...n
# for single-period this will be empty list
time_gaps_bw_t1_ti = [timestamp - timestamps[1] for timestamp in timestamps[2:]]
scenarios_dict = {}
key = 1
while len(scenarios_dict) < n_scenarios:
# print('key={}'.format(key))
# print('len(scenarios_dict)={}'.format(len(scenarios_dict)))
# while we haven't filled up scenario dict, keep sampling
scenario_sample_df = scenarios_df.sample(
n=1, weights="Weight", random_state=random_seed + key
)
sample_timestamp = scenario_sample_df.index[0]
# print("sample_timestamp={}".format(sample_timestamp))
# print('sample_timestamp={}'.format(sample_timestamp))
# check if all indices (every 5-min) from sample_timestamp to sample_timestamp + time_gap_bw_t1_tn are in the tables (if not start over)
# this check only needed only if multi-period scenarios are required i.e. len(timestamps)>2
if time_gaps_bw_t1_ti:
needed_timestamps = pd.date_range(
sample_timestamp,
sample_timestamp + time_gaps_bw_t1_ti[-1],
freq="5min",
)
else:
needed_timestamps = []
# if any required timestamp is missing from the tables, bump the seed and start over with a new sample
if any(timestamp not in all_df.index for timestamp in needed_timestamps):
random_seed += 1
continue
needed_timestamps_df = all_df.loc[needed_timestamps]
# drop Deviation and Weight column, no longer needed (they are for conditioning and sampling)
scenario_sample_df.drop(["Deviation", "Weight"], axis=1, inplace=True)
# make scenario df
# initialize scenario_df (index as given timestamps)
scenario_df = pd.DataFrame(
index=timestamps, columns=scenario_sample_df.columns
)
# deviations at index t0 are all zeros
scenario_df.loc[timestamps[0]] = 0.0
# deviations at index t1 are the scenarios_sample_df's only entry
scenario_df.loc[timestamps[1]] = scenario_sample_df.iloc[0].copy()
# all subsequent entries at t2,...,tn are found according to the time distance from t1 (running sum)
for i, timestamp in enumerate(timestamps[2:]):
# print("timestamp={}".format(timestamp))
# timedelta from t1
# once we stepped all the way to timestamp, record moving sum to the scenario_df
scenario_df.loc[timestamp] = needed_timestamps_df.loc[
sample_timestamp : sample_timestamp + time_gaps_bw_t1_ti[i]
].sum()
# drop TotalPower column
# print('scenario_df columns:')
# print(scenario_df.columns)
scenario_df.drop("TotalPower", axis=1, inplace=True)
##### fix "over" and "under" capacity problem
# "over" = if scenario power (forecast + scenario) goes over GenMWMax capacity
# "under" = if scenario power (forecast + scenario) goes under 0 i.e. negative generation
# for GenMWMax info we take wind generators and reindex by GenUID
wind_generators_df = self.wind_generators.set_index(
"GenUID"
) # this returns a reindexed copy
for timestamp in scenario_df.index[1:]:
# "over"
# if element of over is negative, we need to add it to current and consecutive timestamps
# (if positive don't do anything. we accomplish this by zeroing positive elements of over)
over = wind_generators_df["GenMWMax"] - (
forecast_df.iloc[0].drop("TotalPower") + scenario_df.loc[timestamp]
)
# if there are any negative numbers in over, add them to the tail of scenario_df
if (over < 0).any():
over[over > 0.0] = 0.0
scenario_df.loc[timestamp:] = scenario_df.loc[timestamp:] + over
# "under"
# if element of under is negative, we need to subtract it from current and consecutive timestamps
# (if positive don't do anything. we accomplish this by zeroing positive elements of under)
under = (
forecast_df.iloc[0].drop("TotalPower") + scenario_df.loc[timestamp]
)
if (under < 0).any():
under[under > 0.0] = 0.0
scenario_df.loc[timestamp:] = scenario_df.loc[timestamp:] - under
# add scenario_df to scenarios_dict
scenarios_dict[key] = scenario_df
key += 1
# drop Total column from actual_df and forecast_df
forecast_df.drop("TotalPower", axis=1, inplace=True)
actual_df.drop("TotalPower", axis=1, inplace=True)
if output_format == 0:
return scenarios_dict, actual_df, forecast_df
# make multi indexed df out of the scenarios_dict
elif output_format == 1:
actual_s = actual_df.iloc[0]
forecast_s = forecast_df.iloc[0]
iterables = [
[timestamps[1]],
list(scenarios_dict.keys()),
scenarios_dict[1].index[1:].to_list(),
]
index = pd.MultiIndex.from_product(
iterables, names=["sim_timestamp", "scenario_nr", "period_timestamp"]
)
multi_scenarios_df = pd.DataFrame(
index=index, columns=scenarios_dict[1].columns
)
# stick data to df from dict
for i, df in scenarios_dict.items():
# print("scenario: {}".format(i))
period_timestamps = df.index.to_list()
for period_timestamp in period_timestamps[
1:
]: # don't take the first one
# print("period timestamp: {}".format(period_timestamp))
multi_scenarios_df.loc[timestamps[1], i, period_timestamp] = (
df.loc[period_timestamp] + forecast_s
)
# return total wind power (as opposed to deviation)
# multi_scenarios_df=multi_scenarios_df+forecast_s
return multi_scenarios_df, actual_s
``` |
{
"source": "jmaarse/fissync",
"score": 3
} |
#### File: jmaarse/fissync/filehash.py
```python
import binascii
import hashlib
import json
import os
import stat
import sys
def hash_file(path):
"""Return sha1 hash of the file at the given path"""
print "hashing file:", path
hash = hashlib.sha1()
with open(path, "rb") as f:
hash.update(f.read(1024))
return hash.digest()
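# Illustrative usage sketch only; the path below is a placeholder.
# digest = hash_file("/tmp/example.txt")
# print binascii.hexlify(digest)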
def map_file_tree(path, mapping = None):
"""Create hash and modify-time mapping for a file tree
'mapping' is a dictionary mapping file names to a tuple of
last-modified-time and sha1 hash of the file. This function
walks the tree at the given 'path' and inserts or updates
mappings for all regular files in the tree."""
for path, subdirs, files in os.walk(path):
for f in files:
filepath = os.path.abspath(os.path.join(path, f).decode("utf8"))
statInfo = os.lstat(filepath)
if not stat.S_ISREG(statInfo.st_mode):
#print "Skipping non-regular file:", filepath
continue
timestamp = statInfo.st_mtime
if mapping is None:
hash = hash_file(filepath)
elif (filepath in mapping) and mapping[filepath][0] == timestamp:
# temporarily unhexlify the hash so it can be written
hash = binascii.unhexlify(mapping[filepath][1])
else:
hash = hash_file(filepath)
# temporarily hexlify the hash so it can be written
mapping[filepath] = timestamp, binascii.hexlify(hash)
#print binascii.hexlify(hash), timestamp, filepath
if __name__ == "__main__":
mapping = dict()
if len(sys.argv) > 1:
try:
with open(sys.argv[2], "rb") as indexFile:
mapping = json.load(indexFile)
except IOError:
# Index file doesn't exist yet: we'll write one later
pass
# TODO: remove mappings for deleted files
map_file_tree(sys.argv[1], mapping)
if len(sys.argv) > 1:
with open(sys.argv[2], "wb") as indexFile:
json.dump(mapping, indexFile, indent = 4)
#print "Hash: ", binascii.hexlify(filehash(sys.argv[1]))
``` |
{
"source": "jmaat/openff-spellbook",
"score": 3
} |
#### File: offsb/op/geometry.py
```python
import treedi.tree
import numpy as np
class AngleOperation( treedi.tree.TreeOperation):
def __init__( self, source, name):
super().__init__( source, name)
def op(self, mol, idx):
"""calculates angle between origin and consecutive atom pairs"""
atoms = mol.get( "geometry")[ np.newaxis, :, :]
mags = np.linalg.norm(atoms[:,[idx[0],idx[2]],:] - atoms[:,idx[1],:][:,np.newaxis,:], axis=2)
atoms_trans = atoms - atoms[:,idx[1],:][:,np.newaxis,:]
unit = atoms_trans[:,[idx[0],idx[2]],:] / mags[:,:,np.newaxis]
costheta = (unit[:,0,:] * unit[:,1,:]).sum(axis=1)
np.clip(costheta, -1.0, 1.0, out=costheta)
ret = np.arccos(costheta)*180/np.pi
return ret
class BondOperation( treedi.tree.TreeOperation):
def __init__( self, source, name):
super().__init__( source, name)
def op(self, mol, idx):
"""calculates distance from first atom to remaining atoms"""
atoms = mol.get( "geometry")[ np.newaxis, :, :]
#print( "Have", len(atoms[0]), "atoms and index is", idx)
return np.linalg.norm(atoms[:,idx[1],:] - atoms[:,idx[0],:], axis=1)
class TorsionOperation( treedi.tree.TreeOperation):
def __init__( self, source, name):
super().__init__( source, name)
def op(self, mol, idx):
"""calculates proper torsion of [i, j, k, l]"""
atoms = mol.get( "geometry")[ np.newaxis, :, :]
noncenter = [idx[0]]+idx[2:]
mags = np.linalg.norm(atoms[:,noncenter,:] - atoms[:,idx[1],:][:,np.newaxis,:], axis=2)
atoms_trans = atoms - atoms[:,idx[1],:][:,np.newaxis,:]
unit = atoms_trans[:,noncenter,:] / mags[:,:,np.newaxis]
#these are all Nx3
v0 = -unit[:,0,:]
v1 = unit[:,1,:]
v2 = unit[:,2,:]-unit[:,1,:]
w1 = np.cross(v0,v1)
w2 = np.cross(v1,v2)
w1_mag = np.linalg.norm(w1,axis=1)
w2_mag = np.linalg.norm(w2,axis=1)
mask = (w1_mag * w2_mag) > 0
# should be Nx1 costhetas
costheta = np.ones((atoms.shape[0]))
costheta[mask]= (w1[mask] * w2[mask]).sum(axis=1) / (w1_mag[mask]*w2_mag[mask])
np.clip(costheta, -1.0, 1.0, out=costheta)
theta = np.arccos(costheta)*180/np.pi
#distance = np.zeros((atoms.shape[0]))
#distance[mask] = ((w2[mask]*v0[mask]).sum(axis=1)/w2_mag[mask])
##theta[distance > 0] = 180 - theta[distance > 0]
theta[np.abs(theta) >= 180] %= 180.0
return theta
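# Hedged usage sketch (illustrative only): `op` expects `mol` to be a dict-like
# object whose "geometry" entry is an (N, 3) coordinate array, and `idx` the
# four atom indices [i, j, k, l] of the torsion; `linquads` is the SMILES-search
# source used elsewhere in this repository, and the indices are placeholders.
# torsions = TorsionOperation(linquads, name="torsions")
# theta = torsions.op(mol, [0, 1, 2, 3])  # proper torsion angle in degrees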
class ImproperTorsionOperation( treedi.tree.TreeOperation):
def __init__( self, source, name):
super().__init__( source, name)
def op(self, mol, idx):
"""calculates improper torsion of [i, center, j, k]"""
atoms = mol.get( "geometry")[ np.newaxis, :, :]
noncenter = [idx[0]]+idx[2:]
mags = np.linalg.norm(atoms[:,noncenter,:] - atoms[:,idx[1],:][:,np.newaxis,:], axis=2)
atoms_trans = atoms - atoms[:,idx[1],:][:,np.newaxis,:]
unit = atoms_trans[:,noncenter,:] / mags[:,:,np.newaxis]
#these are all Nx3
v0 = -unit[:,0,:]
v1 = unit[:,1,:]-unit[:,0,:]
v2 = unit[:,1,:]-unit[:,2,:]
w1 = np.cross(v0,v1)
w2 = np.cross(v1,v2)
w1_mag = np.linalg.norm(w1,axis=1)
w2_mag = np.linalg.norm(w2,axis=1)
mask = (w1_mag * w2_mag) > 0
# should be Nx1 costhetas
costheta = np.ones((atoms.shape[0]))
costheta[mask]= (w1[mask] * w2[mask]).sum(axis=1) / (w1_mag[mask]*w2_mag[mask])
np.clip(costheta, -1.0, 1.0, out=costheta)
theta = np.arccos(costheta)*180/np.pi
distance = np.zeros((atoms.shape[0]))
distance[mask] = ((w2[mask]*v0[mask]).sum(axis=1)/w2_mag[mask])
#theta[distance > 0] = 180 - theta[distance > 0]
theta[distance < 0] *= -1
return theta
```
#### File: offsb/qcarchive/plot_td.py
```python
import offsb.tools.const as const
import pickle
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
def plot_td( x, y, color=None, title="", figname=None):
fig = plt.figure()
if color is not None:
color -= np.min(color)
plt.scatter( x, y, marker='o', vmin=0.0, vmax=color.max(), c=color, cmap=cm.get_cmap("jet"))
plt.ylim(y.min(), y.max())
plt.colorbar()
plt.suptitle( title)
if figname is not None:
plt.savefig( figname)
plt.close( fig)
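# Hedged usage sketch (illustrative only): `ang`, `vals` and `energies` are
# placeholder 1-D arrays of equal length (e.g. scan angles, measured values,
# and relative energies used to color the points).
# plot_td(ang, vals, color=energies, title="b5 torsion scan", figname="b5.png")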
def select_param( lbl):
oFF_param = None
letter = lbl[0]
if letter == 'b':
oFF_param = "Bonds"
elif letter == 'a':
oFF_param = 'Angles'
elif letter == 't':
oFF_param = "ProperTorsions"
elif letter == 'i':
oFF_param = "ImproperTorsions"
elif letter == 'n':
oFF_param = "vdW"
return oFF_param
def plot_displacement( ax, dat, label_obj, displacement_fn=None):
label = label_obj.get( "id")
if displacement_fn is None:
if 'b' in label:
displacement_fn = bond_disp
elif 'a' in label:
displacement_fn = angle_disp
if displacement_fn is not None:
kT = (.001987*298.15)
equil, delta = displacement_fn( label_obj, thresh=kT)
color='blue'
if((dat < (equil - delta)).any() or (dat > (equil + delta)).any()):
color='red'
#print(label + "= R", end=" ")
elif(dat.max() < equil or dat.min() > equil):
#print(label + "= Y", end=" ")
color='yellow'
else:
color='green'
ax.axhline(y=equil, ls='-', marker='.', color='black', ms=20, mec='black', mfc=color)
ax.axhline(y=equil + delta, ls='--', marker='.', color='black', ms=10, mec='black', mfc=color)
ax.axhline(y=equil - delta, ls='--', marker='.', color='black', ms=10, mec='black', mfc=color)
def select_data_of_oFFlabel_from_td(QCAtree, oFFtree, datatree, query, entry=None, verbose=False):
oFF_param = select_param( query)
hits = []
angles = []
result = []
visited = set()
# iter_fn = node_iter_torsiondriverecord_minimum
if entry is None:
entries = QCAtree.iter_entry()
elif not hasattr( entry, "__iter__"):
entries = [entry]
else:
entries = entry
for entry in entries:
ID = entry.index
obj = QCAtree.db.get( entry.payload)
smiles = obj.get( "entry").attributes.get( "canonical_smiles")
status = obj.get( "data").get( "status")[:]
oFFobj = oFFtree.db.get( entry.payload)
labels = oFFobj.get( "data")
found = False
#for mol in QCAtree.node_iter_optimization_minimum( entry, select="Molecule"):
for mol in QCAtree.node_iter_torsiondriverecord_minimum( entry, select="Molecule"):
if mol.index in visited:
continue
visited.add(mol.index)
for pair, label in labels.get( oFF_param).items():
#print( pair, label)
if label != query:
continue
dobj = datatree.db.get( mol.payload)
angle = eval(list( QCAtree.node_iter_to_root( mol, select="Constraint"))[0].payload)[0]
molid = mol.index
assert mol.index in [ x.index for x in QCAtree.node_iter_to_root( mol)]
vals = dobj.get( pair)
result.append([ID, molid, query, pair, angle, vals, smiles])
pair="-".join([str(k) for k in pair])
if verbose:
print("Entry({:11s}) {:16s} {:16s} {} {:4s} {:4.0f} {} {:64s}".format( status, ID, molid, pair, query, angle, vals, smiles))
return result
def bond_disp( obj, thresh=1.0):
force_k = obj.get( "k")
force_k = force_k / force_k.unit
length = obj.get( "length")
length = length / length.unit
delta = (2*(thresh)/force_k)**.5
return length, delta
def angle_disp( obj, thresh=1.0):
force_k = obj.get( "k")
force_k = force_k / force_k.unit
length = obj.get( "angle")
length = length / length.unit
delta = (2*(thresh)/force_k)**.5
return length, delta
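# Note: both *_disp helpers return the equilibrium value and the displacement
# `delta` at which the harmonic penalty 0.5*k*delta**2 equals the threshold,
# i.e. delta = sqrt(2*thresh/k). With the kT threshold used in plot_displacement
# (0.001987*298.15 ~ 0.59) and a hypothetical k of 100, delta would be
# sqrt(2*0.59/100) ~ 0.11 in the parameter's own units.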
#def torsion_disp( obj, thresh=1.0):
# force_k = obj.get( "k")
# force_k = force_k / force_k.unit
# length = obj.get( "angle")
# length = length / length.unit
# delta = (2*(thresh)/force_k)**.5
# return length, delta
def plot_td_all_minima( ang, vals, label_obj, displacement_fn=None):
"""
plots torsiondrive data of all optimizations (does not connect points)
"""
rows = 1
fig = plt.figure(figsize=(8,4*rows),dpi=120)
ax_grid = []
for r in range(rows):
ax = [plt.subplot2grid((rows,3),(r,0), colspan=2, fig=fig)]
ax.append(plt.subplot2grid((rows,3),(r,2), fig=fig, sharey=ax[0]))
ax_grid.append(ax)
label = label_obj.get( "id")
ax_grid[0][0].plot(ang, vals, lw=0, ls='-', marker='.' , label=label, ms=2)
ax_grid[0][0].legend(loc='upper right')
ax_grid[0][1].hist(vals,bins=100, histtype='step', orientation='horizontal')
plot_displacement( ax_grid[0][0], vals, label_obj, displacement_fn)
plot_displacement( ax_grid[0][1], vals, label_obj, displacement_fn)
return fig
def plot_td_minima( ang, vals, atoms, label_obj, molid=None, displacement_fn=None):
"""
plots the lowest energy torsiondrive, using the optimizations which gave the lowest energy
atoms should be [( molid, *group)]
molid will only plot those which match e.g. ['2', '3', '4']
"""
import matplotlib.pyplot as plt
import numpy as np
rows = 1
fig = plt.figure(figsize=(8,4*rows),dpi=120)
ax_grid = []
for r in range(rows):
ax = [plt.subplot2grid((rows,3),(r,0), colspan=2, fig=fig)]
ax.append(plt.subplot2grid((rows,3),(r,2), fig=fig, sharey=ax[0]))
ax_grid.append(ax)
label = label_obj.get( "id")
labels = set()
#print(ang[0])
color='blue'
if molid is not None:
if not hasattr( molid, "__iter__"):
molid = [molid]
def gen_subset( ang, vals, mask):
ang_i = ang[ mask ]
vals_i = vals[ mask ]
srt = np.argsort(ang_i)
ang_i = ang_i[srt]
vals_i = vals_i[srt]
return ang_i, vals_i
for group in atoms:
if (molid is not None) and (group[0] not in molid):
continue
mask = [x == group for x in atoms]
ang_i, vals_i = gen_subset( ang, vals, mask)
thislabel = label
if thislabel in labels:
thislabel=""
labels.add(label)
ax_grid[0][0].plot(ang_i, vals_i, lw=0.1, ls='-', marker='.' , color=color, label=thislabel, ms=2, alpha=.3)
ax_grid[0][0].legend(loc='upper right')
ax_grid[0][1].hist(vals,bins=100, histtype='step', orientation='horizontal')
plot_displacement( ax_grid[0][0], vals, label_obj, displacement_fn)
plot_displacement( ax_grid[0][1], vals, label_obj, displacement_fn)
return fig
def example_bonds():
with open( 'oFF-1.1.0.p', 'rb') as fid:
oFF = pickle.load(fid)
with open( 'bonds.p', 'rb') as fid:
bonds = pickle.load(fid)
with open( 'QCA.p', 'rb') as fid:
QCA = pickle.load(fid)
if QCA.db is None:
with open( 'QCA.db.p', 'rb') as fid:
QCA.db = pickle.load(fid).db
labels = list(oFF.db.get( "ROOT").get( "data").get( "Bonds").keys())
def collect_and_plot( QCA, oFF, measurement, query, entry=None, verbose=False):
ret = select_data_of_oFFlabel_from_td( QCA, oFF, measurement, query, entry=entry, verbose=verbose)
if len(ret) == 0:
return
val = []
[val.extend(x[5]) for x in ret]
val = np.array(val)
ang = np.array( [x[4] for x in ret])
label_obj = oFF.db[ "ROOT"][ "data"][ "Bonds"][ q]
atoms = [(x[0], *x[3]) for x in ret]
fig = plot_td_minima( ang , val * const.bohr2angstrom, atoms, label_obj, molid=None, displacement_fn=bond_disp)
if entry is None:
figname = q + "." + "all" + ".min.png"
else:
figname = q + "." + entry.payload + ".min.png"
fig.savefig( figname)
plt.close('all')
if entry is not None:
print( "{:4s} {:3d} {:s}".format( q, len(val), QCA.db[ entry.payload][ 'entry'].name ))
else:
print( "{:4s} {:3d} {:s}".format( q, len(val), QCA.name))
for q in labels:
collect_and_plot( QCA, oFF, bonds, q, entry=None, verbose=False)
for entry in QCA.iter_entry():
collect_and_plot( QCA, oFF, bonds, q, entry=entry, verbose=False)
if __name__ == "__main__":
example_bonds()
```
#### File: offsb/qcarchive/quick.py
```python
import pickle
import sys
import os
import numpy as np
from . import qcatree as qca
from ..op import geometry
from ..op import openforcefield
from ..search import smiles
import treedi.node as Node
def load():
import qcfractal.interface as ptl
NAME = "QCA"
# this builds the index, starting with the client node
NAMEP = NAME + ".p"
if os.path.exists( NAMEP ):
with open( NAMEP, 'rb') as fid:
QCA = pickle.load( fid)
if QCA.db is None:
with open( NAME + ".db.p", 'rb') as fid:
QCA.db = pickle.load( fid).db
else:
client = ptl.FractalClient()
client_node = Node.Node( payload=client, name="client")
QCA = qca.QCATree( NAME, root_payload=client, node_index=None, db=None )
ds = client.get_collection("torsiondrivedataset", "openff group1 torsions")
QCA.build_index( ds, drop=["Optimization"])
#ds = client.get_collection("optimizationdataset", "openff optimization set 1")
#QCA.build_index( ds, drop=["Hessian"])
QCA.to_pickle( db=True)
#QCA.set_root( client_node)
if 1:
#print( QCA.db.keys())
#client = QCA.db.get( QCA.node_index.get( QCA.root_index).payload).get( "data")
dsopt = client.get_collection("optimizationdataset", "openff optimization set 1")
QCA.build_index( dsopt, drop=["Gradient"])
#roche_opt = client.get_collection("optimizationdataset", "openff optimization set 1")
#roche_opt_node = node.Node(payload=roche_opt, name=roche_opt.data.name, index=roche_opt.data.id)
#QCA.add(client_node.index, roche_opt_node)
#QCA.expand_qca_dataset_as_tree(QCA.root.children[-1], skel=True)
if 0:
#QCA.cache_torsiondriverecord_minimum_molecules()
QCA.cache_optimization_minimum_molecules()
if 1:
QCA.to_pickle( db=False)
QCA.to_pickle( name=QCA.name + ".db.p", index=False, db=True)
return QCA
def process( QCA=None):
if QCA is None:
with open("QCA.p", 'rb') as fid:
QCA = pickle.load(fid)
if QCA.db is None:
with open("QCA.db.p", 'rb') as fid:
QCA.db = pickle.load(fid)
#QCA.cache_optimization_minimum_molecules( QCA.root)
def save( tree):
name = os.path.join(".", tree.name + ".p")
print("Saving: ", tree.ID, "as", name, end=" ... ")
tree.to_pickle( db=True, name=name)
print( "{:12.1f} MB".format(os.path.getsize( name)/1024**2))
#with open( name, 'wb') as fid:
# fid.write( obj)
ds_nodes = [ QCA.node_index.get( index) for index in QCA.node_index.get( QCA.root_index).children]
print( "ds_nodes", ds_nodes)
entries = list(QCA.iter_entry( ds_nodes))
def run_bonds( QCA):
pairs = smiles.SmilesSearchTree( "*~*", QCA, name="pairs")
bonds = geometry.BondOperation( pairs, name="bonds")
bonds.apply( targets=entries)
save( bonds)
def run_angles( QCA):
triples = smiles.SmilesSearchTree( "*~*~*", QCA, name="triples")
angles = geometry.AngleOperation( triples, name="angles")
angles.apply( targets=entries)
save( angles)
def run_torsions( QCA):
linquads = smiles.SmilesSearchTree("*~*~*~*", QCA, name="linquads")
torsions = geometry.TorsionOperation(linquads, name="torsions")
torsions.apply( targets=entries)
save( torsions)
def run_outofplane( QCA):
pyramidquads = smiles.SmilesSearchTree("*~*(~*)~*", QCA, name="pyramidquads")
outofplane = geometry.ImproperTorsionOperation(pyramidquads, name="outofplane")
outofplane.apply( targets=entries)
save( outofplane)
def run_oFF09( QCA):
oFF09 = openforcefield.OpenForceFieldTree('smirnoff99Frosst-1.0.9.offxml', QCA, 'oFF-1.0.9')
oFF09.apply( targets=entries)
save( oFF09)
def run_oFF10( QCA):
oFF10 = openforcefield.OpenForceFieldTree('smirnoff99Frosst-1.1.0.offxml', QCA, 'oFF-1.1.0')
oFF10.apply( targets=entries)
save( oFF10)
def run_oMM09( QCA):
from ..op import openmm
oMM10 = openmm.OpenMMEnergy('smirnoff99Frosst-1.0.9.offxml', QCA, 'oMM.oFF-1.0.9')
oMM10.apply( targets=entries)
save( oMM10)
def run_oMM10( QCA):
from ..op import openmm
oMM10 = openmm.OpenMMEnergy('smirnoff99Frosst-1.1.0.offxml', QCA, 'oMM.oFF-1.1.0')
oMM10.apply( targets=entries)
save( oMM10)
#run_bonds( QCA)
#run_angles( QCA)
#run_torsions( QCA)
#run_outofplane( QCA)
#run_oFF09( QCA)
#run_oFF10( QCA)
run_oMM09( QCA)
run_oMM10( QCA)
```
#### File: openff-spellbook/treedi/node.py
```python
from abc import ABCMeta, abstractmethod
from enum import Flag
NEW = -1
ON = 1
OFF = 0
CLEAN = 3
DIRTY = 2
# ON and CLEAN is 1 + 2 = 3
# ON and not CLEAN is 1 + 0 = 1
# OFF and CLEAN is 0 + 2 = 2
# OFF and not CLEAN is 0 + 0 = 0
STATE = { DIRTY: "Dirty", CLEAN: "Clean", NEW: "New", ON: "On", OFF: "Off"}
def stamp( obj):
from datetime import datetime
return datetime.now()
class Node():
index = 0
def __init__( self, parent=None, index=None, name="", tree=None, payload=None, stamp=None, state=(not 2**ON)):
self.parent = parent
if parent is not None:
parent.add( self)
self.children = []
if index is None:
self.index = 'N-' + str( Node.index)
Node.index += 1
else:
self.index = str( index)
self.name = name
self.stamp = stamp
self.payload = payload
self.state = state # see the NEW/ON/OFF/CLEAN/DIRTY constants defined at module level
self.tree = tree
IDcomponents = [self.index, self.name]
if tree:
IDcomponents.insert( 0, self.tree.ID)
self.ID=".".join( IDcomponents)
def skel( self):
return Node( index=self.index, name=self.name, state=self.state)
def set_state( self, state):
self.state = state
def __repr__( self):
return "<Node Name={} Tree={} IDX={} Payload={}>".format(
self.name, self.tree, self.index,
"No" if self.payload is None else str( self.payload))
return "<Node Name={} Tree={} IDX={} State={} Payload={}>".format(
self.name, self.tree, self.index, STATE.get( self.state),
"No" if self.payload is None else str( self.payload))
def add( self, v):
assert isinstance(v, Node)
self.children.append( v.index)
v.parent = self.index
``` |
{
"source": "jmaberk/DDB",
"score": 2
} |
#### File: DDB/prada_bayes_opt/acquisition_functions.py
```python
from __future__ import division
import numpy as np
from scipy.stats import norm
from scipy import stats
from sklearn.metrics.pairwise import euclidean_distances
from scipy.spatial.distance import cdist
from acquisition_maximization import acq_max
counter = 0
###############################################################################
'''
The max_bound_size variable below contols the size of the maximum allowed
bound. This is set at [0,max_bound_size] in each dimention.
###IMPORTANT###
This variable must be consistant in all of the following files:
1) acquisition_functions.py
2) bayesian_optimization_function.py
3) function.py
4) real_experiment_functon.py
'''
max_bound_size=10
###############################################################################
class AcquisitionFunction(object):
"""
An object to compute the acquisition functions.
"""
def __init__(self, acq, bb_function=False):
self.acq=acq
acq_name=acq['name']
if bb_function:
self.bb_function=bb_function
if 'WW' not in acq:
self.WW=False
else:
self.WW=acq['WW']
if 'WW_dim' not in acq:
self.WW_dim=False
else:
self.WW_dim=acq['WW_dim']
ListAcq=['ei']
# check valid acquisition function
IsTrue=[val for idx,val in enumerate(ListAcq) if val in acq_name]
#if not in acq_name:
if IsTrue == []:
err = "The utility function " \
"{} has not been implemented, " \
"please choose one of ucb, ei, or poi.".format(acq_name)
raise NotImplementedError(err)
else:
self.acq_name = acq_name
self.dim=acq['dim']
if 'scalebounds' not in acq:
self.scalebounds=[0,1]*self.dim
else:
self.scalebounds=acq['scalebounds']
self.initialized_flag=0
self.objects=[]
def acq_kind(self, x, gp, y_max):
#print self.kind
if np.any(np.isnan(x)):
return 0
if self.acq_name == 'ei':
return self._ei(x, gp, y_max)
if self.acq_name == 'ei_regularizerH':
bound=np.array(self.scalebounds).reshape(self.dim,-1)
length=bound[:,1]-bound[:,0]
x_bar=bound[:,0]+length/2
return self._ei_regularizerH(x, gp, y_max,x_bar=x_bar,R=0.5)
@staticmethod
def _ei(x, gp, y_max):
"""
Calculates the EI acquisition function values
Inputs: gp: The Gaussian process, also contains all data
y_max: The maxima of the found y values
x: The point at which to evaluate the acquisition function
Output: acq_value: The value of the acquisition function at point x
"""
y_max=np.asscalar(y_max)
mean, var = gp.predict(x, eval_MSE=True)
var2 = np.maximum(var, 1e-8 + 0 * var)
z = (mean - y_max)/np.sqrt(var2)
out=(mean - y_max) * norm.cdf(z) + np.sqrt(var2) * norm.pdf(z)
out[var2<1e-8]=0
return out
def _ei_regularizerH(self,x, gp, y_max,x_bar,R=0.5):
"""
Calculates the EI acquisition function values with a hinge regulariser
Inputs: gp: The Gaussian process, also contains all data
y_max: The maxima of the found y values
x: The point at which to evaluate the acquisition function
x_bar: Centroid for the regulariser
R: Radius for the regulariser
Output: acq_value: The value of the acquisition function at point x
"""
mean, var = gp.predict(x, eval_MSE=True)
extended_bound=np.array(self.scalebounds).copy()
extended_bound=extended_bound.reshape(self.dim,-1)
extended_bound[:,0]=extended_bound[:,0]-2
extended_bound[:,1]=extended_bound[:,1]+2
for d in range(0,self.dim):
extended_bound[d,0]=max(extended_bound[d,0],0)
extended_bound[d,1]=min(extended_bound[d,1],max_bound_size)
#compute regularizer xi
dist= np.linalg.norm(x - x_bar)
if dist>R:
xi=dist/R-1
else:
xi=0
if gp.nGP==0:
var = np.maximum(var, 1e-9 + 0 * var)
z = (mean - y_max-y_max*xi)/np.sqrt(var)
out=(mean - y_max-y_max*xi) * norm.cdf(z) + np.sqrt(var) * norm.pdf(z)
return out
else:
z=[None]*gp.nGP
out=[None]*gp.nGP
# Avoid points with zero variance
for idx in range(gp.nGP):
var[idx] = np.maximum(var[idx], 1e-9 + 0 * var[idx])
z[idx] = (mean[idx] - y_max-y_max*xi)/np.sqrt(var[idx])
out[idx]=(mean[idx] - y_max-y_max*xi) * norm.cdf(z[idx]) + np.sqrt(var[idx]) * norm.pdf(z[idx])
if len(x)==1000:
return out
else:
return np.mean(out)# get mean over acquisition functions
#return np.prod(out,axis=0) # unreachable alternative: product over acquisition functions
class ThompsonSampling(object):
"""
Class used for calculating Thompson samples. Re-usable calculations are
done in __init__ to reduce computational cost.
"""
#Calculates the Thompson sample parameters
def __init__(self,gp,seed=False):
var_mag=1
ls_mag=1
if seed!=False:
np.random.seed(seed)
dim=gp.X.shape[1]
# used for Thompson Sampling
self.WW_dim=200 # dimension of random feature
self.WW=np.random.multivariate_normal([0]*self.WW_dim,np.eye(self.WW_dim),dim)/(gp.lengthscale*ls_mag)
self.bias=np.random.uniform(0,2*3.14,self.WW_dim)
# computing Phi(X)^T=[phi(x_1)....phi(x_n)]
Phi_X=np.sqrt(2.0/self.WW_dim)*np.hstack([np.sin(np.dot(gp.X,self.WW)+self.bias), np.cos(np.dot(gp.X,self.WW)+self.bias)]) # [N x M]
# computing A^-1
A=np.dot(Phi_X.T,Phi_X)+np.eye(2*self.WW_dim)*gp.noise_delta*var_mag
gx=np.dot(Phi_X.T,gp.Y)
self.mean_theta_TS=np.linalg.solve(A,gx)
#Calculates the thompson sample value at the point x
def __call__(self,x,gp):
phi_x=np.sqrt(2.0/self.WW_dim)*np.hstack([np.sin(np.dot(x,self.WW)+self.bias), np.cos(np.dot(x,self.WW)+self.bias)])
# compute the TS value
gx=np.dot(phi_x,self.mean_theta_TS)
return gx
def unique_rows(a):
"""
A function to trim repeated rows that may appear when optimizing.
This is necessary to prevent the sklearn GP object from breaking
:param a: array to trim repeated rows from
:return: mask of unique rows
"""
# Sort array and keep track of where things should go back to
order = np.lexsort(a.T)
reorder = np.argsort(order)
a = a[order]
diff = np.diff(a, axis=0)
ui = np.ones(len(a), 'bool')
ui[1:] = (diff != 0).any(axis=1)
return ui[reorder]
class BColours(object):
BLUE = '\033[94m'
CYAN = '\033[36m'
GREEN = '\033[32m'
MAGENTA = '\033[35m'
RED = '\033[31m'
ENDC = '\033[0m'
``` |
{
"source": "jmaberk/RGPUCB",
"score": 2
} |
#### File: RGP-UCB/prada_bayes_opt/acquisition_maximization.py
```python
from __future__ import division
import itertools
import numpy as np
from scipy.optimize import minimize
from scipy.optimize import fmin_bfgs
from scipy.optimize import fmin_l_bfgs_b
from sklearn.metrics.pairwise import euclidean_distances
from prada_bayes_opt.acquisition_functions import AcquisitionFunction, unique_rows
from scipy.optimize import fmin_cobyla
import random
import time
__author__ = 'Vu'
def acq_max(ac, gp, y_max, bounds, opt_toolbox='scipy',seeds=[]):
"""
A function to find the maximum of the acquisition function using
scipy
Input Parameters
----------
ac: The acquisition function object that returns its point-wise value.
gp: A gaussian process fitted to the relevant data.
y_max: The current maximum known value of the target function.
bounds: The variables bounds to limit the search of the acq max.
Returns
-------
x_max, The arg max of the acquisition function.
"""
if opt_toolbox=='scipy':
x_max = acq_max_scipy(ac=ac,gp=gp,y_max=y_max,bounds=bounds)
return x_max
def acq_max_scipy(ac, gp, y_max, bounds):
"""
A function to find the maximum of the acquisition function using
scipy
Input Parameters
----------
ac: The acquisition function object that returns its point-wise value.
gp: A gaussian process fitted to the relevant data.
y_max: The current maximum known value of the target function.
bounds: The variables bounds to limit the search of the acq max.
Returns
-------
x_max, The arg max of the acquisition function.
"""
dim=bounds.shape[0]
# Start with the lower bound as the argmax
x_max = bounds[:, 0]
max_acq = None
myopts ={'maxiter':5*dim,'maxfun':10*dim}
# multi start
for i in range(2*dim):
# Find the minimum of minus the acquisition function
x_tries = np.random.uniform(bounds[:, 0], bounds[:, 1],size=(20*dim, dim))
# evaluate
start_eval=time.time()
y_tries=ac(x_tries,gp=gp, y_max=y_max)
end_eval=time.time()
#print "elapse evaluate={:.5f}".format(end_eval-start_eval)
#find x optimal for init
idx_max=np.argmax(y_tries)
#print "max y_tries {:.5f} y_max={:.3f}".format(np.max(y_tries),y_max)
x_init_max=x_tries[idx_max]
start_opt=time.time()
res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),x_init_max.reshape(1, -1),bounds=bounds,
method="L-BFGS-B",options=myopts)#L-BFGS-B
if 'x' not in res:
val=ac(res,gp,y_max)
else:
val=ac(res.x,gp,y_max)
end_opt=time.time()
#print "elapse optimize={:.5f}".format(end_opt-start_opt)
# Store it if better than previous minimum(maximum).
if max_acq is None or val >= max_acq:
if 'x' not in res:
x_max = res
else:
x_max = res.x
max_acq = val
#print max_acq
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
#return np.clip(x_max[0], bounds[:, 0], bounds[:, 1])
#print max_acq
return np.clip(x_max, bounds[:, 0], bounds[:, 1])
# COBYLA -> x_max[0]
# L-BFGS-B -> x_max
def acq_max_thompson(gp, y_max, bounds):
"""
A function to find the maximum of a Thompson sample drawn from the GP,
using scipy
Input Parameters
----------
gp: A gaussian process fitted to the relevant data.
y_max: The current maximum known value of the target function.
bounds: The variables bounds to limit the search of the acq max.
Returns
-------
x_max, The arg max of the acquisition function.
"""
dim=bounds.shape[0]
# Start with the lower bound as the argmax
x_max = bounds[:, 0]
max_acq = None
myopts ={'maxiter':5*dim,'maxfun':10*dim}
# multi start
for i in range(2*dim):
# Find the minimum of minus the acquisition function
TS=AcquisitionFunction.ThompsonSampling(gp,seed=False)
x_tries = np.random.uniform(bounds[:, 0], bounds[:, 1],size=(20*dim, dim))
# evaluate
start_eval=time.time()
y_tries=TS(x_tries,gp=gp)
end_eval=time.time()
#print "elapse evaluate={:.5f}".format(end_eval-start_eval)
#find x optimal for init
idx_max=np.argmax(y_tries)
#print "max y_tries {:.5f} y_max={:.3f}".format(np.max(y_tries),y_max)
x_init_max=x_tries[idx_max]
start_opt=time.time()
res = minimize(lambda x: -TS(x.reshape(1, -1), gp=gp),x_init_max.reshape(1, -1),bounds=bounds,
method="L-BFGS-B",options=myopts)#L-BFGS-B
if 'x' not in res:
val=TS(res,gp)
else:
val=TS(res.x,gp)
end_opt=time.time()
#print "elapse optimize={:.5f}".format(end_opt-start_opt)
# Store it if better than previous minimum(maximum).
if max_acq is None or val >= max_acq:
if 'x' not in res:
x_max = res
else:
x_max = res.x
max_acq = val
#print max_acq
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
#return np.clip(x_max[0], bounds[:, 0], bounds[:, 1])
#print max_acq
return np.clip(x_max, bounds[:, 0], bounds[:, 1])
# COBYLA -> x_max[0]
# L-BFGS-B -> x_max
def acq_max_global(ac, gp, bounds):
"""
A function to find the maximum of the acquisition function using
scipy
Input Parameters
----------
ac: The acquisition function object that returns its point-wise value.
gp: A gaussian process fitted to the relevant data.
bounds: The variables bounds to limit the search of the acq max.
Returns
-------
x_max, y_max: The arg max of the acquisition function and its value there.
"""
dim=bounds.shape[0]
# Start with the lower bound as the argmax
x_max = bounds[:, 0]
max_acq = None
#x_tries = np.array([ np.linspace(i,j,500) for i,j in zip( bounds[:, 0], bounds[:, 1])])
#x_tries=x_tries.T
myopts ={'maxiter':5*dim,'maxfun':10*dim}
# multi start
for i in xrange(1*dim):
# Find the minimum of minus the acquisition function
x_tries = np.random.uniform(bounds[:, 0], bounds[:, 1],size=(60*dim, dim))
# evaluate
y_tries=ac(x_tries,gp)
#print "elapse evaluate={:.5f}".format(end_eval-start_eval)
#find x optimal for init
idx_max=np.argmax(y_tries)
#print "max y_tries {:.5f} y_max={:.3f}".format(np.max(y_tries),y_max)
x_init_max=x_tries[idx_max]
res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp),x_init_max.reshape(1, -1),bounds=bounds,
method="L-BFGS-B",options=myopts)#L-BFGS-B
#res = fmin_bfgs(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),x_init_max.reshape(1, -1),disp=False)#L-BFGS-B
# value at the estimated point
#val=ac(res.x,gp,y_max)
if 'x' not in res:
val=ac(res,gp)
else:
val=ac(res.x,gp)
#print "elapse optimize={:.5f}".format(end_opt-start_opt)
# Store it if better than previous minimum(maximum).
if max_acq is None or val >= max_acq:
if 'x' not in res:
x_max = res
else:
x_max = res.x
max_acq = val
#print max_acq
if res.fun==0:
y_max=ac(x_max,gp)
else:
y_max=res.fun[0]
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
#return np.clip(x_max[0], bounds[:, 0], bounds[:, 1])
#print max_acq
return np.clip(x_max, bounds[:, 0], bounds[:, 1]), y_max
# COBYLA -> x_max[0]
# L-BFGS-B -> x_max
``` |
{
"source": "jmabry/pyaf",
"score": 2
} |
#### File: Bench/web_traffic/Bench.py
```python
import numpy as np
import pandas as pd
import pickle
import datetime
import sys
def get_bench_logger():
import logging;
logger = logging.getLogger('pyaf.bench');
if(logger.handlers == []):
import logging.config
logging.basicConfig(level=logging.INFO)
return logger;
logger = get_bench_logger()
class cProjectData:
def __init__(self):
self.mName = None
self.mAgents = set()
self.mAccess = set()
self.mArticleNames = set()
self.mArticleInfo = {}
self.mVisitsDF = pd.DataFrame()
def set_date(self, date_var):
self.mVisitsDF['Date'] = date_var
def add_article(self, article_id, full_name, name, project, access, agent, article_series):
if(np.random.random_sample() > 10.01):
return
self.mAccess.add(access)
self.mAgents.add(agent)
self.mArticleNames.add(name)
self.mArticleInfo[article_id] = (article_id, full_name, name, project, access, agent)
self.mVisitsDF[article_id] = article_series
def dump(self):
logger.info("PROJECT_DUMP_START " + str(self.mName))
logger.info("PROJECT_DUMP_AGENTS " +str(self.mAgents))
logger.info("PROJECT_DUMP_ACCESS " + str(self.mAccess))
logger.info("PROJECT_DUMP_NUMBER_OF_ARTICLES " + str(len(self.mArticleInfo)))
lIds = list(self.mArticleInfo.keys())
lArticles = lIds[0:5] + lIds[-5:]
logger.info("PROJECT_DUMP_ARTICLE_NAMES " + str([( k , self.mArticleInfo[k][2]) for k in lArticles]))
logger.info("PROJECT_DUMP_ARTICLE_PROJECTS" + str([( k , self.mArticleInfo[k][3]) for k in lArticles]))
df = self.mVisitsDF[['Date'] + lArticles]
print(df.info())
print(df.describe())
print(df.head())
print(df.tail())
logger.info("PROJECT_DUMP_END " + self.mName)
sys.stdout.flush()
class cDataExtractor:
def __init__(self):
self.mProjectDataByName = None
def read_projects_data(self, filename):
self.mOriginalData = pd.read_csv(filename)
self.mOriginalDataTransposed = self.mOriginalData.drop('Page' , axis=1).transpose()
self.mOriginalDataTransposed = self.mOriginalDataTransposed.reset_index()
self.mOriginalDataTransposed.sort_values(by='index', inplace=True)
self.mDate_var = self.mOriginalDataTransposed['index'].apply(lambda x : datetime.datetime.strptime(x, "%Y-%m-%d"))
self.parse_project_data()
for (name, project) in self.mProjectDataByName.items():
project.dump()
pass
def parse_project_data(self):
pages_dict = self.mOriginalData['Page'].to_dict()
self.mProjectDataByName = {}
for (k,v) in pages_dict.items():
series = self.mOriginalDataTransposed[k]
words = v.split('_')
(name, project, access, agent) = ("_".join(words[:-3]) , words[-3] , words[-2] , words[-1])
lProjectData = self.mProjectDataByName.get(project)
if(lProjectData is None):
lProjectData = cProjectData()
lProjectData.set_date(self.mDate_var)
lProjectData.mName = project
self.mProjectDataByName[project] = lProjectData
lProjectData.add_article(k, v, name, project, access, agent, series)
def save_project_data(self , dest_dir):
for (k,v) in self.mProjectDataByName.items():
output = open(dest_dir + "/" + k + '.pkl', 'wb')
pickle.dump(v, output)
output.close()
```
#### File: pyaf/TS/Perf.py
```python
import pandas as pd
import numpy as np
from . import Utils as tsutil
class cPerf:
def __init__(self):
self.mErrorStdDev = None;
self.mErrorMean = None;
self.mMAE = None;
self.mMAPE = None;
self.mSMAPE = None;
self.mMASE = None;
self.mL1 = None;
self.mL2 = None;
self.mR2 = None;
self.mPearsonR = None;
self.mCount = None;
self.mName = "No_Name";
self.mDebug = False;
def check_not_nan(self, sig , name):
#print("check_not_nan");
if(np.isnan(sig).any()):
logger = tsutil.get_pyaf_logger();
logger.error("PERF_WITH_NAN_IN_SIGNAL" + str(sig));
raise tsutil.Internal_PyAF_Error("INVALID_COLUMN _FOR_PERF ['" + self.mName + "'] '" + name + "'");
pass
def compute_MAPE_SMAPE_MASE(self, signal , estimator):
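# Metrics computed below, with e = estimator - signal and a small eps for
# numerical safety (stated here as the code computes them, not as an external
# reference definition):
#   MAPE  = mean(|e| / (|signal| + eps))
#   SMAPE = mean(2*|e| / (|signal| + |estimator| + eps))
#   MASE  = mean(|e|) / (mean(|signal_t - signal_{t-1}|) + eps)
# Each value is rounded to 4 decimal places.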
self.mMAPE = None;
self.mSMAPE = None;
self.mMASE = None;
if(signal.shape[0] > 0):
lEps = 1.0e-10;
abs_error = np.abs(estimator.values - signal.values);
sum_abs = np.abs(signal.values) + np.abs(estimator.values) + lEps
abs_rel_error = abs_error / (np.abs(signal) + lEps)
signal_diff = signal - signal.shift(1)
self.mMAPE = np.mean(abs_rel_error)
self.mSMAPE = np.mean(2.0 * abs_error / sum_abs)
if(signal_diff.shape[0] > 1):
mean_dev_signal = np.mean(abs(signal_diff.values[1:])) + lEps;
self.mMASE = np.mean(abs_error / mean_dev_signal)
self.mMASE = round( self.mMASE , 4 )
self.mMAPE = round( self.mMAPE , 4 )
self.mSMAPE = round( self.mSMAPE , 4 )
def compute_R2(self, signal , estimator):
SST = np.sum((signal.values - np.mean(signal.values))**2) + 1.0e-10;
SSRes = np.sum((signal.values - estimator.values)**2)
R2 = 1 - SSRes/SST
return R2
def dump_perf_data(self, signal , estimator):
logger = tsutil.get_pyaf_logger();
df = pd.DataFrame();
df['sig'] = signal.values;
df['est'] = estimator.values;
logger.debug(str(df.head()));
logger.debug(str(df.tail()));
def compute(self, signal , estimator, name):
try:
# self.dump_perf_data(signal, estimator);
return self.real_compute(signal, estimator, name);
except:
self.dump_perf_data(signal, estimator);
logger = tsutil.get_pyaf_logger();
logger.error("Failure when computing perf ['" + self.mName + "'] '" + name + "'");
raise tsutil.Internal_PyAF_Error("Failure when computing perf ['" + self.mName + "'] '" + name + "'");
pass
def compute_pearson_r(self, signal , estimator):
from scipy.stats import pearsonr
# print("PEARSONR_DETAIL1" , signal_std, estimator_std)
# print("PEARSONR_DETAIL2" , signal)
# print("PEARSONR_DETAIL3" , estimator)
signal_std = np.std(signal);
estimator_std = np.std(estimator);
r = 0.0;
if((signal_std > 0.0) and (estimator_std > 0.0) and (signal.shape[0] > 30)):
(r , pval) = pearsonr(signal.values , estimator.values)
return r;
def real_compute(self, signal , estimator, name):
self.mName = name;
if(self.mDebug):
self.check_not_nan(signal.values , "signal")
self.check_not_nan(estimator.values , "estimator")
signal_std = np.std(signal);
estimator_std = np.std(estimator);
self.compute_MAPE_SMAPE_MASE(signal, estimator);
myerror = (estimator.values - signal.values);
abs_error = abs(myerror)
self.mErrorMean = np.mean(myerror)
self.mErrorStdDev = np.std(myerror)
self.mMAE = np.mean(abs_error)
self.mL1 = np.mean(abs_error)
self.mL2 = np.sqrt(np.mean(abs_error ** 2))
self.mCount = signal.shape[0];
self.mR2 = self.compute_R2(signal, estimator)
self.mPearsonR = self.compute_pearson_r(signal , estimator);
def computeCriterion(self, signal , estimator, criterion, name):
self.mName = name;
self.mCount = signal.shape[0];
if(criterion == "L1"):
myerror = (estimator.values - signal.values);
abs_error = abs(myerror)
self.mL1 = np.mean(abs_error)
return self.mL1;
if(criterion == "L2"):
myerror = (estimator.values - signal.values);
self.mL2 = np.sqrt(np.mean(myerror ** 2))
return self.mL2;
if(criterion == "R2"):
self.mR2 = self.compute_R2(signal, estimator)
return self.mR2;
if(criterion == "PEARSONR"):
self.mPearsonR = self.compute_pearson_r(signal , estimator)
return self.mPearsonR;
if(criterion == "MAE"):
myerror = (estimator.values - signal.values);
abs_error = abs(myerror)
self.mMAE = np.mean(abs_error)
return self.mMAE;
if(criterion == "MAPE"):
self.compute_MAPE_SMAPE_MASE(signal , estimator);
return self.mMAPE;
if(criterion == "SMAPE"):
self.compute_MAPE_SMAPE_MASE(signal , estimator);
return self.mSMAPE;
if(criterion == "MASE"):
self.compute_MAPE_SMAPE_MASE(signal , estimator);
return self.mMASE;
if(criterion == "COUNT"):
return self.mCount;
assert(0)
return 0.0;
def getCriterionValue(self, criterion):
if(criterion == "L1"):
return self.mL1;
if(criterion == "L2"):
return self.mL2;
if(criterion == "R2"):
return self.mR2;
if(criterion == "PEARSONR"):
return self.mPearsonR;
if(criterion == "MAE"):
return self.mMAE;
if(criterion == "SMAPE"):
return self.mSMAPE;
if(criterion == "MAPE"):
return self.mMAPE;
if(criterion == "MASE"):
return self.mMASE;
if(criterion == "COUNT"):
return self.mCount;
assert(0)
return 0.0;
#def date_to_number(x):
# return date(int(x) , int(12 * (x - int(x) + 0.01)) + 1 , 1)
```
#### File: pyaf/TS/Plots.py
```python
import pandas as pd
import numpy as np
from io import BytesIO
import base64
SIGNAL_COLOR='green'
FORECAST_COLOR='blue'
RESIDUE_COLOR='red'
COMPONENT_COLOR='navy'
SHADED_COLOR='turquoise'
UPPER_COLOR='grey'
LOWER_COLOR='black'
def decomp_plot(df, time, signal, estimator, residue, name = None, format='png', max_length = 1000) :
assert(df.shape[0] > 0)
assert(df.shape[1] > 0)
assert(time in df.columns)
assert(signal in df.columns)
assert(estimator in df.columns)
assert(residue in df.columns)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
df1 = df.tail(max_length);
fig, axs = plt.subplots(ncols=2, figsize=(32, 16))
lColor = COMPONENT_COLOR;
if(name is not None and name.endswith("Forecast")):
lColor = FORECAST_COLOR;
df1.plot.line(time, [signal, estimator, residue],
color=[SIGNAL_COLOR, lColor, RESIDUE_COLOR],
ax=axs[0] , grid = True)
residues = df1[residue].values
import scipy.stats as scistats
resid = residues[~np.isnan(residues)]
scistats.probplot(resid, dist="norm", plot=axs[1])
if(name is not None):
fig.savefig(name + '_decomp_output.' + format)
plt.close(fig)
def decomp_plot_as_png_base64(df, time, signal, estimator, residue, name = None, max_length = 1000) :
assert(df.shape[0] > 0)
assert(df.shape[1] > 0)
assert(time in df.columns)
assert(signal in df.columns)
assert(estimator in df.columns)
assert(residue in df.columns)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
df1 = df.tail(max_length);
fig, axs = plt.subplots(ncols=2, figsize=(16, 8))
lColor = COMPONENT_COLOR;
if(name is not None and name.endswith("Forecast")):
lColor = FORECAST_COLOR;
df1.plot.line(time, [signal, estimator, residue],
color=[SIGNAL_COLOR, lColor, RESIDUE_COLOR],
ax=axs[0] , grid = True)
residues = df1[residue].values
import scipy.stats as scistats
resid = residues[~np.isnan(residues)]
scistats.probplot(resid, dist="norm", plot=axs[1])
figfile = BytesIO()
fig.savefig(figfile, format='png')
figfile.seek(0) # rewind to beginning of file
figdata_png = base64.b64encode(figfile.getvalue())
plt.close(fig)
return figdata_png.decode('utf8')
def prediction_interval_plot(df, time, signal, estimator, lower, upper, name = None, format='png', max_length = 1000) :
assert(df.shape[0] > 0)
assert(df.shape[1] > 0)
assert(time in df.columns)
assert(signal in df.columns)
assert(estimator in df.columns)
assert(lower in df.columns)
assert(upper in df.columns)
df1 = df.tail(max_length).copy();
lMin = np.mean(df1[signal]) - np.std(df1[signal]) * 3;
lMax = np.mean(df1[signal]) + np.std(df1[signal]) * 3;
df1[lower] = df1[lower].apply(lambda x : x if (np.isnan(x) or x >= lMin) else np.nan);
df1[upper] = df1[upper].apply(lambda x : x if (np.isnan(x) or x <= lMax) else np.nan);
# last value of the signal
lLastSignalPos = df1[signal].dropna().tail(1).index[0];
lEstimatorValue = df1[estimator][lLastSignalPos];
df1.loc[lLastSignalPos , lower] = lEstimatorValue;
df1.loc[lLastSignalPos , upper] = lEstimatorValue;
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig, axs = plt.subplots(ncols=1, figsize=(16, 8))
df1.plot.line(time, [signal, estimator, lower, upper],
color=[SIGNAL_COLOR, FORECAST_COLOR, LOWER_COLOR, UPPER_COLOR],
ax=axs, grid = True)
x = df1[time];
type1 = np.dtype(x)
if(type1.kind == 'M'):
x = x.apply(lambda t : t.date());
axs.fill_between(x.values, df1[lower], df1[upper], color=SHADED_COLOR, alpha=.2)
if(name is not None):
fig.savefig(name + '_prediction_intervals_output.' + format)
plt.close(fig)
def prediction_interval_plot_as_png_base64(df, time, signal, estimator, lower, upper, name = None, max_length = 1000) :
assert(df.shape[0] > 0)
assert(df.shape[1] > 0)
assert(time in df.columns)
assert(signal in df.columns)
assert(estimator in df.columns)
assert(lower in df.columns)
assert(upper in df.columns)
df1 = df.tail(max_length).copy();
lMin = np.mean(df1[signal]) - np.std(df1[signal]) * 3;
lMax = np.mean(df1[signal]) + np.std(df1[signal]) * 3;
df1[lower] = df1[lower].apply(lambda x : x if (np.isnan(x) or x >= lMin) else np.nan);
df1[upper] = df1[upper].apply(lambda x : x if (np.isnan(x) or x <= lMax) else np.nan);
# last value of the signal
lLastSignalPos = df1[signal].dropna().tail(1).index[0];
lEstimatorValue = df1[estimator][lLastSignalPos];
df1.loc[lLastSignalPos , lower] = lEstimatorValue;
df1.loc[lLastSignalPos , upper] = lEstimatorValue;
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig, axs = plt.subplots(ncols=1, figsize=(16, 8))
df1.plot.line(time, [signal, estimator, lower, upper],
color=[SIGNAL_COLOR, FORECAST_COLOR, FORECAST_COLOR, FORECAST_COLOR],
ax=axs, grid = True)
x = df1[time];
type1 = np.dtype(x)
if(type1.kind == 'M'):
x = x.apply(lambda t : t.date());
axs.fill_between(x.values, df1[lower], df1[upper], color=SHADED_COLOR, alpha=.5)
figfile = BytesIO()
fig.savefig(figfile, format='png')
plt.close(fig)
figfile.seek(0) # rewind to beginning of file
figdata_png = base64.b64encode(figfile.getvalue())
return figdata_png.decode('utf8')
def qqplot_residues(df , residue):
pass
def build_record_label(labels_list):
out = "<f0>" + str(labels_list[0]);
i = 1;
for l in labels_list[1:]:
out = out + " | <f" + str(i) + "> " + str(l) ;
i = i + 1;
return out + "";
def plot_hierarchy(structure , iAnnotations, name):
import pydot
graph = pydot.Dot(graph_type='graph', rankdir='LR', fontsize="12.0");
graph.set_node_defaults(shape='record')
lLevelsReversed = sorted(structure.keys(), reverse=True);
for level in lLevelsReversed:
color = '#%02x%02x%02x' % (255, 255, 127 + int(128 * (1.0 - (level + 1.0) / len(lLevelsReversed))));
for col in structure[level].keys():
lLabel = col if iAnnotations is None else str(iAnnotations[col]);
if iAnnotations is not None:
lLabel = build_record_label(iAnnotations[col]);
node_col = pydot.Node(col, label=lLabel, style="filled", fillcolor=color, fontsize="12.0")
graph.add_node(node_col);
for col1 in structure[level][col]:
lLabel1 = col1
if iAnnotations is not None:
lLabel1 = build_record_label(iAnnotations[col1]);
color1 = '#%02x%02x%02x' % (255, 255, 128 + int(128 * (1.0 - (level + 2.0) / len(lLevelsReversed))));
node_col1 = pydot.Node(col1, label=lLabel1, style="filled",
fillcolor=color1, fontsize="12.0")
graph.add_node(node_col1);
lEdgeLabel = "";
if iAnnotations is not None:
lEdgeLabel = iAnnotations[col + "_" + col1];
lEdge = pydot.Edge(node_col, node_col1, color="red", label=lEdgeLabel, fontsize="12.0")
graph.add_edge(lEdge)
# print(graph.obj_dict)
if(name is not None):
graph.write_png(name);
else:
from IPython.display import Image, display
plot1 = Image(graph.create_png())
display(plot1)
```
#### File: pyaf/TS/PredictionIntervals.py
```python
import pandas as pd
import numpy as np
from . import SignalDecomposition as sigdec
from . import Perf as tsperf
from . import Utils as tsutil
class cPredictionIntervalsEstimator:
def __init__(self):
self.mModel = None;
self.mSignalFrame = pd.DataFrame()
self.mHorizon = -1;
self.mFitPerformances = {}
self.mForecastPerformances = {}
self.mTestPerformances = {}
def computePerformances(self):
self.mTime = self.mModel.mTime;
self.mSignal = self.mModel.mOriginalSignal;
self.mHorizon = self.mModel.mTimeInfo.mHorizon;
lTimeColumn = self.mTime;
lSignalColumn = self.mSignal;
lForecastColumn = str(self.mSignal) + "_Forecast";
df = self.mModel.mTrend.mSignalFrame.reset_index();
N = df.shape[0];
(lOriginalFit, lOriginalForecast, lOriginalTest) = self.mModel.mTimeInfo.cutFrame(df);
df1 = df;
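# Recursive multi-horizon evaluation: for each horizon h the model is applied
# one step ahead, its forecast column is fed back in as the signal for the
# next step, and fit / forecast / test performances are recorded per horizon.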
for h in range(0 , self.mHorizon):
df2 = None;
df2 = self.mModel.forecastOneStepAhead(df1, perf_mode = True);
df2 = df2.head(N);
lHorizonName = lForecastColumn + "_" + str(h + 1);
(lFrameFit, lFrameForecast, lFrameTest) = self.mModel.mTimeInfo.cutFrame(df2);
self.mFitPerformances[lHorizonName] = tsperf.cPerf();
self.mFitPerformances[lHorizonName].compute(lOriginalFit[lSignalColumn], lFrameFit[lForecastColumn], lHorizonName);
self.mForecastPerformances[lHorizonName] = tsperf.cPerf();
self.mForecastPerformances[lHorizonName].compute(lOriginalForecast[lSignalColumn], lFrameForecast[lForecastColumn], lHorizonName);
self.mTestPerformances[lHorizonName] = tsperf.cPerf();
self.mTestPerformances[lHorizonName].compute(lOriginalTest[lSignalColumn], lFrameTest[lForecastColumn], lHorizonName);
df1 = df2[[lTimeColumn , lForecastColumn]];
df1.columns = [lTimeColumn , lSignalColumn]
# self.dump_detailed();
def dump_detailed(self):
logger = tsutil.get_pyaf_logger();
lForecastColumn = str(self.mSignal) + "_Forecast";
for h in range(0 , self.mHorizon):
lHorizonName = lForecastColumn + "_" + str(h + 1);
hn = lHorizonName;
logger.info("CONFIDENCE_INTERVAL_DUMP_FIT " +str(hn) + " " + str(self.mFitPerformances[hn].mL2) + " " + str(self.mFitPerformances[hn].mMAPE));
logger.info("CONFIDENCE_INTERVAL_DUMP_FORECAST " +str(hn) + " " + str(self.mForecastPerformances[hn].mL2) + " " + str(self.mForecastPerformances[hn].mMAPE));
logger.info("CONFIDENCE_INTERVAL_DUMP_TEST " +str(hn) + " " + str(self.mTestPerformances[hn].mL2) + " " + str(self.mTestPerformances[hn].mMAPE));
def dump(self):
logger = tsutil.get_pyaf_logger();
lForecastColumn = str(self.mSignal) + "_Forecast";
for h in range(0 , self.mHorizon):
lHorizonName = lForecastColumn + "_" + str(h + 1);
hn = lHorizonName;
logger.info("CONFIDENCE_INTERVAL_DUMP_FORECAST " + str(hn) + " " + str(self.mForecastPerformances[hn].mL2));
```
#### File: bugs/issue_29/test_mem_1.py
```python
from __future__ import absolute_import
import pandas as pd
import numpy as np
def display_used_mem():
print("DISPLAY_USED_MEM_START");
import gc
gc.collect()
import objgraph
objgraph.show_most_common_types(limit=20)
print("DISPLAY_USED_MEM_END");
# import '.' as pyaf_new_name
# pyaf=pyaf_new_name
# from pyaf
display_used_mem();
import pyaf.Bench.TS_datasets as tsds
#get_ipython().magic('matplotlib inline')
display_used_mem();
b1 = tsds.load_ozone()
df = b1.mPastData
display_used_mem();
df.info();
display_used_mem();
```
#### File: tests/bugs/test_artificial_bug_2.py
```python
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
import warnings
def process_dataset(idataset):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("error")
N = idataset.mFullDataset.shape[0];
lSignalVar = idataset.mSignalVar;
H = 2;
idataset.mPastData = idataset.mFullDataset[:-H];
idataset.mFutureData = idataset.mFullDataset.tail(H);
training_ds = idataset.mPastData
# #df.to_csv("outputs/rand_exogenous.csv")
# N = df.shape[0];
# df1 = df;
lEngine = autof.cForecastEngine()
# lEngine.mOptions.mEnableSeasonals = False;
# lEngine.mOptions.mDebugCycles = True;
# lEngine.mOptions.enable_slow_mode();
# mDebugProfile = True;
# lEngine
lExogenousData = (idataset.mExogenousDataFrame , idataset.mExogenousVariables)
lEngine.train(training_ds , idataset.mTimeVar , lSignalVar, H, lExogenousData);
lEngine.getModelInfo();
# lEngine.standardPlots(name = "outputs/my_exog_" + str(nbex) + "_" + str(n));
# lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
dfapp_in = training_ds.copy();
dfapp_in.tail()
dfapp_out = lEngine.forecast(dfapp_in, H);
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[idataset.mTimeVar , lSignalVar, lSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H).values);
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
with warnings.catch_warnings():
warnings.simplefilter("error")
dataset = tsds.generate_random_TS(N = 40 , FREQ = 'D', seed = 0, trendtype = "linear", cycle_length = 4, transform = "exp", sigma = 2.0, exog_count = 0);
process_dataset(dataset);
```
#### File: tests/exog/gen_all.py
```python
import os
def createDirIfNeeded(dirname):
try:
os.mkdir(dirname);
except:
pass
createDirIfNeeded("tests/exog/random");
N = 600
for n in [600, 300, 150, 75, 32, 16, 8]:
for nbex in [10 , 20 , 40 , 80, 160, 320, 640, 1280 , 2000]:
filename= "tests/exog/random/random_exog_" + str(n) + "_" + str(nbex) + ".py";
file = open(filename, "w");
print("WRTITING_FILE" , filename);
file.write("import pyaf.tests.exog.test_random_exogenous as testrandexog\n");
file.write("\n\ntestrandexog.test_random_exogenous( " + str(n) + "," + str(nbex) + ");");
file.close();
```
#### File: tests/heroku/build_generic_heroku_model.py
```python
def build_model(iDict):
import pyaf.WS.WS_Backend as be
lModel = be.cWSModel();
lModel.from_dict(iDict);
lEngine = lModel.mForecastEngine
df = lModel.mTrainDataFrame.copy();
H = lModel.mHorizon
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
dfapp_in = df.copy();
dfapp_in.tail()
dfapp_out = lEngine.forecast(dfapp_in, H);
# dfapp_out.to_csv("outputs/ozone_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[lModel.mTimeVar , lModel.mSignalVar, lModel.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H));
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
return lModel;
```
#### File: tests/hierarchical/test_hier_prototyping_2.py
```python
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
import datetime
#get_ipython().magic('matplotlib inline')
def read_dataset():
trainfile = "data/Hierarchical/hts_dataset.csv"
lDateColumn = 'Date'
df = pd.read_csv(trainfile, sep=r',', engine='python', skiprows=0);
df[lDateColumn] = df[lDateColumn].apply(lambda x : datetime.datetime.strptime(x, "%Y-%m-%d"))
print(df.tail(10))
# df[:-10].tail()
# df[:-10:-1]
print(df.info())
print(df.describe())
return df;
def define_hierarchy_info():
rows_list = [];
# Sydney NSW Melbourne VIC BrisbaneGC QLD Capitals Other
rows_list.append(['Sydney' , 'NSW_State' , 'Australia']);
rows_list.append(['NSW' , 'NSW_State' , 'Australia']);
# rows_list.append(['Melbourne' , 'VIC_State' , 'Australia']);
# rows_list.append(['VIC' , 'VIC_State' , 'Australia']);
# rows_list.append(['BrisbaneGC' , 'QLD_State' , 'Australia']);
# rows_list.append(['QLD' , 'QLD_State' , 'Australia']);
rows_list.append(['Capitals' , 'Other_State' , 'Australia']);
rows_list.append(['Other' , 'Other_State' , 'Australia']);
lLevels = ['City' , 'State' , 'Country'];
lHierarchyInfo = {};
lHierarchyInfo['Levels'] = lLevels;
lHierarchyInfo['Data'] = pd.DataFrame(rows_list, columns = lLevels);
print(lHierarchyInfo['Data'].head(lHierarchyInfo['Data'].shape[0]));
return lHierarchyInfo;
def enrich_dataset(df , hier):
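# Builds the parent-level series from the hierarchy table: for every level
# above the leaves, each parent column is created as the sum of its child
# columns, so the returned frame holds one column per node in the hierarchy.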
df1 = df.copy();
lLevelCount = len(hier['Levels']);
lContent = {};
df = hier['Data'];
for level in range(lLevelCount):
lContent[level] = {};
for row in range(df.shape[0]):
for level in range(lLevelCount):
col = df[df.columns[level]][row];
if(col not in lContent[level].keys()):
lContent[level][col] = set();
if(level > 0):
col1 = df[df.columns[level - 1]][row];
lContent[level][col].add(col1);
print(lContent);
for level in range(lLevelCount):
if(level > 0):
for col in lContent[level].keys():
new_col = None;
for col1 in lContent[level][col]:
if(new_col is None):
new_col = df1[col1];
else:
new_col = new_col + df1[col1];
df1[col] = new_col;
return df1;
df = read_dataset();
hier = define_hierarchy_info();
df1 = enrich_dataset(df, hier);
print(df1.head())
lDateColumn = 'Date'
lAllLevelColumns = [col for col in df1.columns if col != lDateColumn]
print("ALL_LEVEL_COLUMNS" , lAllLevelColumns);
H = 4;
for signal in lAllLevelColumns:
lEngine = autof.cForecastEngine()
lEngine
# lEngine.mOptions.enable_slow_mode();
# lEngine.mOptions.mDebugPerformance = True;
lEngine.mOptions.set_active_autoregressions([]);
lEngine.train(df1 , lDateColumn , signal, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
#lEngine.standardPlots("outputs/hierarchical_" + signal);
dfapp_in = df1.copy();
dfapp_in.tail()
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/ozone_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[lDateColumn , signal, signal + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H));
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
```
#### File: tests/pickling/test_ozone_pickle.py
```python
from __future__ import absolute_import
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
def pickleModel(iModel):
import pickle
output = pickle.dumps(iModel)
lReloadedObject = pickle.loads(output)
output2 = pickle.dumps(lReloadedObject)
assert(iModel.to_json() == lReloadedObject.to_json())
return lReloadedObject;
b1 = tsds.load_ozone()
df = b1.mPastData
#df.tail(10)
#df[:-10].tail()
#df[:-10:-1]
#df.describe()
lEngine = autof.cForecastEngine()
lEngine
H = b1.mHorizon;
# lEngine.mOptions.enable_slow_mode();
# lEngine.mOptions.mDebugPerformance = True;
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine.standardPlots("outputs/my_ozone");
lEngine2 = pickleModel(lEngine)
dfapp_in = df.copy();
dfapp_in.tail()
#H = 12
dfapp_out = lEngine2.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/ozone_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H));
print("\n\n<ModelInfo>")
print(lEngine2.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
``` |
{
"source": "JMacalinao/solax",
"score": 2
} |
#### File: solax/tests/test_smoke.py
```python
import pytest
import solax
from solax import inverter
from tests import fixtures
@pytest.mark.asyncio
async def test_smoke(inverters_fixture):
conn, inverter_class, values = inverters_fixture
inv = inverter_class(*conn)
rt_api = solax.RealTimeAPI(inv)
parsed = await rt_api.get_data()
msg = 'data size should match expected values'
assert len(values) == len(parsed.data), msg
for sensor, value in values.items():
assert parsed.data[sensor] == value, \
f"{sensor}: expected {value} but got {parsed.data[sensor]}"
@pytest.mark.asyncio
async def test_throws_when_unable_to_parse(inverters_garbage_fixture):
conn, inverter_class = inverters_garbage_fixture
with pytest.raises(inverter.InverterError):
i = inverter_class(*conn)
await i.get_data()
def test_registry_matches_inverters_under_test():
test_inverters = {i.inverter for i in fixtures.INVERTERS_UNDER_TEST}
registry_inverters = set(inverter.REGISTRY)
assert test_inverters == registry_inverters, 'tests do not match registry'
def test_inverter_sensors_match():
test_values = (
(i.inverter, i.values)
for i
in fixtures.INVERTERS_UNDER_TEST
)
for i, expected_values in test_values:
sensor_map = i.sensor_map()
msg = f"""{sorted(sensor_map.keys())} vs
{sorted(expected_values.keys())}"""
assert len(sensor_map) == len(expected_values), msg
for name, _ in sensor_map.items():
assert name in expected_values
```
#### File: solax/tests/test_solax.py
```python
from unittest.mock import Mock
import asyncio
import pytest
import solax
@pytest.mark.asyncio
async def test_waits_when_asked(monkeypatch):
sleep_f = asyncio.Future()
sleep_f.set_result(None)
mock_sleep = Mock(return_value=sleep_f)
monkeypatch.setattr(asyncio, 'sleep', mock_sleep)
inv = Mock()
get_data_f = asyncio.Future()
get_data_f.set_result({})
inv.get_data = Mock(return_value=get_data_f)
wait_time = 2
await solax.rt_request(inv, 10, wait_time)
mock_sleep.assert_called_once_with(wait_time)
inv.get_data.assert_called_once()
@pytest.mark.asyncio
async def test_tries_again_on_timeout(monkeypatch):
sleep_f = asyncio.Future()
sleep_f.set_result(None)
mock_sleep = Mock(return_value=sleep_f)
monkeypatch.setattr(asyncio, 'sleep', mock_sleep)
inv = Mock()
get_data_f = asyncio.Future()
get_data_f.set_exception(asyncio.TimeoutError)
inv.get_data = Mock(return_value=get_data_f)
wait_time = 2
with pytest.raises(asyncio.TimeoutError):
await solax.rt_request(inv, 2, wait_time)
assert mock_sleep.call_count == 2
assert inv.get_data.call_count == 2
``` |
{
"source": "jmacarthur/2dgames",
"score": 3
} |
#### File: 2dgames/platformatic/main.py
```python
import pygame
from pygame.locals import *
import levels
from spritenames import spriteNames
import copy
screenwidth = 1280
def init1():
global screen, clock, background, black, titleScreen
pygame.init()
#pygame.font.init()
screen = pygame.display.set_mode((screenwidth,480))
pygame.display.set_caption('Platformatic')
clock = pygame.time.Clock()
# Background is only used during the game over screen
background = pygame.surface.Surface((screenwidth,256))
# Black is used to fade the background for the game over screen
black = background.copy()
black.fill((16,16,16))
titleScreen = pygame.image.load("titlescreen.png")
delay = 0
toCollect = 0
NONE = 0
RIGHT = 1
LEFT = 2
plan = NONE
BS = 32 # Block size (in pixels)
speed = (BS/4)
GSY= 50 # Grid size (in blocks)
GSX= 50
zoom = False
started = False
startx = 3
starty = 6
def makegrid(x,y,default=0):
v = [0]*y
for i in range(0,y):
v[i] = [default]*x
return v
def initGrids():
global blocks, behaviour, frame, maxFrame
blocks = makegrid(GSX,GSY)
behaviour = makegrid(GSX,GSY)
frame = makegrid(GSX,GSY)
maxFrame = makegrid(GSX,GSY)
def spriteNumber(name):
for (k,v) in spriteNames.iteritems():
if(v==name):
return k
return 9999
class Golem(object): # This is the standard enemy
def __init__(self,x,y):
self.x = x
self.y = y
self.w = BS
self.h = BS*2
self.left = False
self.frame = 0
def animate(self):
global blocks
self.x += -1 if self.left else 1
if(behaviour[self.y/BS][self.x/BS]==spriteNumber("reflector")):
self.left = not self.left
self.frame = (self.frame+1)%2
def overlaps(self,x,y,w,h):
tolerance = -3
if(x > self.x + self.w + tolerance): return False
if(x + w + tolerance < self.x): return False
if(y > self.y + self.h + tolerance): return False
if(y + h + tolerance < self.y): return False
return True
class GolemVert(Golem): # Vertical-moving version
def animate(self):
global blocks
self.y += -1 if self.left else 1
if(behaviour[self.y/BS][self.x/BS]==spriteNumber("reflector")):
self.left = not self.left
self.frame = (self.frame+1)%2
levelphysicals = [ None, levels.level1Physical, levels.level2Physical, levels.level3Physical, levels.level4Physical]
levelbehaviours = [ None, levels.level1Behaviour, levels.level2Behaviour, levels.level3Behaviour, levels.level4Behaviour ]
def loadLevel(levelno): # Loads a level from a text file into blocks etc
global blocks, startx, starty, behaviour, toCollect
blocks = copy.deepcopy(levelphysicals[levelno])
toCollect = 0
# Now process for special blocks
for y in range(0,GSY):
for x in range(0,GSX):
if(blocks[y][x] == spriteNumber("player")):
blocks[y][x] = 0
startx = x
starty = y
print "Found starting position in level: %d,%d"%(startx,starty)
elif(blocks[y][x] == spriteNumber("updownmonster")):
g = GolemVert(x*BS,y*BS)
actives.append(g)
blocks[y][x] = 0
elif(blocks[y][x] == spriteNumber("leftrightmonster")):
g = Golem(x*BS,y*BS)
actives.append(g)
blocks[y][x] = 0
elif(blocks[y][x] == spriteNumber("key")):
toCollect += 1
behaviour = levelbehaviours[levelno]
def touchingBlocks(x,y): # List of blocks a player at x,y touches
sgx = x/BS
egx = (x+BS-1)/BS
sgy = y/BS
egy = (y+BS*2-1)/BS
b = []
for gx in range(sgx,egx+1):
for gy in range(sgy,egy+1):
b.append((gx,gy))
return b
def canEnter(x,y): # True if the player can exist at (x,y) without being inside scenery
global blocks
if(x<0 or y<0): return False
for (gx,gy) in touchingBlocks(x,y):
if(flags[blocks[gy][gx]] & SOLID):
return False
return True
def isDeadly(x,y):
global blocks
if(x<0 or y<0): return False
for (gx,gy) in touchingBlocks(x,y):
if(flags[blocks[gy][gx]] & DEADLY):
return True
for a in actives:
if(a.overlaps(x,y,BS,BS*2)):
return True
return False
def checkCollect(x,y):
global blocks, collections
if(x<0 or y<0): return False
for (gx,gy) in touchingBlocks(x,y):
if(blocks[gy][gx] == spriteNumber("key")): # TODO
blocks[gy][gx] = 0
collections += 1
def nextLevel():
global level, directions, state
level += 1
if(level>4): level=1
perLifeInit()
directions = makegrid(GSX,GSY)
state = WINPAGE
def checkExit(x,y):
global blocks, collections
if(x<0 or y<0): return False
for (gx,gy) in touchingBlocks(x,y):
if(blocks[gy][gx] == spriteNumber("exit")):
if(collections >= toCollect):
nextLevel()
def canDrop(x,y): # Like canEnter, but only checks if we can drop into a space (many platform blocks can be entered but not dropped through)
global blocks
if(x<0 or y<0): return False
if(y%BS != 0): return True
sgx = x/BS
egx = (x+BS-1)/BS
egy = (y+1+BS*2-1)/BS
gy = egy
for gx in range(sgx,egx+1):
if(flags[blocks[gy][gx]] & SUPPORTING):
return False
return True
def loadTransparent(filename):
i = pygame.image.load("tiles/"+filename)
i.set_colorkey((0,0,0))
return i
def loadSpriteSet(name):
try:
s = 0
spriteArray = []
while True:
spriteArray.append(loadTransparent("../sprites/%s%d.png"%(name,s)))
s+=1
except pygame.error:
print "[loadSpriteSet] Not found: ../sprites/%s%d.png"%(name,s)
if(len(spriteArray)>0):
print "End of sprite set"
return spriteArray
print "Warning: No sprite set found for %s"%(name)
return None
def loadTileSet(name):
print "Loading sprite set for %s"%name
try:
s = 0
spriteArray = []
while True:
spriteArray.append(loadTransparent("%s%d.png"%(name,s)))
s+=1
except pygame.error:
print "Not found: %s.png"%name
if(len(spriteArray)>0):
print "Loaded set %s, with %d frames"%(name, len(spriteArray))
return spriteArray
print "Warning: No sprite set found for %s"%(name)
return None
def loadSprite(filename):
i = pygame.image.load("sprites/"+filename+".png")
i.set_colorkey((0,0,0))
return i
def drawBlock(blockNo, x, y):
global zoom
bs = BS
if(zoom): bs = BS/2
offsetx = 10*BS
if(spritesByNumber[blockNo] is not None):
spriteData = spritesByNumber[blockNo]
if(type(spriteData) is list):
sprite = spriteData[frame[y][x]];
screen.blit(sprite, (x*bs-px+10*BS,y*bs-py+6*BS))
if blockNo == spriteNumber("conveyor"):
frame[y][x] = (frame[y][x]+1) % (getMaxFrame(spriteNumber("conveyor"))+1)
if blockNo == spriteNumber("conveyRight"):
frame[y][x] = (frame[y][x]-1) % (getMaxFrame(spriteNumber("conveyor"))+1)
else:
sprite = spriteData
if(zoom):
sprite = pygame.transform.scale(sprite,(16,16))
screen.blit(sprite, (x*bs-px+10*bs,y*bs-py+6*bs))
else:
#print "Caution: No sprite for blockNo %d"%blockNo
pass
def standingEffects():
# Degenerate platforms etc
global px,py
opy = py+(BS*2)
sgx = px/BS
egx = (px+BS-1)/BS
gy = opy/BS
for x in range(sgx,egx+1):
if blocks[gy][x] == spriteNumber("breakingledge"):
frame[gy][x] += 1
if frame[gy][x] > getMaxFrame(spriteNumber("breakingledge")):
frame[gy][x] = 0
blocks[gy][x] = 0
elif blocks[gy][x] == spriteNumber("conveyor"):
if canEnter(px+1,py):
px += 1
elif blocks[gy][x] == spriteNumber("conveyorr"):
if canEnter(px-1,py):
px -= 1
# Per-life initialization things
def perLifeInit():
global grounded, vel, walkframe, left, dx, px, py, collections, actives, frame, level, directions,plan, started,delay
actives = []
frame = makegrid(GSX,GSY)
loadLevel(level)
grounded = False
vel = 0
walkframe = 0
left = False
dx = 0
px = startx*BS
py = starty*BS
print "Level %d: starting position is (%d,%d)\n"%(level,startx,starty)
collections = 0
plan = NONE
started = False
delay = 0
# Per-game initialization
def perGameInit():
global lives, level, flash, directions
flash = 0
level = 1
lives = 100
directions = makegrid(GSX,GSY)
def drawBlocks():
global zoom
bs = BS
if(zoom): bs = BS/2
for y in range(py/bs-6,py/bs+20):
for x in range(px/bs-10,px/bs+31):
if(x<0 or x>=GSX or y<0 or y>=GSY):
pygame.draw.rect(screen, (0,0,255), (x*bs-px+10*BS,y*bs-py+6*BS,bs,bs))
else:
drawBlock(blocks[y][x],x,y)
if(directions[y][x]>0):
d = directions[y][x]
if(commandSprites[d] is None):
pygame.draw.rect(screen, (255*(d & 1),255*((d >> 1) & 1),255*((d>>2)&1)), (x*bs-px+10*bs,y*bs-py+6*bs,bs,bs))
else:
if(zoom):
blockSprite = pygame.transform.scale(commandSprites[d],(16,16))
else:
blockSprite = commandSprites[d]
screen.blit(blockSprite, (x*bs-px+10*BS,y*bs-py+6*BS))
def drawActives():
for a in actives:
if(state==RUNNING): a.animate()
sprite = monsterSprite[a.frame]
screen.blit(pygame.transform.flip(sprite,a.left,False), (a.x-px+10*BS,a.y-py+6*BS,BS,2*BS))
#Draw player
playerSprite = playerSprites[walkframe%4]
if(state==RUNNING):
screen.blit(pygame.transform.flip(playerSprite,left,False),(10*BS,6*BS))
else:
screen.blit(pygame.transform.flip(playerSprite,left,False), (startx*BS-px+10*BS,starty*BS-py+6*BS,BS,2*BS))
def processUserInput():
global px, py, dx, left, walkframe, vel, grounded, plan, started, delay, zoom
key = pygame.key.get_pressed()
zoom = key[K_z]
if(delay>0):
delay -=1
return
if key[K_RETURN]:
plan = RIGHT
started = True
# Left/right movement
if grounded:
if plan==RIGHT and canEnter(px+4,py):
px+=speed
walkframe += 1
dx = 1
left = False
elif plan==LEFT and canEnter(px-4,py):
px-=speed
walkframe += 1
dx = -1
left = True
else:
walkframe = 0
dx = 0
else:
if canEnter(px+dx*speed,py):
px += dx*speed
if(grounded):
order = directions[(py+64)/BS][(px+16)/BS]
else:
order = 0
if(order == 1):
plan = LEFT
elif(order == 2):
plan = RIGHT
elif(order == 4): # Hourglass
delay = 8
if (order == 3) and canEnter(px,py-1) and grounded:
vel = -16
grounded = False
def placeTile(tileno, pos):
(x,y) = pos
print "Original click at %d,%d\n"%(x,y)
x += px-10*BS
y += py-6*BS
x /= BS
y /= BS
print "Placing tile %d at position %d,%d"%(tileno,x,y)
directions[y][x] = tileno
def processGravity():
global vel, py, grounded
dead = False
if(vel<0):
for i in range(0,vel,-1):
# Check whether we hit our head on something
if(canEnter(px,py-1)):
py -=1
else:
vel = 0
vel += 2
elif(vel>=0):
for i in range(0,max(vel,1)):
if(canDrop(px,py)):
py += 1
grounded = False
else:
# Landed
if(vel>24): dead = True
standingEffects()
vel = 0
grounded = True
break
vel += 1
return dead
def displayPlayScreen():
global flash, state
if(flash):
screen.fill((255,0,0))
flash -= 1
else:
screen.fill((0,0,0))
drawBlocks()
drawActives()
# Drawing sidebar
screen.fill((0,0,127),(screenwidth-80,0,80,512))
startcommand = 0
if(state == RUNNING):
startcommand = 5
for i in range(startcommand,7):
if(commandSprites[i] is None):
pygame.draw.rect(screen, (255,0,255), (screenwidth-80+8,8+40*i,32,32))
else:
screen.blit(commandSprites[i], (screenwidth-80+8,8+40*i))
# Highlight current action
if(state == PLACING):
pygame.draw.rect(screen, (255,255,255), (screenwidth-80+4,4+40*action,40,40),1)
# Draw remaining lives
for i in range(0,lives):
screen.blit(playerSprites[0],(i*BS,20*BS))
def displayGameOverScreen():
background.blit(black, (0,0), None, BLEND_SUB)
screen.blit(background,(0,0))
#text = font.render("GAME OVER", 1, (255,0,0))
textpos = text.get_rect(centerx=screenwidth/2,y=64)
screen.blit(text, textpos)
def displayWinScreen():
global cycles
background.blit(black, (0,0), None, BLEND_SUB)
screen.blit(background,(0,0))
#text = font.render("LEVEL COMPLETED", 1, (255,0,0))
textpos = text.get_rect(centerx=screenwidth/2,y=64)
screen.blit(text, textpos)
#text = font.render("%d cycles"%cycles, 1, (255,0,0))
textpos = text.get_rect(centerx=screenwidth/2,y=96)
screen.blit(text, textpos)
def displayTitleScreen():
screen.blit(titleScreen, (0,0))
def getMaxFrame(sym):
global maxFrame
return maxFrame[sym]
def checkDead(deadFlag):
global flash, lives, state, background
if(deadFlag or isDeadly(px,py)):
lives -= 1
if(lives < 0):
state = GAMEOVER
perGameInit()
background.blit(screen, (0,0))
perLifeInit()
flash = 2
def initCommandSprites():
global commandSprites
i = 0
for c in commands:
if(c is not None):
commandSprites[ i ] = loadTransparent(c+".png")
i += 1
SOLID = 1 ; DEADLY = 2 ; SUPPORTING = 4
def init2():
global font, blockSpriteMap, commands, commandSprites, spritesByNumber, flags
global maxFrame
font = pygame.font.Font(pygame.font.match_font("Arial"), 36)
blockSpriteMap = spriteNames
commands = [ "eraser", "goleft", "goright", "jump", "hourglass", "go", "stop" ]
commandSprites = [ None ] * 12
initCommandSprites()
spritesByNumber = [ None ] *20
flags = [ 0 ] * 20
flags [ 1 ] = 0
for i in range(2,10):
flags [ i ] = SUPPORTING | SOLID
flags[spriteNumber("exitsign")] = 0
flags[spriteNumber("exit")] = 0
flags[spriteNumber("breakingledge")] = SUPPORTING
flags[spriteNumber("conveyor")] = SUPPORTING
flags[spriteNumber("shittytree")] = DEADLY
for k,v in blockSpriteMap.iteritems():
try:
spritesByNumber[k] = loadTransparent("%s.png"%v)
print "Loading sprite %d: %s\n"%(k,v)
except pygame.error:
print "Not found: %s.png"%v
spriteSet = loadTileSet(v)
spritesByNumber[k] = spriteSet
if(spriteSet is not None):
maxFrame[k] = len(spriteSet)-1
print "Finished init2"
def init3():
global playerSprites, monsterSprite
playerSprites = loadSpriteSet("player")
monsterSprite = loadSpriteSet("monster")
print "Finished init3"
# Constants for game state
TITLE=0
PLACING=1
RUNNING=2
PLAYING=2 # Deprecated
GAMEOVER=3
WINPAGE = 4
state = TITLE
flash = 0
gameOverTimeout = 32
action = 0
drag = False
(dragx,dragy) = (0,0)
def mainLoop():
global state, dead, action, px, py, drag, gameOverTimeout, cycles, level
init1()
initGrids()
init2()
print "Init 2 completed. state=%d"%state
init3()
print "All init completed. state=%d"%state
while True:
clock.tick(25)
if state==RUNNING or state==PLACING:
processUserInput()
displayPlayScreen()
pygame.display.flip()
if state==RUNNING:
cycles += 1
dead = processGravity()
checkCollect(px,py)
checkExit(px,py)
checkDead(dead)
elif state == GAMEOVER:
displayGameOverScreen()
gameOverTimeout -= 1
if(gameOverTimeout < 0):
gameOverTimeout = 32
state = TITLE
pygame.display.flip()
elif state == WINPAGE:
displayWinScreen()
gameOverTimeout -= 1
if(gameOverTimeout < 0):
gameOverTimeout = 32
state = PLACING
pygame.display.flip()
elif state == TITLE:
displayTitleScreen()
pygame.display.flip()
for event in pygame.event.get():
if event.type == QUIT:
exit(0)
elif event.type == KEYDOWN:
if event.key == K_ESCAPE or event.key == K_q:
exit(0)
elif event.key == K_p: # Cheat code, skips all collections
collections = 100
if state==TITLE and event.key == K_3:
state = PLACING
perGameInit()
level = 3
perLifeInit()
if state==TITLE and event.key == K_4:
state = PLACING
perGameInit()
level = 4
perLifeInit()
if state==PLAYING and event.key == K_s:
nextLevel()
elif event.type == MOUSEBUTTONDOWN:
print "Verify event = click"
if state==TITLE:
state = PLACING
perGameInit()
perLifeInit()
else:
(x,y) = event.pos
(dragx,dragy) = (x,y)
if(x>screenwidth-80):
icon = (y-8)/40
if(icon == 5):
print "Switching to state RUNNING"
px = startx*BS
py = starty*BS
cycles = 0
state = RUNNING
elif(icon == 6):
# Reset. How
print "Switching to state PLACING"
state = PLACING
perLifeInit()
elif(icon == 7):
# Reset. How
print "Switching to state PLACING"
state = TITLE
elif state==PLACING:
action = icon
elif event.type == MOUSEBUTTONUP:
(x,y) = event.pos
if(x<screenwidth-80 and not drag):
if(state == PLACING ):
placeTile(action, event.pos)
drag = False
elif event.type == MOUSEMOTION and state==PLACING:
(left,middle,right) = event.buttons
(x,y) = event.pos
if(left):
if(abs(x-dragx) > 32 or abs(y-dragy)>32 or drag):
drag = True
px -= (x-dragx)
py -= (y-dragy)
(dragx,dragy)=(x,y)
def main():
mainLoop()
# This isn't run on Android.
if __name__ == "__main__":
main()
``` |
{
"source": "jmacarthur/aoc2017",
"score": 3
} |
#### File: aoc2017/day10/solve10b.py
```python
import sys
import operator # For xor
def swap_list_elements(l, a, b):
wrap = len(l)
(l[a % wrap], l[b % wrap]) = (l[b % wrap], l[a % wrap])
def reverse_in_place(ring, l, pos):
for x in range(0,l/2):
y = l-x-1
swap_list_elements(ring, pos+x, pos+y)
def xor_16_byte(array):
output = []
for i in range(0, len(array), 16):
output.append(reduce(operator.xor, array[i:i+16]))
return output
def process_reversions(lengths):
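# Knot-hash core loop: 64 rounds over a 256-element ring; in each round every
# length triggers an in-place reversal starting at the current position, after
# which the position advances by the length plus an ever-increasing skip size.
# The resulting "sparse" ring is then condensed by XOR-ing blocks of 16 bytes.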
ring = range(0,256)
pos = 0
skip = 0
for r in range(0,64):
for l in lengths:
reverse_in_place(ring, l, pos)
pos += l
pos += skip
skip += 1
pos = pos % len(ring)
hashval = xor_16_byte(ring)
return hashval
def knot_hash(inp):
reversions = map(ord, inp)
reversions.extend([17, 31, 73, 47, 23])
hashval = process_reversions(reversions)
return hashval
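# Hypothetical usage sketch (the key string is chosen only for illustration):
# digest = knot_hash("some-key-0")
# print("".join("%2.2x" % b for b in digest))  # 32-character hex string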
def main():
filename = sys.argv[1]
with open(filename, "rt") as f:
lengths = f.readlines()
for l in lengths:
print("Processing line '%s'"%l.strip())
hashval = knot_hash(l.strip())
hashascii = "".join(["%2.2x"%x for x in hashval])
print hashascii
print("----------------------------------------")
if __name__=="__main__":
main()
```
#### File: aoc2017/day14/solve14.py
```python
import sys
sys.path.insert(0, '../day10')
from solve10b import knot_hash
real_input = "jxqlasbh"
test_input = "flqrgnkx"
hash_base = real_input
used_spaces = 0
disc = []
def find_occupied_space(disc):
for y in range(0,128):
row = disc[y]
for x in range(0,128):
if row[x] == '#': return (x,y)
return None
def delete_region((x,y)):
disc[y][x] = '.'
neighbours = [ (x+1, y), (x-1, y), (x, y+1), (x, y-1) ]
for (nx,ny) in neighbours:
if nx >=0 and ny >= 0 and nx < 128 and ny < 128:
if disc[ny][nx] == '#': delete_region((nx, ny))
for row in range(0,128):
line_input = "%s-%d"%(hash_base, row)
hash_list = knot_hash(line_input)
line_int = 0
line = []
for i in range(0,16):
line.extend(['.' if (hash_list[i] & (1<<bit)) == 0 else '#' for bit in range(7,-1,-1)])
used_spaces += line.count('#')
print("%s %16.16s"%(line_input, "".join(line)))
disc.append(line)
regions = 0
while True:
pos = find_occupied_space(disc)
if pos == None: break
print("Deleting region at %d, %d"%pos)
regions += 1
delete_region(pos)
print ("%d used spaces"%used_spaces)
print ("%d regions"%regions)
```
#### File: aoc2017/day16/solve16.py
```python
import sys
import copy
stage_length = 16
stage = map(chr, range(ord('a'),ord('a')+stage_length))
def spin(amount):
"""To save time, this function isn't used except at the end.
Normally, a counter marks the start of the stage and this changes
instead. """
global stage
stage = stage[amount:] + stage[:amount]
def swap(pos1, pos2):
global stage
(stage[pos1], stage[pos2]) = (stage[pos2], stage[pos1])
with open(sys.argv[1], 'rt') as f:
program = ",".join(f.readlines()).split(",")
n = 0
pos = 0
arguments_list = [x[1:].strip().split("/") for x in program]
action_list = [x[0] for x in program]
history = []
# Change this to 1 for the solution to part 1.
iterations = 1000000000
while n<iterations:
for s in range(0,len(program)):
arguments = arguments_list[s]
if action_list[s] == 's':
pos += stage_length-int(arguments[0])
elif action_list[s] == 'x':
swap((int(arguments[0])+pos)%stage_length, (int(arguments[1])+pos)%stage_length)
elif action_list[s] == 'p':
pos1 = stage.index(arguments[0])
pos2 = stage.index(arguments[1])
swap(pos1, pos2)
if stage in history:
print("Duplicate found: %r at index %d matches at stage %d"%(stage, history.index(stage), n))
loop_length = n - history.index(stage)
complete_cycles = (iterations - n) / loop_length
n += complete_cycles * loop_length
history.append(copy.copy(stage))
n += 1
spin(pos % stage_length)
print "".join(stage)
```
#### File: aoc2017/day3/solve3a.py
```python
import sys
target = int(sys.argv[1])
n = 2
x = 1
y = 0
UP = 0
LEFT = 1
DOWN = 2
RIGHT = 3
radius = 1;
directions = ['up', 'left', 'down', 'right']
deltas = [(0,1), (-1,0), (0,-1), (1,0)]
def abs(x):
return -x if x<0 else x
def traverse(a, direction):
global n,x,y
print("Traverse %d %s"%(a, directions[direction]))
for i in range(0,a):
n += 1
x += deltas[direction][0]
y += deltas[direction][1]
print("%d at %d,%d =dist %d"%(n,x,y,abs(x)+abs(y)))
while n < target:
traverse(radius, UP)
traverse(2*radius, LEFT)
traverse(2*radius, DOWN)
traverse(2*radius+1, RIGHT)
traverse(radius, UP)
radius += 1
```
#### File: aoc2017/day9/solve9.py
```python
import sys
def parse_line(l):
print("Processing line %s"%l.strip())
input_data = list(l.strip())
mode = "Group" # Either "Group" or "Garbage"
group_level = 0 # The current group nesting level
group_levels = [] # Array showing the level of each group when we start it
garbage_count = 0 # Amount of garbage counted so far, not counting cancelled characters
pos = 0 # Position in string that's currently being processed - only required for debugging
while len(input_data)>0:
c = input_data.pop(0)
pos += 1
print("Processing '%c' in mode %s"%(c,mode_string[mode]))
if mode == "Group":
if c == '}':
group_level -= 1
if(group_level < 0):
print("} without matching { at pos %d" % pos);
sys.exit(1)
elif c == '{':
group_level += 1
group_levels.append(group_level)
elif c == '<':
mode = "Garbage"
elif mode == "Garbage":
if c == '!':
input_data.pop(0)
pos += 1
elif c == '>':
mode = "Group"
else:
garbage_count += 1
print("-------------------- Group levels: %r = %s; garbage count = %d"%(group_levels,sum(group_levels), garbage_count))
if __name__ == "__main__":
input_file = open(sys.argv[1], "rt")
for l in input_file.readlines():
parse_line(l)
``` |
{
"source": "jmacarthur/diagram-script",
"score": 3
} |
#### File: jmacarthur/diagram-script/diagram.py
```python
import header
import math
import pyclipper
import sys
from copy import deepcopy as objcopy
# Set some initial and default settings
style = {"stroke":"black", "fill":"green", "fill-opacity":"1", "stroke-opacity":"1"}
z = 0
clipper_scale = 1000
def convert_style(style_dict):
""" Converts a dictionary which contains style settings into a string suitable for use in SVG. """
style_text = ""
for d in style_dict:
style_text += "%s=\"%s\" "%(d,style_dict[d])
return style_text
def zorder():
""" Increments then returns the global variable z, for use in z-ordering shapes. """
global z
z += 1
return z
class Drawable(object):
def svg(self):
return "<!--- unimplemented shape --->"
def copy_style(self, other_shape):
self.style = other_shape.style
self.z = other_shape.z
def copy_attributes(self, other_shape):
self.ref_x = other_shape.ref_x
self.ref_y = other_shape.ref_y
self.copy_style(other_shape)
class Polygon(Drawable):
def __init__(self, pointsets):
self.pointsets = pointsets
self.ref_x = 0
self.ref_y = 0
self.z = zorder()
self.style = objcopy(style)
def svg(self):
path = "<path d=\""
for paths in self.pointsets:
c = paths[0]
path += "M %f %f "%(c[0]+self.ref_x,c[1]+self.ref_y)
for c in paths[1:]:
path += "L %f %f "%(c[0]+self.ref_x,c[1]+self.ref_y)
path += "z "
path += "\" fill-rule=\"evenodd\" "+convert_style(self.style)+ "/>"
return path
class Circle(Drawable):
def __init__(self, x, y, radius):
self.ref_x = x;
self.ref_y = y;
self.radius = radius;
self.z = zorder()
self.style = objcopy(style)
def svg(self):
circle = '<circle cx="%d" cy="%d" r="%d" %s />'%(self.ref_x, self.ref_y, self.radius, convert_style(self.style))
return circle
def to_polygon(self):
fn = 20
points = []
for step in range(0,fn):
angle = step*math.pi*2.0/(fn)
points.append([self.ref_x + self.radius*math.cos(angle), self.ref_y+self.radius*math.sin(angle)])
p = Polygon([points])
p.copy_style(self)
return p
class Rect(Polygon):
def __init__(self, x, y, width, height):
self.pointsets = [[[x,y],[x+width,y],[x+width,y+height],[x,y+height]]]
self.ref_x = x;
self.ref_y = y;
self.z = zorder()
self.style = objcopy(style)
Rectangle = Rect
def translation(obj, x, y):
""" Returns a copy of the original object translated by (x,y) """
d = objcopy(obj)
d.ref_x += x
d.ref_y += y
return d
def copy(obj, x, y):
""" Returns a copy of the original object translated by (x,y) """
return translation(obj, x,y)
def move(obj, x, y):
""" Moves an object by (x,y) without making a copy. """
obj.ref_x += x
obj.ref_y += y
def deep_tuple(l):
""" Convert lists of lists of lists (...) into tuples of tuples of tuples, and so on. """
if type(l) != list: return l
return tuple(deep_tuple(a) for a in l)
def scale_poly(poly, scale):
"""Converts polygons and lists of polygons by scaling their
coordinates. Works recursively on lists of polygons."""
if type(poly) != list: return poly*scale
return list(scale_poly(p, scale) for p in poly)
def binops(start, operation, *args):
""" Binary geometry - perform the operation on start and args[0], then
the result of that with args[1], etc. If any shape is not a
polygon, it will attempt to convert it to a polygon which
approximates it, using that object's to_polygon method.
"""
if isinstance(start,Drawable) and not isinstance(start,Polygon):
start = start.to_polygon()
a = deep_tuple(scale_poly(start.pointsets, clipper_scale))
for rhs in args:
if isinstance(rhs,Drawable) and not isinstance(rhs,Polygon):
rhs = rhs.to_polygon()
pc = pyclipper.Pyclipper()
b = deep_tuple(scale_poly(rhs.pointsets, clipper_scale))
pc.AddPaths(a, pyclipper.PT_SUBJECT, True)
pc.AddPaths(b, pyclipper.PT_CLIP, True)
a = pc.Execute(operation, pyclipper.PFT_EVENODD, pyclipper.PFT_EVENODD)
new_p = Polygon(scale_poly(a, 1.0/clipper_scale))
new_p.copy_attributes(start)
return new_p
def add(start, *args):
return binops(start, pyclipper.CT_UNION, *args)
def subtract(start, *args):
return binops(start, pyclipper.CT_DIFFERENCE, *args)
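# Illustrative usage sketch (values are examples only): an input script passed
# to main() below could contain
#   setstyle(fill="red", fill_opacity="0.5")
#   plate = subtract(Rect(0, 0, 100, 100), Circle(50, 50, 20))
# Any Drawable bound to a name in that script is written out as SVG by main().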
def setstyle(**kwargs):
""" Set any number of style arguments in the global style variable.
Because 'fill-opacity' cannot be an argument name, underscores
will be converted to hyphens here. """
global style
for x in kwargs:
style[x.replace('_','-')] = kwargs[x]
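# For example, setstyle(stroke="blue", fill_opacity="0.5") updates style["stroke"]
# and style["fill-opacity"]; the underscore becomes a hyphen as described above.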
def usage():
print("Usage: %s <script file>"%sys.argv[0])
def main():
if len(sys.argv) != 2:
usage()
return
input_filename = sys.argv[1]
with open(input_filename, "rt") as f:
code = "\n".join(f.readlines())
locals = dict()
globs = globals()
exec(code, globs, locals)
print header.svg_header
print "width=\"297\" height=\"210\" viewPort=\"0 0 297 210\" >"
for l in locals:
print "<!-- %s -->"%l
if l[0] != "_" and isinstance(locals[l], Drawable):
print locals[l].svg()
print "</svg>"
if __name__=="__main__": main()
``` |
{
"source": "jmacarthur/ld38b",
"score": 4
} |
#### File: jmacarthur/ld38b/amenity-finder.py
```python
import sys
import xml.etree.ElementTree as ET
"""Sorts through OSM XML and find amenities - nodes with particular
tags."""
def interesting(node):
for tag in node:
if tag.tag == "tag":
if tag.attrib['k'] == "tourism" and tag.attrib['v'] == "museum": return True
return False
def name(node):
for tag in node:
if tag.tag == "tag":
if tag.attrib['k'] == "name": return tag.attrib['v']
return None
def main():
input_filename = sys.argv[1]
tree = ET.parse(input_filename)
root = tree.getroot()
for child in root:
if(child.tag == "node"):
(lon,lat) = (child.attrib["lon"], child.attrib["lat"])
if interesting(child):
print("%s,%s,%s"%(name(child), lon,lat))
if __name__=="__main__":
main()
```
#### File: jmacarthur/ld38b/subdivide.py
```python
import sys
def abs(x):
return -x if x<0 else x
def main():
for gridX in range(-140,-130):
for gridY in range(3202,3212):
generate_map(gridX, gridY)
def generate_map(gridX, gridY):
print("Generating %d,%d"%(gridX, gridY))
minute = 1/60.0; # Grid size in degrees
input_filename = sys.argv[1]
ways = []
all_nodes = {}
inside_nodes = []
with open(input_filename, "rt") as f:
while True:
l = f.readline()
if l=="": break
if l[0] == 'w':
ways.append(map(int, l[1:].split(",")))
elif l[0] == 'n':
fields = l[1:].split(",")
node_id = int(fields[0])
lon = float(fields[1])
lat = float(fields[2])
all_nodes[node_id] = (lon,lat)
if lon >= gridX * minute and lon < (gridX+1)*minute and lat >= gridY*minute and lat < (gridY+1)*minute:
inside_nodes.append(node_id)
keep_ways = []
keep_nodes = {}
lon_prefix = 'w' if gridX<0 else 'e'
lat_prefix = 's' if gridY<0 else 'n'
output_file = open("%s%d%s%d.map"%(lon_prefix,abs(gridX),lat_prefix,abs(gridY)), "wt")
for w in ways:
for nodeid in w:
if nodeid in inside_nodes:
keep_ways.append(w)
break
for w in keep_ways:
output_file.write("w"+",".join(map(str,w))+"\n")
for nodeid in w:
keep_nodes[nodeid] = 1
for n in keep_nodes.keys():
output_file.write("n%d,%f,%f\n"%(n,all_nodes[n][0], all_nodes[n][1]))
output_file.close()
if __name__=="__main__":
main()
```
#### File: jmacarthur/ld38b/xml-to-map.py
```python
import sys
import xml.etree.ElementTree as ET
"""This can be passed an OSM extract file (.osm). It will produce a
CSV text file containing selected ways and all the nodes contained in
one of the selected ways."""
importantRoads = ["motorway", "motorway_link", "trunk", "trunk_link",
"primary", "primary_link", "river"]
def processRoad(way, road_type):
points = []
if(road_type in importantRoads):
for point in way:
if(point.tag == "nd"):
points.append(int(point.attrib['ref']))
return points
def main():
input_filename = sys.argv[1]
tree = ET.parse(input_filename)
root = tree.getroot()
all_nodes = {}
important_nodes = {}
for child in root:
if(child.tag == "node"):
(lon,lat) = (child.attrib["lon"], child.attrib["lat"])
node_id = int(child.attrib["id"])
all_nodes[node_id] = (lon,lat)
if(child.tag == "way"):
valid_road = False
road_type = None
for i in child:
if(i.tag == "tag"):
if(i.attrib['k']=="highway" or i.attrib['k']=="waterway"):
valid_road = True
road_type = i.attrib['v']
break
if valid_road:
ways = processRoad(child, road_type)
if len(ways)>0:
print("w"+",".join(map(str,ways)))
for w in ways:
important_nodes[w] = 1
for k in important_nodes.keys():
print("n%d,%s,%s"%(k, all_nodes[k][0], all_nodes[k][1]))
if __name__=="__main__":
main()
``` |
{
"source": "jmacarthur/ohio",
"score": 3
} |
#### File: jmacarthur/ohio/ohio.py
```python
import curses
import fourletterphat as flp
import time
lookup = {
0: 'EARL',
1: 'CYLN',
2: 'ASAM',
3: 'DARJ',
4: 'RUSC',
5: 'LAPS' }
# Display:
# If no tea is being brewed here, then just display 'tea?' or 'ohio' or some placeholder.
# Once a button is pressed, tea starts brewing for three minutes. During this phase:
# Scroll "(tea name) ready in MM:SS". Repeat the scroll every ten seconds.
# If 'cancel' is pressed, display "Cancel" for two seconds, then go back to 'no tea' mode.
# When the tea is ready, alternate "(tea name) MMMm".
# When tea has been ready for two hours, go back to 'no tea' mode.
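# Illustrative display strings built by the code below (tea names and times are
# example values only): "EARL ready in 2m30" while brewing, "EARL  45m" once
# ready, and "Tea?" when idle.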
TEA_BREWING_TIME=180 # Seconds
MAX_TEA_AGE = 120*60 # Seconds
class State():
def __init__(self):
self.current_tea_index = None
self.time_brewing_started = None
self.message = None
self.brewing = False
self.message_timeout = 0
self.scroll_position = 0
self.brewed = False
def end_of_scroll(self):
""" True if we can scroll and have finished scrolling. """
return (self.scroll_position > len(self.message)-4) and len(self.message)>4
def reset_scroll(self):
self.scroll_position = -4
def get_scroll_pos(self):
return 0 if self.scroll_position < 0 else self.scroll_position
def main(stdscr):
# Clear screen
s = State()
seq = 0
x = None
stdscr.clear()
stdscr.nodelay(1) # set getch() non-blocking
s.message = "Tea?"
quit = False
flp.clear()
flp.print_number_str('OHIO')
flp.show()
while not quit:
v = stdscr.getch()
if v != -1:
if v >= ord('0') and v <= ord('9') and v-ord('0') in lookup:
s.current_tea_index = v-ord('0')
tea = lookup[s.current_tea_index]
s.message = "{} ready in ????".format(tea)
s.message_timeout = 120
s.time_brewing_started = time.localtime()
s.brewing = True
s.reset_scroll()
if v == curses.KEY_BACKSPACE:
# Cancel
s.message = "CANC"
s.current_tea_index = None
s.message_timeout = 20
s.time_brewing_started = None
s.brewing = False
s.reset_scroll()
x = v
# Update curses display for debugging
stdscr.addstr(0,0,"Last keypress: {}".format(x))
stdscr.addstr(1,0,"Scrolling message: >{}<".format(s.message))
stdscr.addstr(2,0,"Scroll position: >{}<".format(s.get_scroll_pos()))
stdscr.addstr(3,0,"Message timeout: {}".format(s.message_timeout))
if s.time_brewing_started:
stdscr.addstr(4,0,"Brewing started: {}:{}:{}".format(s.time_brewing_started.tm_hour, s.time_brewing_started.tm_min, s.time_brewing_started.tm_sec))
stdscr.addstr(5,0,"Brewing started: {}s".format(time.mktime(s.time_brewing_started)))
else:
stdscr.addstr(4,0,"Nothign brewing")
stdscr.addstr(6,0,"Time now : {}s".format(time.mktime(time.localtime())))
# Update tea time, if currently brewing
if s.time_brewing_started:
elapsed = time.mktime(time.localtime()) - time.mktime(s.time_brewing_started)
remaining = int(TEA_BREWING_TIME-elapsed)
if remaining > 0:
s.message = s.message [:-4] + "{:1d}m{:02d}".format(int(remaining/60), remaining%60)
else:
if s.brewing:
s.reset_scroll()
s.brewing = False
tea = lookup[s.current_tea_index]
s.message = "{} {:3d}m".format(tea, int((elapsed - TEA_BREWING_TIME)/60))
if elapsed > MAX_TEA_AGE:
s.time_brewing_started = None
s.current_tea_index = None
s.brewing = False
s.message = "Tea?"
s.reset_scroll()
# Update four-letter display
if s.message_timeout > 0:
flp.clear()
if s.message:
flp.print_str(s.message[s.get_scroll_pos():])
if seq % 5 == 0 and s.scroll_position < len(s.message)-4:
s.scroll_position += 1
flp.show()
s.message_timeout -= 1
else:
if s.current_tea_index:
s.reset_scroll()
s.message_timeout = 120
else:
flp.clear()
s.message = "Tea?"
flp.print_str(s.message)
flp.show()
seq += 1
time.sleep(0.1)
curses.wrapper(main)
``` |
{
"source": "jmacauley/opennsa",
"score": 2
} |
#### File: opennsa/backends/junosex.py
```python
import random
from twisted.python import log
from twisted.internet import defer
from opennsa import constants as cnt, config
from opennsa.backends.common import genericbackend, ssh
# parameterized commands
COMMAND_CONFIGURE = 'edit private'
COMMAND_COMMIT = 'commit'
COMMAND_SET_INTERFACES = 'set interfaces %(port)s encapsulation ethernet-ccc' # port, source vlan, source vlan
COMMAND_SET_INTERFACES_CCC = 'set interfaces %(port)s unit 0 family ccc'
COMMAND_SET_INTERFACES_MTU = 'set interfaces %(port)s mtu 9000'
COMMAND_SET_INTERFACE_VLN_T = 'set interfaces %(port)s vlan-tagging'
COMMAND_SET_INTERFACE_ENC_V = 'set interfaces %(port)s encapsulation vlan-ccc'
COMMAND_SET_VLAN_ENCAP = 'set interfaces %(port)s unit %(vlan)s encapsulation vlan-ccc'
COMMAND_SET_VLAN_ID = 'set interfaces %(port)s unit %(vlan)s vlan-id %(vlan)s'
COMMAND_SET_SWAP_PUSH_POP = 'set interfaces %(port)s unit %(vlan)s swap-by-poppush'
COMMAND_DELETE_INTERFACES = 'delete interfaces %(port)s' # port / vlan
COMMAND_DELETE_INTERFACES_VL= 'delete interfaces %(port)s.%(vlan)s'
COMMAND_DELETE_CONNECTIONS = 'delete protocols connections interface-switch %(switch)s' # switch
COMMAND_DELETE_MPLS_LSP = 'delete protocols mpls label-switched-path %(unique-id)s'
COMMAND_DELETE_REMOTE_INT_SW= 'delete protocols connections remote-interface-switch %(connectionid)s'
COMMAND_LOCAL_CONNECTIONS = 'set protocols connections interface-switch %(switch)s interface %(interface)s.%(subinterface)s'
COMMAND_REMOTE_LSP_OUT_TO = 'set protocols mpls label-switched-path %(unique-id)s to %(remote_ip)s'
COMMAND_REMOTE_LSP_OUT_NOCSPF = 'set protocols mpls label-switched-path %(unique-id)s no-cspf'
COMMAND_REMOTE_CONNECTIONS_INT = 'set protocols connections remote-interface-switch %(connectionid)s interface %(port)s'
COMMAND_REMOTE_CONNECTIONS_TRANSMIT_LSP = 'set protocols connections remote-interface-switch %(connectionid)s transmit-lsp %(unique-id)s'
COMMAND_REMOTE_CONNECTIONS_RECEIVE_LSP = 'set protocols connections remote-interface-switch %(connectionid)s receive-lsp %(unique-id)s'
LOG_SYSTEM = 'EX4550'
class SSHChannel(ssh.SSHChannel):
name = 'session'
def __init__(self, conn):
ssh.SSHChannel.__init__(self, conn=conn)
self.line = ''
self.wait_defer = None
self.wait_line = None
@defer.inlineCallbacks
def sendCommands(self, commands):
LT = '\r' # line termination
try:
yield self.conn.sendRequest(self, 'shell', '', wantReply=1)
d = self.waitForLine('{master:0}[edit]')
self.write(COMMAND_CONFIGURE + LT)
yield d
log.msg('Entered configure mode', debug=True, system=LOG_SYSTEM)
for cmd in commands:
log.msg('CMD> %s' % cmd, system=LOG_SYSTEM)
d = self.waitForLine('{master:0}[edit]')
self.write(cmd + LT)
yield d
# commit commands, check for 'commit complete' as success
# not quite sure how to handle failure here
## test stuff
#d = self.waitForLine('[edit]')
#self.write('commit check' + LT)
d = self.waitForLine('commit complete')
self.write(COMMAND_COMMIT + LT)
yield d
except Exception, e:
log.msg('Error sending commands: %s' % str(e))
raise e
log.msg('Commands successfully committed', debug=True, system=LOG_SYSTEM)
self.sendEOF()
self.closeIt()
def waitForLine(self, line):
self.wait_line = line
self.wait_defer = defer.Deferred()
return self.wait_defer
def matchLine(self, line):
if self.wait_line and self.wait_defer:
if self.wait_line == line.strip():
d = self.wait_defer
self.wait_line = None
self.wait_defer = None
d.callback(self)
else:
pass
def dataReceived(self, data):
if len(data) == 0:
pass
else:
self.line += data
if '\n' in data:
lines = [ line.strip() for line in self.line.split('\n') if line.strip() ]
self.line = ''
for l in lines:
self.matchLine(l)
class JunosEx4550CommandSender:
def __init__(self, host, port, ssh_host_fingerprint, user, ssh_public_key_path, ssh_private_key_path,
network_name):
self.ssh_connection_creator = \
ssh.SSHConnectionCreator(host, port, [ ssh_host_fingerprint ], user, ssh_public_key_path, ssh_private_key_path)
self.ssh_connection = None # cached connection
self.connection_lock = defer.DeferredLock()
self.network_name = network_name
def _getSSHChannel(self):
def setSSHConnectionCache(ssh_connection):
log.msg('SSH Connection created and cached', system=LOG_SYSTEM)
self.ssh_connection = ssh_connection
return ssh_connection
def gotSSHConnection(ssh_connection):
channel = SSHChannel(conn = ssh_connection)
ssh_connection.openChannel(channel)
return channel.channel_open
if self.ssh_connection:
log.msg('Reusing SSH connection', debug=True, system=LOG_SYSTEM)
return gotSSHConnection(self.ssh_connection)
else:
# since creating a new connection should be uncommon, we log it
            # this makes it possible to see if something goes wrong and creates connections continuously
log.msg('Creating new SSH connection', system=LOG_SYSTEM)
d = self.ssh_connection_creator.getSSHConnection()
d.addCallback(setSSHConnectionCache)
d.addCallback(gotSSHConnection)
return d
@defer.inlineCallbacks
def _sendCommands(self, commands):
channel = yield self._getSSHChannel()
log.msg('Acquiring ssh session lock', debug=True, system=LOG_SYSTEM)
yield self.connection_lock.acquire()
log.msg('Got ssh session lock', debug=True, system=LOG_SYSTEM)
try:
yield channel.sendCommands(commands)
finally:
log.msg('Releasing ssh session lock', debug=True, system=LOG_SYSTEM)
self.connection_lock.release()
log.msg('Released ssh session lock', debug=True, system=LOG_SYSTEM)
def setupLink(self, connection_id, source_port, dest_port, bandwidth):
cg = JunosEx4550CommandGenerator(connection_id,source_port,dest_port,self.network_name,bandwidth)
commands = cg.generateActivateCommand()
return self._sendCommands(commands)
def teardownLink(self, connection_id, source_port, dest_port, bandwidth):
cg = JunosEx4550CommandGenerator(connection_id,source_port,dest_port,self.network_name,bandwidth)
commands = cg.generateDeactivateCommand()
return self._sendCommands(commands)
class JunosEx4550Target(object):
def __init__(self, port, original_port,value=None):
self.port = port
self.value = value
self.original_port = original_port
# NEVER USE : in port name!
def __str__(self):
if self.port.remote_network is None:
return '<JuniperEX4550Target %s#%s=%s>' % (self.original_port,self.port.label.type_,self.value)
else:
return '<JuniperEX4550Target %s#%s=%s -> %s>' % (self.original_port,self.port.label.type_,self.value,self.port.remote_port,)
class JunosEx4550ConnectionManager:
def __init__(self, port_map, host, port, host_fingerprint, user, ssh_public_key, ssh_private_key,
network_name):
self.network_name = network_name
self.port_map = port_map
self.command_sender = JunosEx4550CommandSender(host, port, host_fingerprint, user, ssh_public_key, ssh_private_key,
network_name)
def getResource(self, port, label):
        return self.port_map[port] + ':' + ('' if label is None else str(label.labelValue()))
def getTarget(self, port, label):
return JunosEx4550Target(self.port_map[port], port,label.labelValue())
def createConnectionId(self, source_target, dest_target):
return 'JuniperEx4550-' + str(random.randint(100000,999999))
def canSwapLabel(self, label_type):
return True
def setupLink(self, connection_id, source_target, dest_target, bandwidth):
def linkUp(_):
log.msg('Link %s -> %s up' % (source_target, dest_target), system=LOG_SYSTEM)
d = self.command_sender.setupLink(connection_id,source_target, dest_target,bandwidth)
d.addCallback(linkUp)
return d
def teardownLink(self, connection_id, source_target, dest_target, bandwidth):
def linkDown(_):
log.msg('Link %s -> %s down' % (source_target, dest_target), system=LOG_SYSTEM)
d = self.command_sender.teardownLink(connection_id,source_target, dest_target, bandwidth)
d.addCallback(linkDown)
return d
def JunosEXBackend(network_name, nrm_ports , parent_requester, cfg):
name = 'JunosEX %s' % network_name
nrm_map = dict( [ (p.name, p) for p in nrm_ports ] ) # for the generic backend
port_map = dict( [ (p.name, p) for p in nrm_ports ] ) # for the nrm backend
host = cfg[config.JUNIPER_HOST]
port = cfg.get(config.JUNIPER_PORT, 22)
host_fingerprint = cfg[config.JUNIPER_HOST_FINGERPRINT]
user = cfg[config.JUNIPER_USER]
ssh_public_key = cfg[config.JUNIPER_SSH_PUBLIC_KEY]
ssh_private_key = cfg[config.JUNIPER_SSH_PRIVATE_KEY]
cm = JunosEx4550ConnectionManager(port_map, host, port, host_fingerprint, user, ssh_public_key, ssh_private_key,
network_name)
return genericbackend.GenericBackend(network_name, nrm_map, cm, parent_requester, name)
class JunosEx4550CommandGenerator(object):
def __init__(self,connection_id,src_port,dest_port,network_name,bandwidth=None):
self.connection_id = connection_id
self.src_port = src_port
self.dest_port = dest_port
self.bandwidth = bandwidth
self.network_name = network_name
log.msg('Initialised with params src %s dst %s bandwidth %s connectionid %s' %
(src_port,dest_port,bandwidth,connection_id), debug=True, system=LOG_SYSTEM)
def generateActivateCommand(self):
commands = []
source_port = self.src_port.port
dest_port = self.dest_port.port
log.msg("%s %s " % (source_port,dest_port))
log.msg("Activate commands between %s:%s:%s and %s:%s:%s " %
(source_port.remote_network, source_port.interface, source_port.label.type_,
dest_port.remote_network, dest_port.interface, dest_port.label.type_), debug=True,
system=LOG_SYSTEM)
# Local connection
if source_port.remote_network is None and dest_port.remote_network is None:
commands = self._generateLocalConnectionActivate()
elif source_port.remote_network is not None and dest_port.remote_network is not None:
commands = self._generateLocalConnectionActivate()
log.msg('Transit connection-HERE SHOULD BE COMMANDS FOR TRANSIT', system=LOG_SYSTEM)
else:
#commands = self._generateRemoteConnectionActivate() All cases are the same tODO: remove IFs competely here
commands = self._generateLocalConnectionActivate()
return commands
def generateDeactivateCommand(self):
        commands = []
source_port = self.src_port.port
dest_port = self.dest_port.port
log.msg("Deactivate commands between %s:%s#%s=%s and %s:%s#%s=%s " %
(source_port.remote_network, source_port.interface, source_port.label.type_,self.src_port.value,
dest_port.remote_network, dest_port.interface, dest_port.label.type_,self.dest_port.value), debug=True,
system=LOG_SYSTEM)
# Local connection
if source_port.remote_network is None and dest_port.remote_network is None:
commands = self._generateLocalConnectionDeActivate()
elif source_port.remote_network is not None and dest_port.remote_network is not None:
#commands = ["Transit connection"]
commands = self._generateLocalConnectionDeActivate()
else:
#commands = self._generateRemoteConnectionDeactivate() DTTO as activate
commands = self._generateLocalConnectionDeActivate()
return commands
def _createSwitchName(self,connection_id):
switch_name = 'OpenNSA-local-%s' % (connection_id)
return switch_name
def _generateLocalConnectionActivate(self):
commands = []
switch_name = self._createSwitchName( self.connection_id )
""" For configuration reason, we're going to generate port things first, then the interface-switch commands"""
for gts_port in self.src_port,self.dest_port:
#if gts_port.port.label is not None and gts_port.port.label.type_ == "port":
# commands.append( COMMAND_SET_INTERFACES % { 'port':gts_port.port.interface} )
# commands.append( COMMAND_SET_INTERFACES_MTU % { 'port':gts_port.port.interface} )
# commands.append( COMMAND_SET_INTERFACES_CCC % { 'port':gts_port.port.interface} )
# tODO remove this as ports are not supported
if gts_port.port.label is not None and gts_port.port.label.type_ == "vlan":
commands.append( COMMAND_SET_INTERFACE_VLN_T % {'port':gts_port.port.interface, 'vlan':gts_port.value} )
commands.append( COMMAND_SET_INTERFACES_MTU % { 'port':gts_port.port.interface} )
commands.append( COMMAND_SET_INTERFACE_ENC_V % {'port':gts_port.port.interface, 'vlan':gts_port.value} )
commands.append( COMMAND_SET_VLAN_ENCAP % {'port':gts_port.port.interface, 'vlan':gts_port.value} )
commands.append( COMMAND_SET_VLAN_ID % {'port':gts_port.port.interface, 'vlan':gts_port.value} )
commands.append( COMMAND_SET_SWAP_PUSH_POP % {'port':gts_port.port.interface, 'vlan':gts_port.value} )
for gts_port in self.src_port,self.dest_port:
commands.append( COMMAND_LOCAL_CONNECTIONS % { 'switch':switch_name,
'interface':"%s" % gts_port.port.interface,
'subinterface': "%s" % gts_port.value if
gts_port.port.label.type_ == "vlan" else '0' } )
return commands
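    # Illustrative result (a sketch only; "xe-0/0/1" and vlan 1000 are made-up
    # values, not from a real device): for a vlan-labelled port the list built
    # above would contain commands such as
    #   set interfaces xe-0/0/1 vlan-tagging
    #   set interfaces xe-0/0/1 mtu 9000
    #   set interfaces xe-0/0/1 unit 1000 vlan-id 1000
    #   set protocols connections interface-switch OpenNSA-local-<id> interface xe-0/0/1.1000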
def _generateLocalConnectionDeActivate(self):
commands = []
switch_name = self._createSwitchName( self.connection_id )
for gts_port in self.src_port,self.dest_port:
#if gts_port.port.label.type_ == "port":
# commands.append( COMMAND_DELETE_INTERFACES % { 'port':gts_port.port.interface } )
if gts_port.port.label is not None and gts_port.port.label.type_ == "vlan":
commands.append( COMMAND_DELETE_INTERFACES_VL % { 'port':gts_port.port.interface, 'vlan' : "%s"
% gts_port.value})
commands.append( COMMAND_DELETE_CONNECTIONS % { 'switch':switch_name } )
return commands
# def _generateRemoteConnectionActivate(self):
# commands = []
#
# local_port = self.src_port if self.src_port.port.remote_network is None else self.dest_port
# remote_port = self.src_port if self.src_port.port.remote_network is not None else self.dest_port
# log.msg("%s" % local_port.original_port)
# log.msg("%s" % remote_port.original_port)
#
# if local_port.port.label.type_ == "port":
# commands.append( COMMAND_SET_INTERFACES % { 'port':local_port.port.interface} )
# commands.append( COMMAND_SET_INTERFACES_MTU % { 'port':local_port.port.interface} )
# commands.append( COMMAND_SET_INTERFACES_CCC % { 'port':local_port.port.interface} )
# if local_port.port.label.type_ == "vlan":
# commands.append( COMMAND_SET_INTERFACE_VLN_T % {'port':local_port.port.interface, 'vlan':local_port.value} )
# commands.append( COMMAND_SET_INTERFACE_ENC_V % {'port':local_port.port.interface, 'vlan':local_port.value} )
# commands.append( COMMAND_SET_VLAN_ENCAP % {'port':local_port.port.interface, 'vlan':local_port.value} )
# commands.append( COMMAND_SET_VLAN_ID % {'port':local_port.port.interface, 'vlan':local_port.value} )
# commands.append( COMMAND_SET_SWAP_PUSH_POP % {'port':local_port.port.interface, 'vlan':local_port.value} )
#
# if remote_port.port.label.type_ == "mpls":
# remote_sw_ip = self._getRouterLoopback(remote_port.port.remote_network)
#
# commands.append(COMMAND_REMOTE_LSP_OUT_TO % {
# 'unique-id':"T-"+remote_port.port.remote_network+"-F-"+self.network_name+"-mpls"+str(remote_port.value),
# 'remote_ip':remote_sw_ip } )
# commands.append(COMMAND_REMOTE_LSP_OUT_NOCSPF % {
# 'unique-id':"T-"+remote_port.port.remote_network+"-F-"+self.network_name+"-mpls"+str(remote_port.value),
# 'remote_ip':remote_sw_ip } )
#
#
# if local_port.port.label.type_ == "port":
# commands.append(COMMAND_REMOTE_CONNECTIONS_INT % { 'connectionid' : self.connection_id,
# 'port' : local_port.port.interface
# } )
# if local_port.port.label.type_ == "vlan":
# commands.append(COMMAND_REMOTE_CONNECTIONS_INT % { 'connectionid' : self.connection_id,
# 'port' : local_port.port.interface + "." + str(local_port.value)
# } )
#
# commands.append(COMMAND_REMOTE_CONNECTIONS_TRANSMIT_LSP % { 'connectionid' : self.connection_id,
# 'unique-id':"T-"+remote_port.port.remote_network+"-F-"+self.network_name+"-mpls"+str(remote_port.value)
# } )
# commands.append(COMMAND_REMOTE_CONNECTIONS_RECEIVE_LSP % { 'connectionid' : self.connection_id,
# 'unique-id':"T-"+self.network_name+"-F-"+remote_port.port.remote_network+"-mpls"+str(remote_port.value)
# } )
# if remote_port.port.label.type_ == "vlan":
# switch_name = self._createSwitchName( self.connection_id )
#
# commands.append( COMMAND_SET_INTERFACE_VLN_T % {'port':remote_port.port.interface, 'vlan':remote_port.value} )
# commands.append( COMMAND_SET_INTERFACE_ENC_V % {'port':remote_port.port.interface, 'vlan':remote_port.value} )
# commands.append( COMMAND_SET_VLAN_ENCAP % {'port':remote_port.port.interface, 'vlan':remote_port.value} )
# commands.append( COMMAND_SET_VLAN_ID % {'port':remote_port.port.interface, 'vlan':remote_port.value} )
# commands.append( COMMAND_SET_SWAP_PUSH_POP % {'port':remote_port.port.interface, 'vlan':remote_port.value} )
#
# for gts_port in local_port,remote_port:
# commands.append( COMMAND_LOCAL_CONNECTIONS % { 'switch':switch_name,
# 'interface':"%s" % gts_port.port.interface,
# 'subinterface': "%s" % gts_port.value if
# gts_port.port.label.type_ == "vlan" else '0' } )
#
#
# return commands
#
#
# def _generateRemoteConnectionDeactivate(self):
# commands = []
#
# local_port = self.src_port if self.src_port.port.remote_network is None else self.dest_port
# remote_port = self.src_port if self.src_port.port.remote_network is not None else self.dest_port
#
# if local_port.port.label.type_ == "port":
# commands.append( COMMAND_DELETE_INTERFACES % { 'port':local_port.port.interface } )
# if local_port.port.label.type_ == "vlan":
# commands.append( COMMAND_DELETE_INTERFACES_VL % { 'port':local_port.port.interface, 'vlan' : "%s"
# % local_port.value})
#
# if remote_port.port.label.type_ == "mpls":
# remote_sw_ip = self._getRouterLoopback(remote_port.port.remote_network)
# commands.append( COMMAND_DELETE_MPLS_LSP % {
# 'unique-id' : "T-"+remote_port.port.remote_network+"-F-"+self.network_name+"-mpls"+str(remote_port.value)
# } )
# commands.append( COMMAND_DELETE_REMOTE_INT_SW % { 'connectionid' :
# self.connection_id } )
# if remote_port.port.label.type_ == "vlan":
# switch_name = self._createSwitchName( self.connection_id )
# commands.append( COMMAND_DELETE_INTERFACES_VL % { 'port':remote_port.port.interface, 'vlan' : "%s"
# % remote_port.value})
# commands.append( COMMAND_DELETE_CONNECTIONS % { 'switch':switch_name } )
#
# return commands
#def _getRouterLoopback(self,network_name):
#
# if ":topology" in network_name:
# network_name = network_name.replace(":topology","")
# if network_name in self.gts_routers:
# return self.gts_routers[network_name]
# else:
# raise Exception("Can't find loopback IP address for network %s " % network_name)
```
#### File: opennsa/plugins/canarie.py
```python
from zope.interface import implements
from twisted.internet import defer
from twisted.python import log
from opennsa import database, plugin
from opennsa.interface import IPlugin
LOG_SYSTEM = 'Canarie'
class CanariePlugin(plugin.BasePlugin):
implements(IPlugin)
@defer.inlineCallbacks
def createConnectionId(self):
"""
Create a Canarie service identifier for the connection.
"""
unique_id = yield database.getBackendConnectionId()
if unique_id is None:
raise ValueError("Could not generate an connection id from the database, most likely serviceid_start isn't set")
unique_id = str(unique_id)
connection_id = unique_id[:5] + 'CS' + unique_id[5:] + '-ANA'
log.msg('Generated id: ' + connection_id, system=LOG_SYSTEM)
defer.returnValue(connection_id)
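        # Worked example (values are illustrative): a backend id of 1234567
        # becomes "12345CS67-ANA" via the slicing above.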
plugin = CanariePlugin()
```
#### File: nsi2/bindings/p2pservices.py
```python
from xml.etree import ElementTree as ET
# types
class OrderedStpType(object):
def __init__(self, order, stp):
self.order = order # int
self.stp = stp # StpIdType -> string
@classmethod
def build(self, element):
return OrderedStpType(
element.get('order'),
element.findtext('stp')
)
def xml(self, elementName):
r = ET.Element(elementName, attrib={'order' : str(self.order)})
ET.SubElement(r, 'stp').text = self.stp
return r
class TypeValueType(object):
def __init__(self, type_, value):
self.type_ = type_
self.value = value
@classmethod
def build(self, element):
return TypeValueType(
element.get('type'),
element.text
)
def xml(self, elementName):
r = ET.Element(elementName, attrib={'type' : self.type_})
r.text = self.value
return r
class P2PServiceBaseType(object):
def __init__(self, capacity, directionality, symmetricPath, sourceSTP, destSTP, ero, parameter):
self.capacity = capacity # long
self.directionality = directionality # DirectionalityType -> string
self.symmetricPath = symmetricPath # boolean
self.sourceSTP = sourceSTP # StpIdType -> string
self.destSTP = destSTP # StpIdType -> string
self.ero = ero # [ OrderedStpType ]
self.parameter = parameter # [ TypeValueType ]
@classmethod
def build(self, element):
return P2PServiceBaseType(
int(element.findtext('capacity')),
element.findtext('directionality'),
True if element.findtext('symmetricPath') == 'true' else False if element.find('symmetricPath') is not None else None,
element.findtext('sourceSTP'),
element.findtext('destSTP'),
[ OrderedStpType.build(e) for e in element.find('ero') ] if element.find('ero') is not None else None,
[ TypeValueType.build(e) for e in element.findall('parameter') ] if element.find('parameter') is not None else None
)
def xml(self, elementName):
r = ET.Element(elementName)
ET.SubElement(r, 'capacity').text = str(self.capacity)
ET.SubElement(r, 'directionality').text = self.directionality
if self.symmetricPath is not None:
ET.SubElement(r, 'symmetricPath').text = 'true' if self.symmetricPath else 'false'
ET.SubElement(r, 'sourceSTP').text = self.sourceSTP
ET.SubElement(r, 'destSTP').text = self.destSTP
if self.ero is not None:
ET.SubElement(r, 'ero').extend( [ e.xml('orderedSTP') for e in self.ero ] )
if self.parameter is not None:
for p in self.parameter:
ET.SubElement(r, 'parameter', attrib={'type': p.type_}).text = p.value
return r
POINT2POINT_NS = 'http://schemas.ogf.org/nsi/2013/12/services/point2point'
p2ps = ET.QName(POINT2POINT_NS, 'p2ps')
capacity = ET.QName(POINT2POINT_NS, 'capacity')
parameter = ET.QName(POINT2POINT_NS, 'parameter')
def parse(input_):
root = ET.fromstring(input_)
return parseElement(root)
def parseElement(element):
type_map = {
str(p2ps) : P2PServiceBaseType,
str(parameter) : TypeValueType
}
if not element.tag in type_map:
raise ValueError('No type mapping for tag %s' % element.tag)
type_ = type_map[element.tag]
return type_.build(element)
```
#### File: protocols/nsi2/queryhelper.py
```python
from twisted.python import log
from opennsa import constants as cnt, nsa
from opennsa.shared.xmlhelper import createXMLTime, parseXMLTimestamp
from opennsa.protocols.nsi2 import helper
from opennsa.protocols.nsi2.bindings import nsiconnection, p2pservices
LOG_SYSTEM = 'NSI2.queryhelper'
## ( nsa native -> xsd )
def buildServiceDefinitionType(service_def):
if type(service_def) is nsa.Point2PointService:
sd = service_def
p2ps = p2pservices.P2PServiceBaseType(sd.capacity, sd.directionality, sd.symmetric, sd.source_stp.urn(), sd.dest_stp.urn(), None, [])
return p2ps
else:
log.msg('Cannot build query service definition for %s (not P2PService)' % str(service_def), system=LOG_SYSTEM)
return None
def buildConnectionStatesType(states):
rsm, psm, lsm, dsm = states
data_plane_status = nsiconnection.DataPlaneStatusType(dsm[0], dsm[1], dsm[2])
connection_states = nsiconnection.ConnectionStatesType(rsm, psm, lsm, data_plane_status)
return connection_states
def buildQuerySummaryResultType(connection_infos):
query_results = []
for ci in connection_infos:
criterias = []
for crit in ci.criterias:
sched_start_time = createXMLTime(crit.schedule.start_time) if crit.schedule.start_time is not None else None
sched_end_time = createXMLTime(crit.schedule.end_time) if crit.schedule.end_time is not None else None
schedule = nsiconnection.ScheduleType(sched_start_time, sched_end_time)
service_type = cnt.EVTS_AGOLE
service_def = buildServiceDefinitionType(crit.service_def)
children = []
criteria = nsiconnection.QuerySummaryResultCriteriaType(crit.revision, schedule, service_type, children, service_def)
criterias.append(criteria)
connection_states = buildConnectionStatesType(ci.states)
qsrt = nsiconnection.QuerySummaryResultType(ci.connection_id, ci.global_reservation_id, ci.description, criterias,
ci.requester_nsa, connection_states, ci.notification_id, ci.result_id)
query_results.append(qsrt)
return query_results
def buildQueryRecursiveResultType(reservations):
def buildQueryRecursiveResultCriteriaType(criteria):
assert type(criteria) is nsa.QueryCriteria, 'Wrong criteria type for buildQueryRecursiveResultCriteriaType: %s' % (str(criteria))
# handle optional start / end time
start_time = createXMLTime(criteria.schedule.start_time) if criteria.schedule.start_time is not None else None
end_time = createXMLTime(criteria.schedule.end_time) if criteria.schedule.end_time is not None else None
schedule = nsiconnection.ScheduleType(start_time, end_time)
service_type = str(p2pservices.p2ps) # we need this to have the bindings working properly
service_def = buildServiceDefinitionType(criteria.service_def)
crts = []
for idx, child in enumerate(criteria.children):
assert type(child) is nsa.ConnectionInfo, 'Invalid child criteria type for buildQueryRecursiveResultCriteriaType: %s' % str(type(child))
sub_states = buildConnectionStatesType(child.states)
sub_qrrct = [ buildQueryRecursiveResultCriteriaType( sc ) for sc in child.criterias ]
crt = nsiconnection.ChildRecursiveType(idx, child.connection_id, child.provider_nsa, sub_states, sub_qrrct)
crts.append(crt)
qrrct = nsiconnection.QueryRecursiveResultCriteriaType(criteria.revision, schedule, service_type, crts, service_def)
return qrrct
query_results = []
for ci in reservations:
criterias = [ buildQueryRecursiveResultCriteriaType(c) for c in ci.criterias ]
connection_states = buildConnectionStatesType(ci.states)
qsrt = nsiconnection.QuerySummaryResultType(ci.connection_id, ci.global_reservation_id, ci.description, criterias,
ci.requester_nsa, connection_states, ci.notification_id, ci.result_id)
query_results.append(qsrt)
return query_results
## ( xsd -> nsa native )
def buildSchedule(schedule):
start_time = parseXMLTimestamp(schedule.startTime) if schedule.startTime is not None else None
end_time = parseXMLTimestamp(schedule.endTime) if schedule.endTime is not None else None
return nsa.Schedule(start_time, end_time)
def buildServiceDefinition(service_definition):
if service_definition is None:
log.msg('Did not get any service definitions, cannot build service definition', system=LOG_SYSTEM)
return None
if type(service_definition) is p2pservices.P2PServiceBaseType:
sd = service_definition
source_stp = helper.createSTP(sd.sourceSTP)
dest_stp = helper.createSTP(sd.destSTP)
return nsa.Point2PointService(source_stp, dest_stp, sd.capacity, sd.directionality, sd.symmetricPath, None)
else:
log.msg('Got %s service definition, can only build for P2PService' % str(service_definition), system=LOG_SYSTEM)
return None
def buildConnectionStates(connection_states):
r_dps = connection_states.dataPlaneStatus
dps = (r_dps.active, r_dps.version, r_dps.versionConsistent)
states = (connection_states.reservationState, connection_states.provisionState, connection_states.lifecycleState, dps)
return states
def buildCriteria(r_criteria, include_children=False):
schedule = buildSchedule(r_criteria.schedule)
service_def = buildServiceDefinition(r_criteria.serviceDefinition)
children = []
if include_children:
for r_child in sorted(r_criteria.children, key=lambda c : c.order):
# print 'child', r_child.connectionId, r_child.providerNSA, len(r_child.criteria), [ c.children for c in r_child.criteria ]
crit = r_child.criteria[0] # we only use this for service type, so should be ok
c_crits = [ buildCriteria(rc, include_children) for rc in r_child.criteria ]
states = buildConnectionStates(r_child.connectionStates)
ci = nsa.ConnectionInfo(r_child.connectionId, None, None, crit.serviceType, c_crits, r_child.providerNSA, None, states, None, None)
children.append( ci )
crit = nsa.QueryCriteria(int(r_criteria.version), schedule, service_def, children)
return crit
def buildQueryResult(query_confirmed, provider_nsa, include_children=False):
qc = query_confirmed
states = buildConnectionStates(query_confirmed.connectionStates)
criterias = []
if qc.criteria is not None:
for rc in qc.criteria:
crit = buildCriteria(rc, include_children)
criterias.append(crit)
return nsa.ConnectionInfo(qc.connectionId, qc.globalReservationId, qc.description, cnt.EVTS_AGOLE, criterias, provider_nsa, qc.requesterNSA, states, qc.notificationId, qc.resultId)
```
#### File: protocols/shared/httplog.py
```python
from twisted.python import log
LOG_SYSTEM = 'HTTP'
def logRequest(request):
length = request.sentLength or '-'
user_agent = request.getHeader('user-agent') or '-'
log.msg('%s - %s %s %s %s %s %s' % \
(request.getClientIP(), request.method, request.uri, request.clientproto, request.code, length, user_agent),
system=LOG_SYSTEM
)
```
#### File: opennsa/opennsa/provreg.py
```python
from twisted.python import log
from opennsa import error
LOG_SYSTEM = 'providerregistry'
class ProviderRegistry(object):
def __init__(self, providers, provider_factories):
# usually initialized with local providers
self.providers = providers.copy()
self.provider_factories = provider_factories # { provider_type : provider_spawn_func }
self.provider_networks = {} # { provider_urn : [ network ] }
def getProvider(self, nsi_agent_urn):
"""
Get a provider from a NSI agent identity/urn.
"""
try:
return self.providers[nsi_agent_urn]
except KeyError:
raise error.STPResolutionError('Could not resolve a provider for %s' % nsi_agent_urn)
def getProviderByNetwork(self, network_id):
"""
Get the provider urn by specifying network.
"""
for provider, networks in self.provider_networks.items():
if network_id in networks:
return provider
else:
raise error.STPResolutionError('Could not resolve a provider for %s' % network_id)
def addProvider(self, nsi_agent_urn, provider, network_ids):
"""
Directly add a provider. Probably only needed by setup.py
"""
if not nsi_agent_urn in self.providers:
log.msg('Creating new provider for %s' % nsi_agent_urn, system=LOG_SYSTEM)
self.providers[ nsi_agent_urn ] = provider
self.provider_networks[ nsi_agent_urn ] = network_ids
def spawnProvider(self, nsi_agent, network_ids):
"""
Create a new provider, from an NSI agent.
ServiceType must exist on the NSI agent, and a factory for the type available.
"""
if nsi_agent.urn() in self.providers and self.provider_networks[nsi_agent.urn()] == network_ids:
log.msg('Skipping provider spawn for %s (no change)' % nsi_agent, debug=True, system=LOG_SYSTEM)
return self.providers[nsi_agent.urn()]
factory = self.provider_factories[ nsi_agent.getServiceType() ]
prov = factory(nsi_agent)
self.addProvider(nsi_agent.urn(), prov, network_ids)
log.msg('Spawned new provider for %s' % nsi_agent, system=LOG_SYSTEM)
return prov
``` |
{
"source": "jmacdonald2010/arithmetic-formatter",
"score": 4
} |
#### File: jmacdonald2010/arithmetic-formatter/arithmetic_arranger.py
```python
def arithmetic_arranger(problems, answer=False):
# check to see if the list is too long
num_problems = len(problems)
answers_list = []
top_operand = []
operator = []
bottom_operand = []
# calculate answers, create a list of answers
# check to see if there are too many problems
if num_problems > 5:
return "Error: Too many problems."
for problem in problems:
problem_list = problem.split()
        # if an operand is longer than four digits, return error
if (len(problem_list[0]) > 4) or (len(problem_list[2]) > 4):
return "Error: Numbers cannot be more than four digits."
# try/except to look for ValueErrors
try:
if problem_list[1] == '+':
result = int(problem_list[0]) + int(problem_list[2])
elif problem_list[1] == '-':
result = int(problem_list[0]) - int(problem_list[2])
# return error if wrong operator
else:
return "Error: Operator must be '+' or '-'."
except ValueError:
return "Error: Numbers must only contain digits."
answers_list.append(str(result))
top_operand.append(problem_list[0])
operator.append(problem_list[1])
bottom_operand.append(problem_list[2])
# determine lengths
# get lengths of all operands, append to list, then determine which to use for spacing purposes
top_lengths = []
bottom_lengths = []
actual_lengths = []
for value in top_operand:
top_lengths.append(len(value))
for value in bottom_operand:
bottom_lengths.append(len(value))
# determine which to use for spacing purposes
for i in range(0, len(top_lengths)):
if top_lengths[i] >= bottom_lengths[i]:
actual_lengths.append(top_lengths[i])
else:
actual_lengths.append(bottom_lengths[i])
        actual_length_int = (int(actual_lengths[i]) + 2) # +2 leaves room for the operator and a space
actual_lengths[i] = actual_length_int
# build string
arranged_problems = ""
# top row
i = 0
for value in top_lengths:
space_length = actual_lengths[i] - value
spacing = " " * space_length
arranged_problems = arranged_problems + spacing
arranged_problems = arranged_problems + top_operand[i]
arranged_problems = arranged_problems + " "
i += 1
arranged_problems = arranged_problems.rstrip()
arranged_problems = arranged_problems + "\n"
# bottom operands and operators
i = 0
for value in bottom_lengths:
arranged_problems = arranged_problems + operator[i] + " "
space_length = (actual_lengths[i] - 2) - value
if space_length > 0:
spacing = " " * space_length
arranged_problems = arranged_problems + spacing
arranged_problems = arranged_problems + bottom_operand[i]
arranged_problems = arranged_problems + " "
i += 1
arranged_problems = arranged_problems.rstrip()
arranged_problems = arranged_problems + "\n"
# dashes
i = 0
for value in actual_lengths:
arranged_problems = arranged_problems + ("-" * actual_lengths[i])
arranged_problems = arranged_problems + " "
i += 1
arranged_problems = arranged_problems.rstrip()
if answer is True:
arranged_problems = arranged_problems + "\n"
i = 0
for answer in answers_list:
space_length = actual_lengths[i] - len(answer)
arranged_problems = arranged_problems + (" " * space_length)
arranged_problems = arranged_problems + answer
arranged_problems = arranged_problems + " "
i += 1
arranged_problems = arranged_problems.rstrip()
return arranged_problems
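# Illustrative usage (inputs are example values):
#   arithmetic_arranger(["32 + 698", "1 - 3801"], True)
# returns the problems arranged side by side with right-aligned operands,
# a dash line, and (because answer=True) a final row of answers.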
``` |
{
"source": "jmacdonald2010/probability_calculator",
"score": 4
} |
#### File: jmacdonald2010/probability_calculator/prob_calculator.py
```python
import copy
import random
class Hat:
def __init__(self, **kwargs):
# init the hat, and create a list for the hat w/ a variable # of kwargs
self.contents = []
self.not_in_hat = []
for key, value in kwargs.items():
for i in range(0, value):
self.contents.append(key)
def draw(self, num_balls):
# select random balls from the hat contents, not allowing for duplicates. If the # of balls being drawn is larger than the contents of the hat, returns the entire self.contents as a list.
rand_balls = []
random.seed(95)
i = 0
if num_balls >= (len(self.contents) + len(self.not_in_hat)):
rand_balls = self.contents
else:
while i < num_balls:
if len(self.contents) == 0: # when the self.contents list is empty, copy the not_in_hat list to it, then empty the not_in_hat list
self.contents = self.not_in_hat.copy()
self.not_in_hat= []
rand_num = random.randrange(0, len(self.contents))
rand_balls.append(self.contents[(rand_num - 1)])
self.not_in_hat.append(self.contents[(rand_num - 1)])
del self.contents[(rand_num - 1)]
i += 1
return rand_balls
def experiment(hat, expected_balls, num_balls_drawn, num_experiments):
m = 0 # is # of successful experiments we run
# first thing is to run the experiment the # of times specified (which is to draw)
for i in range (0, num_experiments):
experiment_success = True
result = hat.draw(num_balls_drawn)
# then lets clean up the results into a new dict
org_result = {} # organized results, aka a dict
# go thru results of the hat.draw, compile those values into a dict
for ball in result:
if ball not in org_result:
org_result[ball] = 1
else:
org_result[ball] = org_result[ball] + 1
# compare results to expected_balls
# loop thru keys in expected_balls, check to see if vals match w/ those balls in org_result
for ball in expected_balls:
try:
if org_result[ball] >= expected_balls[ball]:
continue
else:
experiment_success = False
break
except KeyError: # if none of our expected balls where in the results, this is used to avoid an error
experiment_success = False
break
if experiment_success == True:
m += 1
return m/num_experiments
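# Illustrative usage (argument values are examples only):
#   hat = Hat(blue=4, red=2, green=6)
#   prob = experiment(hat=hat, expected_balls={"blue": 2, "green": 1},
#                     num_balls_drawn=4, num_experiments=1000)
# prob is the fraction of experiments whose draw contained the expected balls.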
``` |
{
"source": "jmacdonald2010/shopping-list",
"score": 3
} |
#### File: jmacdonald2010/shopping-list/main.py
```python
from item import Item
import sqlite3
from kivy.app import App, runTouchApp
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.dropdown import DropDown
from kivy.uix.spinner import Spinner
from kivy.uix.accordion import Accordion, AccordionItem
from kivy.uix.checkbox import CheckBox
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.label import Label
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.scrollview import ScrollView
from kivy.core.window import Window
from kivy.uix.popup import Popup
from kivy.lang import Builder
from kivy.properties import ObjectProperty
# import pandas as pd
# import numpy as np
# from collections import Counter
# connect to db, or create if not exists
conn = sqlite3.connect('shoppingList.db')
# create tables
# start w/ creating a table for the units, then store, then departments
conn.execute('''CREATE TABLE IF NOT EXISTS units (
unit_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
unit_name TEXT NOT NULL
);''')
conn.execute('''CREATE TABLE IF NOT EXISTS stores(
store_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
store_name TEXT NOT NULL
);''')
conn.execute('''CREATE TABLE IF NOT EXISTS departments(
department_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
department_name TEXT NOT NULL
);''')
# create item table, foreign keys are quantity_unit, department, store
conn.execute('''CREATE TABLE IF NOT EXISTS items(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
name TEXT NOT NULL,
quantity TEXT NOT NULL,
unit_id INT NOT NULL,
department_id INT NOT NULL,
isle INT,
collected INT NOT NULL,
store_id TEXT,
time_created TEXT NOT NULL,
FOREIGN KEY (unit_id)
REFERENCES units (unit_id)
ON UPDATE CASCADE
ON DELETE CASCADE,
FOREIGN KEY (department_id)
REFERENCES departments (department_id)
ON UPDATE CASCADE
ON DELETE CASCADE,
FOREIGN KEY (store_id)
REFERENCES stores (store_id)
ON UPDATE CASCADE
ON DELETE CASCADE
);
''')
# after creating the tables, populate the units, stores, and departments tables with ordinary information
# users will be given the ability to add stores and departments, but not diff. units
# starts w/ units, then stores, then departments
# NOTE!!!! this needs to be made more robust
# this needs to be set so if those values are MISSING, it adds them, OTHERWISE, it does nothing
'''conn.execute("INSERT INTO units (unit_name) VALUES ('each'), ('lbs'), ('oz'), ('mL'), ('L'), ('gallons');")
conn.execute("INSERT INTO stores (store_name) VALUES ('kroger westerville'), ('costco easton'), ('costco polaris'), ('kroger stoneridge'), ('aldi westerville');")
conn.execute("INSERT INTO departments (department_name) VALUES ('produce'), ('deli/bakery'), ('meat'), ('grocery'), ('beer/wine'), ('liquor'), ('dairy'), ('frozen'), ('pharmacy'), ('electronics'), ('other');")'''
print('database initialized')
# Builder.load_file('main.kv') # i may not needs this line
class MainScreen(Screen, GridLayout):
# allows us to add accordion items to the Accordion in the main.kv file.
shopping_list = ObjectProperty(None)
# accordion_size = ObjectProperty(None)
def __init__(self, **kwargs):
super(MainScreen, self).__init__(**kwargs)
# build the main screen; is its own function since we call on it while the app is still running.
# stores_dict_init = 0
accordion_size = 1000
MainScreen.build_accordions(self)
@classmethod
def calc_accordion_size(cls, **kwargs):
# call to the db, get the item id # and dept. id's
# then, organize the data to see which department has the most items
# then, determine the largest # of items and use that to help size things.
calc_size = conn.execute('SELECT id, department_id FROM items;')
calc_size = calc_size.fetchall()
biggest_val_dict = dict()
# assemble a dict of # of items (val) per dept (key)
for x in calc_size:
try:
                if biggest_val_dict[x[1]] > 0: # if a count already exists for this department id, add 1 to it
                    biggest_val_dict[x[1]] += 1
            except KeyError: # used when the key is not yet in the dict
biggest_val_dict[x[1]] = 1
# then, iterate to determine which one is largest
biggest_val = 0
        for v in biggest_val_dict.values():
if v > biggest_val:
biggest_val = v
calcd_size = biggest_val * 100
return int(calcd_size)
@classmethod
def build_accordions(cls, self, **kwargs):
# create lists of the departments
departments = []
department_ids =[]
departments_q = conn.execute('SELECT * FROM departments;')
departments_q = departments_q.fetchall()
for department in departments_q:
department_id = department[0]
department_name = department[1]
department_ids.append(department_id)
departments.append(department_name)
# create dict of stores and their ids
global current_store
global stores_dict_init
global stores_dict
try:
if stores_dict_init == 1:
pass
except NameError:
stores_dict = dict()
stores_dict_q = conn.execute('SELECT store_name, store_id FROM stores;')
stores_dict_q = stores_dict_q.fetchall()
for store in stores_dict_q:
stores_dict[store[0]] = store[1]
if store[1] == 1:
current_store = store[0]
stores_dict_init = 1
# current_store =
# declare for later
toggles = dict()
# dict for unit labels
unit_dict = dict()
units = conn.execute('SELECT * FROM units;')
units = units.fetchall()
for unit in units:
unit_id = unit[0]
unit_name = unit[1]
unit_dict[unit_id] = unit_name
# build the accordion items to the main screen
department_dfs = dict()
for department, id in zip(departments, department_ids):
# create the accordion item
department_accordion = AccordionItem(orientation='vertical', title=department)
department_grid = GridLayout(cols=6)
self.shopping_list.add_widget(department_accordion)
department_accordion.add_widget(department_grid)
# now populate the accordion, first the column names
department_grid.add_widget(Label(text="Collected?"))
department_grid.add_widget(Label(text="Item"))
department_grid.add_widget(Label(text="Amt"))
department_grid.add_widget(Label(text="Unit"))
department_grid.add_widget(Label(text="Isle"))
department_grid.add_widget(Label(text="DateTime Added"))
# get data from DB, store in dict as a dict
department_dfs[id] = dict()
db_query = conn.execute(f'SELECT name, quantity, unit_id, isle, collected, store_id, id, time_created FROM items WHERE department_id = {id} AND store_id = {stores_dict[current_store]};')
db_query = db_query.fetchall()
# next, iterate thru the query and store items in the dict
# this for loop is essentially replacing the pd.read_sql command
department_dfs[id]['isles'] = []
for item in db_query:
department_dfs[id][item[6]] = dict()
department_dfs[id][item[6]]['name'] = item[0]
department_dfs[id][item[6]]['amt'] = item[1]
department_dfs[id][item[6]]['unit'] = unit_dict[item[2]]
department_dfs[id][item[6]]['isle'] = item[3]
department_dfs[id][item[6]]['collected'] = item[4]
# department_dfs[id]['store'] = stores_dict[item[5]] # problem line, possible not needed
department_dfs[id][item[6]]['item_id'] = item[6]
department_dfs[id][item[6]]['time_created'] = item[7]
# this next part is to help organize the items by isle #
if item[3] in department_dfs[id]['isles']:
pass
else:
department_dfs[id]['isles'].append(item[3])
# organize isles in ascending order
department_dfs[id]['isles'] = sorted(department_dfs[id]['isles'])
# now, iterate thru the dict of items for this dept. and create buttons/labels for them
for item in department_dfs[id]:
if item == 'isles':
continue
toggles[department_dfs[id][item]['item_id']] = ToggleButton(state=MainScreen.check_toggle_state(department_dfs[id][item]['collected'], item))
department_grid.add_widget(toggles[department_dfs[id][item]['item_id']])
department_grid.add_widget(Label(text=str(department_dfs[id][item]['name']), text_size= (100, None)))
department_grid.add_widget(Label(text=str(department_dfs[id][item]['amt'])))
department_grid.add_widget(Label(text=str(department_dfs[id][item]['unit'])))
department_grid.add_widget(Label(text=str(department_dfs[id][item]['isle']))) # still need to figure out how to organize by isle #
department_grid.add_widget(Label(text=str(department_dfs[id][item]['time_created']), text_size= (100, None)))
# get the keys and vals in ordered lists
toggles_lambdas = dict()
global toggles_key_list
toggles_key_list = []
global toggles_val_list
toggles_val_list = []
for key, val in toggles.items():
toggles_key_list.append(key)
toggles_val_list.append(val)
# add functionality to the toggle buttons
for button in department_grid.children[1:]:
if isinstance(button, ToggleButton):
for key, value in toggles.items():
if button == value:
# self.toggle_id = self.produce_toggles_key_list.index(key)
toggles_lambdas[key] = lambda key: MainScreen.change_toggle_state(key)
button.bind(on_press= toggles_lambdas[key])
# the code below is a mess and I apologize
# b/c of the way the department accordionItems are constructed,
# the settings menu has to be manually built in python
# doing so in python is not nearly as clean as doing so in the kivy file
# hence the messy code below
# <<<<<<<<<<<<<<<< Settings Menu >>>>>>>>>>>>>>>>>>
# add a settings accordion item
settings_accordion = AccordionItem(orientation='vertical', title='Settings')
# create the grid for the accordion item
settings_grid = GridLayout(cols=1, padding = [1, 1, 1, 1])
# add the accordion item to the accordion, then add the grid to that accordion item
self.shopping_list.add_widget(settings_accordion)
settings_accordion.add_widget(settings_grid)
# <<<<<<<<<<<<<<<< Delete Button >>>>>>>>>>>>>>>>>>
# add buttons to the settings menu
# first, the delete all items button
delete_all_items = Button(text='Delete All Items')
# create a popup item for the above button
global delete_popup
delete_popup = Popup(title='Delete All Items',size_hint=(None,None), size=(400,400))
# create a grid layout for the delete popup, add the grid to the popup
delete_grid = GridLayout(cols=1, padding=[.2, .2, .2, .2])
delete_popup.add_widget(delete_grid)
# add 'are you sure?' delete all items button to the popup grid
        you_sure_delete = Button(text="Are you Sure? This is irreversible! This will delete items from ALL stores!")
you_sure_delete.bind(on_press= lambda x2: MainScreen.delete_all_items_func(self))
delete_grid.add_widget(you_sure_delete)
# add a cancel button as well
cancel_delete = Button(text="Cancel")
cancel_delete.bind(on_press= lambda xx: delete_popup.dismiss())
delete_grid.add_widget(cancel_delete)
# bind the opening of the popup to the button
delete_all_items.bind(on_press= lambda x: delete_popup.open())
# add the delete button to the settings grid
settings_grid.add_widget(delete_all_items)
# <<<<<<<<<<<<<<<< Add Store Button >>>>>>>>>>>>>>>>>>
# add an add store button
add_store = Button(text='Add Store') # launches the popup
global add_store_popup
add_store_popup = Popup(title='Add Store (REQUIRES RELAUNCH TO REFLECT CHANGES)', size_hint=(None, None), size=(400, 400)) # is the popup
add_store_grid = GridLayout(cols=1, padding=[.2, .2, .2, .2]) # grid layout for popup
add_store_popup.add_widget(add_store_grid)
add_store_text = TextInput(hint_text='Type Store Name Here', multiline=False) # text input for stores
        add_store_text.bind(text=lambda asct, value: MainScreen.add_store_func(self, value, 0))  # keep new_store_name in sync while typing
add_store_grid.add_widget(add_store_text)
add_store_button = Button(text='Add Store')
add_store_button.bind(on_press= lambda ast: MainScreen.add_store_func(self, add_store_text.text, 1))
add_store_grid.add_widget(add_store_button)
add_store_cancel = Button(text="Cancel") # cancel button for popup
add_store_cancel.bind(on_press= lambda asc: add_store_popup.dismiss())
add_store_grid.add_widget(add_store_cancel)
# add_store_grid.add_widget(add_store_cancel)
add_store.bind(on_press= lambda asb: add_store_popup.open())
settings_grid.add_widget(add_store)
# <<<<<<<<<<<<<<<< Add Departments >>>>>>>>>>>>>>>>>>
add_department = Button(text='Add Department') # define button in settings grid
global add_department_popup
add_department_popup = Popup(title='Add Department (REQUIRES RELAUNCH TO REFLECT CHANGES)', size_hint=(None, None), size=(400, 400)) # define popup from add_department
add_department_grid = GridLayout(cols=1, padding=[.2, .2, .2, .2]) # define grid to add to the popup
add_department_popup.add_widget(add_department_grid) # add grid to popup
add_department_text = TextInput(hint_text='Type Department Name Here', multiline=False) # text entry box for the grid
        add_department_text.bind(text=lambda adt, value: MainScreen.add_department_func(self, value, 0)) # bind the function to the text entry
add_department_grid.add_widget(add_department_text) # add text entry to the grid
add_department_button = Button(text='Add Department') # button to add text to db, will be added to grid
add_department_button.bind(on_press= lambda adb: MainScreen.add_department_func(self, add_department_text.text, 1)) # binds function to button
add_department_grid.add_widget(add_department_button)
add_department_cancel = Button(text='Cancel')
add_department_cancel.bind(on_press= lambda adc: add_department_popup.dismiss())
add_department_grid.add_widget(add_department_cancel)
add_department.bind(on_press= lambda add: add_department_popup.open())
settings_grid.add_widget(add_department)
# <<<<<<<<<< Change Store Spinner >>>>>>>>>>>>>>
change_store = Button(text='Select Store')
global change_store_popup
change_store_popup = Popup(title='Select Store', size_hint=(None, None), size=(400,200))
change_store_spinner = Spinner(text='Store',size=(100, 50), values=MainScreen.pop_current_store_spinner(self))
change_store_spinner.bind(text=lambda change_store_spinner, css: MainScreen.change_store_spinner_func(self,change_store_spinner.text))
change_store_popup.add_widget(change_store_spinner)
change_store.bind(on_press= lambda csb: change_store_popup.open())
settings_grid.add_widget(change_store)
# populate recently added items on second screen
# AddItems.recent_added_list(add_items)
@classmethod
def pop_current_store_spinner(cls, self, **kwargs):
stores_list_q = conn.execute('SELECT store_name FROM stores;')
stores_list_q = stores_list_q.fetchall()
global stores_list
stores_list = []
for store in stores_list_q:
stores_list.append(store[0])
return stores_list
@classmethod
def change_store_spinner_func(cls, self, store, **kwargs):
global current_store
current_store = store
print(current_store)
change_store_popup.dismiss()
MainScreen.refresh_main_screen(self)
# return self
@classmethod
def delete_all_items_func(cls, self, **kwargs):
conn.execute('DELETE FROM items;')
conn.commit()
print('Items table content deleted')
delete_popup.dismiss()
MainScreen.refresh_main_screen(self)
# return True
@classmethod
def add_store_func(cls, self, text, execute, **kwargs):
global new_store_name
new_store_name = text
if execute == 1:
conn.execute(f'INSERT INTO stores (store_name) VALUES ("{new_store_name}"); ')
conn.commit()
print("Added store to database")
add_store_popup.dismiss()
execute = 0
MainScreen.refresh_main_screen(self)
@classmethod
def add_department_func(cls, self, text, execute, **kwargs):
global new_department_name
new_department_name = text
if execute == 1:
conn.execute(f'INSERT INTO departments (department_name) VALUES ("{new_department_name}"); ')
conn.commit()
print("Added department to database")
            add_department_popup.dismiss()
execute = 0
MainScreen.refresh_main_screen(self)
@classmethod
def check_toggle_state(cls, state, id, **kwargs):
# print('toggle value changes')
if state == True:
# conn.execute(f'UPDATE items SET collected = True WHERE id = {id}')
# conn.commit()
# print(conn.total_changes)
state = 'down'
else:
state = 'normal'
return state
@classmethod
def change_toggle_state(cls, id, **kwargs):
id = toggles_val_list.index(id)
id = toggles_key_list[id]
state = conn.execute(f'SELECT collected FROM items WHERE id = {id}')
state = state.fetchall()
state = state[0]
if state[0] == 0:
conn.execute(f'UPDATE items SET collected = 1 WHERE id = {id}')
conn.commit()
print(conn.total_changes)
state = 'normal'
elif state[0] == 1:
conn.execute(f'UPDATE items SET collected = 0 WHERE id = {id}')
conn.commit()
print(conn.total_changes)
state = 'down'
        return state
@classmethod
def remove_collected_items(cls):
query = conn.execute('DELETE FROM items WHERE collected = 1;')
conn.commit()
print('deleted collected entries')
        # now delete the accordion items that are currently present so we can rebuild them
MainScreen.refresh_main_screen(main_screen)
@classmethod
def refresh_main_screen(cls, self):
for accordion in self.shopping_list.children[0:]:
self.shopping_list.remove_widget(accordion)
        MainScreen.build_accordions(self)  # rebuild the accordions with fresh data
'''def test_func(self):
print('test func ran')'''
class AddItems(Screen):
# object properties not yet tested
unit = ObjectProperty(None)
    store = ObjectProperty(None)
    department = ObjectProperty(None)
item = ObjectProperty(None)
quantity = ObjectProperty(None)
isle = ObjectProperty(None)
recently_added = ObjectProperty(None)
# main_screen = MainScreen()
def __init__(self, **kwargs):
super(AddItems, self).__init__(**kwargs)
AddItems.recent_added_list(self)
def add_new_item(self, value):
# creating a new object
global new_item
new_item = Item(value)
return new_item
def update_item_value(self, field, text):
try:
if field == 'Quantity':
new_item.quantity = text
print(new_item.quantity)
elif field == 'Unit':
unit_id = units.index(text)
new_item.quantity_unit = unit_id
elif field == 'Department':
department_id = departments.index(text)
new_item.department = department_id
elif field == 'Isle':
new_item.isle = text
elif field == 'Store':
store_id = stores.index(text)
new_item.store = store_id
except NameError:
print("Provide an Item name before entering other characteristics.")
def write_to_db(self, **kwargs):
try:
if (self.department.text == 'Departments') | (self.unit.text == 'Units') | (self.store.text == 'Stores') | (self.item.text == '') | (self.quantity.text == ''):
print('Please ensure that all fields are completed prior to adding the item.')
else:
conn.execute(new_item.add_item())
conn.commit()
print('Added Item to DB')
self.clear_inputs()
except (NameError, sqlite3.OperationalError) as e:
print("Please ensure that all fields are completed prior to adding the item.")
MainScreen.refresh_main_screen(main_screen)
AddItems.destroy_recent_added_list(self)
AddItems.recent_added_list(self)
def get_units(self, **kwargs):
global units
units = ["Units"]
cursor = conn.execute("SELECT unit_name FROM units;")
for unit in cursor:
units.append(unit[0])
return units
def unit_spinner(self, text, **kwargs):
try:
new_item.quantity_unit = text
except NameError:
print("Need to provide an item name first!")
def pop_department_spinner(self, **kwargs):
global departments
departments = ["Departments"]
cursor = conn.execute("SELECT department_name FROM departments;")
for department in cursor:
departments.append(department[0])
return departments
def department_spinner(self, text, **kwargs):
print(text)
def pop_store_spinner(self, **kwargs):
global stores
stores = ["Stores"]
cursor = conn.execute("SELECT store_name FROM stores;")
for store in cursor:
stores.append(store[0])
return stores
def store_spinner(self, text, **kwargs):
print(text)
def clear_inputs(self):
self.item.text = ''
self.quantity.text = ''
self.isle.text = ''
self.unit.text = "Units"
self.department.text = "Departments"
self.store.text = "Stores"
@classmethod
def recent_added_list(cls, self):
# first, add the necessary labels
self.recently_added.add_widget(Label(text='Item'))
self.recently_added.add_widget(Label(text='Amount'))
self.recently_added.add_widget(Label(text='Unit'))
self.recently_added.add_widget(Label(text='Department'))
self.recently_added.add_widget(Label(text='Isle'))
self.recently_added.add_widget(Label(text='Store'))
self.recently_added.add_widget(Label(text='DateTime Added'))
# then, assemble dicts for the foreign keys in the db
# assemble a dict of unit_id, department_id, and store_id
# unit_id dict; hopefully this won't be an issue w/ the MainScreen class
unit_dict_q = conn.execute('SELECT unit_id, unit_name FROM units;')
unit_dict_q = unit_dict_q.fetchall()
unit_dict = dict()
for unit in unit_dict_q:
unit_dict[unit[0]] = unit[1]
# department_id dict
department_dict_q = conn.execute('SELECT department_id, department_name FROM departments;')
department_dict_q = department_dict_q.fetchall()
department_dict = dict()
for department in department_dict_q:
department_dict[department[0]] = department[1]
# store_id dict
store_dict_q = conn.execute('SELECT store_id, store_name FROM stores;')
store_dict_q = store_dict_q.fetchall()
store_dict = dict()
for store in store_dict_q:
store_dict[store[0]] = store[1]
# assemble dict of info for recently added items
recent_items_q = conn.execute('SELECT name, quantity, unit_id, department_id, isle, store_id, time_created FROM items;')
recent_items_q = recent_items_q.fetchall()
recent_items = dict()
# this next part is to get all of our times into our dict, so we can more easily stop the loop that builds the recently added table
recent_items['datetime'] = []
for time in recent_items_q:
recent_items['datetime'].append(time[6])
recent_items['datetime'].sort(reverse=True)
count = 0
for time in recent_items['datetime']:
for item in recent_items_q:
if item[6] == time:
recent_items[item[0]] = dict()
recent_items[item[0]]['name'] = item[0]
recent_items[item[0]]['amt'] = item[1]
recent_items[item[0]]['unit'] = unit_dict[int(item[2])]
recent_items[item[0]]['department'] = department_dict[int(item[3])]
recent_items[item[0]]['isle'] = item[4]
recent_items[item[0]]['store'] = store_dict[int(item[5])]
recent_items[item[0]]['datetime added'] = item[6]
                    count += 1
                    if count > 5:
                        break
            # also stop the outer loop once the most recent items have been collected
            if count > 5:
                break
# now, we build the actual displayed table
for k, v in recent_items.items():
if isinstance(v, list):
continue
self.recently_added.add_widget(Label(text=str(recent_items[k]['name']), text_size=(100, None))) # name
self.recently_added.add_widget(Label(text=str(recent_items[k]['amt']))) # amt
self.recently_added.add_widget(Label(text=str(recent_items[k]['unit']))) # unit
self.recently_added.add_widget(Label(text=str(recent_items[k]['department']))) # department
self.recently_added.add_widget(Label(text=str(recent_items[k]['isle']))) # isle
self.recently_added.add_widget(Label(text=str(recent_items[k]['store']))) # store
self.recently_added.add_widget(Label(text=str(recent_items[k]['datetime added']), text_size=(100, None)))
# will be removed later
# assemble the items table as a pd df
'''recent_added_df = pd.read_sql('SELECT name, quantity, unit_id, department_id, isle, store_id, time_created FROM items;', conn)
recent_added_df = recent_added_df.sort_values('time_created', ascending=False)
# go thru the first five items, put them in the grid layout of the add item screen
count = 0
for item in recent_added_df.itertuples():
self.recently_added.add_widget(Label(text=str(item[1]), text_size=(self.width, None))) # name
self.recently_added.add_widget(Label(text=str(item[2]))) # amt
self.recently_added.add_widget(Label(text=str(unit_dict[int(item[3])]))) # unit
self.recently_added.add_widget(Label(text=str(department_dict[item[4]]))) # department
self.recently_added.add_widget(Label(text=str(item[5]))) # isle
self.recently_added.add_widget(Label(text=str(store_dict[int(item[6])]))) # store
self.recently_added.add_widget(Label(text=str(item[7]), text_size=(self.width, None)))
count += 1
if count > 5:
break'''
@classmethod
def destroy_recent_added_list(cls, self):
for label in self.recently_added.children[0:]:
self.recently_added.remove_widget(label)
class MainApp(App):
def build(self):
# return main_layout
global main_screen
global add_items
main_screen = MainScreen(name='MainScreen')
add_items = AddItems(name='AddItems')
sm = ScreenManager()
sm.add_widget(main_screen)
sm.add_widget(add_items)
return sm
# code to add item should be like
# conn.execute(bananas.add_item())
# runTouchApp(MainApp)
if __name__ == '__main__':
app = MainApp()
app.run()
#runTouchApp()
conn.close()
``` |
{
"source": "jmacdonald2010/stock-prediction",
"score": 3
} |
#### File: jmacdonald2010/stock-prediction/reddit_calls.py
```python
import praw
import reddit_auth
import re
def get_new_symbols():
# initialize the API things
# the reddit_auth file is not on git, is used to contain API keys, etc.
reddit = praw.Reddit(
client_id=reddit_auth.client_id,
client_secret=reddit_auth.client_secret,
user_agent = reddit_auth.user_agent
)
popular_stocks = []
# add to the list below as needed
not_stocks = ['NSFW', 'OP', 'LOL', 'OMG', 'FOMO', 'DD']
for post in reddit.subreddit("RobinhoodPennyStocks").hot(limit=20):
comments = post.comments
for top_level_comment in post.comments:
if isinstance(top_level_comment, praw.models.MoreComments):
continue
# print(top_level_comment.body)
words = top_level_comment.body.split()
for word in words:
if len(word) == 1:
continue
if word.isupper():
word = re.sub('[^a-zA-Z]+', '', word)
if word in not_stocks:
continue
popular_stocks.append(word)
elif "$" in word:
# word = word.replace("$", "") # This is going to need a regex to parse these out effectively
word = re.sub('[^a-zA-Z]+', '', word)
if word in not_stocks:
continue
if word.isupper():
popular_stocks.append(word)
# make a dict of the number of times certain securities are mentioned
stock_mention_count = {}
for stock in popular_stocks:
if stock not in stock_mention_count:
stock_mention_count[stock] = 1
elif stock_mention_count[stock] >= 1:
stock_mention_count[stock] += 1
return stock_mention_count
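# A minimal usage sketch, assuming valid credentials exist in reddit_auth
# (this hits the live Reddit API, so counts will vary from run to run):
#
#     mentions = get_new_symbols()
#     for symbol, count in sorted(mentions.items(), key=lambda kv: kv[1], reverse=True)[:10]:
#         print(symbol, count)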
```
#### File: jmacdonald2010/stock-prediction/send_email.py
```python
import smtplib, ssl
import config
import datetime
def send_email(watchlist_stats, reddit_stats, other_stats):
def dict_to_string(stats):
text = ''
for k,v in stats.items():
if isinstance(v, dict):
symbol_placed = False
for key, value in v.items():
if symbol_placed is False:
text = text + k + ': ' + '\n' + key + ': ' + str(value) + '\n'
symbol_placed = True
else:
text = text + key + ': ' + str(value) + '\n'
text = text + '\n'
else:
try:
                    text = text + k + ': ' + str(v) + '\n'
                except Exception:
                    text = text + 'Error occurred on a symbol here.' + '\n'
text = text + '\n'
return text
port = 465
current_date = datetime.datetime.now().strftime('%Y-%m-%d')
# create secure SSL context
context = ssl.create_default_context()
# I know I'll need to update the stats variables, I just need to see what needs to be done first.
message = f"""\
Subject: Stock Predictions, {current_date}
Good Evening,
Here is some information for the symbols on your watchlist:
{dict_to_string(watchlist_stats)}
People on Reddit are talking about these symbols:
{dict_to_string(reddit_stats)}
The following symbols have large projected growth, and may be worth looking into:
{dict_to_string(other_stats)}
And of course, this does not constitute financial/investment advice."""
with smtplib.SMTP_SSL("smtp.gmail.com", port, context=context) as server:
server.login(config.app_email, config.app_email_pw)
server.sendmail(config.app_email, config.email_recipient, message)
'''# testing func
test_dict_01 = {
'ONE': {
'mse': 0.09,
'percent_up': 30
},
'TWO': {
'mse': 0.78,
'percent_up': 45
}
}
test_dict_02 = test_dict_01
test_dict_03 = test_dict_01
send_email(test_dict_01, test_dict_02, test_dict_03)'''
def test_loop_email(training_status):
port = 465
current_date = datetime.datetime.now().strftime('%Y-%m-%d')
# create secure SSL context
context = ssl.create_default_context()
# I know I'll need to update the stats variables, I just need to see what needs to be done first.
if training_status == 'Success':
message = f"""Training of model successful, moving onto to next configuration."""
elif training_status == 'Fail':
message = 'Training of model failed, training loop script crashed.'
elif training_status == 'Complete':
message = 'Completed training of all models. Hooray!'
else:
        message = 'Other Unknown error occurred.'
with smtplib.SMTP_SSL("smtp.gmail.com", port, context=context) as server:
server.login(config.app_email, config.app_email_pw)
server.sendmail(config.app_email, config.email_recipient, message)
# test to see if working
# test_loop_email('Success')
```
#### File: jmacdonald2010/stock-prediction/stock.py
```python
import sqlite3
import datetime
import time
class Stock:
def __init__(self, symbol, short_name=None, full_name=None, sector=None, industry=None, sector_id=None, industry_id=None):
self.symbol = symbol
# Everything else below here does not need to be defined upon creation
self.short_name = short_name
self.full_name = full_name
self.sector = sector
self.industry = industry
self.sector_id = sector_id
self.industry_id = industry_id
def check_industry(self, industry_name, conn):
'''This method takes the industry given as an input and checks to see if it exists in the database. If it does not already exist in the database, it adds it to the database.'''
industries = conn.execute('SELECT industry FROM industry')
industries = industries.fetchall()
# loop thru all of the different industries in the industry table
# if the industry is found, we exit the while loop w/o adding it.
# if the industry is not found, we add it to the db
x = False
for i in industries:
if i[0] == industry_name:
x = True
break
if x is False:
conn.execute(f"INSERT INTO industry (industry) VALUES ('{industry_name}')")
conn.commit()
industry_id = conn.execute(f"SELECT industry_id FROM industry WHERE industry = '{industry_name}'")
        conn.commit() # attempting to fix db locked error
industry_id = industry_id.fetchall()
self.industry_id = industry_id[0][0] # not quite sure if this is how this should work.
def check_sector(self, sector, conn):
'''This method takes the sector given as an input and checks to see if it exists in the database. If it does not already exist in the database, it adds it to the database.'''
sectors = conn.execute('SELECT sector FROM sector')
sectors = sectors.fetchall()
# loop thru all of the different sectors in the sector table
# if the sector is found, we exit the while loop w/o adding it.
# if the sector is not found, we add it to the db
x = False
for i in sectors:
            if i[0] == sector:
x = True
break
if x is False:
conn.execute(f"INSERT INTO sector (sector) VALUES ('{sector}')")
conn.commit()
sector_id = conn.execute(f"SELECT sector_id FROM sector WHERE sector = '{sector}'")
conn.commit() # see above.
sector_id = sector_id.fetchall()
self.sector_id = sector_id[0][0]
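# A minimal usage sketch, assuming an open sqlite3 connection `conn` whose
# industry and sector tables already exist with the columns queried above
# (the ticker and names below are only example values):
#
#     stock = Stock("AAPL")
#     stock.check_industry("Consumer Electronics", conn)
#     stock.check_sector("Technology", conn)
#     print(stock.industry_id, stock.sector_id)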
class Price:
def __init__(self, stock, open_price, high_price, low_price, close_price, volume, price_datetime, dividends=None, stock_splits=None):
self.stock = stock # this should be a stock object
self.open_price = open_price
self.high_price = high_price
self.low_price = low_price
self.close_price = close_price
self.volume = volume
self.price_datetime = price_datetime
# optional
self.dividends = dividends
self.stock_splits = stock_splits
    def __str__(self):
        # __str__ must return a string, so build the summary instead of printing it
        return (
            f"Symbol: {self.stock.symbol}\n"
            f"Open Price: {self.open_price}\n"
            f"High Price: {self.high_price}\n"
            f"Low Price: {self.low_price}\n"
            f"Close Price: {self.close_price}\n"
            f"Volume: {self.volume}\n"
            f"Price Datetime: {self.price_datetime}\n"
            f"Dividends: {self.dividends}\n"
            f"Stock Splits: {self.stock_splits}"
        )
def get_current_datetime():
'''Determines the current Datetime in Year-Month-Day Hour:Min:Sec format and returns it as a string.'''
current_datetime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
return current_datetime
``` |
{
"source": "jmacdonald2010/stock-scanner",
"score": 4
} |
#### File: jmacdonald2010/stock-scanner/add_industry_sector.py
```python
from multiprocessing.sharedctypes import Value
from tables import Industry, Sector
from connect import connect_to_session
def add_industry_sector(table, name, session):
"""Adds an industry or sector name to the appropriate table. Returns the object added to the database.
`table` should be specified by the Class Name, Industry or Sector.
`name` should be presented as a string. Names are case-sensitive.
`session` is the session created by the `connect_to_session` function."""
# Connect to DB
# session = connect_to_session()
table = table.lower()
if table == 'industry':
new = Industry(industry_name=name)
elif table == 'sector':
new = Sector(sector_name=name)
else:
raise ValueError("Incorrect table/object name given. Please specify ONLY Industry or Sector.")
session.add(new)
session.commit()
return new
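# A minimal usage sketch, assuming a reachable database behind
# connect_to_session (the industry name below is only an example value):
#
#     session = connect_to_session()
#     new_industry = add_industry_sector("Industry", "Semiconductors", session)
#     print(new_industry.id)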
def fetch_industry_sector_data(table, session):
"""Fetches and returns as a dictionary the ids and industry/sector names that exist in the database.
`table` is the table name Industry or Sector.
`session` is the session created by the `connect_to_session` function."""
table = table.lower()
if table == 'industry':
table = Industry
elif table == 'sector':
table = Sector
else:
raise ValueError("Incorrect table/object name given. Please specify ONLY Industry or Sector.")
# keys are the NAMES, and vals are the IDs (since we will have the name to start with, we need to get the id)
data = dict()
for row in session.query(table).all():
if table == Industry:
data[row.industry_name] = row.id
elif table == Sector:
data[row.sector_name] = row.id
else:
ValueError("Somehow you raised this error, so this is probably a developer issue.")
return data
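# Note that the returned dictionary is keyed by NAME with the id as the value,
# so a lookup sketch would read (values shown are only illustrative):
#
#     industries = fetch_industry_sector_data("industry", session)
#     semis_id = industries.get("Semiconductors")  # -> e.g. 3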
```
#### File: jmacdonald2010/stock-scanner/add_update_stock.py
```python
from multiprocessing.sharedctypes import Value
from connect import connect_to_session
from tables import Stocks
import yfinance as yf
import datetime
from add_industry_sector import add_industry_sector, fetch_industry_sector_data
from sqlalchemy import func
def add_update_stock(symbol, is_held):
"""This function takes a stock symbol as a string, makes a call to yfinance, and gets back the necessary data to add the symbol to the database.
`is_held` must also be specified, to mark the is_held flag in the database True/False."""
session = connect_to_session()
# I imagine it isn't terribly likely that a company will change industry/sectors
# So all we will focus on updating is is_held and datetime_updated
# Start by querying to see if the stock exists in the database
for row in session.query(Stocks).filter(Stocks.symbol == symbol):
# there should only be one entry here, so this should work
if row.symbol == symbol:
# As long as everything is correct, the function should exit here
stock = row
stock.is_held = is_held
stock.datetime_updated = func.now()
session.commit()
return stock
else:
continue
data = yf.Ticker(symbol).info
# Check to see if the existing Industry/Sectors exist in the Db
# Fetch all Industry names
ind_sect_dict = dict()
ind_sect_dict['industry'] = fetch_industry_sector_data('Industry', session)
ind_sect_dict['sector'] = fetch_industry_sector_data('Sector', session)
# Attempting above as a for loop for less copy/paste
ids = dict()
for x in ind_sect_dict.keys():
try:
ids[f'{x}_id'] = ind_sect_dict[x][data[x]]
except KeyError:
new = add_industry_sector(x, data[x], session)
ids[f'{x}_id'] = new.id
# Create an object of the stock
stock = Stocks(
symbol=symbol,
short_name=data['shortName'],
long_name=data['longName'],
# industry_id=data['industry'],
# sector_id=data['sector'],
industry_id=ids['industry_id'],
sector_id=ids['sector_id'],
is_held=is_held,
datetime_updated=func.now())
# Add entry to DB
session.add(stock)
session.commit()
return stock
if __name__ == "__main__":
# This is mostly for testing, or if it needs to be manually run for some reason.
symbol = input("Input stock symbol: ")
is_held = input("Do you hold a position in this security? Y/N:")
if is_held == 'Y':
is_held = True
elif is_held == 'N':
is_held = False
else:
raise ValueError("Invalid input was given. User should input either Y or N")
add_update_stock(symbol, is_held)
```
#### File: jmacdonald2010/stock-scanner/connect.py
```python
from flask import session
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
def connect():
"""Eventually I'll make the arguments to the connection string actual arguments."""
engine = create_engine('postgresql+psycopg2://postgres:[email protected]:5432/')
return engine
def connect_to_session():
"""Much like connect(), someday, this function may have arguments."""
engine = connect()
Session = sessionmaker(bind=engine)
session = Session(expire_on_commit=False)
return session
``` |
{
"source": "jmacera95/ascii-art",
"score": 4
} |
#### File: ascii-art/example/make_art_python2.py
```python
from PIL import Image
ASCII_CHARS = ['#', '?', '%', '.', 'S', '+', '.', '*', ':', ',', '@']
def scale_image(image, new_width=100):
"""Resizes an image preserving the aspect ratio.
"""
(original_width, original_height) = image.size
aspect_ratio = original_height/float(original_width)
new_height = int(aspect_ratio * new_width)
new_image = image.resize((new_width, new_height))
return new_image
def convert_to_grayscale(image):
return image.convert('L')
def map_pixels_to_ascii_chars(image, range_width=25):
"""Maps each pixel to an ascii char based on the range
in which it lies.
0-255 is divided into 11 ranges of 25 pixels each.
"""
pixels_in_image = list(image.getdata())
pixels_to_chars = [ASCII_CHARS[int(pixel_value/range_width)] for pixel_value in
pixels_in_image]
return "".join(pixels_to_chars)
def convert_image_to_ascii(image, new_width=100):
image = scale_image(image)
image = convert_to_grayscale(image)
pixels_to_chars = map_pixels_to_ascii_chars(image)
len_pixels_to_chars = len(pixels_to_chars)
image_ascii = [pixels_to_chars[index: index + new_width] for index in
range(0, len_pixels_to_chars, new_width)]
return "\n".join(image_ascii)
def handle_image_conversion(image_filepath):
image = None
try:
image = Image.open(image_filepath)
except Exception as e:
print(f"Unable to open image file {image_filepath}.".format(image_filepath=image_filepath))
print(e)
return
image_ascii = convert_image_to_ascii(image)
print(image_ascii)
if __name__ == '__main__':
import sys
image_file_path = sys.argv[1]
handle_image_conversion(image_file_path)
```
#### File: ascii-art/gui/main.py
```python
import os
import sys
import tkinter
import tkinter.font as tkfont
import tkinter.scrolledtext as scrolledtext
from tkinter import filedialog as fd
from PIL import Image
from math import ceil
ASCII_CHARS = [ '#', '?', '%', '.', 'S', '+', '.', '*', ':', ',', '@']
ASCII_CHARS_HR = ['-', '_', '+', '<', '>', 'i', '!', 'l', 'I', '?',
'/', '\\', '|', '(', ')', '1', '{', '}', '[', ']',
'r', 'c', 'v', 'u', 'n', 'x', 'z', 'j', 'f', 't',
'L', 'C', 'J', 'U', 'Y', 'X', 'Z', 'O', '0', 'Q',
'o', 'a', 'h', 'k', 'b', 'd', 'p', 'q', 'w', 'm',
'*', 'W', 'M', 'B', '8', '&', '%', '$', '#', '@']
# Modified example code for Creating GUI Application using Tkinter by @Jude_Savio
app = tkinter.Tk()
text_box = scrolledtext.ScrolledText(app,bg='white',fg='#4682B4')
text_box.tag_configure("center",justify="center")
image_path = ''
message = ''
highres = tkinter.IntVar()
def scale_image(image, new_width=100):
"""Resizes an image preserving the aspect ratio.
"""
(original_width, original_height) = image.size
aspect_ratio = original_height/float(original_width)
new_height = int(aspect_ratio * new_width)
new_image = image.resize((new_width, new_height))
return new_image
def convert_to_grayscale(image):
return image.convert('L')
def map_pixels_to_ascii_chars(image):
"""Maps each pixel to an ascii char based on the range
in which it lies.
0-255 is divided into ranges of pixels based on the number of
characters in ASCII_CHARS
"""
if highres.get() == 1:
ascii_chars = ASCII_CHARS_HR
else:
ascii_chars = ASCII_CHARS
# Calculates the ranges of pixels based on the number of characters in ascii_chars
range_width = ceil(255 / len(ascii_chars))
pixels_in_image = list(image.getdata())
pixels_to_chars = [ascii_chars[int(pixel_value/range_width)] for pixel_value in
pixels_in_image]
return "".join(pixels_to_chars)
def convert_image_to_ascii(image, new_width=100):
image = scale_image(image)
image = convert_to_grayscale(image)
pixels_to_chars = map_pixels_to_ascii_chars(image)
len_pixels_to_chars = len(pixels_to_chars)
image_ascii = [pixels_to_chars[index: index + new_width] for index in
range(0, len_pixels_to_chars, new_width)]
return "\n".join(image_ascii)
def handle_image_conversion(image_filepath):
image = None
try:
image = Image.open(image_filepath)
print('Successful')
except Exception as e:
print(f"Unable to open image file {image_filepath}.")
print(e)
return
image_ascii = convert_image_to_ascii(image)
message = image_ascii
    print(message)
text_box.insert(tkinter.INSERT,message)
text_box.tag_add("center", "1.0", "end")
def clear_screen():
text_box.delete('1.0', 'end')
def browse_file():
""" For Getting the file path that gets chosen by the user
"""
filename = fd.askopenfilename()
    print('Browse File Function - ', filename)
    image_path = filename
    handle_image_conversion(image_path)
def quit_app():
""" To quit the mainloop of the app
"""
app.destroy()
def print_hr():
print(highres.get())
def create_window():
""" Specifying the Structure and the widgets of the app
"""
app.title("ASCII-QT")
app.minsize(900,400)
app.configure(bg='#003366')
font = tkfont.Font(family="Sans Serif" ,size=20, weight="bold")
bt_font = tkfont.Font(family="Sans Serif" ,size=10, weight="bold")
label = tkinter.Label(app, text="ASCII ART",font=font, bg='#003366',fg='white')
label.pack(pady=10)
text_box.pack(pady=30,expand=True, fill='both')
button_frame = tkinter.Frame(app,bg='#003366')
button_frame.pack(side='bottom',pady=15)
hr_box = tkinter.Checkbutton(button_frame, text='High Res', variable=highres, onvalue=1, offvalue=0, command=print_hr)
choose_button = tkinter.Button(button_frame, text="Choose",width=10,activebackground='white',activeforeground='#4682B4',font=bt_font,command=browse_file)
clear_button = tkinter.Button(button_frame, text="Clear", width=10, activebackground='white',
activeforeground='#4682B4', font=bt_font, command=clear_screen)
cancel_button = tkinter.Button(button_frame,text="Quit",width=10,activebackground='white',activeforeground='#4682B4',font=bt_font,command=quit_app)
hr_box.pack(side='left',padx=25)
choose_button.pack(side = 'left',padx=25)
clear_button.pack(side='left', padx=25)
cancel_button.pack(side = 'left',padx=25)
app.mainloop()
def main():
create_window()
if __name__=='__main__':
main()
```
#### File: ascii_art_server/api/views.py
```python
from django.http import JsonResponse
# Create your views here.
def home(request):
return JsonResponse({'info': 'Ascii Art Server', 'name': 'ZTM'})
```
#### File: ascii-art/tests/test_ascii_art.py
```python
import pytest
from ascii_art import ascii_art
def test_1_colorText():
text = 'test'
def_output = ascii_art.colorText(text)
assert type(def_output) is str
def test_2_is_supported():
path = '../ztm-logo.png'
def_output = ascii_art.is_supported(path)
assert def_output
def test_3_is_supported():
bad_path = '.ztm-logo'
def_output = ascii_art.is_supported(bad_path)
assert not def_output
def test_4_check_file():
path = '../ztm-logo.png'
def_output = ascii_art.check_file(path)
assert def_output is None
def test_5_write_file():
def_output = ascii_art.write_file('test', 'test')
assert def_output
def test_6_write_file():
def_output = ascii_art.write_file('', 'test')
assert not def_output
def test_7_write_file():
with pytest.raises(TypeError):
ascii_art.write_file('')
def test_8_output_name():
assert type(ascii_art.output_name('test')) is str
def test_9_output_name():
with pytest.raises(TypeError):
ascii_art.output_name()
def test_10_all_supported_files():
assert type(ascii_art.all_supported_files()) is list
def test_ascii_chars():
ASCII_CHARS = ['#', '?', '%', '.', 'S', '+', '.', '*', ':', ',', '@']
assert ASCII_CHARS == ascii_art.ASCII_CHARS
def test_color_options():
COLOR_OPTIONS = ['black', 'blue', 'cyan', 'green', 'magenta', 'red', 'white', 'yellow']
assert COLOR_OPTIONS == ascii_art.COLOR_OPTIONS
def test_supported_image_types():
SUPPORTED_IMAGE_TYPES = ('.png', '.jpeg', '.jpg')
assert SUPPORTED_IMAGE_TYPES == ascii_art.SUPPORTED_IMAGE_TYPES
``` |
{
"source": "jmaces/aapm-ct-challenge",
"score": 3
} |
#### File: aapm-ct-challenge/aapm-ct/data_management.py
```python
import gzip
import os
import numpy as np
import torch
from config import DATA_PATH
# ----- data transforms -----
class Permute(object):
""" Permute order of (fbp, sino, target) triples. """
def __init__(self, perm):
self.perm = perm
def __call__(self, inputs):
out = tuple([inputs[k] for k in self.perm])
return out
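# For example, Permute((2, 1, 0)) turns a (fbp, sino, target) triple into
# (target, sino, fbp); an instance can be passed as the dataset `transform`.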
# ----- datasets -----
class CTDataset(torch.utils.data.Dataset):
""" AAPM Computed Tomography Challenge dataset.
Loads (fbp, sinogram, target) data from a single data batch file.
Parameters
----------
subset : string
One of 'train', 'val', 'test' or valid sub directory path.
Determines the subf directory to search for data files.
batch : int
Number of the data batch to load. One of [1,2,3,4] for `train` subset
(Files are split across four files). Should be 1 for `val` and `test`
subset.
folds : int
Number of folds for data splitting (e.g. for cross-validation)
(Default 10)
num_fold: int or list
Number of the current fold to use. One of [0,...,folds-1]. Use a list
to use multiple folds. (Default 0)
leave_out : bool
        Leave out the specified folds; otherwise only these folds are kept
        (e.g. set to True for training data and False for
        validation data). (Default True)
transform : callable
Additional data transforms for pre-processing (Default None)
device : torch.device
Device (e.g. CPU, GPU cuda specifier) to place the data on.
(Default None)
"""
def __init__(
self,
subset,
batch,
folds=10,
num_fold=0,
leave_out=True,
transform=None,
device=None,
):
# choose directory according to subset
if subset == "train":
path = os.path.join(DATA_PATH, "training_data")
elif subset == "val":
path = os.path.join(DATA_PATH, "validation_data")
elif subset == "test":
path = os.path.join(DATA_PATH, "test_data")
else:
path = os.path.join(DATA_PATH, subset)
self.transform = transform
self.device = device
# load data files
self.sinogram = np.load(
gzip.GzipFile(
os.path.join(path, "Sinogram_batch{}.npy.gz".format(batch)),
"r",
)
)
self.fbp = np.load(
gzip.GzipFile(
os.path.join(path, "FBP128_batch{}.npy.gz".format(batch)), "r"
)
)
if not subset == "val" and not subset == "test":
self.phantom = np.load(
gzip.GzipFile(
os.path.join(path, "Phantom_batch{}.npy.gz".format(batch)),
"r",
)
)
else:
self.phantom = 0.0 * self.fbp # no ground truth data exists here
assert self.phantom.shape[0] == self.sinogram.shape[0]
assert self.phantom.shape[0] == self.fbp.shape[0]
# split dataset for cross validation
fold_len = self.phantom.shape[0] // folds
if not isinstance(num_fold, list):
num_fold = [num_fold]
p_list, s_list, f_list = [], [], []
for cur_fold in range(folds):
il = cur_fold * fold_len
ir = il + fold_len
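            # keep this fold when exactly one of `leave_out` and `cur_fold in num_fold` holds,
            # i.e. drop the selected folds if leave_out=True and keep only them otherwise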
if leave_out ^ (cur_fold in num_fold):
p_list.append(self.phantom[il:ir])
s_list.append(self.sinogram[il:ir])
f_list.append(self.fbp[il:ir])
self.phantom = np.concatenate(p_list, axis=0)
self.sinogram = np.concatenate(s_list, axis=0)
self.fbp = np.concatenate(f_list, axis=0)
# transform numpy to torch tensor
self.phantom = torch.tensor(self.phantom, dtype=torch.float)
self.sinogram = torch.tensor(self.sinogram, dtype=torch.float)
self.fbp = torch.tensor(self.fbp, dtype=torch.float)
def __len__(self):
return self.phantom.shape[0]
def __getitem__(self, idx):
# add channel dimension
out = (
self.fbp[idx, ...].unsqueeze(0),
self.sinogram[idx, ...].unsqueeze(0),
self.phantom[idx, ...].unsqueeze(0),
)
# move to device and apply transformations
if self.device is not None:
out = tuple([x.to(self.device) for x in out])
if self.transform is not None:
out = self.transform(out)
return out
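# A minimal usage sketch for a single batch file (assumes the gzipped .npy
# files exist under config.DATA_PATH); load_ct_data below is the usual entry
# point that concatenates several of these batches:
#
#     ds = CTDataset("train", batch=1, folds=10, num_fold=0, leave_out=True)
#     fbp, sino, target = ds[0]  # each tensor carries a leading channel dimension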
def load_ct_data(subset, num_batches=4, **kwargs):
""" Concatenates individual CTDatasets from four files.
Parameters
----------
subset : string
one of 'train', 'val', or 'test' or valid sub directory path.
**kwargs : dictionary
additional keyword arguments passed on to the CTDatasets.
Returns
-------
Combined dataset from multiple data batch files.
"""
if not subset == "val" and not subset == "test":
num_batches = min(num_batches, 4)
else:
num_batches = 1
return torch.utils.data.ConcatDataset(
[
CTDataset(subset, batch, **kwargs)
for batch in range(1, num_batches + 1)
]
)
# ---- run data exploration -----
if __name__ == "__main__":
# validate data set and print some simple statistics
tdata = load_ct_data("train", folds=10, num_fold=[0, 9], leave_out=True)
vdata = load_ct_data("train", folds=10, num_fold=[0, 9], leave_out=False)
print(len(tdata))
print(len(vdata))
y, z, x = tdata[0]
print(y.shape, z.shape, x.shape)
print(y.min(), z.min(), x.min())
print(y.max(), z.max(), x.max())
```
#### File: aapm-ct-challenge/aapm-ct/script_evalute_operator.py
```python
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
from data_management import load_ct_data
from networks import RadonNet
from operators import l2_error
# ----- load configuration -----
import config # isort:skip
# ----- global configuration -----
device = torch.device("cuda:0")
torch.cuda.set_device(0)
# define operator
d = torch.load(
os.path.join(
config.RESULTS_PATH,
"operator_radon_bwd_train_phase_1",
"model_weights.pt",
),
map_location=device,
)
radon_net = RadonNet.new_from_state_dict(d)
# overwrite default settings (if necessary)
radon_net.OpR.flat = True
radon_net.OpR.filter_type = "hamming"
operator = radon_net.OpR.to(device)
print(list(operator.parameters()))
radon_net.freeze()
# ----- data configuration -----
# always use same folds, num_fold for both train and val
# always use leave_out=True on train and leave_out=False on val data
val_data_params = {
"folds": 32,
"num_fold": 0,
"leave_out": False,
}
val_data = load_ct_data("train", **val_data_params)
data_load_val = torch.utils.data.DataLoader(val_data, 1)
# ----- iterate over val data -----
chall_loss = 0
our_loss = 0
chall_relE = 0
our_relE = 0
sino_relE = 0
with torch.no_grad():
for i, v_batch in reversed(list(enumerate(data_load_val))):
# get items
our_fbp = operator.inv(v_batch[1].to(device))
chall_fbp = v_batch[0].to(device)
gt = v_batch[2].to(device)
sino = v_batch[1].to(device)
# calc measures
our_loss += l2_error(our_fbp, gt, relative=False, squared=False)[
0
].item() / np.sqrt(gt.shape[-1] * gt.shape[-2])
chall_loss += l2_error(chall_fbp, gt, relative=False, squared=False)[
0
].item() / np.sqrt(gt.shape[-1] * gt.shape[-2])
chall_relE += l2_error(chall_fbp, gt, relative=True, squared=False)[
0
].item()
our_relE += l2_error(our_fbp, gt, relative=True, squared=False)[
0
].item()
sino_relE += l2_error(
operator.dot(gt), sino, relative=True, squared=False
)[0].item()
our_loss = our_loss / val_data.__len__()
chall_loss = chall_loss / val_data.__len__()
chall_relE = chall_relE / val_data.__len__()
our_relE = our_relE / val_data.__len__()
sino_relE = sino_relE / val_data.__len__()
# ----- plotting -----
fig, subs = plt.subplots(2, 5, clear=True, num=1, figsize=(30, 15))
def _implot(sub, im, vmin=0, vmax=0.3, aspect=1.0):
if im.shape[-3] == 2: # complex image
p = sub.imshow(
torch.sqrt(im.pow(2).sum(-3))[0, :, :].detach().cpu(),
vmin=vmin,
vmax=vmax,
aspect=aspect,
)
else: # real image
p = sub.imshow(
im[0, 0, :, :].detach().cpu(), vmin=vmin, vmax=vmax, aspect=aspect
)
return p
# gt
p02 = _implot(subs[0, 2], gt)
subs[0, 2].set_title("gt")
plt.colorbar(p02, ax=subs[0, 2])
vmin, vmax = p02.get_clim()
# gt zoom
p12 = _implot(subs[1, 2], gt[..., 300:412, 300:412], vmin=vmin, vmax=vmax)
subs[1, 2].set_title("gt zoom")
plt.colorbar(p12, ax=subs[1, 2])
# our_fbp
p00 = _implot(subs[0, 0], our_fbp, vmin=vmin, vmax=vmax)
subs[0, 0].set_title("our fbp: loss = \n " "{:1.2e}".format(our_loss.item()))
plt.colorbar(p00, ax=subs[0, 0])
# chall_fbp
p10 = _implot(subs[1, 0], chall_fbp, vmin=vmin, vmax=vmax)
subs[1, 0].set_title(
"chall fbp: loss = \n " "{:1.2e}".format(chall_loss.item())
)
plt.colorbar(p10, ax=subs[1, 0])
# our_fbp zoom
p01 = _implot(subs[0, 1], our_fbp[..., 300:412, 300:412], vmin=vmin, vmax=vmax)
subs[0, 1].set_title("our fbp: relE = \n " "{:1.2e}".format(our_relE))
plt.colorbar(p01, ax=subs[0, 1])
# chall_fbp zoom
p11 = _implot(
subs[1, 1], chall_fbp[..., 300:412, 300:412], vmin=vmin, vmax=vmax
)
subs[1, 1].set_title("chall fbp: relE = \n " "{:1.2e}".format(chall_relE))
plt.colorbar(p11, ax=subs[1, 1])
# our_fbp difference plot
p03 = _implot(subs[0, 3], (our_fbp - gt), vmin=-0.035, vmax=0.035)
subs[0, 3].set_title("our fbp difference")
plt.colorbar(p03, ax=subs[0, 3])
# chall_fbp difference plot
p04 = _implot(subs[0, 4], (chall_fbp - gt), vmin=-0.035, vmax=0.035)
subs[0, 4].set_title("chall fbp difference")
plt.colorbar(p04, ax=subs[0, 4])
# dc check
p13 = _implot(
subs[1, 3],
operator.inv(sino - operator.dot(gt)),
vmin=-0.0015,
vmax=0.0015,
)
subs[1, 3].set_title("dc check")
plt.colorbar(p13, ax=subs[1, 3])
# sino difference plot
p14 = _implot(
subs[1, 4],
(sino - operator.dot(gt)),
vmin=-0.0035,
vmax=0.0035,
aspect="auto",
)
subs[1, 4].set_title("sino rel err: = \n " "{:1.2e}".format(sino_relE))
plt.colorbar(p14, ax=subs[1, 4])
plt.show()
``` |
{
"source": "jmaces/fw-rde",
"score": 2
} |
#### File: fw-rde/stl10/models.py
```python
import numpy as np
from tensorflow.keras import layers, models
from kerasadf import layers as adflayers
# GLOBAL DEFAULT PARAMETERS
MODELPATH = 'stl10-vgg16-avgpool-weights.hdf5'
INPUT_SHAPE = (224, 224, 3) # channels last, 3 colors 224 x 224
# STANDARD TF-KERAS MODELS
def load_model(path=MODELPATH, softmax=False):
# input layer
inputs = layers.Input(INPUT_SHAPE)
# first convolution and pooling block
conv1a = layers.Conv2D(64, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(inputs)
conv1b = layers.Conv2D(64, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv1a)
pool1 = layers.AveragePooling2D(2)(conv1b)
# second convolution and pooling block
conv2a = layers.Conv2D(128, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(pool1)
conv2b = layers.Conv2D(128, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv2a)
pool2 = layers.AveragePooling2D(2)(conv2b)
# third convolution and pooling block
conv3a = layers.Conv2D(256, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(pool2)
conv3b = layers.Conv2D(256, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv3a)
conv3c = layers.Conv2D(256, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv3b)
pool3 = layers.AveragePooling2D(2)(conv3c)
# fourth convolution and pooling block
conv4a = layers.Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(pool3)
conv4b = layers.Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv4a)
conv4c = layers.Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv4b)
pool4 = layers.AveragePooling2D(2)(conv4c)
# fifth convolution and pooling block
conv5a = layers.Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(pool4)
conv5b = layers.Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv5a)
conv5c = layers.Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal')(conv5b)
pool5 = layers.AveragePooling2D(2)(conv5c)
# dense and output block
flat = layers.Flatten()(pool5)
dense1 = layers.Dense(4096, activation='relu',
kernel_initializer='glorot_normal')(flat)
dense2 = layers.Dense(4096, activation='relu',
kernel_initializer='glorot_normal')(dense1)
if softmax:
activation = 'softmax'
else:
activation = None
dense3 = layers.Dense(10, activation=activation,
kernel_initializer='glorot_normal')(dense2)
# build model
model = models.Model(inputs, dense3)
model.load_weights(MODELPATH)
return model
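# A minimal usage sketch, assuming the pretrained weight file exists at MODELPATH:
#
#     model = load_model(softmax=True)
#     probs = model.predict(np.zeros((1,) + INPUT_SHAPE, dtype=np.float32))
#     print(probs.shape)  # (1, 10)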
# ADF TF-KERAS MODELS
def load_adfmodel(path=MODELPATH, mode='diag', rank=None):
# input layer
input_mean = layers.Input(INPUT_SHAPE)
if mode == 'diag':
input_var = layers.Input(INPUT_SHAPE)
elif mode == 'half':
if rank is None:
rank = np.prod(INPUT_SHAPE)
input_var = layers.Input([rank]+list(INPUT_SHAPE))
elif mode == 'full':
input_var = layers.Input(list(INPUT_SHAPE)*2)
# first convolution and pooling block
conv1a = adflayers.Conv2D(64, 3, activation='relu', padding='same',
kernel_initializer='he_normal', mode=mode)(
[input_mean, input_var]
)
conv1b = adflayers.Conv2D(64, 3, activation='relu', padding='same',
kernel_initializer='he_normal', mode=mode)(conv1a)
pool1 = adflayers.AveragePooling2D(2, mode=mode)(conv1b)
# second convolution and pooling block
conv2a = adflayers.Conv2D(128, 3, activation='relu', padding='same',
kernel_initializer='he_normal', mode=mode)(pool1)
conv2b = adflayers.Conv2D(128, 3, activation='relu', padding='same',
kernel_initializer='he_normal', mode=mode)(conv2a)
pool2 = adflayers.AveragePooling2D(2, mode=mode)(conv2b)
# third convolution and pooling block
conv3a = adflayers.Conv2D(256, 3, activation='relu', padding='same',
kernel_initializer='he_normal', mode=mode)(pool2)
conv3b = adflayers.Conv2D(256, 3, activation='relu', padding='same',
kernel_initializer='he_normal', mode=mode)(conv3a)
conv3c = adflayers.Conv2D(256, 3, activation='relu', padding='same',
kernel_initializer='he_normal', mode=mode)(conv3b)
pool3 = adflayers.AveragePooling2D(2, mode=mode)(conv3c)
# fourth convolution and pooling block
conv4a = adflayers.Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal', mode=mode)(pool3)
conv4b = adflayers.Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal', mode=mode)(conv4a)
conv4c = adflayers.Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal', mode=mode)(conv4b)
pool4 = adflayers.AveragePooling2D(2, mode=mode)(conv4c)
# fifth convolution and pooling block
conv5a = adflayers.Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal', mode=mode)(pool4)
conv5b = adflayers.Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal', mode=mode)(conv5a)
conv5c = adflayers.Conv2D(512, 3, activation='relu', padding='same',
kernel_initializer='he_normal', mode=mode)(conv5b)
pool5 = adflayers.AveragePooling2D(2, mode=mode)(conv5c)
# dense and output block
flat = adflayers.Flatten(mode=mode)(pool5)
dense1 = adflayers.Dense(4096, activation='relu',
kernel_initializer='glorot_normal', mode=mode)(flat)
dense2 = adflayers.Dense(4096, activation='relu',
kernel_initializer='glorot_normal', mode=mode)(dense1)
dense3 = adflayers.Dense(10, kernel_initializer='glorot_normal', mode=mode)(dense2)
# build model
model = models.Model([input_mean, input_var], dense3)
model.load_weights(MODELPATH)
return model
``` |
{
"source": "jmaces/keras-adf",
"score": 3
} |
#### File: keras-adf/tests/strategies.py
```python
import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
import numpy as np
from hypothesis import assume
# constants for various tests
COVARIANCE_MODES = ["diag", "half", "full"]
# array comparison helpers robust to precision loss
def assert_eq(x, y, atol=None, rtol=1e-7):
"""Robustly and symmetrically assert x == y componentwise. """
if atol is None:
atol = max(np.finfo(x.dtype).eps, np.finfo(y.dtype).eps)
tol = atol + rtol * np.maximum(np.abs(x), np.abs(y), dtype=np.float64)
np.testing.assert_array_less(np.abs(x - y), tol)
def assert_leq(x, y, atol=None, rtol=1e-7):
"""Robustly assert x <= y componentwise. """
if atol is None:
atol = max(np.finfo(x.dtype).eps, np.finfo(y.dtype).eps)
mask = np.greater(x, y)
np.testing.assert_allclose(x[mask], y[mask], atol=atol, rtol=rtol)
# data generation strategies
def clean_floats(min_value=-1e15, max_value=1e15, width=32):
"""Custom floating point number strategy.
Working with very large or very small floats leads to over-/underflow
problems. To avoid this we assume ``reasonable`` numbers for our tests.
We exclude NaN, infinity, and negative infinity.
The following ranges are recommended, so that squares (e.g. for variances)
stay within the data type limits:
-1e30 to +1e30 for 64-bit floats.
-1e15 to +1e15 for 32-bit floats. (default)
-200 to +200 for 16-bit floats.
If your code really runs into floats outside this range probably something
is wrong somewhere else.
"""
if width == 64:
min_value, max_value = np.clip(
(min_value, max_value), -1e30, 1e30
).astype(np.float64)
elif width == 32:
min_value, max_value = np.clip(
(min_value, max_value), -1e15, 1e15
).astype(np.float32)
elif width == 16:
min_value, max_value = np.clip(
(min_value, max_value), -150, 150
).astype(np.float16)
else:
raise ValueError(
"Invalid width parameted, expected 16, 32, or 64"
"but got {}.".format(width)
)
return st.floats(
allow_nan=False,
allow_infinity=False,
min_value=min_value,
max_value=max_value,
width=width,
)
@st.composite
def batched_float_array(
draw,
min_batch_size=None,
max_batch_size=None,
min_data_dims=1,
max_data_dims=None,
min_data_size=None,
max_data_size=None,
):
"""Float array strategy for different covariance modes.
Generates tuples of batched mean and covariance arrays of shapes consistent
for one of the allowed convariance modes {"diag", "half", "full"}.
Shapes are arbitrary with at least two dimensions (first for batch_size,
the remaining for the true data dimensions).
Content can be any floating point data type.
A minimum and maximum size for the batch dimension, the number of data
dimensions and product of data dimensions can be specified respectively.
Yields tuples (means, covariacnes, mode).
"""
mode = draw(st.sampled_from(COVARIANCE_MODES))
dtype = draw(hnp.floating_dtypes())
    n_bytes = dtype.itemsize
    bits = 8 * n_bytes
if max_data_dims is not None:
shape = draw(
hnp.array_shapes(
min_dims=1 + min_data_dims, max_dims=1 + max_data_dims
)
)
else:
shape = draw(hnp.array_shapes(min_dims=1 + min_data_dims))
if min_batch_size is not None:
assume(shape[0] >= min_batch_size)
if max_batch_size is not None:
assume(shape[0] <= max_batch_size)
if min_data_size is not None:
assume(np.prod(shape[1:]) >= min_data_size)
if max_data_size is not None:
assume(np.prod(shape[1:]) <= max_data_size)
means_ar = hnp.arrays(
dtype,
shape,
elements=clean_floats(width=bits),
fill=clean_floats(width=bits),
)
if mode == "diag":
covariances_ar = hnp.arrays(
dtype,
shape,
elements=clean_floats(width=bits, min_value=0.0),
fill=clean_floats(width=bits, min_value=0.0),
)
elif mode == "half":
rank = draw(st.integers(min_value=1, max_value=np.prod(shape[1:])))
covariances_ar = hnp.arrays(
dtype,
(shape[0], rank) + shape[1:],
elements=clean_floats(width=bits),
fill=clean_floats(width=bits),
)
elif mode == "full":
def fac_to_cov(L):
shape = L.shape
L = np.reshape(L, [shape[0], np.prod(shape[1:]), 1])
return np.reshape(
np.matmul(L, np.transpose(L, [0, 2, 1])),
(shape[0],) + shape[1:] + shape[1:],
)
covariances_ar = hnp.arrays(
dtype,
shape,
elements=clean_floats(width=bits),
fill=clean_floats(width=bits),
).map(fac_to_cov)
return draw(means_ar), draw(covariances_ar), mode
```
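The mode-dependent shape conventions described in the `batched_float_array` docstring are easiest to see on a concrete draw. The following sketch is hypothetical (it is not part of the test suite) and only illustrates how a minimal Hypothesis test could consume the strategy and check the covariance shapes per mode.
```python
import numpy as np
from hypothesis import given

from .strategies import batched_float_array


@given(batched_float_array(min_data_dims=2, max_data_dims=2))
def test_shapes_match_mode(x):
    means, covariances, mode = x
    batch, data_shape = means.shape[0], means.shape[1:]
    if mode == "diag":
        # one variance per mean entry
        assert covariances.shape == means.shape
    elif mode == "half":
        # low-rank factor of shape (batch, rank) + data dims
        assert covariances.shape[0] == batch
        assert covariances.shape[2:] == data_shape
    elif mode == "full":
        # full covariance: the data dims appear twice
        assert covariances.shape == (batch,) + data_shape + data_shape
    assert np.all(np.isfinite(means))
```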
#### File: keras-adf/tests/test_activations.py
```python
from __future__ import absolute_import, division, print_function
import numpy as np
import pytest
from hypothesis import given
from tensorflow.keras import backend as K
import kerasadf.activations
import kerasadf.layers
from .strategies import assert_eq, assert_leq, batched_float_array
# constants for various tests
ALL_ACTIVATIONS = ["linear", "relu"]
# serialization test
@pytest.mark.parametrize("name", ALL_ACTIVATIONS)
def test_serialization(name):
K.clear_session()
fn = kerasadf.activations.get(name)
ref_fn = getattr(kerasadf.activations, name)
assert fn == ref_fn
config = kerasadf.activations.serialize(fn)
fn = kerasadf.activations.deserialize(config)
assert fn == ref_fn
@pytest.mark.parametrize("name", ALL_ACTIVATIONS)
def test_serialization_with_layers(name):
K.clear_session()
activation = kerasadf.activations.get(name)
layer_from_name = kerasadf.layers.Dense(3, activation=name)
layer_from_activation = kerasadf.layers.Dense(3, activation=activation)
config_from_name = kerasadf.layers.serialize(layer_from_name)
config_from_activation = kerasadf.layers.serialize(layer_from_activation)
deserialized_layer_from_name = kerasadf.layers.deserialize(
config_from_name
)
deserialized_layer_from_activation = kerasadf.layers.deserialize(
config_from_activation
)
assert (
deserialized_layer_from_name.__class__.__name__
== layer_from_name.__class__.__name__
)
assert (
deserialized_layer_from_activation.__class__.__name__
== layer_from_activation.__class__.__name__
)
assert (
deserialized_layer_from_name.__class__.__name__
== layer_from_activation.__class__.__name__
)
assert (
deserialized_layer_from_activation.__class__.__name__
== layer_from_name.__class__.__name__
)
assert deserialized_layer_from_name.activation == activation
assert deserialized_layer_from_activation.activation == activation
# activation tests
@given(batched_float_array())
def test_linear_eq_np_linear(x):
def _np_linear(means, covariances, mode):
return means, covariances
K.clear_session()
means, covariances, mode = x
means_out, covariances_out = kerasadf.activations.linear(
[means, covariances], mode=mode
)
means_ref, covariances_ref = _np_linear(means, covariances, mode)
assert_eq(means_ref, means_out)
assert_eq(covariances_ref, covariances_out)
@given(batched_float_array())
def test_relu(x):
K.clear_session()
means, covariances, mode = x
means_tensor = K.placeholder(means.shape, dtype=means.dtype)
covariances_tensor = K.placeholder(
covariances.shape, dtype=covariances.dtype
)
f = K.function(
[means_tensor, covariances_tensor],
kerasadf.activations.relu(
[means_tensor, covariances_tensor], mode=mode
),
)
means_out, covariances_out = f([means, covariances])
assert means.shape == means_out.shape
assert covariances.shape == covariances_out.shape
assert means.dtype.name == means_out.dtype.name
assert covariances.dtype.name == covariances_out.dtype.name
assert_leq(np.zeros_like(means_out), means_out)
assert_leq(means, means_out)
if mode == "diag":
variances_out = covariances_out
elif mode == "half":
cov_shape = covariances_out.shape
variances_out = np.reshape(
np.sum(
np.square(
np.reshape(
covariances_out,
(cov_shape[0], cov_shape[1], np.prod(cov_shape[2:])),
)
),
axis=1,
),
means_out.shape,
)
elif mode == "full":
cov_shape = covariances_out.shape
cov_rank = len(cov_shape) - 1
variances_out = np.reshape(
np.diagonal(
np.reshape(
covariances_out,
(
cov_shape[0],
np.prod(cov_shape[1 : cov_rank // 2 + 1]),
np.prod(cov_shape[cov_rank // 2 + 1 :]),
),
),
axis1=-2,
axis2=-1,
),
means_out.shape,
)
assert means_out.shape == variances_out.shape
assert_leq(np.zeros_like(variances_out), variances_out)
```
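The properties asserted in `test_relu` (output means are nonnegative and at least as large as the input means, variances stay nonnegative) follow from the moments of a rectified Gaussian. The snippet below only illustrates the standard closed-form moments commonly used in assumed density filtering; it is not necessarily the exact formula implemented inside `kerasadf`, and it additionally assumes `scipy` is available.
```python
import numpy as np
from scipy.stats import norm


def relu_moments(mu, var):
    """Mean and variance of relu(x) for x ~ N(mu, var) (rectified Gaussian)."""
    std = np.sqrt(var)
    z = mu / np.maximum(std, 1e-12)
    mean_out = mu * norm.cdf(z) + std * norm.pdf(z)
    second_moment = (mu ** 2 + var) * norm.cdf(z) + mu * std * norm.pdf(z)
    var_out = np.maximum(second_moment - mean_out ** 2, 0.0)
    return mean_out, var_out


mu = np.array([-1.0, 0.0, 2.0])
var = np.ones_like(mu)
m, v = relu_moments(mu, var)
# output mean dominates both zero and the input mean; variance stays >= 0
assert np.all(m >= np.maximum(mu, 0.0) - 1e-12) and np.all(v >= 0.0)
```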
#### File: keras-adf/tests/test_pooling.py
```python
from __future__ import absolute_import, division, print_function
import hypothesis.strategies as st
import numpy as np
import pytest
from hypothesis import given, settings
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
import kerasadf.layers
from .strategies import batched_float_array
# pooling layer tests
@settings(deadline=None)
@pytest.mark.parametrize("padding", ["same", "valid"])
@given(
st.integers(min_value=1, max_value=8),
st.integers(min_value=1, max_value=8),
batched_float_array(min_data_dims=2, max_data_dims=2),
)
def test_average_pool_1d(padding, pool_size, strides, x):
K.clear_session()
means, covariances, mode = x
strides = min(strides, means.shape[1])
pool_size = min(pool_size, means.shape[1])
im = Input(shape=means.shape[1:], dtype=means.dtype)
ic = Input(shape=covariances.shape[1:], dtype=covariances.dtype)
layer = kerasadf.layers.AveragePooling1D(
pool_size, strides, padding, mode=mode
)
ms, cs = layer.compute_output_shape([im.shape, ic.shape])
om, oc = layer([im, ic])
model = Model([im, ic], [om, oc])
means_out, covariances_out = model.predict([means, covariances])
if padding == "same":
out_size = np.ceil(means.shape[1] / strides)
elif padding == "valid":
out_size = np.ceil((means.shape[1] - pool_size + 1) / strides)
assert means.shape[0] == means_out.shape[0]
assert out_size == means_out.shape[1]
assert means.shape[2] == means_out.shape[2]
assert ms.as_list() == om.shape.as_list()
if mode == "diag":
assert covariances.shape[0] == covariances_out.shape[0]
assert out_size == covariances_out.shape[1]
assert covariances.shape[2] == covariances_out.shape[2]
elif mode == "half":
assert covariances.shape[0] == covariances_out.shape[0]
assert covariances.shape[1] == covariances_out.shape[1]
assert out_size == covariances_out.shape[2]
assert covariances.shape[3] == covariances_out.shape[3]
elif mode == "full":
assert covariances.shape[0] == covariances_out.shape[0]
assert out_size == covariances_out.shape[1]
assert covariances.shape[2] == covariances_out.shape[2]
assert out_size == covariances_out.shape[3]
assert covariances.shape[4] == covariances_out.shape[4]
assert cs.as_list() == oc.shape.as_list()
# serialization and deserialization test
config = layer.get_config()
layer_from_config = kerasadf.layers.AveragePooling1D.from_config(config)
layer_deserialized = kerasadf.layers.deserialize(
{"class_name": layer.__class__.__name__, "config": config}
)
assert kerasadf.layers.serialize(layer) == kerasadf.layers.serialize(
layer_from_config
)
assert kerasadf.layers.serialize(layer) == kerasadf.layers.serialize(
layer_deserialized
)
@settings(deadline=None)
@pytest.mark.parametrize("padding", ["same", "valid"])
@given(
st.tuples(
st.integers(min_value=1, max_value=8),
st.integers(min_value=1, max_value=8),
)
| st.integers(min_value=1, max_value=8),
st.tuples(
st.integers(min_value=1, max_value=8),
st.integers(min_value=1, max_value=8),
)
| st.integers(min_value=1, max_value=8),
batched_float_array(min_data_dims=3, max_data_dims=3),
)
def test_average_pool_2d(padding, pool_size, strides, x):
K.clear_session()
means, covariances, mode = x
if isinstance(strides, tuple):
strides = np.minimum(strides, means.shape[1:3])
else:
strides = min(strides, min(means.shape[1], means.shape[2]))
if isinstance(pool_size, tuple):
pool_size = np.minimum(pool_size, means.shape[1:3])
else:
pool_size = min(pool_size, min(means.shape[1], means.shape[2]))
im = Input(shape=means.shape[1:], dtype=means.dtype)
ic = Input(shape=covariances.shape[1:], dtype=covariances.dtype)
layer = kerasadf.layers.AveragePooling2D(
pool_size, strides, padding, mode=mode
)
ms, cs = layer.compute_output_shape([im.shape, ic.shape])
om, oc = layer([im, ic])
model = Model([im, ic], [om, oc])
means_out, covariances_out = model.predict([means, covariances])
if padding == "same":
out_size = np.ceil(np.asarray(means.shape[1:3]) / strides)
elif padding == "valid":
out_size = np.ceil(
(np.asarray(means.shape[1:3]) - pool_size + 1) / strides
)
assert means.shape[0] == means_out.shape[0]
assert out_size[0] == means_out.shape[1]
assert out_size[1] == means_out.shape[2]
assert means.shape[3] == means_out.shape[3]
assert ms.as_list() == om.shape.as_list()
if mode == "diag":
assert covariances.shape[0] == covariances_out.shape[0]
assert out_size[0] == covariances_out.shape[1]
assert out_size[1] == covariances_out.shape[2]
assert covariances.shape[3] == covariances_out.shape[3]
elif mode == "half":
assert covariances.shape[0] == covariances_out.shape[0]
assert covariances.shape[1] == covariances_out.shape[1]
assert out_size[0] == covariances_out.shape[2]
assert out_size[1] == covariances_out.shape[3]
assert covariances.shape[4] == covariances_out.shape[4]
elif mode == "full":
assert covariances.shape[0] == covariances_out.shape[0]
assert out_size[0] == covariances_out.shape[1]
assert out_size[1] == covariances_out.shape[2]
assert covariances.shape[3] == covariances_out.shape[3]
assert out_size[0] == covariances_out.shape[4]
assert out_size[1] == covariances_out.shape[5]
assert covariances.shape[6] == covariances_out.shape[6]
assert cs.as_list() == oc.shape.as_list()
# serialization and deserialization test
config = layer.get_config()
layer_from_config = kerasadf.layers.AveragePooling2D.from_config(config)
layer_deserialized = kerasadf.layers.deserialize(
{"class_name": layer.__class__.__name__, "config": config}
)
assert kerasadf.layers.serialize(layer) == kerasadf.layers.serialize(
layer_from_config
)
assert kerasadf.layers.serialize(layer) == kerasadf.layers.serialize(
layer_deserialized
)
``` |
{
"source": "jmaces/rde",
"score": 3
} |
#### File: rde/an8flower/mask_evaluation.py
```python
import os
import numpy as np
import instances
import models
# PARAMETERS
INDICES = range(0, 1200, 100) # data samples
def pearson_correlation(x, y):
""" Pearson correlation coefficient of two flattened arrays. """
return np.corrcoef(x.flatten(), y.flatten())[0, 1]
def jaccard_index(x, y):
""" Jaccard index of the binarizations of two arrays. """
# threshold to binary
xh = np.heaviside(x - np.mean(x), 0)
yh = np.heaviside(y - np.mean(y), 0)
# compute intersection and union
    xb = xh.astype(bool)
    yb = yh.astype(bool)
intersection = np.logical_and(xb, yb)
union = np.logical_or(xb, yb)
    # Jaccard index is the ratio of intersection over union (IoU)
return intersection.sum() / union.sum()
if __name__ == "__main__":
# load model
model = models.load_model(softmax=True)
# load instances
generator, _ = instances.load_generator(class_mode="categorical")
# collect mappings
all_mappings = set()
for root, dirs, files in os.walk("results"):
all_mappings.update(
[f for f in files if (f.endswith(".npz") and "pixelflip" not in f)]
)
print("Found data for {} mapping methods.".format(len(all_mappings)))
names = []
pearson_means = []
jaccard_means = []
for file in all_mappings:
# init result lists
collected_pearson = [] # for Pearson correlation coefficients
        collected_jaccard = []  # for Jaccard indices (intersection over union)
# iterate data samples
for INDEX in INDICES:
x, y = generator[INDEX][0][0, ...], generator[INDEX][1][0, ...]
xname = os.path.splitext(
os.path.split(generator.filenames[INDEX])[1]
)[0]
# load mapping
path = os.path.join("results", xname, file)
data = np.load(path)
mask_path = os.path.join("results", xname, "mask.npz")
mask_data = np.load(mask_path)
mask = mask_data["mapping"]
mapping = data["mapping"]
assert mask_data["index"] == INDEX
assert data["index"] == INDEX
# compute similarity measures
pearson = pearson_correlation(mapping, mask)
jaccard = jaccard_index(mapping, mask)
# collect results
collected_pearson.append(pearson)
collected_jaccard.append(jaccard)
if "mask" not in file:
names.append(os.path.splitext(file)[0])
pearson_means.append(np.asarray(collected_pearson).mean())
jaccard_means.append(np.asarray(collected_jaccard).mean())
print(os.path.splitext(file)[0])
print(
"\tPearson:\t{:1.3e} +- {:1.3e}".format(
np.asarray(collected_pearson).mean(),
np.asarray(collected_pearson).std(),
)
)
print(
"\tJaccard:\t{:1.3e} +- {:1.3e}".format(
np.asarray(collected_jaccard).mean(),
np.asarray(collected_jaccard).std(),
)
)
# save results
np.savez_compressed(
os.path.join("results", "mask-eval-" + file),
pearson_correlation=np.asarray(collected_pearson),
jaccard_index=np.asarray(collected_jaccard),
name=os.path.splitext(file)[0],
)
```
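As a quick toy illustration of the two similarity measures above (not part of the script): a relevance mapping that highlights the same region as the ground-truth mask scores high on both measures, while a scrambled mapping does not. The import assumes the module is importable as `mask_evaluation`; its heavy work sits behind the `__main__` guard.
```python
import numpy as np

from mask_evaluation import jaccard_index, pearson_correlation

rng = np.random.default_rng(0)
mask = np.zeros((8, 8))
mask[2:6, 2:6] = 1.0  # ground-truth relevance region
good = mask + 0.1 * rng.standard_normal(mask.shape)  # noisy but aligned
bad = rng.permutation(good.flatten()).reshape(mask.shape)  # scrambled

print(pearson_correlation(good, mask), jaccard_index(good, mask))  # high
print(pearson_correlation(bad, mask), jaccard_index(bad, mask))  # low
```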
#### File: rde/stl10/models.py
```python
import numpy as np
from keras import layers as klayers
from keras import models as kmodels
from kerasadf import layers as adflayers
from tensorflow.keras import layers, models
# GLOBAL DEFAULT PARAMETERS
MODELPATH = "stl10-vgg16-avgpool-weights.hdf5"
INPUT_SHAPE = (224, 224, 3) # channels last, 3 colors 224 x 224
# STANDARD TF-KERAS MODELS
def load_model(path=MODELPATH, softmax=False):
# input layer
inputs = layers.Input(INPUT_SHAPE)
# first convolution and pooling block
conv1a = layers.Conv2D(
64,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(inputs)
conv1b = layers.Conv2D(
64,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv1a)
pool1 = layers.AveragePooling2D(2)(conv1b)
# second convolution and pooling block
conv2a = layers.Conv2D(
128,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(pool1)
conv2b = layers.Conv2D(
128,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv2a)
pool2 = layers.AveragePooling2D(2)(conv2b)
# third convolution and pooling block
conv3a = layers.Conv2D(
256,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(pool2)
conv3b = layers.Conv2D(
256,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv3a)
conv3c = layers.Conv2D(
256,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv3b)
pool3 = layers.AveragePooling2D(2)(conv3c)
# fourth convolution and pooling block
conv4a = layers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(pool3)
conv4b = layers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv4a)
conv4c = layers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv4b)
pool4 = layers.AveragePooling2D(2)(conv4c)
# fifth convolution and pooling block
conv5a = layers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(pool4)
conv5b = layers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv5a)
conv5c = layers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv5b)
pool5 = layers.AveragePooling2D(2)(conv5c)
# dense and output block
flat = layers.Flatten()(pool5)
dense1 = layers.Dense(
4096, activation="relu", kernel_initializer="glorot_normal"
)(flat)
dense2 = layers.Dense(
4096, activation="relu", kernel_initializer="glorot_normal"
)(dense1)
if softmax:
activation = "softmax"
else:
activation = None
dense3 = layers.Dense(
10, activation=activation, kernel_initializer="glorot_normal"
)(dense2)
# build model
model = models.Model(inputs, dense3)
    model.load_weights(path)
return model
# ADF TF-KERAS MODELS
def load_adfmodel(path=MODELPATH, mode="diag", rank=None):
# input layer
input_mean = layers.Input(INPUT_SHAPE)
if mode == "diag":
input_var = layers.Input(INPUT_SHAPE)
elif mode == "half":
if rank is None:
rank = np.prod(INPUT_SHAPE)
input_var = layers.Input([rank] + list(INPUT_SHAPE))
elif mode == "full":
input_var = layers.Input(list(INPUT_SHAPE) * 2)
# first convolution and pooling block
conv1a = adflayers.Conv2D(
64,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
mode=mode,
)([input_mean, input_var])
conv1b = adflayers.Conv2D(
64,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
mode=mode,
)(conv1a)
pool1 = adflayers.AveragePooling2D(2, mode=mode)(conv1b)
# second convolution and pooling block
conv2a = adflayers.Conv2D(
128,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
mode=mode,
)(pool1)
conv2b = adflayers.Conv2D(
128,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
mode=mode,
)(conv2a)
pool2 = adflayers.AveragePooling2D(2, mode=mode)(conv2b)
# third convolution and pooling block
conv3a = adflayers.Conv2D(
256,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
mode=mode,
)(pool2)
conv3b = adflayers.Conv2D(
256,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
mode=mode,
)(conv3a)
conv3c = adflayers.Conv2D(
256,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
mode=mode,
)(conv3b)
pool3 = adflayers.AveragePooling2D(2, mode=mode)(conv3c)
# fourth convolution and pooling block
conv4a = adflayers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
mode=mode,
)(pool3)
conv4b = adflayers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
mode=mode,
)(conv4a)
conv4c = adflayers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
mode=mode,
)(conv4b)
pool4 = adflayers.AveragePooling2D(2, mode=mode)(conv4c)
# fifth convolution and pooling block
conv5a = adflayers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
mode=mode,
)(pool4)
conv5b = adflayers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
mode=mode,
)(conv5a)
conv5c = adflayers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
mode=mode,
)(conv5b)
pool5 = adflayers.AveragePooling2D(2, mode=mode)(conv5c)
# dense and output block
flat = adflayers.Flatten(mode=mode)(pool5)
dense1 = adflayers.Dense(
4096, activation="relu", kernel_initializer="glorot_normal", mode=mode
)(flat)
dense2 = adflayers.Dense(
4096, activation="relu", kernel_initializer="glorot_normal", mode=mode
)(dense1)
dense3 = adflayers.Dense(
10, kernel_initializer="glorot_normal", mode=mode
)(dense2)
# build model
model = models.Model([input_mean, input_var], dense3)
    model.load_weights(path)
return model
# STANDARD KERAS MODELS
def load_kmodel(path=MODELPATH, softmax=False):
# input layer
inputs = klayers.Input(INPUT_SHAPE)
# first convolution and pooling block
conv1a = klayers.Conv2D(
64,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(inputs)
conv1b = klayers.Conv2D(
64,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv1a)
pool1 = klayers.AveragePooling2D(2)(conv1b)
# second convolution and pooling block
conv2a = klayers.Conv2D(
128,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(pool1)
conv2b = klayers.Conv2D(
128,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv2a)
pool2 = klayers.AveragePooling2D(2)(conv2b)
# third convolution and pooling block
conv3a = klayers.Conv2D(
256,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(pool2)
conv3b = klayers.Conv2D(
256,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv3a)
conv3c = klayers.Conv2D(
256,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv3b)
pool3 = klayers.AveragePooling2D(2)(conv3c)
# fourth convolution and pooling block
conv4a = klayers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(pool3)
conv4b = klayers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv4a)
conv4c = klayers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv4b)
pool4 = klayers.AveragePooling2D(2)(conv4c)
# fifth convolution and pooling block
conv5a = klayers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(pool4)
conv5b = klayers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv5a)
conv5c = klayers.Conv2D(
512,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv5b)
pool5 = klayers.AveragePooling2D(2)(conv5c)
# dense and output block
flat = klayers.Flatten()(pool5)
dense1 = klayers.Dense(
4096, activation="relu", kernel_initializer="glorot_normal"
)(flat)
dense2 = klayers.Dense(
4096, activation="relu", kernel_initializer="glorot_normal"
)(dense1)
if softmax:
activation = "softmax"
else:
activation = None
dense3 = klayers.Dense(
10, activation=activation, kernel_initializer="glorot_normal"
)(dense2)
# build model
model = kmodels.Model(inputs, dense3)
    model.load_weights(path)
return model
``` |
{
"source": "jmaces/robust-nets",
"score": 2
} |
#### File: robust-nets/ellipses/config_robustness_radon.py
```python
import os
import numpy as np
import pandas as pd
import torch
import config
from find_adversarial import PAdam, untargeted_attack
from networks import IterativeNet, Tiramisu, UNet
from operators import Radon, TVAnalysisPeriodic, noise_poisson, proj_l2_ball
from reconstruction_methods import admm_l1_rec
# ------ setup ----------
device = torch.device("cuda:0")
torch.cuda.set_device(0)
# ----- operators -----
theta = torch.linspace(0, 180, 61)[:-1] # 60 lines, exclude endpoint
OpA = Radon(config.n, theta)
OpAIt = Radon(config.n, theta)
OpAIt.adj = OpAIt.inv
OpTV = TVAnalysisPeriodic(config.n, device=device)
# ----- methods --------
methods = pd.DataFrame(columns=["name", "info", "reconstr", "attacker", "net"])
methods = methods.set_index("name")
noise_ref = noise_poisson
# ----- set up L1 --------
# grid search parameters for L1 via admm
grid_search_file = os.path.join(
config.RESULTS_PATH, "grid_search_l1", "grid_search_l1_radon_all.pkl"
)
gs_params = pd.read_pickle(grid_search_file)
def _get_gs_param(noise_rel):
idx = (gs_params.noise_rel - noise_rel).abs().to_numpy().argmin()
return gs_params.grid_param[idx]["lam"], gs_params.grid_param[idx]["rho"]
# the actual reconstruction method
def _reconstructL1(y, noise_rel):
lam, rho = _get_gs_param(noise_rel.numpy())
x, _ = admm_l1_rec(
y,
OpA,
OpTV,
0.0 * OpA.adj(y),
0.0 * OpTV(OpA.adj(y)),
lam,
rho,
iter=200,
silent=False,
)
return x
# the reconstruction method used for the L1 attack
# (fewer iterations due to high computational cost)
def _reconstructL1_adv(y, lam, rho, x0, z0):
x, _ = admm_l1_rec(y, OpA, OpTV, x0, z0, lam, rho, iter=20, silent=True)
return x
# loss
mseloss = torch.nn.MSELoss(reduction="sum")
# attack function for L1
def _attackerL1(x0, noise_rel, yadv_init=None, batch_size=6):
# compute noiseless measurements
y0 = OpA(x0)
if noise_rel == 0.0:
return y0, y0, y0
# compute absolute noise levels
noise_level = noise_rel * y0.norm(p=2, dim=(-2, -1), keepdim=True)
# compute noisy measurements for reference
yref = noise_ref(OpA(x0), noise_level)
# attack parameters
adv_init_fac = 3.0 * noise_level
adv_param = {
"codomain_dist": mseloss,
"domain_dist": None,
"mixed_dist": None,
"weights": (1.0, 1.0, 1.0),
"optimizer": PAdam,
"projs": None,
"iter": 15,
"stepsize": 5e0,
}
# get ADMM tuning parameters for noise_rel
lam, rho = _get_gs_param(noise_rel.numpy())
# compute good start values for _reconstructL1_adv
x0_adv, z0_adv = admm_l1_rec(
y0,
OpA,
OpTV,
0.0 * OpA.adj(y0),
0.0 * OpTV(OpA.adj(y0)),
lam,
rho,
iter=200,
silent=False,
)
# compute initialization
yadv = y0.clone().detach() + (
adv_init_fac / np.sqrt(np.prod(y0.shape[-2:]))
) * torch.randn_like(y0)
if yadv_init is not None:
yadv[0 : yadv_init.shape[0], ...] = yadv_init.clone().detach()
for idx_batch in range(0, yadv.shape[0], batch_size):
print(
"Attack for samples "
+ str(list(range(idx_batch, idx_batch + batch_size)))
)
adv_param["projs"] = [
lambda y: proj_l2_ball(
y,
y0[idx_batch : idx_batch + batch_size, ...],
noise_level[idx_batch : idx_batch + batch_size, ...],
)
]
# perform attack
yadv[idx_batch : idx_batch + batch_size, ...] = untargeted_attack(
lambda y: _reconstructL1_adv(
y,
lam,
rho,
x0_adv[idx_batch : idx_batch + batch_size, ...],
z0_adv[idx_batch : idx_batch + batch_size, ...],
),
yadv[idx_batch : idx_batch + batch_size, ...]
.clone()
.requires_grad_(True),
y0[idx_batch : idx_batch + batch_size, ...],
t_out_ref=x0[idx_batch : idx_batch + batch_size, ...],
**adv_param
).detach()
return yadv, yref, y0
methods.loc["L1"] = {
"info": {
"name_disp": "TV$[\\eta]$",
"name_save": "tv",
"plt_color": "#e8000b",
"plt_marker": "o",
"plt_linestyle": "-",
"plt_linewidth": 2.75,
},
"reconstr": _reconstructL1,
"attacker": lambda x0, noise_rel, yadv_init=None: _attackerL1(
x0, noise_rel, yadv_init=yadv_init
),
"net": None,
}
methods.loc["L1", "net"] = None
# ----- set up net attacks --------
# the actual reconstruction method for any net
def _reconstructNet(y, noise_rel, net):
return net.forward(y)
# attack function for any net
def _attackerNet(x0, noise_rel, net, yadv_init=None, batch_size=3):
# compute noiseless measurements
y0 = OpA(x0)
if noise_rel == 0.0:
return y0, y0, y0
# compute absolute noise levels
noise_level = noise_rel * y0.norm(p=2, dim=(-2, -1), keepdim=True)
# compute noisy measurements for reference
yref = noise_ref(OpA(x0), noise_level) # noisy measurements
# attack parameters
adv_init_fac = 3.0 * noise_level
adv_param = {
"codomain_dist": mseloss,
"domain_dist": None,
"mixed_dist": None,
"weights": (1.0, 1.0, 1.0),
"optimizer": PAdam,
"projs": None,
"iter": 500,
"stepsize": 5e0,
}
# compute initialization
yadv = y0.clone().detach() + (
adv_init_fac / np.sqrt(np.prod(y0.shape[-2:]))
) * torch.randn_like(y0)
if yadv_init is not None:
yadv[0 : yadv_init.shape[0], ...] = yadv_init.clone().detach()
for idx_batch in range(0, yadv.shape[0], batch_size):
print(
"Attack for samples "
+ str(list(range(idx_batch, idx_batch + batch_size)))
)
adv_param["projs"] = [
lambda y: proj_l2_ball(
y,
y0[idx_batch : idx_batch + batch_size, ...],
noise_level[idx_batch : idx_batch + batch_size, ...],
)
]
# perform attack
yadv[idx_batch : idx_batch + batch_size, ...] = untargeted_attack(
lambda y: _reconstructNet(y, 0.0, net),
yadv[idx_batch : idx_batch + batch_size, ...]
.clone()
.requires_grad_(True),
y0[idx_batch : idx_batch + batch_size, ...],
t_out_ref=x0[idx_batch : idx_batch + batch_size, ...],
**adv_param
).detach()
return yadv, yref, y0
# ----- load nets -----
# create a net and load weights from file
def _load_net(path, subnet, subnet_params, it_net_params):
subnet = subnet(**subnet_params).to(device)
it_net = IterativeNet(subnet, **it_net_params).to(device)
it_net.load_state_dict(torch.load(path, map_location=torch.device(device)))
it_net.freeze()
it_net.eval()
return it_net
def _append_net(name, info, net):
methods.loc[name] = {
"info": info,
"reconstr": lambda y, noise_rel: _reconstructNet(y, noise_rel, net),
"attacker": lambda x0, noise_rel, yadv_init=None: _attackerNet(
x0, noise_rel, net, yadv_init=yadv_init
),
"net": net,
}
pass
# ----- UNets -----
unet_params = {
"in_channels": 1,
"drop_factor": 0.0,
"base_features": 36,
"out_channels": 1,
}
_append_net(
"UNet jit",
{
"name_disp": "UNet",
"name_save": "unet_jit",
"plt_color": "ff7c00",
"plt_marker": "o",
"plt_linestyle": ":",
"plt_linewidth": 2.75,
},
_load_net(
"results/Radon_UNet_jitter_v3_train_phase_2/model_weights.pt",
UNet,
unet_params,
{
"num_iter": 1,
"lam": 0.0,
"lam_learnable": False,
"final_dc": False,
"resnet_factor": 1.0,
"operator": OpA,
"inverter": OpA.inv,
},
),
)
_append_net(
"UNet",
{
"name_disp": "UNet no jit",
"name_save": "unet",
"plt_color": "darkorange",
"plt_marker": "o",
"plt_linestyle": ":",
"plt_linewidth": None,
},
_load_net(
"results/Radon_UNet_Hann_v2_train_phase_2/model_weights.pt",
UNet,
unet_params,
{
"num_iter": 1,
"lam": 0.0,
"lam_learnable": False,
"final_dc": False,
"resnet_factor": 1.0,
"operator": OpA,
"inverter": OpA.inv,
},
),
)
# ----- Tiramisu -----
tiramisu_params = {
"in_channels": 1,
"out_channels": 1,
"drop_factor": 0.0,
"down_blocks": (5, 7, 9, 12, 15),
"up_blocks": (15, 12, 9, 7, 5),
"pool_factors": (2, 2, 2, 2, 2),
"bottleneck_layers": 20,
"growth_rate": 16,
"out_chans_first_conv": 16,
}
_append_net(
"Tiramisu jit",
{
"name_disp": "Tira",
"name_save": "tiramisu_jit",
"plt_color": "turquoise",
"plt_marker": "o",
"plt_linestyle": "-",
"plt_linewidth": None,
},
_load_net(
"results/Radon_Tiramisu_jitter_v6_train_phase_1/"
+ "model_weights_epoch19.pt",
Tiramisu,
tiramisu_params,
{
"num_iter": 1,
"lam": 0.0,
"lam_learnable": False,
"final_dc": False,
"resnet_factor": 1.0,
"operator": OpA,
"inverter": OpA.inv,
},
),
)
_append_net(
"Tiramisu",
{
"name_disp": "Tira no jit",
"name_save": "tiramisu",
"plt_color": "turquoise",
"plt_marker": "o",
"plt_linestyle": "-",
"plt_linewidth": None,
},
_load_net(
"results/Radon_Tiramisu_Hann_v5_train_phase_1/model_weights.pt",
Tiramisu,
tiramisu_params,
{
"num_iter": 1,
"lam": 0.0,
"lam_learnable": False,
"final_dc": False,
"resnet_factor": 1.0,
"operator": OpA,
"inverter": OpA.inv,
},
),
)
```
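A hypothetical driver loop (the actual evaluation script is not shown here) illustrates how each row of the `methods` table is meant to be consumed: the `attacker` produces worst-case and reference measurements for a given image, and `reconstr` maps measurements back to an image. The random input below is only a placeholder for a sample from the test set.
```python
import torch

import config_robustness_radon as cfg

x0 = torch.randn(1, 1, *cfg.config.n).to(cfg.device)  # placeholder test image
noise_rel = torch.tensor(0.02)

for name, method in cfg.methods.iterrows():
    yadv, yref, y0 = method["attacker"](x0, noise_rel)  # worst-case + reference
    x_adv = method["reconstr"](yadv, noise_rel)
    x_ref = method["reconstr"](yref, noise_rel)
    print(name, (x_adv - x0).norm().item(), (x_ref - x0).norm().item())
```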
#### File: robust-nets/ellipses/operators.py
```python
import math
from abc import ABC, abstractmethod
import numpy as np
import pytorch_radon
import skimage.transform
import torch
import torch_cg
from fastmri_utils.data import transforms
from fastmri_utils.data.transforms import fftshift, ifftshift
# ----- Utilities -----
class RadialMaskFunc(object):
""" Generates a golden angle radial spokes mask.
Useful for subsampling a Fast-Fourier-Transform.
Contains radial lines (spokes) through the center of the mask, with
angles spaced according to the golden angle (~111.25°). The first line
has angle 0° (horizontal). An offset parameter can be given to skip
the first `offset*num_lines` lines.
Parameters
----------
shape : array_like
A tuple specifying the size of the mask.
num_lines : int
Number of radial lines (spokes) in the mask.
offset : int, optional
Offset factor for the range of angles in the mask.
"""
def __init__(self, shape, num_lines, offset=0):
self.shape = shape
self.num_lines = num_lines
self.offset = offset
self.mask = self._generate_radial_mask(shape, num_lines, offset)
def __call__(self, shape, seed=None):
assert (self.mask.shape[0] == shape[-3]) and (
self.mask.shape[1] == shape[-2]
)
return torch.reshape(
self.mask, (len(shape) - 3) * (1,) + self.shape + (1,)
)
def _generate_radial_mask(self, shape, num_lines, offset=0):
# generate line template and empty mask
x, y = shape
d = math.ceil(np.sqrt(2) * max(x, y))
line = np.zeros((d, d))
line[d // 2, :] = 1.0
out = np.zeros((d, d))
# compute golden angle sequence
golden = (np.sqrt(5) - 1) / 2
angles = (
180.0
* golden
* np.arange(offset * num_lines, (offset + 1) * num_lines)
)
# draw lines
for angle in angles:
out += skimage.transform.rotate(line, angle, order=0)
# crop mask to correct size
out = out[
d // 2 - math.floor(x / 2) : d // 2 + math.ceil(x / 2),
d // 2 - math.floor(y / 2) : d // 2 + math.ceil(y / 2),
]
# return binary mask
return torch.tensor(out > 0)
def l2_error(X, X_ref, relative=False, squared=False, use_magnitude=True):
""" Compute average l2-error of an image over last three dimensions.
Parameters
----------
X : torch.Tensor
The input tensor of shape [..., 1, W, H] for real images or
[..., 2, W, H] for complex images.
X_ref : torch.Tensor
The reference tensor of same shape.
relative : bool, optional
Use relative error. (Default False)
squared : bool, optional
Use squared error. (Default False)
use_magnitude : bool, optional
Use complex magnitudes. (Default True)
Returns
-------
err_av :
The average error.
err :
Tensor with individual errors.
"""
assert X_ref.ndim >= 3 # do not forget the channel dimension
if X_ref.shape[-3] == 2 and use_magnitude: # compare complex magnitudes
X_flat = torch.flatten(torch.sqrt(X.pow(2).sum(-3)), -2, -1)
X_ref_flat = torch.flatten(torch.sqrt(X_ref.pow(2).sum(-3)), -2, -1)
else:
X_flat = torch.flatten(X, -3, -1)
X_ref_flat = torch.flatten(X_ref, -3, -1)
if squared:
err = (X_flat - X_ref_flat).norm(p=2, dim=-1) ** 2
else:
err = (X_flat - X_ref_flat).norm(p=2, dim=-1)
if relative:
if squared:
err = err / (X_ref_flat.norm(p=2, dim=-1) ** 2)
else:
err = err / X_ref_flat.norm(p=2, dim=-1)
if X_ref.ndim > 3:
err_av = err.sum() / np.prod(X_ref.shape[:-3])
else:
err_av = err
return err_av.squeeze(), err
def l2_error_meas(X, X_ref, relative=False, squared=False):
""" Compute average l2-error of measurements over last two dimensions.
Parameters
----------
X : torch.Tensor
The input tensor of shape [..., 1, N] for real measurements or
[..., 2, N] for complex measurements.
X_ref : torch.Tensor
The reference tensor of same shape.
relative : bool, optional
Use relative error. (Default False)
squared : bool, optional
Use squared error. (Default False)
Returns
-------
err_av :
The average error.
err :
Tensor with errors.
"""
assert X_ref.ndim >= 2 # do not forget the channel dimension
X_flat = torch.flatten(X, -2, -1)
X_ref_flat = torch.flatten(X_ref, -2, -1)
if squared:
err = (X_flat - X_ref_flat).norm(p=2, dim=-1) ** 2
else:
err = (X_flat - X_ref_flat).norm(p=2, dim=-1)
if relative:
if squared:
err = err / (X_ref_flat.norm(p=2, dim=-1) ** 2)
else:
err = err / X_ref_flat.norm(p=2, dim=-1)
if X_ref.ndim > 2:
err_av = err.sum() / np.prod(X_ref.shape[:-2])
else:
err_av = err
return err_av, err
def noise_gaussian(y, eta, n_seed=None, t_seed=None):
""" Additive Gaussian noise. """
if n_seed is not None:
np.random.seed(n_seed)
if t_seed is not None:
torch.manual_seed(t_seed)
noise = torch.randn_like(y)
return y + eta / np.sqrt(np.prod(y.shape[1:])) * noise
def noise_poisson(y, eta, n_seed=None, t_seed=None):
""" Rescaled Poisson noise. (not additive!) """
if n_seed is not None:
np.random.seed(n_seed)
if t_seed is not None:
torch.manual_seed(t_seed)
scale_fac = y.sum(dim=-1, keepdim=True) / (eta ** 2)
noisy_y = torch.poisson(scale_fac * y) / scale_fac
return noisy_y
def to_complex(x):
""" Converts real images to complex by adding a channel dimension. """
assert x.ndim >= 3 and (x.shape[-3] == 1 or x.shape[-3] == 2)
# real tensor of shape (1, n1, n2) or batch of shape (*, 1, n1, n2)
if x.shape[-3] == 1:
imag = torch.zeros_like(x)
out = torch.cat([x, imag], dim=-3)
else:
out = x
return out
def rotate_real(x):
""" Rotates the magnitude of a complex signal into the real channel. """
assert x.ndim >= 3 and (x.shape[-3] == 2)
x_rv = torch.zeros_like(x)
x_rv[..., 0, :, :] = torch.sqrt(x.pow(2).sum(-3))
return x_rv
def mult_complex(x, y):
""" Multiply two complex tensors with real and imag in last dimension. """
out_real = x[..., 0] * y[..., 0] - x[..., 1] * y[..., 1]
out_imag = x[..., 0] * y[..., 1] + x[..., 1] * y[..., 0]
return torch.stack([out_real, out_imag], dim=-1)
def div_complex(x, y):
""" Divide two complex tensors with real and imag in last dimension. """
out_real = x[..., 0] * y[..., 0] + x[..., 1] * y[..., 1]
out_imag = x[..., 1] * y[..., 0] - x[..., 0] * y[..., 1]
denom = y[..., 0].pow(2) + y[..., 1].pow(2)
return torch.stack([out_real / denom, out_imag / denom], dim=-1)
def conj_complex(x):
""" Complex conjugate of tensor with real and imag in last dimension. """
out_real = x[..., 0]
out_imag = -x[..., 1]
return torch.stack([out_real, out_imag], dim=-1)
def im2vec(x, dims=(-2, -1)):
""" Flattens last two dimensions of an image tensor to a vector. """
return torch.flatten(x, *dims)
def vec2im(x, n):
""" Unflattens the last dimension of a vector to two image dimensions. """
return x.view(*x.shape[:-1], *n)
def prep_fft_channel(x):
""" Rotates complex image dimension from channel to last position. """
x = torch.reshape(x, x.shape[:-3] + (x.shape[-3] // 2, 2) + x.shape[-2:])
return x.permute(*range(x.ndim - 3), -2, -1, -3)
def unprep_fft_channel(x):
""" Rotates complex image dimension from last to channel position. """
x = x.permute(*range(x.ndim - 3), -1, -3, -2)
return torch.reshape(
x, x.shape[:-4] + (x.shape[-4] * x.shape[-3],) + x.shape[-2:]
)
def circshift(x, dim=-1, num=1):
""" Circular shift by n along a dimension. """
perm = list(range(num, x.shape[dim])) + list(range(0, num))
if not dim == -1:
return x.transpose(dim, -1)[..., perm].transpose(dim, -1)
else:
return x[..., perm]
# ----- Thresholding, Projections, and Proximal Operators -----
def _shrink_single(x, thresh):
""" Soft/Shrinkage thresholding for tensors. """
return torch.nn.Softshrink(thresh)(x)
def _shrink_recursive(c, thresh):
""" Soft/Shrinkage thresholding for nested tuples/lists of tensors. """
if isinstance(c, (list, tuple)):
return [_shrink_recursive(el, thresh) for el in c]
else:
return _shrink_single(c, thresh)
shrink = _shrink_single # alias for convenience
def proj_l2_ball(x, centre, radius):
""" Euclidean projection onto a closed l2-ball.
Parameters
----------
x : torch.Tensor
The input tensor to project.
centre : torch.Tensor
The centre of the ball.
radius : float
The radius of the ball. Must be non-negative.
Returns
-------
torch.Tensor
The projection of x onto the closed ball.
"""
norm = torch.sqrt((x - centre).pow(2).sum(dim=(-2, -1), keepdim=True))
radius, norm = torch.broadcast_tensors(radius, norm)
fac = torch.ones_like(norm)
fac[norm > radius] = radius[norm > radius] / norm[norm > radius]
return fac * x + (1 - fac) * centre
# ----- Linear Operator Utilities -----
class LinearOperator(ABC):
""" Abstract base class for linear (measurement) operators.
Can be used for real operators
A : R^(n1 x n2) -> R^m
or complex operators
A : C^(n1 x n2) -> C^m.
Can be applied to tensors of shape (n1, n2) or (1, n1, n2) or batches
thereof of shape (*, n1, n2) or (*, 1, n1, n2) in the real case, or
analogously shapes (2, n1, n2) or (*, 2, n1, n2) in the complex case.
Attributes
----------
m : int
Dimension of the co-domain of the operator.
n : tuple of int
Dimensions of the domain of the operator.
"""
def __init__(self, m, n):
self.m = m
self.n = n
@abstractmethod
def dot(self, x):
""" Application of the operator to a vector.
Computes Ax for a given vector x from the domain.
Parameters
----------
x : torch.Tensor
            Must be of shape (*, n1, n2) or (*, 2, n1, n2).
Returns
-------
torch.Tensor
Will be of shape (*, m) or (*, 2, m).
"""
pass
@abstractmethod
def adj(self, y):
""" Application of the adjoint operator to a vector.
Computes (A^*)y for a given vector y from the co-domain.
Parameters
----------
y : torch.Tensor
Must be of shape (*, m) or (*, 2, m).
Returns
-------
torch.Tensor
Will be of shape (*, n1, n2) or (*, 2, n1, n2).
"""
pass
@abstractmethod
def inv(self, y):
""" Application of some inversion of the operator to a vector.
Computes (A^dagger)y for a given vector y from the co-domain.
A^dagger can for example be the pseudo-inverse.
Parameters
----------
y : torch.Tensor
Must be of shape (*, m) or (*, 2, m).
Returns
-------
torch.Tensor
Will be of shape (*, n1, n2) or (*, 2, n1, n2).
"""
pass
def __call__(self, x): # alias to make operator callable by using dot
return self.dot(x)
# ----- Measurement Operators -----
class Fourier(LinearOperator):
""" 2D discrete Fourier transform.
Implements the complex operator C^(n1, n2) -> C^m
    applying the (subsampled) Fourier transform.
    The adjoint is the conjugate transpose. The inverse is the same as the adjoint.
Parameters
----------
mask : torch.Tensor
The subsampling mask for the Fourier transform.
"""
def __init__(self, mask):
m = mask.nonzero().shape[0]
n = mask.shape[-2:]
super().__init__(m, n)
self.mask = mask[0, 0, :, :].bool()
def dot(self, x):
""" Subsampled Fourier transform. """
full_fft = unprep_fft_channel(transforms.fft2(prep_fft_channel(x)))
return im2vec(full_fft)[..., im2vec(self.mask)]
def adj(self, y):
""" Adjoint is the zeor-filled inverse Fourier transform. """
masked_fft = torch.zeros(
*y.shape[:-1], self.n[0] * self.n[1], device=y.device
)
masked_fft[..., im2vec(self.mask)] = y
return unprep_fft_channel(
transforms.ifft2(prep_fft_channel(vec2im(masked_fft, self.n)))
)
def inv(self, y):
""" Pseudo-inverse a.k.a. zero-filled IFFT. """
return self.adj(y)
def tikh(self, rhs, kernel, rho):
""" Tikhonov regularized inversion.
Solves the normal equation
(F*F + rho W*W) x = F*y
or more generally
(F*F + rho W*W) x = z
for a Tikhonov regularized least squares fit, assuming that the
        regularization W*W can be diagonalized by FFTs, i.e.
W*W = F*D*F
for some diagonal matrix D.
Parameters
----------
rhs : torch.Tensor
The right hand side tensor z, often F*y for some y.
kernel : torch.Tensor
The Fourier kernel of W, containing the diagonal elements D.
rho : float
The regularization parameter.
"""
assert rhs.ndim >= 3 and rhs.shape[-3] == 2 # assert complex images
fft_rhs = transforms.fft2(prep_fft_channel(rhs))
combined_kernel = prep_fft_channel(
to_complex(self.mask.unsqueeze(0).to(rhs.device))
) + rho * kernel.to(rhs.device)
fft_div = div_complex(fft_rhs, combined_kernel)
return unprep_fft_channel(transforms.ifft2(fft_div))
class Fourier_matrix(LinearOperator, torch.nn.Module):
""" 2D discrete Fourier transform based on Kroneckers of 1D Fourier.
    Implements the complex operator C^(n1, n2) -> C^m applying the (subsampled)
    Fourier transform. The adjoint is the conjugate transpose. The inverse is
    the same as the adjoint.
The Kronecker product implementation can be faster than the regular 2D
implementation in certain situations.
Parameters
----------
mask : torch.Tensor
The subsampling mask for the Fourier transform.
"""
def __init__(self, mask):
m = mask.nonzero().shape[0]
n = mask.shape[-2:]
LinearOperator.__init__(self, m, n)
torch.nn.Module.__init__(self)
self.mask = mask[0, 0, :, :].bool()
self.fft2 = LearnableFourier2D(n, inverse=False, learnable=False)
self.ifft2 = LearnableFourier2D(n, inverse=True, learnable=False)
def dot(self, x):
""" Subsampled Fourier transform. """
full_fft = self.fft2(x)
return im2vec(full_fft)[..., im2vec(self.mask)]
def adj(self, y):
""" Adjoint is the zeor-filled inverse Fourier transform. """
masked_fft = torch.zeros(
*y.shape[:-1], self.n[0] * self.n[1], device=y.device
)
masked_fft[..., im2vec(self.mask)] = y
return self.ifft2(vec2im(masked_fft, self.n))
def inv(self, y):
""" Pseudo-inverse a.k.a. zero-filled IFFT. """
return self.adj(y)
def tikh(self, rhs, kernel, rho):
""" Tikhonov regularized inversion.
Solves the normal equation
(F*F + rho W*W) x = F*y
or more generally
(F*F + rho W*W) x = z
for a Tikhonov regularized least squares fit, assuming that the
        regularization W*W can be diagonalized by FFTs, i.e.
W*W = F*D*F
for some diagonal matrix D.
Parameters
----------
rhs : torch.Tensor
The right hand side tensor z, often F*y for some y.
kernel : torch.Tensor
The Fourier kernel of W, containing the diagonal elements D.
rho : float
The regularization parameter.
"""
assert rhs.ndim >= 3 and rhs.shape[-3] == 2 # assert complex images
fft_rhs = prep_fft_channel(self.fft2(rhs))
combined_kernel = prep_fft_channel(
to_complex(self.mask.unsqueeze(0).to(rhs.device))
) + rho * kernel.to(rhs.device)
fft_div = unprep_fft_channel(div_complex(fft_rhs, combined_kernel))
return self.ifft2(fft_div)
class Radon(LinearOperator):
""" 2D parallel beam Radon transform based on FFTs.
Implements the real operator R^(n1, n2) -> R^m
    applying the (subsampled) parallel beam Radon transform.
The adjoint is the transpose. The inverse is the filtered back projection
(FBP) algorithm.
The transform currently assumes square images, i.e. n1=n2. The output
dimension depends on the image size and the number of sampled angles for
the transform.
Parameters
----------
    n : tuple of int
        Dimensions (height, width) of the input images; must be square.
    theta : torch.Tensor
The angles that are sampled for the Radon transform. The
number of total measurements m will be calculated automatically.
"""
def __init__(self, n, theta):
assert n[0] == n[1]
self.radon = pytorch_radon.Radon(
in_size=n[0], theta=theta, circle=False
)
dummy_in = torch.zeros(1, 1, *n)
dummy_out = self.radon(dummy_in)
self.m_shape = dummy_out.shape[2:]
PI = 4 * torch.ones(1).atan()
self.adj_factor = PI.item() / (2 * len(theta))
self.iradon_inv = pytorch_radon.IRadon(
in_size=n[0],
theta=theta,
circle=False,
use_filter=pytorch_radon.filters.HannFilter(),
)
self.iradon_adj = pytorch_radon.IRadon(
in_size=n[0], theta=theta, circle=False, use_filter=None
)
super().__init__(np.prod(self.m_shape), n)
def dot(self, x):
if x.ndim == 3: # no batch dimension
return im2vec(self.radon(x.unsqueeze(0)).squeeze(0))
else:
return im2vec(self.radon(x))
def adj(self, y):
if y.ndim == 2: # no batch dimension
return (
self.iradon_adj(vec2im(y, self.m_shape).unsqueeze(0)).squeeze(
0
)
/ self.adj_factor
)
else:
return self.iradon_adj(vec2im(y, self.m_shape)) / self.adj_factor
def inv(self, y):
if y.ndim == 2: # no batch dimension
return self.iradon_inv(
vec2im(y, self.m_shape).unsqueeze(0)
).squeeze(0)
else:
return self.iradon_inv(vec2im(y, self.m_shape))
class TVAnalysisPeriodic(LinearOperator):
""" 2D Total Variation analysis operator.
Implements the real operator R^(n1, n2) -> R^(2*n1*n2)
    applying the forward finite difference operator
[[ -1 1 0 ... 0 ]
[ 0 -1 1 0 ... 0 ]
[ . . . . ]
[ . -1 1 0 ]
[ 0 ... 0 -1 1 ]
[ 1 ... 0 0 -1 ]]
with periodic boundary conditions along the rows and columns of an
image. The adjoint is the transpose.
Can also be applied to complex tensors with shape (2, n1, n2).
It will then act upon the real part and imaginary part separately.
Parameters
----------
    n : tuple of int
        Dimensions of the input images.
    device : torch.Device or int, optional
The torch device or its ID to place the operator on. Set to `None` to
use the global torch default device. (Default `None`)
"""
def __init__(self, n, device=None):
super().__init__(n[0] * n[1] * 2, n)
self.device = device
def dot(self, x):
row_diff = circshift(x, dim=-2) - x
col_diff = circshift(x, dim=-1) - x
return torch.cat([im2vec(row_diff), im2vec(col_diff)], dim=-1,)
def adj(self, y):
row_diff = vec2im(
y[..., : self.n[0] * self.n[1]], (self.n[0], self.n[1])
)
col_diff = vec2im(
y[..., self.n[0] * self.n[1] :], (self.n[0], self.n[1])
)
return (
circshift(row_diff, dim=-2, num=self.n[0] - 1)
- row_diff
+ circshift(col_diff, dim=-1, num=self.n[0] - 1)
- col_diff
)
def get_fourier_kernel(self):
""" The factors of the operator after diagonalization by 2D FFTs. """
kernel = torch.zeros(self.n[0], self.n[1]).unsqueeze(-3)
kernel[0, 0, 0] = 4
kernel[0, 0, 1] = -1
kernel[0, 1, 0] = -1
kernel[0, 0, -1] = -1
kernel[0, -1, 0] = -1
return transforms.fftshift(
torch.fft(prep_fft_channel(to_complex(kernel)), signal_ndim=2),
dim=(-3, -2),
)
def inv(self, y):
raise NotImplementedError(
"This operator does not implement a direct " "inversion."
)
class LearnableFourier1D(torch.nn.Module):
""" Learnable 1D discrete Fourier transform.
Implements a complex operator C^n -> C^n, which is learnable but
initialized as the Fourier transform.
Parameters
----------
n : int
Dimension of the domain and range of the operator.
dim : int, optional
Apply the 1D operator along specified axis for inputs with multiple
axis. (Default is last axis)
inverse : bool, optional
Use the discrete inverse Fourier transform as initialization instead.
(Default False)
learnable : bool, optional
Make operator learnable. Otherwise it will be kept fixed as the
initialization. (Default True)
"""
def __init__(self, n, dim=-1, inverse=False, learnable=True):
super(LearnableFourier1D, self).__init__()
self.n = n
self.dim = dim
eye_n = torch.stack([torch.eye(n), torch.zeros(n, n)], dim=-1)
if inverse:
fft_n = ifftshift(
torch.ifft(
fftshift(eye_n, dim=1), signal_ndim=1, normalized=True
),
dim=1,
)
else:
fft_n = ifftshift(
torch.fft(
fftshift(eye_n, dim=1), signal_ndim=1, normalized=True
),
dim=1,
)
fft_real_n = fft_n[..., 0]
fft_imag_n = fft_n[..., 1]
fft_matrix = torch.cat(
[
torch.cat([fft_real_n, -fft_imag_n], dim=1),
torch.cat([fft_imag_n, fft_real_n], dim=1),
],
dim=0,
)
self.linear = torch.nn.Linear(2 * n, 2 * n, bias=False)
self.linear.weight.data = (
fft_matrix
+ 1 / (np.sqrt(self.n) * 16) * torch.randn_like(fft_matrix)
if learnable
else fft_matrix
)
self.linear.weight.requires_grad = learnable
def forward(self, x):
xt = torch.transpose(x, self.dim, -1)
x_real = xt[..., 0, :, :]
x_imag = xt[..., 1, :, :]
x_vec = torch.cat([x_real, x_imag], dim=-1)
fft_vec = self.linear(x_vec)
fft_real = fft_vec[..., : self.n]
fft_imag = fft_vec[..., self.n :]
return torch.transpose(
torch.stack([fft_real, fft_imag], dim=-3), -1, self.dim
)
class LearnableFourier2D(torch.nn.Module):
""" Learnable 2D discrete Fourier transform.
Implements a complex operator C^(n1, n2) -> C^(n1, n2), which is learnable
but initialized as the Fourier transform. Operates along the last two
dimensions of inputs with more axis.
Parameters
----------
n : tuple of int
Dimensions of the domain and range of the operator.
inverse : bool, optional
Use the discrete inverse Fourier transform as initialization instead.
(Default False)
learnable : bool, optional
Make operator learnable. Otherwise it will be kept fixed as the
initialization. (Default True)
"""
def __init__(self, n, inverse=False, learnable=True):
super(LearnableFourier2D, self).__init__()
self.linear1 = LearnableFourier1D(
n[0], dim=-2, inverse=inverse, learnable=learnable
)
self.linear2 = LearnableFourier1D(
n[1], dim=-1, inverse=inverse, learnable=learnable
)
def forward(self, x):
return self.linear1(self.linear2(x))
class LearnableInverter(torch.nn.Module):
""" Learnable inversion of subsampled discrete Fourier transform.
The zero-filling (transpose of the subsampling operator) is fixed.
The inversion is learnable and initialized as a 2D inverse Fourier
transform, realized as Kroneckers of 1D Fourier inversions.
Implements a complex operator C^m -> C^(n1, n2).
Parameters
----------
n : tuple of int
Dimensions of the range of the operator.
mask : torch.Tensor
The subsampling mask. Determines m.
"""
def __init__(self, n, mask, learnable=True):
super(LearnableInverter, self).__init__()
self.n = n
self.mask = mask[0, 0, :, :].bool()
self.learnable_ifft = LearnableFourier2D(
n, inverse=True, learnable=learnable
)
def forward(self, y):
masked_fft = torch.zeros(
*y.shape[:-1], self.n[0] * self.n[1], device=y.device
)
masked_fft[..., im2vec(self.mask)] = y
return self.learnable_ifft(vec2im(masked_fft, self.n))
# ----- Inversion Methods -----
class CGInverterLayer(torch.nn.Module):
""" Solves a batch of positive definite linear systems using the PCG.
This class is a wrapper of `torch_cg.CG` making it compatible with our
input signal specifications for 2D image signals.
    The class provides a batched forward operation solving a linear system
A X = B
    where A represents a square positive definite matrix, B is a right hand
side of shape [*, 1, n1, n2] for real signals or [*, 2, n1, n2] for
    complex signals, and X represents the solution of the same shape as B.
Attributes
----------
shape : tuple of int
Shape of signals that the solver expects (without batch dimension),
i.e. [1, n1, n2] for real or [2, n1, n2] for complex signals.
A_bmm : callable
Performs the batch-wise matrix multiply of A and X.
M_bmm : callable, optional
Performs a batch-wise matrix multiply of the preconditioning
matrix M and X. Set to `None` to use no preconditioning, i.e.
M is the identity matrix. (Default None)
rtol : float, optional
        Relative tolerance for norm of residual. (Default 1e-5)
atol : float, optional
Absolute tolerance for norm of residual. (Default 0.0)
maxiter : int, optional
Maximum number of CG iterations to perform. (Default 5*n1*n2)
verbose : bool, optional
Whether or not to print status messages. (Default False)
"""
def __init__(self, shape, A_bmm, M_bmm=None, **kwargs):
super().__init__()
def _A_bmm(X):
""" Shape compatible wrapper for A_bmm. """
AX = A_bmm(X.view((-1,) + shape))
AX_flat = torch.flatten(AX, -3, -1).unsqueeze(-1)
return AX_flat
def _M_bmm(X):
""" Shape compatible wrapper for M_bmm. """
MX = M_bmm(X.view((-1,) + shape))
MX_flat = torch.flatten(MX, -3, -1).unsqueeze(-1)
return MX_flat
self.A_bmm = _A_bmm
self.M_bmm = _M_bmm if M_bmm is not None else None
self.func = _CGInverterFunc(self.A_bmm, self.M_bmm, **kwargs)
def forward(self, B, X0=None):
""" Solves the linear system given a right hand side.
Parameters
----------
B : torch.Tensor
The right hand side.
X0 : torch.Tensor, optional
Initial guess for X. Set `None` to use M_bmm(B). (Default None)
Returns
-------
torch.Tensor
The solution X.
"""
return self.func(B, X0)
def _CGInverterFunc(
A_bmm, M_bmm, rtol=1e-5, atol=0.0, maxiter=None, verbose=False
):
""" Helper function for building CGInverter autograd functions. """
class _CGInverter(torch.autograd.Function):
""" The actual CGInverter autograd function. """
@staticmethod
def forward(ctx, *params):
B, X0 = params # unpack input parameters
ctx.in_grads = [p is not None and p.requires_grad for p in params]
B_flat = torch.flatten(B, -3, -1).unsqueeze(-1)
X0_flat = (
torch.flatten(X0, -3, -1).unsqueeze(-1)
if X0 is not None
else X0
)
X, _ = torch_cg.cg_batch(
A_bmm, B_flat, M_bmm, X0_flat, rtol, atol, maxiter, verbose
)
return X.view(B.shape)
@staticmethod
def backward(ctx, *params):
(dX,) = params # unpack input parameters
dX_flat = torch.flatten(dX, -3, -1).unsqueeze(-1)
dB, _ = torch_cg.cg_batch(
A_bmm, dX_flat, M_bmm, None, rtol, atol, maxiter, verbose
)
grads = [dB.view(dX.shape), 0.0 * dX]
for i in range(len(grads)):
if not ctx.in_grads[i]:
grads[i] = None
return tuple(grads)
return _CGInverter.apply
```
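The adjoint claims in the operator docstrings can be sanity-checked with a standard dot test, <A x, y> == <x, A* y>. The snippet below is an illustrative check for the TV analysis operator (it assumes the module and its dependencies are importable); the same pattern applies to the other `LinearOperator` subclasses.
```python
import torch

from operators import TVAnalysisPeriodic

n = (64, 64)
op = TVAnalysisPeriodic(n)
x = torch.randn(1, *n, dtype=torch.float64)  # real image with channel dim
y = torch.randn(op.m, dtype=torch.float64)  # vector in the co-domain

lhs = (op(x).flatten() * y).sum()  # <A x, y>
rhs = (x.flatten() * op.adj(y).flatten()).sum()  # <x, A* y>
assert torch.allclose(lhs, rhs)
```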
#### File: robust-nets/ellipses/script_grid_search_l1_fourier.py
```python
import os
import numpy as np
import pandas as pd
import torch
from data_management import IPDataset
from operators import (
Fourier,
RadialMaskFunc,
TVAnalysisPeriodic,
noise_gaussian,
to_complex,
unprep_fft_channel,
)
from reconstruction_methods import admm_l1_rec_diag, grid_search
# ----- load configuration -----
import config # isort:skip
# ------ setup ----------
device = torch.device("cuda")
file_name = "grid_search_l1_fourier_"
save_path = os.path.join(config.RESULTS_PATH, "grid_search_l1")
# ----- operators --------
mask_func = RadialMaskFunc(config.n, 40)
mask = unprep_fft_channel(mask_func((1, 1) + config.n + (1,)))
OpA = Fourier(mask)
OpTV = TVAnalysisPeriodic(config.n, device=device)
# ----- load test data --------
samples = range(50, 100)
test_data = IPDataset("test", config.DATA_PATH)
X_0 = torch.stack([test_data[s][0] for s in samples])
X_0 = to_complex(X_0.to(device))
# ----- noise setup --------
noise_min = 1e-3
noise_max = 0.08
noise_steps = 50
noise_rel = torch.tensor(
np.logspace(np.log10(noise_min), np.log10(noise_max), num=noise_steps)
).float()
# add extra noise levels 0.00 and 0.16 for tabular evaluation
noise_rel = (
torch.cat(
[torch.zeros(1).float(), noise_rel, 0.16 * torch.ones(1).float()]
)
.float()
.to(device)
)
def meas_noise(y, noise_level):
return noise_gaussian(y, noise_level)
# ----- set up reconstruction method and grid params --------
def _reconstruct(y, lam, rho):
x, _ = admm_l1_rec_diag(
y,
OpA,
OpTV,
OpA.adj(y),
OpTV(OpA.adj(y)),
lam,
rho,
iter=1000,
silent=True,
)
return x
# parameter search grid
grid = {
"lam": np.logspace(-6, -1, 25),
"rho": np.logspace(-5, 1, 25),
}
def combine_results():
results = pd.DataFrame(
columns=["noise_rel", "grid_param", "err_min", "grid", "err"]
)
for idx in range(len(noise_rel)):
results_cur = pd.read_pickle(
os.path.join(save_path, file_name + str(idx) + ".pkl")
)
results.loc[idx] = results_cur.loc[idx]
os.makedirs(save_path, exist_ok=True)
results.to_pickle(os.path.join(save_path, file_name + "all.pkl"))
return results
# ------ perform grid search ---------
if __name__ == "__main__":
idx_noise = (int(os.environ.get("SGE_TASK_ID")) - 1,)
for idx in idx_noise:
noise_level = noise_rel[idx] * OpA(X_0).norm(
p=2, dim=(-2, -1), keepdim=True
)
Y_ref = meas_noise(OpA(X_0), noise_level)
grid_param, err_min, err = grid_search(X_0, Y_ref, _reconstruct, grid)
results = pd.DataFrame(
columns=["noise_rel", "grid_param", "err_min", "grid", "err"]
)
results.loc[idx] = {
"noise_rel": noise_rel[idx],
"grid_param": grid_param,
"err_min": err_min,
"grid": grid,
"err": err,
}
os.makedirs(save_path, exist_ok=True)
results.to_pickle(
os.path.join(save_path, file_name + str(idx) + ".pkl")
)
```
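For orientation, `grid_search` is imported from `reconstruction_methods`, which is not included here. The sketch below is an illustrative stand-in consistent with the call `grid_search(X_0, Y_ref, _reconstruct, grid)` and its return values: it exhaustively evaluates the reconstruction error over the Cartesian product of the parameter grid and returns the best combination. The real implementation may differ.
```python
import itertools

import numpy as np


def grid_search_sketch(X_0, Y_ref, reconstruct, grid):
    """Exhaustive parameter search; illustrative stand-in, not the original."""
    keys = list(grid.keys())
    shape = tuple(len(grid[k]) for k in keys)
    err = np.zeros(shape)
    for idx, values in enumerate(itertools.product(*(grid[k] for k in keys))):
        params = dict(zip(keys, values))
        X_rec = reconstruct(Y_ref, **params)
        err.flat[idx] = (X_rec - X_0).pow(2).sum().item()
    best = np.unravel_index(err.argmin(), shape)
    grid_param = {k: grid[k][i] for k, i in zip(keys, best)}
    return grid_param, err.min(), err
```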
#### File: robust-nets/ellipses/script_train_radon_tiramisu_jitter_v6.py
```python
import os
import matplotlib as mpl
import torch
import torchvision
from data_management import IPDataset, Jitter, SimulateMeasurements
from networks import IterativeNet, Tiramisu
from operators import Radon
# ----- load configuration -----
import config # isort:skip
# ----- global configuration -----
mpl.use("agg")
device = torch.device("cuda:0")
torch.cuda.set_device(0)
# ----- measurement configuration -----
theta = torch.linspace(0, 180, 61)[:-1] # 60 lines, exclude endpoint
OpA = Radon(config.n, theta)
# ----- network configuration -----
subnet_params = {
"in_channels": 1,
"out_channels": 1,
"drop_factor": 0.0,
"down_blocks": (5, 7, 9, 12, 15),
"up_blocks": (15, 12, 9, 7, 5),
"pool_factors": (2, 2, 2, 2, 2),
"bottleneck_layers": 20,
"growth_rate": 16,
"out_chans_first_conv": 16,
}
subnet = Tiramisu
it_net_params = {
"num_iter": 1,
"lam": 0.0,
"lam_learnable": False,
"final_dc": False,
"resnet_factor": 1.0,
"operator": OpA,
"inverter": OpA.inv,
}
# ----- training configuration -----
mseloss = torch.nn.MSELoss(reduction="sum")
def loss_func(pred, tar):
return mseloss(pred, tar) / pred.shape[0]
train_phases = 1
train_params = {
"num_epochs": [19],
"batch_size": [10],
"loss_func": loss_func,
"save_path": [
os.path.join(
config.RESULTS_PATH,
"Radon_Tiramisu_jitter_v6_"
"train_phase_{}".format((i + 1) % (train_phases + 1)),
)
for i in range(train_phases + 1)
],
"save_epochs": 1,
"optimizer": torch.optim.Adam,
"optimizer_params": [{"lr": 8e-5, "eps": 2e-4, "weight_decay": 5e-4}],
"scheduler": torch.optim.lr_scheduler.StepLR,
"scheduler_params": {"step_size": 1, "gamma": 1.0},
"acc_steps": [1],
"train_transform": torchvision.transforms.Compose(
[SimulateMeasurements(OpA), Jitter(5e2, 0.0, 1.0)]
),
"val_transform": torchvision.transforms.Compose(
[SimulateMeasurements(OpA)],
),
"train_loader_params": {"shuffle": True, "num_workers": 0},
"val_loader_params": {"shuffle": False, "num_workers": 0},
}
# ----- data configuration -----
train_data_params = {
"path": config.DATA_PATH,
"device": device,
}
train_data = IPDataset
val_data_params = {
"path": config.DATA_PATH,
"device": device,
}
val_data = IPDataset
# ------ save hyperparameters -------
os.makedirs(train_params["save_path"][-1], exist_ok=True)
with open(
os.path.join(train_params["save_path"][-1], "hyperparameters.txt"), "w"
) as file:
for key, value in subnet_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in it_net_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in train_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in train_data_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in val_data_params.items():
file.write(key + ": " + str(value) + "\n")
file.write("train_phases" + ": " + str(train_phases) + "\n")
# ------ construct network and train -----
subnet_tmp = subnet(**subnet_params).to(device)
it_net_tmp = IterativeNet(
subnet_tmp,
**{
"num_iter": 1,
"lam": 0.0,
"lam_learnable": False,
"final_dc": False,
"resnet_factor": 1.0,
"operator": OpA,
"inverter": OpA.inv,
}
).to(device)
it_net_tmp.load_state_dict(
torch.load(
"results/Radon_Tiramisu_jitter_v4_train_phase_1/model_weights.pt",
map_location=torch.device(device),
)
)
subnet = it_net_tmp.subnet
it_net = IterativeNet(subnet, **it_net_params).to(device)
train_data = train_data("train", **train_data_params)
val_data = val_data("val", **val_data_params)
for i in range(train_phases):
train_params_cur = {}
for key, value in train_params.items():
train_params_cur[key] = (
value[i] if isinstance(value, (tuple, list)) else value
)
print("Phase {}:".format(i + 1))
for key, value in train_params_cur.items():
print(key + ": " + str(value))
it_net.train_on(train_data, val_data, **train_params_cur)
```
#### File: robust-nets/fastmri-challenge/script_robustness_fourier_example_adv.py
```python
import os
import matplotlib.pyplot as plt
import pandas as pd
import torch
import torchvision
from matplotlib import rc
from piq import psnr, ssim
from data_management import (
CropOrPadAndResimulate,
Flatten,
Normalize,
RandomMaskDataset,
filter_acquisition_no_fs,
)
from find_adversarial import err_measure_l2, grid_attack
from operators import rotate_real, to_complex
# ----- load configuration -----
import config # isort:skip
import config_robustness as cfg_rob # isort:skip
from config_robustness import methods # isort:skip
# ------ general setup ----------
device = cfg_rob.device
save_path = os.path.join(config.RESULTS_PATH, "attacks")
save_results = os.path.join(save_path, "challenge_example_V0S15_adv.pkl")
do_plot = True
save_plot = True
# ----- attack setup -----
# select samples
sample_vol = 0
sample_sl = 15
it_init = 6
keep_init = 3
# select range relative noise
noise_rel = torch.tensor([0.00, 0.025]).float().unique(sorted=True)
print(noise_rel)
# select measure for reconstruction error
err_measure = err_measure_l2
# select reconstruction methods
methods_include = ["L1", "Tiramisu jit"]
methods = methods.loc[methods_include]
# select methods excluded from (re-)performing attacks
methods_no_calc = ["L1", "Tiramisu jit"]
# adjust contrast
v_min = 0.05
v_max = 4.5
# ----- perform attack -----
# load data and select sample
test_data_params = {
"mask_func": cfg_rob.mask_func,
"filter": [filter_acquisition_no_fs],
"num_sym_slices": 0,
"multi_slice_gt": False,
"simulate_gt": True,
"keep_mask_as_func": True,
"transform": torchvision.transforms.Compose(
[
CropOrPadAndResimulate((368, 368)),
Flatten(0, -3),
Normalize(reduction="mean", use_target=False),
],
),
}
test_data = RandomMaskDataset
test_data = test_data("val", **test_data_params)
lo, hi = test_data.get_slices_in_volume(sample_vol)
print(
"volume slices from {} to {}, selected {}".format(lo, hi, lo + sample_sl)
)
X_VOL = to_complex(
torch.stack([test_data[sl_idx][2] for sl_idx in range(lo, hi)], dim=0)
).to(device)
X_MAX = rotate_real(X_VOL)[:, 0:1, ...].max().cpu()
X_0 = to_complex(test_data[lo + sample_sl][2].to(device)).unsqueeze(0)
X_0 = X_0.repeat(it_init, *((X_0.ndim - 1) * (1,)))
Y_0 = cfg_rob.OpA(X_0)
# create result table and load existing results from file
results = pd.DataFrame(
columns=[
"name",
"X_adv_err",
"X_ref_err",
"X_adv_psnr",
"X_ref_psnr",
"X_adv_ssim",
"X_ref_ssim",
"X_adv",
"X_ref",
"Y_adv",
"Y_ref",
]
)
results.name = methods.index
results = results.set_index("name")
# load existing results from file
if os.path.isfile(save_results):
results_save = pd.read_pickle(save_results)
for idx in results_save.index:
if idx in results.index:
results.loc[idx] = results_save.loc[idx]
else:
results_save = results
# perform attacks
for (idx, method) in methods.iterrows():
if idx not in methods_no_calc:
(
results.loc[idx].X_adv_err,
results.loc[idx].X_ref_err,
results.loc[idx].X_adv,
results.loc[idx].X_ref,
results.loc[idx].Y_adv,
results.loc[idx].Y_ref,
) = grid_attack(
method,
noise_rel,
X_0,
Y_0,
store_data=True,
keep_init=keep_init,
err_measure=err_measure,
)
results.loc[idx].X_adv_psnr = torch.zeros(len(noise_rel), X_0.shape[0])
results.loc[idx].X_ref_psnr = torch.zeros(len(noise_rel), X_0.shape[0])
results.loc[idx].X_adv_ssim = torch.zeros(len(noise_rel), X_0.shape[0])
results.loc[idx].X_ref_ssim = torch.zeros(len(noise_rel), X_0.shape[0])
for idx_noise in range(len(noise_rel)):
results.loc[idx].X_adv_psnr[idx_noise, ...] = psnr(
torch.clamp(
rotate_real(results.loc[idx].X_adv[idx_noise, ...])[
:, 0:1, ...
],
v_min,
v_max,
),
torch.clamp(rotate_real(X_0.cpu())[:, 0:1, ...], v_min, v_max),
data_range=v_max - v_min,
reduction="none",
)
results.loc[idx].X_ref_psnr[idx_noise, ...] = psnr(
torch.clamp(
rotate_real(results.loc[idx].X_ref[idx_noise, ...])[
:, 0:1, ...
],
v_min,
v_max,
),
torch.clamp(rotate_real(X_0.cpu())[:, 0:1, ...], v_min, v_max),
data_range=v_max - v_min,
reduction="none",
)
results.loc[idx].X_adv_ssim[idx_noise, ...] = ssim(
torch.clamp(
rotate_real(results.loc[idx].X_adv[idx_noise, ...])[
:, 0:1, ...
],
v_min,
v_max,
),
torch.clamp(rotate_real(X_0.cpu())[:, 0:1, ...], v_min, v_max),
data_range=v_max - v_min,
size_average=False,
)
results.loc[idx].X_ref_ssim[idx_noise, ...] = ssim(
torch.clamp(
rotate_real(results.loc[idx].X_ref[idx_noise, ...])[
:, 0:1, ...
],
v_min,
v_max,
),
torch.clamp(rotate_real(X_0.cpu())[:, 0:1, ...], v_min, v_max),
data_range=v_max - v_min,
size_average=False,
)
# save results
for idx in results.index:
results_save.loc[idx] = results.loc[idx]
os.makedirs(save_path, exist_ok=True)
results_save.to_pickle(save_results)
# select the worst example for each noise level and method (rel err)
results_max = pd.DataFrame(
columns=["name", "X_adv_err", "X_adv_psnr", "X_adv_ssim", "X_adv", "Y_adv"]
)
results_max.name = methods.index
results_max = results_max.set_index("name")
for (idx, method) in methods.iterrows():
_, idx_adv = results.loc[idx].X_adv_err.max(dim=1)
idx_noise = range(len(noise_rel))
results_max.loc[idx].X_adv_err = results.loc[idx].X_adv_err[
idx_noise, idx_adv, ...
]
results_max.loc[idx].X_adv_psnr = results.loc[idx].X_adv_psnr[
idx_noise, idx_adv, ...
]
results_max.loc[idx].X_adv_ssim = results.loc[idx].X_adv_ssim[
idx_noise, idx_adv, ...
]
results_max.loc[idx].X_adv = results.loc[idx].X_adv[
idx_noise, idx_adv, ...
]
results_max.loc[idx].Y_adv = results.loc[idx].Y_adv[
idx_noise, idx_adv, ...
]
# ----- plotting -----
def _implot(sub, im, vmin=v_min, vmax=v_max):
if im.shape[-3] == 2: # complex image
image = sub.imshow(
torch.sqrt(im.pow(2).sum(-3))[0, :, :].detach().cpu(),
vmin=vmin,
vmax=vmax,
)
else: # real image
image = sub.imshow(im[0, 0, :, :].detach().cpu(), vmin=vmin, vmax=vmax)
image.set_cmap("gray")
sub.set_xticks([])
sub.set_yticks([])
return image
if do_plot:
# LaTeX typesetting
rc("font", **{"family": "serif", "serif": ["Palatino"]})
rc("text", usetex=True)
X_0 = X_0.cpu()
Y_0 = Y_0.cpu()
# +++ ground truth +++
fig, ax = plt.subplots(clear=True, figsize=(2.5, 2.5), dpi=200)
im = _implot(ax, X_0)
if save_plot:
fig.savefig(
os.path.join(
save_path,
"fig_example_challenge_V{}S{}_adv_gt.pdf".format(
sample_vol, sample_sl
),
),
bbox_inches="tight",
pad_inches=0,
)
# method-wise plots
for (idx, method) in methods.iterrows():
# +++ reconstructions per noise level +++
for idx_noise in range(len(noise_rel)):
results_ref = results_max
X_cur = results_ref.loc[idx].X_adv[idx_noise, ...].unsqueeze(0)
# adv
fig, ax = plt.subplots(clear=True, figsize=(2.5, 2.5), dpi=200)
im = _implot(ax, X_cur)
ax.text(
360,
10,
"rel.~$\\ell_2$-err: {:.2f}\\%".format(
results_ref.loc[idx].X_adv_err[idx_noise].item() * 100
),
fontsize=10,
color="white",
horizontalalignment="right",
verticalalignment="top",
)
ax.text(
360,
30,
"PSNR: {:.2f}".format(
results_ref.loc[idx].X_adv_psnr[idx_noise].item()
),
fontsize=10,
color="white",
horizontalalignment="right",
verticalalignment="top",
)
ax.text(
360,
53,
"SSIM: {:.2f}".format(
results_ref.loc[idx].X_adv_ssim[idx_noise].item()
),
fontsize=10,
color="white",
horizontalalignment="right",
verticalalignment="top",
)
if save_plot:
fig.savefig(
os.path.join(
save_path,
"fig_example_challenge_V{}S{}_adv_".format(
sample_vol, sample_sl
)
+ method.info["name_save"]
+ "_{:.0e}".format(noise_rel[idx_noise].item())
+ ".pdf",
),
bbox_inches="tight",
pad_inches=0,
)
# not saved
fig.suptitle(
method.info["name_disp"]
+ " for rel. noise level = {:1.3f}".format(
noise_rel[idx_noise].item()
)
)
plt.show()
```
#### File: fastmri_utils/common/subsample.py
```python
import numpy as np
import torch
def create_mask_for_mask_type(mask_type_str, center_fractions, accelerations):
if mask_type_str == "random":
return RandomMaskFunc(center_fractions, accelerations)
elif mask_type_str == "equispaced":
return EquispacedMaskFunc(center_fractions, accelerations)
else:
raise Exception(f"{mask_type_str} not supported")
class MaskFunc:
def __init__(self, center_fractions, accelerations):
"""
Args:
center_fractions (List[float]): Fraction of low-frequency columns
to be retained. If multiple values are provided, then one of
these numbers is chosen uniformly each time.
accelerations (List[int]): Amount of under-sampling. This should
have the same length as center_fractions. If multiple values
are provided, then one of these is chosen uniformly each time.
"""
if len(center_fractions) != len(accelerations):
raise ValueError(
"Number of center fractions should match number of "
"accelerations"
)
self.center_fractions = center_fractions
self.accelerations = accelerations
self.rng = np.random.RandomState()
def choose_acceleration(self):
choice = self.rng.randint(0, len(self.accelerations))
center_fraction = self.center_fractions[choice]
acceleration = self.accelerations[choice]
return center_fraction, acceleration
class RandomMaskFunc(MaskFunc):
"""
RandomMaskFunc creates a sub-sampling mask of a given shape.
The mask selects a subset of columns from the input k-space data. If the
k-space data has N columns, the mask picks out:
1. N_low_freqs = (N * center_fraction) columns in the center
corresponding to low-frequencies
2. The other columns are selected uniformly at random with a
probability equal to:
prob = (N / acceleration - N_low_freqs) / (N - N_low_freqs).
This ensures that the expected number of columns selected is equal to
(N / acceleration)
It is possible to use multiple center_fractions and accelerations, in which
case one possible (center_fraction, acceleration) is chosen uniformly at
random each time the RandomMaskFunc object is called.
For example, if accelerations = [4, 8] and center_fractions = [0.08, 0.04],
then there is a 50% probability that 4-fold acceleration with 8% center
fraction is selected and a 50% probability that 8-fold acceleration with
4% center fraction is selected.
"""
def __init__(self, center_fractions, accelerations):
"""
Args:
center_fractions (List[float]): Fraction of low-frequency columns
to be retained. If multiple values are provided, then one of
these numbers is chosen uniformly each time.
accelerations (List[int]): Amount of under-sampling. This should
have the same length as center_fractions. If multiple values
are provided, then one of these is chosen uniformly each time.
An acceleration of 4 retains 25% of the columns, but they may
not be spaced evenly.
"""
if len(center_fractions) != len(accelerations):
raise ValueError(
"Number of center fractions should match number of "
"accelerations"
)
self.center_fractions = center_fractions
self.accelerations = accelerations
self.rng = np.random.RandomState()
def __call__(self, shape, seed=None):
"""
Args:
shape (iterable[int]): The shape of the mask to be created. The
shape should have at least 3 dimensions. Samples are drawn
along the second last dimension.
seed (int, optional): Seed for the random number generator. Setting
the seed ensures the same mask is generated each time for the
same shape.
Returns:
torch.Tensor: A mask of the specified shape.
"""
if len(shape) < 3:
raise ValueError("Shape should have 3 or more dimensions")
self.rng.seed(seed)
num_cols = shape[-2]
center_fraction, acceleration = self.choose_acceleration()
# Create the mask
num_low_freqs = int(round(num_cols * center_fraction))
prob = (num_cols / acceleration - num_low_freqs) / (
num_cols - num_low_freqs
)
mask = self.rng.uniform(size=num_cols) < prob
pad = (num_cols - num_low_freqs + 1) // 2
mask[pad : pad + num_low_freqs] = True
# Reshape the mask
mask_shape = [1 for _ in shape]
mask_shape[-2] = num_cols
mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
return mask
class EquispacedMaskFunc(MaskFunc):
"""
EquispacedMaskFunc creates a sub-sampling mask of a given shape.
The mask selects a subset of columns from the input k-space data. If the
k-space data has N columns, the mask picks out:
1. N_low_freqs = (N * center_fraction) columns in the center
corresponding to low-frequencies
2. The other columns are selected with equal spacing at a proportion
that reaches the desired acceleration rate taking into consideration
the number of low frequencies. This ensures that the expected number
of columns selected is equal to (N / acceleration)
It is possible to use multiple center_fractions and accelerations, in which
case one possible (center_fraction, acceleration) is chosen uniformly at
random each time the EquispacedMaskFunc object is called.
"""
def __call__(self, shape, seed):
"""
Args:
shape (iterable[int]): The shape of the mask to be created. The
shape should have at least 3 dimensions. Samples are drawn
along the second last dimension.
seed (int, optional): Seed for the random number generator. Setting
the seed ensures the same mask is generated each time for the
same shape.
Returns:
torch.Tensor: A mask of the specified shape.
"""
if len(shape) < 3:
raise ValueError("Shape should have 3 or more dimensions")
self.rng.seed(seed)
center_fraction, acceleration = self.choose_acceleration()
num_cols = shape[-2]
num_low_freqs = int(round(num_cols * center_fraction))
# Create the mask
mask = np.zeros(num_cols, dtype=np.float32)
pad = (num_cols - num_low_freqs + 1) // 2
mask[pad : pad + num_low_freqs] = True
# Determine acceleration rate by adjusting for the number of low freqs
adjusted_accel = (acceleration * (num_low_freqs - num_cols)) / (
num_low_freqs * acceleration - num_cols
)
offset = self.rng.randint(0, round(adjusted_accel))
accel_samples = np.arange(offset, num_cols - 1, adjusted_accel)
accel_samples = np.around(accel_samples).astype(np.uint)
mask[accel_samples] = True
# Reshape the mask
mask_shape = [1 for _ in shape]
mask_shape[-2] = num_cols
mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
return mask
```
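A minimal usage sketch for the mask functions above; the import path follows the file location, while the k-space shape, seed, and sampling settings are illustrative assumptions rather than values taken from the repository:
```python
import torch

from fastmri_utils.common.subsample import create_mask_for_mask_type

# one (center_fraction, acceleration) pair: keep 8% low-frequency columns,
# 4-fold undersampling overall
mask_func = create_mask_for_mask_type("random", [0.08], [4])
kspace = torch.randn(1, 640, 368, 2)  # e.g. [coils, rows, cols, re/im]
mask = mask_func(kspace.shape, seed=42)  # shape [1, 1, 368, 1], broadcastable
undersampled = kspace * mask  # zero out the columns not selected by the mask
# in expectation roughly cols / acceleration columns survive
print(int(mask.sum().item()), "of", kspace.shape[-2], "columns kept")
```
Passing `"equispaced"` instead selects the `EquispacedMaskFunc` variant with evenly spaced high-frequency columns.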
#### File: robust-nets/mnist/networks.py
```python
import os
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import matplotlib.pyplot as plt
import pandas as pd
import torch
from tqdm import tqdm
from operators import accuracy, l2_error
# ----- ----- Abstract Base Network ----- -----
class InvNet(torch.nn.Module, metaclass=ABCMeta):
""" Abstract base class for networks solving linear inverse problems.
The network is intended for the denoising of a direct inversion of a 1D
signal from (noisy) linear measurements. The measurement model
y = Ax + noise
can be used to obtain an approximate reconstruction x_ from y using, e.g.,
the pseudo-inverse of A. The task of the network is either to directly
obtain x from y or denoise and improve this first inversion x_ towards x.
"""
def __init__(self):
super(InvNet, self).__init__()
@abstractmethod
def forward(self, z):
""" Applies the network to a batch of inputs z. """
pass
def freeze(self):
""" Freeze all model weights, i.e. prohibit further updates. """
for param in self.parameters():
param.requires_grad = False
@property
def device(self):
return next(self.parameters()).device
def _train_step(
self, batch_idx, batch, loss_func, optimizer, batch_size, acc_steps
):
inp, tar = batch
inp = inp.to(self.device)
tar = tar.to(self.device)
pred = self.forward(inp)
loss = loss_func(pred, tar) / acc_steps
loss.backward()
if (batch_idx // batch_size + 1) % acc_steps == 0:
optimizer.step()
optimizer.zero_grad()
return loss * acc_steps, inp, tar, pred
def _val_step(self, batch_idx, batch, loss_func):
inp, tar = batch
inp = inp.to(self.device)
tar = tar.to(self.device)
pred = self.forward(inp)
loss = loss_func(pred, tar)
return loss, inp, tar, pred
def _on_epoch_end(
self,
epoch,
save_epochs,
save_path,
logging,
loss,
inp,
tar,
pred,
v_loss,
v_inp,
v_tar,
v_pred,
val_data,
):
self._print_info()
logging = logging.append(
{
"loss": loss.item(),
"val_loss": v_loss.item(),
"rel_l2_error": l2_error(
pred, tar, relative=True, squared=False
)[0].item(),
"val_rel_l2_error": l2_error(
v_pred, v_tar, relative=True, squared=False
)[0].item(),
},
ignore_index=True,
sort=False,
)
print(logging.tail(1))
if (epoch + 1) % save_epochs == 0:
fig = self._create_figure(
logging, loss, inp, tar, pred, v_loss, v_inp, v_tar, v_pred
)
os.makedirs(save_path, exist_ok=True)
torch.save(
self.state_dict(),
os.path.join(
save_path, "model_weights_epoch{}.pt".format(epoch + 1)
),
)
logging.to_pickle(
os.path.join(
save_path, "losses_epoch{}.pkl".format(epoch + 1)
),
)
if fig is not None:
fig.savefig(
os.path.join(
save_path, "plot_epoch{}.png".format(epoch + 1)
),
bbox_inches="tight",
)
return logging
def _create_figure(
self, logging, loss, inp, tar, pred, v_loss, v_inp, v_tar, v_pred
):
""" Can be overwritten by child classes to plot training progress. """
pass
def _add_to_progress_bar(self, dict):
""" Can be overwritten by child classes to add to progress bar. """
return dict
def _on_train_end(self, save_path, logging):
os.makedirs(save_path, exist_ok=True)
torch.save(
self.state_dict(), os.path.join(save_path, "model_weights.pt")
)
logging.to_pickle(os.path.join(save_path, "losses.pkl"),)
def _print_info(self):
""" Can be overwritten by child classes to print at epoch end. """
pass
def train_on(
self,
train_data,
val_data,
num_epochs,
batch_size,
loss_func,
save_path,
save_epochs=50,
optimizer=torch.optim.Adam,
optimizer_params={"lr": 2e-4, "eps": 1e-3},
scheduler=torch.optim.lr_scheduler.StepLR,
scheduler_params={"step_size": 1, "gamma": 1.0},
acc_steps=1,
train_transform=None,
val_transform=None,
):
optimizer = optimizer(self.parameters(), **optimizer_params)
scheduler = scheduler(optimizer, **scheduler_params)
logging = pd.DataFrame(
columns=["loss", "val_loss", "rel_l2_error", "val_rel_l2_error"]
)
inp_train, tar_train = train_data
inp_val, tar_val = val_data
for epoch in range(num_epochs):
permutation = torch.randperm(inp_train.shape[0])
self.train() # make sure we are in train mode
t = tqdm(
range(0, inp_train.shape[0], batch_size),
desc="epoch {} / {}".format(epoch, num_epochs),
)
optimizer.zero_grad()
loss = 0.0
for i in t:
indices = permutation[i : i + batch_size]
batch = (
train_transform(inp_train[indices, ...])
if train_transform is not None
else inp_train[indices, ...],
tar_train[indices, ...],
)
loss_b, inp, tar, pred = self._train_step(
i, batch, loss_func, optimizer, batch_size, acc_steps
)
t.set_postfix(
**self._add_to_progress_bar({"loss": loss_b.item()})
)
with torch.no_grad():
loss += loss_b
loss /= -(-inp_train.shape[0] // batch_size)
with torch.no_grad():
self.eval() # make sure we are in eval mode
scheduler.step()
v_loss = 0.0
for ii in range(0, inp_val.shape[0], batch_size):
v_batch = (
val_transform(inp_val[ii : ii + batch_size])
if val_transform is not None
else inp_val[ii : ii + batch_size],
tar_val[ii : ii + batch_size],
)
v_loss_b, v_inp, v_tar, v_pred = self._val_step(
ii, v_batch, loss_func
)
v_loss += v_loss_b
v_loss /= -(-inp_val.shape[0] // batch_size)
logging = self._on_epoch_end(
epoch,
save_epochs,
save_path,
logging,
loss,
inp,
tar,
pred,
v_loss,
v_inp,
v_tar,
v_pred,
val_data,
)
self._on_train_end(save_path, logging)
return logging
# ----- ----- Iterative Networks ----- -----
class IterativeNet(InvNet):
def __init__(
self,
subnet,
operator,
inverter,
num_iter,
lam,
lam_learnable,
final_dc=True,
resnet_factor=1.0,
):
super(IterativeNet, self).__init__()
self.operator = operator
self.inverter = inverter
self.subnet = subnet
self.num_iter = num_iter
self.final_dc = final_dc
self.resnet_factor = resnet_factor
if not isinstance(lam, (list, tuple)):
lam = [lam] * num_iter
if not isinstance(lam_learnable, (list, tuple)):
lam_learnable = [lam_learnable] * len(lam)
self.lam = torch.nn.ParameterList(
[
torch.nn.Parameter(
torch.tensor(lam[it]), requires_grad=lam_learnable[it]
)
for it in range(len(lam))
]
)
def forward(self, inp):
xinv = self.inverter(inp)
for it in range(self.num_iter):
# subnet step
xinv = self.resnet_factor * xinv + self.subnet(xinv)
# data consistency step
if (self.final_dc) or (
(not self.final_dc) and it < self.num_iter - 1
):
xinv = xinv - self.lam[it] * self.operator.adj(
self.operator(xinv) - inp
)
return xinv
def _print_info(self):
print("Current lambda(s):")
print([self.lam[it].item() for it in range(len(self.lam))])
print([self.lam[it].requires_grad for it in range(len(self.lam))])
def _create_figure(
self, logging, loss, inp, tar, pred, v_loss, v_inp, v_tar, v_pred
):
def _implot(sub, im):
im = im.reshape(-1, 1, 28, 28) # vec to im
return sub.imshow(im[0, 0, :, :].detach().cpu())
fig, subs = plt.subplots(2, 3, clear=True, num=1, figsize=(8, 6))
# inv = self.inverter(inp)
v_inv = self.inverter(v_inp)
# training and validation loss
subs[0, 0].set_title("losses")
subs[0, 0].semilogy(logging["loss"], label="train")
subs[0, 0].semilogy(logging["val_loss"], label="val")
subs[0, 0].legend()
# validation input
p10 = _implot(subs[1, 0], v_inv)
subs[1, 0].set_title("val inv")
plt.colorbar(p10, ax=subs[1, 0])
# validation output
p01 = _implot(subs[0, 1], v_pred)
subs[0, 1].set_title(
"val output:\n ||x0-xrec||_2 / ||x0||_2\n = "
"{:1.2e}".format(logging["val_rel_l2_error"].iloc[-1])
)
plt.colorbar(p01, ax=subs[0, 1])
# validation target
p11 = _implot(subs[1, 1], v_tar)
subs[1, 1].set_title("val target")
plt.colorbar(p11, ax=subs[1, 1])
# validation difference
p12 = _implot(subs[1, 2], v_pred - v_tar)
subs[1, 2].set_title("val diff: x0 - x_pred")
plt.colorbar(p12, ax=subs[1, 2])
# training output
p02 = _implot(subs[0, 2], pred)
subs[0, 2].set_title(
"train output:\n ||x0-xrec||_2 / ||x0||_2\n = "
"{:1.2e}".format(logging["rel_l2_error"].iloc[-1])
)
plt.colorbar(p02, ax=subs[0, 2])
return fig
# ----- ----- U-Net ----- -----
class UNet(InvNet):
""" U-Net implementation.
Based on https://github.com/mateuszbuda/brain-segmentation-pytorch/
and modified in agreement with their licence:
-----
MIT License
Copyright (c) 2019 mateuszbuda
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
def __init__(
self, in_channels=1, out_channels=1, base_features=32, drop_factor=0.0,
):
# set properties of UNet
super(UNet, self).__init__()
self.encoder1 = UNet._conv_block(
in_channels,
base_features,
drop_factor=drop_factor,
block_name="encoding_1",
)
self.pool1 = torch.nn.MaxPool1d(kernel_size=2, stride=2)
self.encoder2 = UNet._conv_block(
base_features,
base_features * 2,
drop_factor=drop_factor,
block_name="encoding_2",
)
self.pool2 = torch.nn.MaxPool1d(kernel_size=2, stride=2)
self.encoder3 = UNet._conv_block(
base_features * 2,
base_features * 4,
drop_factor=drop_factor,
block_name="encoding_3",
)
self.pool3 = torch.nn.MaxPool1d(kernel_size=2, stride=2)
self.encoder4 = UNet._conv_block(
base_features * 4,
base_features * 8,
drop_factor=drop_factor,
block_name="encoding_4",
)
self.pool4 = torch.nn.MaxPool1d(kernel_size=2, stride=2)
self.bottleneck = UNet._conv_block(
base_features * 8,
base_features * 16,
drop_factor=drop_factor,
block_name="bottleneck",
)
self.upconv4 = torch.nn.ConvTranspose1d(
base_features * 16, base_features * 8, kernel_size=2, stride=2,
)
self.decoder4 = UNet._conv_block(
base_features * 16,
base_features * 8,
drop_factor=drop_factor,
block_name="decoding_4",
)
self.upconv3 = torch.nn.ConvTranspose1d(
base_features * 8, base_features * 4, kernel_size=2, stride=2
)
self.decoder3 = UNet._conv_block(
base_features * 8,
base_features * 4,
drop_factor=drop_factor,
block_name="decoding_3",
)
self.upconv2 = torch.nn.ConvTranspose1d(
base_features * 4, base_features * 2, kernel_size=2, stride=2
)
self.decoder2 = UNet._conv_block(
base_features * 4,
base_features * 2,
drop_factor=drop_factor,
block_name="decoding_2",
)
self.upconv1 = torch.nn.ConvTranspose1d(
base_features * 2, base_features, kernel_size=2, stride=2
)
self.decoder1 = UNet._conv_block(
base_features * 2,
base_features,
drop_factor=drop_factor,
block_name="decoding_1",
)
self.outconv = torch.nn.Conv1d(
in_channels=base_features,
out_channels=out_channels,
kernel_size=1,
)
def forward(self, x):
enc1 = self.encoder1(x)
enc2 = self.encoder2(self.pool1(enc1))
enc3 = self.encoder3(self.pool2(enc2))
enc4 = self.encoder4(self.pool3(enc3))
bottleneck = self.bottleneck(self.pool4(enc4))
dec4 = self.upconv4(bottleneck)
dec4 = torch.cat((dec4, enc4), dim=1)
dec4 = self.decoder4(dec4)
dec3 = self.upconv3(dec4)
dec3 = torch.cat((dec3, enc3), dim=1)
dec3 = self.decoder3(dec3)
dec2 = self.upconv2(dec3)
dec2 = torch.cat((dec2, enc2), dim=1)
dec2 = self.decoder2(dec2)
dec1 = self.upconv1(dec2)
dec1 = torch.cat((dec1, enc1), dim=1)
dec1 = self.decoder1(dec1)
return self.outconv(dec1)
@staticmethod
def _conv_block(in_channels, out_channels, drop_factor, block_name):
return torch.nn.Sequential(
OrderedDict(
[
(
block_name + "conv1",
torch.nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
padding=1,
bias=True,
),
),
(block_name + "bn_1", torch.nn.BatchNorm1d(out_channels)),
(block_name + "relu1", torch.nn.ReLU(True)),
(block_name + "dr1", torch.nn.Dropout(p=drop_factor)),
(
block_name + "conv2",
torch.nn.Conv1d(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=3,
padding=1,
bias=True,
),
),
(block_name + "bn_2", torch.nn.BatchNorm1d(out_channels)),
(block_name + "relu2", torch.nn.ReLU(True)),
(block_name + "dr2", torch.nn.Dropout(p=drop_factor)),
]
)
)
# ----- ----- Tiramisu Network ----- -----
class Tiramisu(InvNet):
""" Tiramisu network implementation.
Based on https://github.com/bfortuner/pytorch_tiramisu
and modified in agreement with their licence:
-----
MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
def __init__(
self,
in_channels=1,
out_channels=1,
drop_factor=0.0,
down_blocks=(5, 5, 5, 5, 5),
up_blocks=(5, 5, 5, 5, 5),
pool_factors=(2, 2, 2, 2, 2),
bottleneck_layers=5,
growth_rate=8,
out_chans_first_conv=16,
):
super(Tiramisu, self).__init__()
self.down_blocks = down_blocks
self.up_blocks = up_blocks
# init counts of channels
cur_channels_count = 0
skip_connection_channel_counts = []
# First Convolution
self.bn_layer = torch.nn.BatchNorm1d(out_chans_first_conv)
self.add_module(
"firstconv",
torch.nn.Conv1d(
in_channels=in_channels,
out_channels=out_chans_first_conv,
kernel_size=3,
stride=1,
padding=1,
bias=True,
),
)
cur_channels_count = out_chans_first_conv
# Downsampling path
self.denseBlocksDown = torch.nn.ModuleList([])
self.transDownBlocks = torch.nn.ModuleList([])
for i in range(len(self.down_blocks)):
self.denseBlocksDown.append(
Tiramisu._DenseBlock(
cur_channels_count,
growth_rate,
self.down_blocks[i],
drop_factor,
)
)
cur_channels_count += growth_rate * self.down_blocks[i]
skip_connection_channel_counts.insert(0, cur_channels_count)
self.transDownBlocks.append(
Tiramisu._TransitionDown(
cur_channels_count, drop_factor, pool_factors[i]
)
)
# Bottleneck
self.add_module(
"bottleneck",
Tiramisu._Bottleneck(
cur_channels_count,
growth_rate,
bottleneck_layers,
drop_factor,
),
)
prev_block_channels = growth_rate * bottleneck_layers
cur_channels_count += prev_block_channels
# Upsampling path
self.transUpBlocks = torch.nn.ModuleList([])
self.denseBlocksUp = torch.nn.ModuleList([])
for i in range(len(up_blocks) - 1):
self.transUpBlocks.append(
Tiramisu._TransitionUp(
prev_block_channels,
prev_block_channels,
pool_factors[-i - 1],
)
)
cur_channels_count = (
prev_block_channels + skip_connection_channel_counts[i]
)
self.denseBlocksUp.append(
Tiramisu._DenseBlock(
cur_channels_count,
growth_rate,
up_blocks[i],
drop_factor,
upsample=True,
)
)
prev_block_channels = growth_rate * self.up_blocks[i]
cur_channels_count += prev_block_channels
# Final DenseBlock
self.transUpBlocks.append(
Tiramisu._TransitionUp(
prev_block_channels, prev_block_channels, pool_factors[0]
)
)
cur_channels_count = (
prev_block_channels + skip_connection_channel_counts[-1]
)
self.denseBlocksUp.append(
Tiramisu._DenseBlock(
cur_channels_count,
growth_rate,
self.up_blocks[-1],
drop_factor,
upsample=False,
)
)
cur_channels_count += growth_rate * self.up_blocks[-1]
# Final Conv layer
self.finalConv = torch.nn.Conv1d(
in_channels=cur_channels_count,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
bias=True,
)
def forward(self, x):
out = self.bn_layer(self.firstconv((x)))
skip_connections = []
for i in range(len(self.down_blocks)):
out = self.denseBlocksDown[i](out)
skip_connections.append(out)
out = self.transDownBlocks[i](out)
out = self.bottleneck(out)
for i in range(len(self.up_blocks)):
skip = skip_connections.pop()
out = self.transUpBlocks[i](out, skip)
out = self.denseBlocksUp[i](out)
out = self.finalConv(out)
return out
# ----- Blocks for Tiramisu -----
class _DenseLayer(torch.nn.Sequential):
def __init__(self, in_channels, growth_rate, p):
super().__init__()
self.add_module("bn", torch.nn.BatchNorm1d(in_channels))
self.add_module("relu", torch.nn.ReLU(True))
self.add_module(
"conv",
torch.nn.Conv1d(
in_channels,
growth_rate,
kernel_size=3,
stride=1,
padding=1,
bias=True,
),
)
self.add_module("drop", torch.nn.Dropout(p=p))
def forward(self, x):
return super().forward(x)
class _DenseBlock(torch.nn.Module):
def __init__(
self, in_channels, growth_rate, n_layers, p, upsample=False
):
super().__init__()
self.upsample = upsample
self.layers = torch.nn.ModuleList(
[
Tiramisu._DenseLayer(
in_channels + i * growth_rate, growth_rate, p
)
for i in range(n_layers)
]
)
def forward(self, x):
if self.upsample:
new_features = []
# we pass all previous activations to each dense layer normally
# But we only store each layer's output in the new_features
for layer in self.layers:
out = layer(x)
x = torch.cat([x, out], dim=1)
new_features.append(out)
return torch.cat(new_features, dim=1)
else:
for layer in self.layers:
out = layer(x)
x = torch.cat([x, out], dim=1) # 1 = channel axis
return x
class _TransitionDown(torch.nn.Sequential):
def __init__(self, in_channels, p, pool_factor):
super().__init__()
self.add_module("bn", torch.nn.BatchNorm1d(in_channels))
self.add_module("relu", torch.nn.ReLU(inplace=True))
self.add_module(
"conv",
torch.nn.Conv1d(
in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0,
bias=True,
),
)
self.add_module("drop", torch.nn.Dropout(p))
self.add_module(
"maxpool",
torch.nn.MaxPool1d(
kernel_size=pool_factor, stride=pool_factor
),
)
def forward(self, x):
return super().forward(x)
class _TransitionUp(torch.nn.Module):
def __init__(self, in_channels, out_channels, pool_factor):
super().__init__()
self.convTrans = torch.nn.ConvTranspose1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=pool_factor,
padding=0,
bias=True,
)
def forward(self, x, skip):
out = self.convTrans(x)
out = Tiramisu._center_crop(out, skip.size(2))
out = torch.cat([out, skip], dim=1)
return out
class _Bottleneck(torch.nn.Sequential):
def __init__(self, in_channels, growth_rate, n_layers, p):
super().__init__()
self.add_module(
"bottleneck",
Tiramisu._DenseBlock(
in_channels, growth_rate, n_layers, p, upsample=True
),
)
def forward(self, x):
return super().forward(x)
def _center_crop(layer, max_width):
_, _, w = layer.size()
c = (w - max_width) // 2
return layer[:, :, c : (c + max_width)]
# ----- Classification Network -----
# we reuse the InvNet class as a base and overwrite everything that does not
# apply to the classification setting
class Simple2DConvNet(InvNet):
def __init__(self, drop_factor=0.5):
super(Simple2DConvNet, self).__init__()
self.net = torch.nn.Sequential(
OrderedDict(
[
("conv1", torch.nn.Conv2d(1, 32, (3, 3))),
("relu1", torch.nn.ReLU(True)),
("conv2", torch.nn.Conv2d(32, 32, (3, 3))),
("relu2", torch.nn.ReLU(True)),
(
"pool1",
torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2),
),
("conv3", torch.nn.Conv2d(32, 64, (3, 3))),
("relu3", torch.nn.ReLU(True)),
("conv4", torch.nn.Conv2d(64, 64, (3, 3))),
("relu4", torch.nn.ReLU(True)),
(
"pool2",
torch.nn.MaxPool2d(kernel_size=(2, 2), stride=2),
),
("flatten", torch.nn.Flatten()),
("linear1", torch.nn.Linear(1024, 200)),
("dropout1", torch.nn.Dropout(drop_factor)),
("relu5", torch.nn.ReLU(True)),
("linear2", torch.nn.Linear(200, 200)),
("dropout2", torch.nn.Dropout(drop_factor)),
("relu6", torch.nn.ReLU(True)),
("linear6", torch.nn.Linear(200, 10)),
]
)
)
def forward(self, x):
return self.net(x)
def _on_epoch_end(
self,
epoch,
save_epochs,
save_path,
logging,
loss,
inp,
tar,
pred,
v_loss,
v_inp,
v_tar,
v_pred,
val_data,
):
self._print_info()
logging = logging.append(
{
"loss": loss.item(),
"val_loss": v_loss.item(),
"accuracy": accuracy(pred, tar).item(),
"val_accuracy": accuracy(v_pred, v_tar).item(),
},
ignore_index=True,
sort=False,
)
print(logging.tail(1))
if (epoch + 1) % save_epochs == 0:
fig = self._create_figure(
logging, loss, inp, tar, pred, v_loss, v_inp, v_tar, v_pred
)
os.makedirs(save_path, exist_ok=True)
torch.save(
self.state_dict(),
os.path.join(
save_path, "model_weights_epoch{}.pt".format(epoch + 1)
),
)
logging.to_pickle(
os.path.join(
save_path, "losses_epoch{}.pkl".format(epoch + 1)
),
)
if fig is not None:
fig.savefig(
os.path.join(
save_path, "plot_epoch{}.png".format(epoch + 1)
),
bbox_inches="tight",
)
return logging
def train_on(
self,
train_data,
val_data,
num_epochs,
batch_size,
loss_func,
save_path,
save_epochs=50,
optimizer=torch.optim.Adam,
optimizer_params={"lr": 2e-4, "eps": 1e-3},
scheduler=torch.optim.lr_scheduler.StepLR,
scheduler_params={"step_size": 1, "gamma": 1.0},
acc_steps=1,
train_transform=None,
val_transform=None,
):
optimizer = optimizer(self.parameters(), **optimizer_params)
scheduler = scheduler(optimizer, **scheduler_params)
logging = pd.DataFrame(
columns=["loss", "val_loss", "accuracy", "val_accuracy"]
)
inp_train, tar_train = train_data
inp_val, tar_val = val_data
for epoch in range(num_epochs):
permutation = torch.randperm(inp_train.shape[0])
self.train() # make sure we are in train mode
t = tqdm(
range(0, inp_train.shape[0], batch_size),
desc="epoch {} / {}".format(epoch, num_epochs),
)
optimizer.zero_grad()
loss = 0.0
for i in t:
indices = permutation[i : i + batch_size]
batch = (
train_transform(inp_train[indices, ...])
if train_transform is not None
else inp_train[indices, ...],
tar_train[indices, ...],
)
loss_b, inp, tar, pred = self._train_step(
i, batch, loss_func, optimizer, batch_size, acc_steps
)
t.set_postfix(
**self._add_to_progress_bar({"loss": loss_b.item()})
)
loss += loss_b
loss /= -(-inp_train.shape[0] // batch_size)
with torch.no_grad():
self.eval() # make sure we are in eval mode
scheduler.step()
v_loss = 0.0
for ii in range(0, inp_val.shape[0], batch_size):
v_batch = (
val_transform(inp_val[ii : ii + batch_size])
if val_transform is not None
else inp_val[ii : ii + batch_size],
tar_val[ii : ii + batch_size],
)
v_loss_b, v_inp, v_tar, v_pred = self._val_step(
ii, v_batch, loss_func
)
v_loss += v_loss_b
v_loss /= -(-inp_val.shape[0] // batch_size)
logging = self._on_epoch_end(
epoch,
save_epochs,
save_path,
logging,
loss,
inp,
tar,
pred,
v_loss,
v_inp,
v_tar,
v_pred,
val_data,
)
self._on_train_end(save_path, logging)
return logging
def _create_figure(
self, logging, loss, inp, tar, pred, v_loss, v_inp, v_tar, v_pred
):
def _implot(sub, im):
return sub.imshow(im[0, 0, :, :].detach().cpu())
fig, subs = plt.subplots(2, 2, clear=True, num=1, figsize=(8, 6))
# training and validation loss
subs[0, 0].set_title("losses")
subs[0, 0].semilogy(logging["loss"], label="train")
subs[0, 0].semilogy(logging["val_loss"], label="val")
subs[0, 0].legend()
# training and validation accuracy
subs[1, 0].set_title("accuracies")
subs[1, 0].plot(logging["accuracy"], label="train")
subs[1, 0].plot(logging["val_accuracy"], label="val")
subs[1, 0].legend()
# validation and training example
_implot(subs[0, 1], v_inp)
subs[0, 1].set_title(
"val inp {} | {}".format(torch.argmax(v_pred, dim=-1)[0], v_tar[0])
)
_implot(subs[1, 1], inp)
subs[1, 1].set_title(
"train inp {} | {}".format(torch.argmax(pred, dim=-1)[0], tar[0])
)
return fig
```
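As a quick smoke test of the classes above, the following sketch wraps a small `UNet` in an `IterativeNet` using an identity stand-in for the measurement operator and inverter; `_IdentityOp`, the feature count, and the signal length are made up for illustration and are not part of the repository:
```python
import torch

from networks import IterativeNet, UNet

class _IdentityOp:
    """Toy stand-in for a linear operator providing A and its adjoint A'."""
    def __call__(self, x):
        return x
    def adj(self, x):
        return x

subnet = UNet(in_channels=1, out_channels=1, base_features=8)
it_net = IterativeNet(
    subnet,
    operator=_IdentityOp(),
    inverter=_IdentityOp(),
    num_iter=2,
    lam=0.1,
    lam_learnable=False,
)
x = torch.randn(4, 1, 64)  # batch of 1D signals: [batch, channels, length]
print(it_net(x).shape)  # torch.Size([4, 1, 64])
```
With a real measurement operator, `inverter` would be a (regularized) pseudo-inverse and the data consistency step inside `forward` pulls the subnet output back towards the measurements.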
#### File: robust-nets/tvsynth/reconstruction_methods.py
```python
import torch
from tqdm import tqdm
from operators import prox_l2_constraint_conjugate, shrink
# ----- Iterative reconstruction algorithms -----
def primaldual(
y,
OpA,
OpW,
c0,
eta,
y0=None,
iter=20,
sigma=0.5,
tau=0.5,
theta=1.0,
silent=False,
report_pd_gap=False,
):
""" Primal Dual algorithm.
Reconstruction algorithm for the inverse problem y = Ax + e under the
synthesis model x=Wc for some sparse coefficients c and ||e||_2 <= eta.
min ||c||_1 s.t. ||AWc - y||_2 <= eta
Basic iteration steps are
1) y_ = prox_l2_constraint_conjugate(y_+sig*AWc_, sigma*y, sigma*eta)
2) c = shrink(c-tau*W'A'y_, tau)
3) c_ = c + theta*(c - cold)
Parameters
----------
y : torch.Tensor
The measurement vector.
OpA : operators.LinearOperator
The measurement operator, providing A and A'.
OpW : operators.LinearOperator
The synthesis operator, providing W and W'.
    c0 : torch.Tensor
        Initial guess for the coefficients, typically torch.zeros(...) of
        appropriate size.
    eta : float
        The measurement noise level, specifying the constraint.
    y0 : torch.Tensor, optional
        Initial guess for the dual variable, typically torch.zeros(...) of
        appropriate size. (Default None)
iter : int, optional
Number of primal dual iterations. (Default 20)
sigma : float, optional
Step size parameter, should satisfy sigma*tau*||AW||_2^2 < 1.
(Default 0.5)
tau : float, optional
Step size parameter, should satisfy sigma*tau*||AW||_2^2 < 1.
(Default 0.5)
theta : float, optional
        Extrapolation parameter, arbitrary in [0,1]. (Default 1.0)
silent : bool, optional
Disable progress bar. (Default False)
report_pd_gap : bool, optional
Report pd-gap at the end.
Returns
-------
    tuple of torch.Tensor
        The recovered signal x=Wc, the coefficients c, and the dual variable.
"""
# we do not explicitly check for sig*tau*||AW||_2^2 < 1 and trust that
# the user read the documentation ;)
if y0 is None:
y0 = torch.zeros_like(y)
# helper functions for primal-dual gap
def F(_y):
return ((_y - y).norm(p=2, dim=-1) > (eta + 1e-2)) * 1e4
def Fstar(_y):
return eta * _y.norm(p=2, dim=-1) + (y * _y).sum(dim=-1)
def Gstar(_y):
return ((torch.max(torch.abs(_y), dim=-1))[0] > (1.0 + 1e-2)) * 1e4
# init iteration variables
c = c0.clone()
c_ = c.clone()
y_ = y0.clone()
# run main primal dual iterations
for it in tqdm(range(iter), desc="Primal-Dual iterations", disable=silent):
# primal dual step 1)
y_ = prox_l2_constraint_conjugate(
y_ + sigma * OpA(OpW(c_)), sigma * y, sigma * eta
)
# primal dual step 2)
cold, c = c, shrink(c - tau * OpW.adj(OpA.adj(y_)), tau)
# primal dual step 3
c_ = c + theta * (c - cold)
# compute primal dual gap
if report_pd_gap:
E = (
F(OpA(OpW(c_)))
+ c_.abs().sum(dim=-1)
+ Fstar(y_)
+ Gstar(-OpW.adj(OpA.adj(y_)))
)
print("\n\n Primal Dual Gap: \t {:1.4e} \n\n".format(E.abs().max()))
return OpW(c_), c_, y_
```
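A self-contained sketch of calling `primaldual` on a toy problem; `_MatrixOp`, the problem sizes, and the sparsity pattern are illustrative assumptions, and the import assumes the module above is available as `reconstruction_methods`:
```python
import torch

from reconstruction_methods import primaldual

class _MatrixOp:
    """Toy linear operator wrapper providing forward and adjoint maps."""
    def __init__(self, mat):
        self.mat = mat
    def __call__(self, x):
        return x @ self.mat.T
    def adj(self, y):
        return y @ self.mat

torch.manual_seed(0)
n, m = 64, 32
OpA = _MatrixOp(torch.randn(m, n) / n ** 0.5)  # random measurement operator
OpW = _MatrixOp(torch.eye(n))  # trivial synthesis operator (x = c)
c_true = torch.zeros(n)
c_true[[5, 17, 40]] = torch.tensor([1.0, -2.0, 0.5])  # sparse coefficients
noise = 0.01 * torch.randn(m)
y = OpA(OpW(c_true)) + noise
x_rec, c_rec, y_dual = primaldual(
    y, OpA, OpW, torch.zeros(n), eta=noise.norm(), iter=500, silent=True
)
print((x_rec - c_true).norm() / c_true.norm())  # relative error of x = Wc
```
In the accompanying scripts, `OpA` and `OpW` come from the repository's `operators` module rather than explicit matrices, and the step sizes should be chosen so that `sigma * tau * ||AW||_2^2 < 1` holds, as noted in the docstring.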
#### File: robust-nets/tvsynth/script_train_unet_jitter.py
```python
import os
import matplotlib as mpl
import torch
from data_management import Jitter, load_dataset
from networks import IterativeNet, UNet
from operators import TVAnalysis, get_tikhonov_matrix
# ----- load configuration -----
import config # isort:skip
# ----- general setup -----
mpl.use("agg")
device = torch.device("cuda:0")
# ----- operators -----
OpA = config.meas_op(config.m, config.n, device=device, **config.meas_params)
OpTV = TVAnalysis(config.n, device=device)
# ----- build linear inverter ------
reg_fac = 2e-2
inverter = torch.nn.Linear(OpA.m, OpA.n, bias=False)
inverter.weight.requires_grad = False
inverter.weight.data = get_tikhonov_matrix(OpA, OpTV, reg_fac)
# ----- network configuration -----
subnet_params = {
"in_channels": 1,
"out_channels": 1,
"drop_factor": 0.0,
"base_features": 64,
}
subnet = UNet
it_net_params = {
"operator": OpA,
"inverter": inverter,
"num_iter": 1,
"lam": 0.0,
"lam_learnable": False,
"final_dc": False,
}
# ----- training setup ------
mse_loss = torch.nn.MSELoss(reduction="sum")
def loss_func(pred, tar):
return mse_loss(pred, tar) / pred.shape[0]
train_phases = 2
train_params = {
"num_epochs": [200, 75],
"batch_size": [40, 40],
"loss_func": loss_func,
"save_path": [
os.path.join(
config.RESULTS_PATH,
"unet_jitter_"
"train_phase_{}".format((i + 1) % (train_phases + 1)),
)
for i in range(train_phases + 1)
],
"save_epochs": 1,
"optimizer": torch.optim.Adam,
"optimizer_params": [
{"lr": 8e-5, "eps": 1e-5, "weight_decay": 5e-3},
{"lr": 5e-5, "eps": 1e-5, "weight_decay": 5e-3},
],
"scheduler": torch.optim.lr_scheduler.StepLR,
"scheduler_params": {"step_size": 1, "gamma": 1.0},
"acc_steps": [1, 200],
"train_transform": Jitter(2e0, 0.0, 1.0),
}
# ----- data prep -----
X_train, C_train, Y_train = [
tmp.unsqueeze(-2).to(device)
for tmp in load_dataset(config.set_params["path"], subset="train")
]
X_val, C_val, Y_val = [
tmp.unsqueeze(-2).to(device)
for tmp in load_dataset(config.set_params["path"], subset="val")
]
# ------ save hyperparameters -------
os.makedirs(train_params["save_path"][-1], exist_ok=True)
with open(
os.path.join(train_params["save_path"][-1], "hyperparameters.txt"), "w"
) as file:
for key, value in subnet_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in it_net_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in train_params.items():
file.write(key + ": " + str(value) + "\n")
file.write("train_phases" + ": " + str(train_phases) + "\n")
# ------ construct network and train -----
subnet = subnet(**subnet_params).to(device)
it_net = IterativeNet(subnet, **it_net_params).to(device)
for i in range(train_phases):
train_params_cur = {}
for key, value in train_params.items():
train_params_cur[key] = (
value[i] if isinstance(value, (tuple, list)) else value
)
print("Phase {}:".format(i + 1))
for key, value in train_params_cur.items():
print(key + ": " + str(value))
it_net.train_on((Y_train, X_train), (Y_val, X_val), **train_params_cur)
``` |
{
"source": "jmaces/statstream",
"score": 3
} |
#### File: src/statstream/approximate.py
```python
import numpy as np
from scipy.linalg import svd
from scipy.sparse.linalg import svds
from tqdm import tqdm
from .exact import streaming_mean
def _truncated_svd(A, rank=None):
"""Private helper function for calculating truncated singular value
decompositions.
Given a matrix A and a rank K computes the truncated singular value
decomposition U, S, V of A where the K largest singular values are kept and
the rest are truncated to zero. If A has shape [N, M] then U, S, V have
shapes [N, K], [K], [M, K] respectively, and U @ np.diag(S) @ V.T is the
best rank K approximation to A with respect to the spectral norm.
This function internally uses standard numpy and scipy routines for
calculating the SVD, it merely serves as a `syntactic sugar` container
for deciding which subroutines to use depending on the shape of A and
the rank K.
Parameters
----------
A : array
The matrix to decompose.
rank : int
Number of singular values to keep for the truncated SVD.
Returns
-------
S : array
(Truncated) singular values of A.
U, V : array
matrix factors of the (truncated) singular value decomposition of A.
"""
if not rank:
rank = np.min(A.shape)
if rank < np.min(A.shape):
# use truncated SVD if rank is reduced
U, S, VT = svds(A.astype(np.float64), rank)
else:
# use full SVD otherwise
U, S, VT = svd(A.astype(np.float64), full_matrices=False)
V = VT.T
return U, S, V
def _merge_low_rank_eigendecomposition(S1, V1, S2, V2, rank=None):
"""Private helper function for merging SVD based low rank approximations.
Given factors S1, V1 and S2, V2 of shapes [K1], [M, K1] and [K2], [M, K2]
respectively of singular value decompositions
A1 = U1 @ np.diag(S1) @ V1.T
A2 = U2 @ np.diag(S2) @ V2.T
merge them into factors S, V of shape [K], [M, K] of an approximate
decomposition A = U @ np.diag(S) @ V.T, where A is the concatenation of A1
and A2 along the first axis. This is done without the need of calculating
U1, U2, and U.
This is useful for merging eigendecompositions V @ np.diag(S**2) @ V.T of
autocorrelation (or similarly covariance) matrices A.T @ A that do not
require U. Using truncated singular value decompositons can be used for
merging low rank approximations.
Parameters
----------
S1 : array
Singular values of first matrix.
V1 : array
Factor of the singular value decomposition of first matrix.
S2 : array
Singular values of second matrix.
V2 : array
Factor of the singular value decomposition of second matrix.
rank : int
Number of singular values to keep after merging. If set to `None`
no truncation will be done, thus rank will equal the sum of
singular values given in S1 and S2.
Returns
-------
S : array
(Truncated) singular values of the singular value decomposition of
concatenated matrix.
V : array
Factor of the singular value decomposition of concatenated matrix.
Notes
-----
The algorithm for combining SVD based low rank approximations is
described in more detail in [1]_.
References
----------
.. [1] <NAME>,
"Scalability of Semantic Analysis in Natural Language Processing",
2011.
"""
rank1, rank2 = S1.size, S2.size
if not rank or rank > rank1 + rank2:
rank = rank1 + rank2
if rank > min(V1.shape[0], V2.shape[0]):
rank = min(V1.shape[0], V2.shape[0])
Z = np.matmul(V1.T, V2)
Q, R = np.linalg.qr(V2 - np.matmul(V1, Z), mode="reduced")
Zfill = np.zeros([rank2, rank1])
B = np.concatenate(
[
np.concatenate([np.diag(S1), np.matmul(Z, np.diag(S2))], axis=1),
np.concatenate([Zfill, np.matmul(R, np.diag(S2))], axis=1),
],
axis=0,
)
    U, S, _ = _truncated_svd(B, rank=rank)
V = np.matmul(V1, U[:rank1, :]) + np.matmul(Q, U[rank1:, :])
return S, V
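# A commented sanity check for the merge step above (names and sizes are
# illustrative; kept as a comment so nothing runs at import time):
#
#     A1, A2 = np.random.randn(100, 20), np.random.randn(80, 20)
#     _, S1, V1 = _truncated_svd(A1, rank=5)
#     _, S2, V2 = _truncated_svd(A2, rank=5)
#     S, V = _merge_low_rank_eigendecomposition(S1, V1, S2, V2, rank=5)
#     # V @ np.diag(S**2) @ V.T then approximates the autocorrelation
#     # np.concatenate([A1, A2]).T @ np.concatenate([A1, A2]) of the stacked
#     # data up to rank 5, without ever forming U for the stacked matrix.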
def streaming_low_rank_autocorrelation(
X, rank, steps=None, shift=0.0, tree=False
):
"""Low rank factorization of the sample autocorrelation matrix of a
streaming dataset.
Computes a factorization of the autocorrelation matrix of a dataset from
a stream of batches of samples. If the full data set was given in a matrix
``A`` of shape ``[N, M]``, where ``N`` is the number of data samples and
``M`` is the dimensionality of each sample, then the autocorrelation matrix
is ``1/(N-1)*A.T @ A`` and of shape ``[M, M]``.
The function computes a matrix ``L`` of shape ``[M, K]`` such that
``L.T @ L`` is an approximation of the autocorrelation matrix of rank at
most ``K``.
This is done from a stream of sample batches without ever explicitly
forming matrices of the full shape ``[M, M]``. Batches can be combined in a
*linear* streaming way (which gives more relative weight to later batches)
or in *binary tree* mode, where batches are combined pairwise, then the
results are combined again pairwise and so on (this leads to an additional
memory requirement of a factor of ``log(N)``).
The data has to be provided by an iterator yielding batches of samples.
Either a number of steps can be specified, or the iterator is assumed to
be emptied in a finite number of steps. In the first case only the given
number of batches is extracted from the iterator and used for the
correlation calculation, even if the iterator could yield more data.
Samples are given along the first axis. The correlation has the squared
shape as the remaining axes, e.g. batches of shape
``[batch_size, d1, ..., dN]`` will result in a correlation factor of shape
``[K, d1, ..., dN]``.
This function consumes an iterator, thus finite iterators will be empty
after a call to this function, unless ``steps`` is set to a smaller number
than batches in the iterator.
Parameters
----------
X : iterable
An iterator yielding the batches of samples.
rank : int
The maximal rank of the approximate decomposition factor.
steps : int, optional
The number of batches to use from the iterator (all available batches
        are used if set to `None`). The default is `None`.
shift : array, optional
Apply a shift of data samples before calculating correlations, that is
use (X-shift) instead of X (must be broadcastable to the shape of
batches from X). The default is 0.0, that is no shift is used.
tree : bool, optional
Use the binary tree mode to combine batches more evenly at the cost of
additional memory requirement. The default is `False`.
Returns
-------
array
A low-rank factor of a symmetric decomposition of the autocorrelation
matrix of the seen data.
Notes
-----
The algorithm for combining SVD based low rank approximations is
described in more detail in [1]_.
References
----------
.. [1] <NAME>,
"Scalability of Semantic Analysis in Natural Language Processing",
2011.
"""
def _process_batch(batch, S, V, rank, count):
batch_size = batch.shape[0]
Ub, Sb, Vb = _truncated_svd(np.reshape(batch, [batch_size, -1]), rank,)
if S is None or V is None:
S, V = Sb, Vb
else:
S, V = _merge_low_rank_eigendecomposition(S, V, Sb, Vb, rank=rank)
count += batch_size
return S, V, count
def _tree_process_batch(batch, stack, rank, count):
batch_size = batch.shape[0]
Ub, Sb, Vb = _truncated_svd(np.reshape(batch, [batch_size, -1]), rank,)
stack.append({"S": Sb, "V": Vb, "level": 0})
while len(stack) >= 2 and stack[-1]["level"] == stack[-2]["level"]:
item1, item2 = stack.pop(), stack.pop()
S, V = _merge_low_rank_eigendecomposition(
item1["S"], item1["V"], item2["S"], item2["V"], rank=rank
)
stack.append({"S": S, "V": V, "level": item1["level"] + 1})
count += batch_size
return stack, count
if tree:
stack, count = [], 0
if steps:
for step in tqdm(range(steps), "autocorrelation approximation"):
batch = next(X)
if isinstance(batch, tuple) and len(batch) > 1:
batch = batch[0]
stack, count = _tree_process_batch(
batch - shift, stack, rank, count
)
else:
for batch in tqdm(X, "autocorrelation approximation"):
if isinstance(batch, tuple) and len(batch) > 1:
batch = batch[0]
stack, count = _tree_process_batch(
batch - shift, stack, rank, count
)
while len(stack) >= 2:
item1, item2 = stack.pop(), stack.pop()
S, V = _merge_low_rank_eigendecomposition(
item1["S"], item1["V"], item2["S"], item2["V"], rank
)
stack.append({"S": S, "V": V, "level": item1["level"] + 1})
S, V = stack[0]["S"], stack[0]["V"]
else:
S, V, count = None, None, 0
if steps:
for step in tqdm(range(steps), "autocorrelation approximation"):
batch = next(X)
if isinstance(batch, tuple) and len(batch) > 1:
batch = batch[0]
S, V, count = _process_batch(batch - shift, S, V, rank, count)
else:
for batch in tqdm(X, "autocorrelation approximation"):
if isinstance(batch, tuple) and len(batch) > 1:
batch = batch[0]
S, V, count = _process_batch(batch - shift, S, V, rank, count)
factor = V * np.expand_dims(S, 0)
return np.reshape(factor.T, (S.size,) + batch.shape[1:]) / np.sqrt(
count - 1
)
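# Commented usage sketch for the streaming autocorrelation factor (batch
# sizes and shapes are illustrative; nothing here runs at import time):
#
#     batches = iter([np.random.randn(32, 8, 8) for _ in range(10)])
#     L = streaming_low_rank_autocorrelation(batches, rank=5)
#     # L has shape [5, 8, 8]; with L_flat = L.reshape(5, -1), the Gram matrix
#     # L_flat.T @ L_flat approximates 1/(N-1) * A.T @ A for the N = 320
#     # flattened samples A of shape [320, 64].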
def streaming_low_rank_cov(X, rank, steps=None, tree=False, reset=None):
"""Low rank factorization of the covariance matrix of a streaming dataset.
Computes a factorization of the covariance matrix of a dataset from a
stream of batches of samples.
If the full data set was given in a matrix ``A`` of shape ``[N, M]``, where
``N`` is the number of data samples and ``M`` is the dimensionality of each
sample, then the covariance matrix is
``1/(N-1)*(A-mean(A)).T @ (A-mean(A))`` and of shape ``[M, M]``.
The function computes a matrix ``L`` of shape ``[M, K]`` such that
``L.T @ L`` is an approximation of the covariance matrix of rank at most
``K``.
This is done in a two-pass algorithm that first computes the mean from a
stream of batches and then the covariance using
`streaming_low_rank_autocorrelation` shifted by the precomputed mean.
This is done from a stream of sample batches without ever explicitly
forming matrices of the full shape ``[M, M]``. Batches can be combined in a
*linear* streaming way (which gives more relative weight to later batches)
or in *binary tree* mode, where batches are combined pairwise, then the
results are combined again pairwise and so on (this leads to an additional
memory requirement of a factor of ``log(N)``).
The data has to be provided by an iterator yielding batches of samples.
Either a number of steps can be specified, or the iterator is assumed to
be emptied in a finite number of steps. In the first case only the given
number of batches is extracted from the iterator and used for the
covariance calculation, even if the iterator could yield more data.
Samples are given along the first axis. The full covariance would have the
shape of the remaining axes repeated twice, e.g. batches of shape
``[batch_size, d1, ..., dN]`` would result in a covariance matrix of shape
``[d1, ..., dN, d1, ..., dN]``. The low-rank covariance factor ``L`` will
have shape ``[K, d1, ..., dN]``.
This function consumes an iterator twice, thus only finite iterators
can be handled and the given iterator will be empty after a call to this
function, unless ``steps`` is set to a smaller number than batches in the
iterator. For restarting the iterator for the second pass, a reset
function needs to be available. This can either be passed as a separate
argument or be part of the iterator itself. If no reset function is
provided as argument, the iterator ``X`` is assumed to have a ``reset()``
method, which is called after the mean computation.
Parameters
----------
X : iterable
An iterator yielding the batches of samples.
rank : int
The maximal rank of the approximate decomposition factor.
steps : int, optional
The number of batches to use from the iterator (all available batches
are used if set to `None`). The default is `None`.
tree : bool, optional
Use the binary tree mode to combine batches more evenly at the cost of
additional memory requirement. The default is `False`.
reset : callable or None, optional
A function handle to reset the iterator after the first pass for the
mean calculation. The reset function must accept the iterator as
argument and return the reset iterator. If set to `None` the iterator
is assumed to have a reset method, which will then be used. The default
is `None`.
Returns
-------
array
A low-rank factor of a symmetric decomposition of the covariance
matrix of the seen data.
Notes
-----
Computing the covariance necessarily includes computing the mean,
so there is no computational benefit of using `streaming_low_rank_cov` over
`streaming_mean_and_low_rank_cov`; this function performs the same two-pass
computation and simply discards the mean.
The algorithm for combining SVD based low rank approximations is
described in more detail in [1]_.
References
----------
.. [1] <NAME>,
"Scalability of Semantic Analysis in Natural Language Processing",
2011.
"""
mean = streaming_mean(X, steps=steps)
if reset:
X = reset(X)
else:
X.reset()
covariance = streaming_low_rank_autocorrelation(
X, rank, steps=steps, shift=mean, tree=tree,
)
return covariance
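# --- Illustrative usage sketch (editor's addition, not part of the original
# module). The two-pass functions above need a restartable batch iterator; one
# minimal way to provide this is a factory plus the ``reset`` callable. The
# data and shapes below are hypothetical.
#
# data = np.random.randn(1000, 32)                      # 1000 samples, 32 features
# make_stream = lambda: iter(np.array_split(data, 10))  # 10 batches of 100
# L = streaming_low_rank_cov(make_stream(), rank=8, reset=lambda it: make_stream())
# # flattened, L.T @ L approximates np.cov(data, rowvar=False) with rank <= 8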
def streaming_mean_and_low_rank_cov(
X, rank, steps=None, tree=False, reset=None
):
"""Mean and a low rank factorization of the covariance matrix of a
streaming dataset.
Computes the mean and a factorization of the covariance matrix of a dataset
from a stream of batches of samples.
If the full data set was given in a matrix ``A`` of shape ``[N, M]``, where
``N`` is the number of data samples and ``M`` is the dimensionality of each
sample, then the covariance matrix is
``1/(N-1)*(A-mean(A)).T @ (A-mean(A))`` and of shape ``[M, M]``.
The function computes a matrix ``L`` of shape ``[K, M]`` such that
``L.T @ L`` is an approximation of the covariance matrix of rank at most
``K``.
This is done in a two-pass algorithm that first computes the mean from a
stream of batches and then the covariance using
`streaming_low_rank_autocorrelation` shifted by the precomputed mean.
This is done from a stream of sample batches without ever explicitly
forming matrices of the full shape ``[M, M]``. Batches can be combined in a
*linear* streaming way (which gives more relative weight to later batches)
or in *binary tree* mode, where batches are combined pairwise, then the
results are combined again pairwise and so on (this leads to an additional
memory requirement of a factor of ``log(N)``).
The data has to be provided by an iterator yielding batches of samples.
Either a number of steps can be specified, or the iterator is assumed to
be emptied in a finite number of steps. In the first case only the given
number of batches is extracted from the iterator and used for the mean and
covariance calculation, even if the iterator could yield more data.
Samples are given along the first axis. The mean has the same shape as
the remaining axes, e.g. batches of shape ``[batch_size, d1, ..., dN]``
will produce a mean of shape ``[d1, ..., dN]``. The covariance factor ``L``
will have shape ``[K, d1, ..., dN]``.
This function consumes an iterator twice, thus only finite iterators
can be handled and the given iterator will be empty after a call to this
function, unless ``steps`` is set to a smaller number than batches in the
iterator. For restarting the iterator for the second pass, a reset
function needs to be available. This can either be passed as a separate
argument or be part of the iterator itself. If no reset function is
provided as argument, the iterator ``X`` is assumed to have a ``reset()``
method, which is called after the mean computation.
Parameters
----------
X : iterable
An iterator yielding the batches of samples.
rank : int
The maximal rank of the approximate decomposition factor.
steps : int, optional
The number of batches to use from the iterator (all available batches
are used if set to `None`). The default is `None`.
tree : bool, optional
Use the binary tree mode to combine batches more evenly at the cost of
additional memory requirement. The default is `False`.
reset : callable or None, optional
A function handle to reset the iterator after the first pass for the
mean calculation. The reset function must accept the iterator as
argument and return the reset iterator. If set to `None` the iterator
is assumed to have a reset method, which will then be used. The default
is `None`.
Returns
-------
array
The mean of the seen data samples.
array
A low-rank factor of a symmetric decomposition of the covariance
matrix of the seen data.
Notes
-----
The algorithm for combining SVD based low rank approximations is
described in more detail in [1]_.
References
----------
.. [1] <NAME>,
"Scalability of Semantic Analysis in Natural Language Processing",
2011.
"""
mean = streaming_mean(X, steps=steps)
if reset:
X = reset(X)
else:
X.reset()
covariance = streaming_low_rank_autocorrelation(
X, rank, steps=steps, shift=mean, tree=tree,
)
return mean, covariance
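# --- Illustrative sketch (editor's addition): the mean/covariance variant and
# the binary tree combination mode, reusing the hypothetical batch factory from
# the sketch above.
#
# mean, L = streaming_mean_and_low_rank_cov(make_stream(), rank=8, tree=True, reset=lambda it: make_stream())
# # `mean` has shape (32,); `L` has at most 8 rows and L.T @ L approximates the
# # covariance of the streamed samples.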
# aliases
streaming_low_rank_covariance = streaming_low_rank_cov
streaming_mean_and_low_rank_covariance = streaming_mean_and_low_rank_cov
s_low_rank_autocorrelation = streaming_low_rank_autocorrelation
s_low_rank_cov = streaming_low_rank_cov
s_low_rank_covariance = streaming_low_rank_covariance
s_mean_and_low_rank_cov = streaming_mean_and_low_rank_cov
s_mean_and_low_rank_covariance = streaming_mean_and_low_rank_covariance
``` |
{
"source": "jmacgregor-accenture/mockRPiGPIO",
"score": 2
} |
#### File: mockRPiGPIO/mock_rpigpio/mock_gpio.py
```python
BCM = "Fake Thing"
OUT = "Another Fake Thing"
def setwarnings(setWarnings):
return bool(setWarnings)
def setmode(mode):
return mode
def setup(port, mode):
return True
``` |
{
"source": "jmacgrillen/mac_lib",
"score": 3
} |
#### File: mac_lib/maclib/mac_colours.py
```python
def limit_number(num: int) -> int:
"""
Limit the min and max of an int to a range of
0 and 255.
"""
return max(0, min(num, 255))
def rgb2hex(red: int, green: int, blue: int) -> str:
"""
Convert RGB values to a hex string. i.e. #ffffff
"""
return "#{0:02x}{1:02x}{2:02x}".format(
limit_number(red),
limit_number(green),
limit_number(blue)
)
if __name__ == "__main__": # pragma: no cover
pass
```
#### File: mac_lib/maclib/mac_detect.py
```python
import platform
import sys
if 'nt' in sys.builtin_module_names: # pragma: no cover
import winreg
else: # pragma: no cover
import fake_winreg as winreg # type: ignore
class MacDetect(object):
"""
Gather some information about the platform we're running on.
"""
os_name: str
os_version: str
os_theme: str
architecture: str
python_version: str
python_compiler: str
python_implementation: str
def __init__(self):
"""
Run through the platform and gather the info.
"""
platform_info = platform.uname()
self.os_name = platform_info[0]
self.os_version = platform_info[3]
self.architecture = platform_info[4]
self.python_version = platform.python_version()
self.python_compiler = platform.python_compiler()
self.python_implementation = platform.python_implementation()
if self.os_name == "Windows":
self.detect_windows_theme()
def detect_windows_theme(self) -> None:
"""
Check whether Windows is using dark mode or not.
"""
key = winreg.OpenKey(key=winreg.HKEY_CURRENT_USER,
sub_key="Software\\Microsoft\\Windows\\"
"CurrentVersion\\Themes\\Personalize")
subkey = winreg.QueryValueEx(key, "AppsUseLightTheme")[0]
if subkey == 0:
self.os_theme = "Dark"
else:
self.os_theme = "Light"
if __name__ == "__main__": # pragma: no cover
pass
```
#### File: mac_lib/maclib/mac_logger.py
```python
import os
import sys
import logging
import logging.handlers
import time
import inspect
from pathlib import Path
FORMAT_SYSLOG = 1
FORMAT_JSON = 2
LOGGER_NAME = Path(inspect.stack()[-1][1]).stem
def configure_logger(log_file_uri: str = None,
logging_level: int = logging.INFO,
logger_name: str = LOGGER_NAME,
use_stdout: bool = True,
use_utc: bool = True,
use_format: int = FORMAT_SYSLOG) -> logging.Logger:
"""
Setup the built in logger to work as we want it.
:arg log_file_uri: Where we want to make our log file
:arg logging_level: What level do we want to start logging at?
Default is set to INFO
:arg logger_name: Set the logger name. The default will be the name of the
process. It can be overridden here, but won't be persistent.
:arg use_stdout: Flag to show whether to output to stdout or not.
:arg use_utc: Log timestamps will use UTC. Defaults to True.
:arg use_format: Format the output as SYSLOG or JSON. Default is SYSLOG.
"""
# We may not need a file, so only configure it if it's been set.
# Otherwise just dump to the console.
log_formatter: logging.Formatter
mac_logger: logging.Logger
log_file_handler: logging.Handler
format_config: str
date_config: str
if use_format is FORMAT_JSON:
if use_utc is True:
format_config = "{ event_time : \"%(asctime)s.%(msecs)03dZ\"" \
",level : \"%(levelname)s\", function_name: \"" \
"%(module)s.%(funcName)s\", message: \"" \
"%(message)s\" }"
elif use_utc is False:
format_config = "{ event_time : \"%(asctime)s.%(msecs)03d\", " \
"level : \"%(levelname)s\", function_name: \"" \
" %(module)s.%(funcName)s\", message: \"" \
"%(message)s\" }"
else:
if use_utc:
format_config = "%(asctime)s.%(msecs)03dZ %(levelname)s " \
"%(module)s.%(funcName)s %(message)s"
else:
format_config = "%(asctime)s.%(msecs)03d %(levelname)s " \
"%(module)s.%(funcName)s %(message)s"
# ISO8601 Time format
date_config = "%Y-%m-%dT%H:%M:%S"
log_formatter = logging.Formatter(
fmt=format_config,
datefmt=date_config
)
if use_utc:
log_formatter.converter = time.gmtime
mac_logger = logging.getLogger(name=logger_name)
if log_file_uri:
# Check the logging directory is available.
# If not create it.
if not os.path.isdir(os.path.dirname(log_file_uri)):
try:
os.makedirs(os.path.dirname(log_file_uri))
except OSError as o_error:
print("Unable to create the logging directory "
f"{os.path.dirname(log_file_uri)}.\n{o_error.strerror}")
sys.exit(1)
log_file_handler = logging.handlers.RotatingFileHandler(
filename=log_file_uri,
maxBytes=1240000,
backupCount=5)
else:
log_file_handler = logging.StreamHandler()
log_file_handler.setFormatter(fmt=log_formatter)
mac_logger.addHandler(hdlr=log_file_handler)
# User can suppress console spam, but the log file will still be
# written to.
if use_stdout:
log_std_out = logging.StreamHandler(stream=sys.stdout)
log_std_out.setFormatter(fmt=log_formatter)
mac_logger.addHandler(hdlr=log_std_out)
mac_logger.setLevel(level=logging_level)
return mac_logger
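# Example usage (editor's sketch; the log file path below is purely illustrative):
# logger = configure_logger(log_file_uri="/tmp/myapp/myapp.log", logging_level=logging.DEBUG, use_format=FORMAT_JSON)
# logger.info("application started")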
if __name__ == "__main__": # pragma: no cover
pass
```
#### File: mac_lib/tests/test_exception.py
```python
import maclib.mac_exception as mexception
def test_exception_01():
"""
Test that the exception is raised properly
"""
test_message = "Test exception"
m_except = mexception.MacException(test_message)
assert test_message in str(m_except)
```
#### File: mac_lib/tests/test_file_management.py
```python
import pytest
from pathlib import Path
import os
import maclib.mac_file_management as fm
def test_fm_01_exists_true(monkeypatch):
"""
Test the does_exist function returns true
"""
fake_path = "test"
def mock_exists(faked_exist):
"""
Check the path is being passed correctly and
return true.
"""
assert f"{faked_exist.name}" == fake_path
return True
monkeypatch.setattr(Path, "exists", mock_exists)
assert fm.does_exist(fake_path) is True
def test_fm_02_exists_false(monkeypatch):
"""
Test the does_exist function returns false
"""
fake_path = "test"
def mock_exists(faked_exist):
"""
Check the path is being passed correctly and
return false.
"""
assert f"{faked_exist.name}" == fake_path
return False
monkeypatch.setattr(Path, "exists", mock_exists)
assert fm.does_exist(fake_path) is False
def test_fm_03_can_read_true(monkeypatch):
"""
Test the can read function returns true
"""
fake_path = "test"
def mock_access(path, mode):
"""
Check the path and mode are being passed correctly
and return true.
"""
assert path == fake_path
assert mode == os.R_OK
return True
monkeypatch.setattr(os, 'access', mock_access)
assert fm.can_read(fake_path) is True
def test_fm_04_can_read_false(monkeypatch):
"""
Test the can read function returns false
"""
fake_path = "test"
def mock_access(path, mode):
"""
Check the path and mode are being passed correctly
and return false.
"""
assert path == fake_path
assert mode == os.R_OK
return False
monkeypatch.setattr(os, 'access', mock_access)
assert fm.can_read(fake_path) is False
def test_fm_05_can_write_true(monkeypatch):
"""
Test the can write function returns true
"""
fake_path = "test"
def mock_access(path, mode):
"""
Check the path and mode are being passed correctly
and return true.
"""
assert path == fake_path
assert mode == os.W_OK
return True
monkeypatch.setattr(os, 'access', mock_access)
assert fm.can_write_to(fake_path) is True
def test_fm_06_can_write_false(monkeypatch):
"""
Test the can write function returns false
"""
fake_path = "test"
def mock_access(path, mode):
"""
Check the path and mode are being passed correctly
and return false.
"""
assert path == fake_path
assert mode == os.W_OK
return False
monkeypatch.setattr(os, 'access', mock_access)
assert fm.can_write_to(fake_path) is False
def test_fm_07_parent_dir_correct_path():
"""
Test the parent directory is being returned
"""
test_path = 'PYTEST_TMPDIR/test_directory/testfile.txt'
assert fm.get_parent_dir(test_path) == 'PYTEST_TMPDIR'
def test_fm_08_parent_dir_file_name_only():
"""
When being fed a file name this should raise an exception
"""
test_path = 'testfile.txt'
with pytest.raises(IndexError):
assert fm.get_parent_dir(test_path)
def test_fm_09_delete_file_true(tmp_path):
"""
Test the delete function
"""
temp_path = tmp_path / "test.txt"
with open(temp_path, 'w') as file_p:
file_p.write('Test\n')
assert fm.delete_file(temp_path) is True
def test_fm_10_delete_file_no_file():
"""
Test what happens when delete fails
"""
assert fm.delete_file("nofile.txt") is False
def test_fm_11_delete_file_failed(monkeypatch):
"""
Test that OSError is handled
"""
test_file_name = "nofile.txt"
def does_exist_true(os_path: str):
"""
Make sure we get a True returned.
"""
assert os_path == test_file_name
return True
def remove_response(os_path: str):
"""
Make sure that OSError is raised
"""
assert os_path == test_file_name
raise OSError("This is a test.")
monkeypatch.setattr(fm, 'does_exist', does_exist_true)
monkeypatch.setattr(os, 'remove', remove_response)
assert fm.delete_file(test_file_name) is False
def test_fm_12_create_dir(tmp_path):
"""
Test that does_exist is being properly invoked.
"""
test_dir_name = tmp_path / "mytest"
assert fm.create_dir(test_dir_name) is True
def test_fm_12_create_dir_already_exists(monkeypatch):
"""
Test that does_exist is being properly invoked.
"""
test_dir_name = "test_dir/mytest"
def does_exist_true(dir_name: str):
"""
Return True to test the creation fails.
"""
assert dir_name == test_dir_name
return True
monkeypatch.setattr(fm, 'does_exist', does_exist_true)
assert fm.create_dir(test_dir_name) is False
def test_fm_12_create_dir_cant_write(monkeypatch):
"""
Test that does_exist is being properly invoked.
"""
test_dir_name = "/home/test_dir/test_dir_name"
def does_exist_false(dir_name: str):
"""
Return True to test the creation fails.
"""
assert dir_name == test_dir_name
return False
def can_write_false(dir_name: str):
"""
Return True to test the creation fails.
"""
assert Path(dir_name) == Path('/home')
return False
monkeypatch.setattr(fm, 'does_exist', does_exist_false)
monkeypatch.setattr(fm, 'can_write_to', can_write_false)
assert fm.create_dir(test_dir_name) is False
def test_fm_12_create_dir_exception(monkeypatch):
"""
Test that does_exist is being properly invoked.
"""
test_dir_name = "/home/test_dir/mytest"
def does_exist_false(dir_name: str):
"""
Return True to test the creation fails.
"""
assert dir_name == test_dir_name
return False
def can_write_true(dir_name: str):
"""
Return True to test the creation fails.
"""
assert Path(dir_name) == Path('/home')
return True
def makedirs_exception(name: str):
"""
Raise an OSError to make sure it's handled
"""
assert name == test_dir_name
raise OSError("Test Exception")
monkeypatch.setattr(fm, 'does_exist', does_exist_false)
monkeypatch.setattr(fm, 'can_write_to', can_write_true)
monkeypatch.setattr(os, 'makedirs', makedirs_exception)
assert fm.create_dir(test_dir_name) is False
if __name__ == "__main__":
pass
``` |
{
"source": "jmacgrillen/perspective",
"score": 2
} |
#### File: jmacgrillen/perspective/setup.py
```python
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
install_requirements = [
"maclib",
"opencv-python",
"numpy",
"Pillow",
"charset-normalizer"
]
def setup_perspective_package() -> None:
"""
Install and configure Perspective for use
"""
setup(
name='Perspective',
version="0.0.1",
description='Analyse images using the range of tools provided',
long_description=long_description,
author='J.MacGrillen',
scripts=[],
packages=find_packages(exclude=['tests*']),
include_package_data=True,
install_requires=install_requirements,
license="MIT License",
python_requires=">= 3.7.*",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
)
if __name__ == "__main__":
setup_perspective_package()
```
#### File: src/ui/main_window.py
```python
import logging
import PyQt5.QtCore as QtCore
import PyQt5.QtGui as QtGui
from PyQt5.QtWidgets import QApplication, QAction, QHBoxLayout, QWidget
from PyQt5.QtWidgets import QMainWindow, QMenuBar, QStatusBar, QFileDialog
import maclib.mac_logger as mac_logger
import qdarkstyle
from src.perspective_settings import PerspecitveSettings
from src.ui.image_view import PerspectiveImageView
from maclib.mac_detect import MacDetect
class MacWindow(QMainWindow):
"""
Base window for mac_lib ui
"""
menu_bar: QMenuBar
status_bar: QStatusBar
scaling_ratio: float
mac_detect: MacDetect
logger: logging.Logger
perspective_settings: PerspecitveSettings
def __init__(self,
window_name: str,
main_app: QApplication,
window_width: int = 800,
window_height: int = 600,
window_icon: object = None,
*args,
**kwargs):
"""
Create a QT main window
"""
super(MacWindow, self).__init__(*args, **kwargs)
self.logger = logging.getLogger(name=mac_logger.LOGGER_NAME)
self.mac_detect = MacDetect()
self.perspective_settings = PerspecitveSettings()
# Decide whether to use the light or dark theme.
if self.perspective_settings.app_settings['ui']['theme'] == 'system':
self.logger.debug("Using system UI theme...")
if self.mac_detect.os_theme == "Dark":
main_app.setStyleSheet(qdarkstyle.load_stylesheet(
qt_api='pyqt5'))
else:
pass
elif self.perspective_settings.app_settings['ui']['theme'] == 'dark':
self.logger.debug("Enabling dark theme from saved setting.")
main_app.setStyleSheet(qdarkstyle.load_stylesheet(qt_api='pyqt5'))
else:
self.logger.debug("Using default light theme.")
self.setWindowTitle(window_name)
# If the window position has been saved in settings, use them to
# set the position on the window.
if self.perspective_settings.key_exists('window'):
if self.perspective_settings.app_settings[
'window']['save_pos'] == "True":
self.logger.debug(
"Using settings to place window and set size.")
self.move(
self.perspective_settings.app_settings[
'window']['x_coord'],
self.perspective_settings.app_settings[
'window']['y_coord']
)
self.resize(
self.perspective_settings.app_settings[
'window']['width'],
self.perspective_settings.app_settings[
'window']['height']
)
else:
self.resize(window_width, window_height)
else:
self.resize(window_width, window_height)
self.status_bar = self.statusBar()
self.menu_bar = self.menuBar()
self.setCentralWidget(QWidget())
self.show()
def save_window_geometry(self) -> None:
"""
Save the window position to the settings file.
"""
self.logger.debug("Saving window coords before closing app")
self.perspective_settings.app_settings[
'window']['width'] = self.width()
self.perspective_settings.app_settings[
'window']['height'] = self.height()
self.perspective_settings.app_settings[
'window']['x_coord'] = self.x()
self.perspective_settings.app_settings[
'window']['y_coord'] = self.y()
self.perspective_settings.save_settings()
def closeEvent(self, close_event: QtGui.QCloseEvent) -> None:
self.logger.debug("User pressed the window close button.")
self.save_window_geometry()
close_event.accept()
return super(MacWindow, self).closeEvent(close_event)
class PerspectiveWindow(object):
"""
This is the main window that controls image_tools.
"""
main_app: QApplication
main_window: MacWindow
default_status: str = "Ready"
logger: logging.Logger
perspective_settings: PerspecitveSettings
h_layout: QHBoxLayout
image_view: PerspectiveImageView
def __init__(self):
"""
Create and run the main window for WAD Walker.
"""
super(PerspectiveWindow, self).__init__()
self.logger = logging.getLogger(name=mac_logger.LOGGER_NAME)
# Handle high dpi display scaling
if hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'):
QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
if hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'):
QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
self.perspective_settings = PerspecitveSettings()
self.create_window()
self.run()
def load_image(self) -> None:
"""
Load an image.
"""
self.main_window.status_bar.showMessage("Loading image")
file_name = QFileDialog.getOpenFileName(filter="Image (*.*)")[0]
self.logger.debug(f"Opening file {file_name}")
self.image_view = PerspectiveImageView()
self.image_view.load_image(file_name)
self.main_window.status_bar.showMessage("Image loaded successfully")
def do_nothing(self) -> None:
"""
Literally do nothing!
"""
self.logger.debug("I ain't doin' nuffink.")
def quit_application(self) -> None:
"""
Update the settings with the window geometry.
"""
self.main_window.save_window_geometry()
self.main_app.quit()
def create_file_menu(self) -> None:
"""
Create the main file menu
"""
open_action = QAction('&Open', self.main_window)
open_action.setShortcut('Ctrl+O')
open_action.setStatusTip('Open an image')
open_action.triggered.connect(self.load_image)
quit_action = QAction('&Quit', self.main_window)
quit_action.setShortcut('Ctrl+Q')
quit_action.setStatusTip('Quit application')
quit_action.triggered.connect(self.quit_application)
file_menu = self.main_window.menu_bar.addMenu('&File')
file_menu.addAction(open_action)
file_menu.addAction(quit_action)
def create_edit_menu(self) -> None:
"""
Create the main Edit menu
"""
settings_action = QAction('&Settings', self.main_window)
settings_action.setShortcut('Ctrl+S')
settings_action.setStatusTip('Adjust application settings')
settings_action.triggered.connect(self.do_nothing)
file_menu = self.main_window.menu_bar.addMenu('&Edit')
file_menu.addAction(settings_action)
def create_window(self) -> None:
"""
Create the main Perspective window.
"""
self.main_app = QApplication([])
self.main_window = MacWindow("Perspective", self.main_app)
central_widget = self.main_window.centralWidget()
self.logger.debug("Adding horizontal layout to the main window.")
self.h_layout = QHBoxLayout(central_widget)
central_widget.setLayout(self.h_layout)
self.logger.debug("Adding image view to the layout.")
self.image_view = PerspectiveImageView(central_widget)
self.h_layout.addWidget(self.image_view)
self.create_file_menu()
self.create_edit_menu()
def run(self) -> None:
"""
Run the main window
"""
# Now show and run the window.
self.logger.debug("Starting the main application loop.")
self.main_app.exec()
if __name__ == "__main__":
pass
``` |
{
"source": "jmachalica/PygameProjects",
"score": 3
} |
#### File: PygameProjects/InverseKinematics/segment.py
```python
import numpy as np
import pygame as pg
import math
class Segment:
""" Class representing single Segment
Attributes:
base_point (pygame.math.Vector2)
end_point (pygame.math.Vector2)
angle (float) - angle with respect to the x axis, counter clockwise
parent (Segment)
child (Segment)
"""
n_of_generated_segments = 0
def __init__(self, x, y, length, angle, parent=None, child=None):
self.base_point = pg.math.Vector2(x, y)
self.length = length
self.angle = angle
self.id = Segment.n_of_generated_segments
self.parent = parent
self.calc_end()
self.child = child
Segment.n_of_generated_segments += 1
def calc_end(self):
"""Calculates end point of the segment with respect to base point, length of segment and angle to x axis
"""
x_end = self.length * \
np.cos(math.radians(self.angle)) + self.base_point.x
y_end = self.length * \
np.sin(math.radians(self.angle)) + self.base_point.y
self.end_point = pg.math.Vector2(x_end, y_end)
def calc_base(self):
"""Calculates base point of the segment with respect to end point, length of segment and angle to x axis
"""
self.base_point.x = self.end_point.x - \
self.length*np.cos(math.radians(self.angle))
self.base_point.y = self.end_point.y - \
self.length*np.sin(math.radians(self.angle))
def get_end(self):
return self.end_point
def get_base(self):
return self.base_point
def set_end(self, end_point):
self.end_point = end_point
def follow(self, point_to_follow):
"""Changes end point of the segment to point_to_follow cordinates and calculates appropiate new angle and base_point. If parent is not None it calls parent's follow function in order to follow new base point.
"""
diff = point_to_follow-self.base_point
alfa = math.degrees(math.atan2(diff.y, diff.x))
self.set_end(point_to_follow)
self.angle = alfa
self.calc_base()
if self.parent is not None:
self.parent.follow(self.base_point)
def generate_segments(self, n_of_segments):
if Segment.n_of_generated_segments < n_of_segments:
prev_end = self.get_end()
curr_segment = Segment(prev_end.x, prev_end.y,
self.length, self.angle, self) # new segment
self.child = curr_segment
return self.child.generate_segments(n_of_segments)
else:
return self
def draw_segment(self, SCREEN, color, width):
pg.draw.line(SCREEN, color, self.base_point,
self.end_point, width)
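# Example usage (editor's sketch): build a short chain and make its tip follow a
# target point; follow() propagates the update through the parent segments.
# base = Segment(200, 200, length=40, angle=0)
# tip = base.generate_segments(5)  # chain of 5 segments in total
# tip.follow(pg.math.Vector2(320, 240))
# # each segment can then be drawn with segment.draw_segment(SCREEN, (255, 255, 255), 2)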
```
#### File: PygameProjects/Maze/maze_example_dynamic.py
```python
import pygame as pg
from pygame import mouse
from maze import Maze
import numpy as np
from PathFinder import PathFinder
from drawing_grid import *
def get_grid_cordinates_mouse(grid, rect_width, rect_height, mouse_cordinates):
x, y = mouse_cordinates
height, width = grid.shape
return x//rect_width, y//rect_height
def add_point(points, point):
if len(points) < 2:
points.append(point)
def find_path(grid, points, path_finder):
path = path_finder.find_path(points[0], points[1])
return path
CLOCK = pg.time.Clock()
WHITE = (255, 255, 255)
RECT_SIZE = 32
maze = Maze(15, 15)
maze_grid = maze.grid
SCREEN_SIZE = (maze.grid_width*RECT_SIZE, maze.grid_height*RECT_SIZE)
pg.init()
SCREEN = pg.display.set_mode(SCREEN_SIZE)
points = []
path_found = False
path = []
path_finder = PathFinder(maze_grid)
while True:
for event in pg.event.get():
if event.type == pg.MOUSEBUTTONDOWN:
cordinates = get_grid_cordinates_mouse(
maze_grid, RECT_SIZE, RECT_SIZE, event.pos)
try:
if path_finder.validate_cordinates(cordinates):
add_point(points, cordinates)
except Exception:
print("Invalid position for the point")
if event.type == pg.QUIT:
pg.quit()
break
SCREEN.fill((0, 0, 0))
draw_grid(maze_grid, SCREEN, RECT_SIZE, RECT_SIZE)
draw_extremal_points(SCREEN, points, RECT_SIZE, RECT_SIZE)
if len(points) == 2 and not path_found:
path = find_path(maze_grid, points, path_finder)
path_found = True
if path_found:
draw_path(path, SCREEN, RECT_SIZE, RECT_SIZE)
pg.display.flip()
CLOCK.tick(10)
``` |
{
"source": "jmachalica/PyImage",
"score": 3
} |
#### File: PyImage/source/conversion.py
```python
import numpy as np
from .utils import check_dimension, check_dtype, convert_dtype, clip_to_uint
def rgb_to_gray(image):
check_dimension(image,3)
shape=image.shape
image= image.astype(np.float64)
applied=np.apply_along_axis(lambda x: x[0] *0.299 + x[1]*0.587 + x[2]*0.114,2,image )
return clip_to_uint(applied)
```
#### File: PyImage/source/filters.py
```python
import numpy as np
from .utils import check_2D, check_dimension
def _calc_gaussian_value(i,j,sigma,half_size):
return 1/(2*np.pi * sigma**2)* np.exp( -1* ( ( i-(half_size+1) )**2 + (j-(half_size+1))**2 ) /(2*sigma**2) )
def create_gaussian_filter(size,sigma):
if(size%2 ==0):
raise ValueError()
filter= np.zeros((size,size))
half_size=size//2
for i in range(size):
for j in range(size):
filter[i][j]= _calc_gaussian_value(i,j,sigma,half_size)
return filter
def is_in_edge_range(image_shape, coords, filter_size):
y,x= coords
filter_size =filter_size//2
print(x,y,filter_size)
return (x-filter_size)<-1 or (x+filter_size) > image_shape[1] or (y-filter_size)<-1 or (y+filter_size) > image_shape[0]
def convolve(arr1, arr2):
return np.sum(np.multiply(arr1,arr2))
def filter_image(image,filter, egde=""): # 3x3 5x5 array
check_2D(image)
filter_row_size, filter_col_size =filter.shape
# 'valid' filtering output size is N - K + 1 along each axis
row_n = image.shape[0] - filter_row_size + 1
col_n = image.shape[1] - filter_col_size + 1
filtered=np.zeros((row_n, col_n) )
for row_i in range(row_n):
for col_i in range(col_n):
current_arr= image[row_i:row_i+filter_row_size,col_i:col_i+filter_col_size]
filtered[row_i][col_i]= convolve(current_arr,filter)
return filtered
def _median(arr):
return int(np.median(arr))
def _min(arr):
return np.min(arr)
def _max(arr):
return np.max(arr)
def _mean(arr):
return np.mean(arr)
def _diff(arr):
return _max(arr) - _min(arr)
def _filter_nonlinear(image, shape,function):
check_2D(image)
filter_row_size, filter_col_size =shape
# 'valid' filtering output size is N - K + 1 along each axis
row_n = image.shape[0] - filter_row_size + 1
col_n = image.shape[1] - filter_col_size + 1
filtered=np.zeros((row_n, col_n) )
for row_i in range(row_n):
for col_i in range(col_n):
current_arr= image[row_i:row_i+filter_row_size,col_i:col_i+filter_col_size]
filtered[row_i][col_i]= function(current_arr)
return filtered
def medfilt(image,shape):
return _filter_nonlinear(image,shape,_median)
def minfilt(image,shape):
return _filter_nonlinear(image,shape, _min)
def maxfilt(image,shape):
return _filter_nonlinear(image,shape,_max)
def rangefilt(image,shape):
return _filter_nonlinear(image,shape,_diff)
def meanfilt(image,shape):
return _filter_nonlinear(image,shape,_mean)
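# Example usage (editor's sketch) on a 2-D grayscale array `image`:
# kernel = create_gaussian_filter(5, sigma=1.0)  # 5x5 Gaussian kernel
# smoothed = filter_image(image, kernel)         # linear (convolution) filtering
# denoised = medfilt(image, (3, 3))              # nonlinear median filtering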
```
#### File: PyImage/tests/test_points.py
```python
import unittest
from unittest import mock
from unittest.mock import patch
from source import points
import numpy as np
class TestPoints(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
self.img=np.ones((10,10))
self.img2= np.ones((11,11))
def test_check_number(self):
with self.assertRaises(ValueError) as context:
points.check_number('1')
self.assertTrue('Value is not a number' in str(context.exception))
with self.assertRaises(ValueError) as context:
points.check_number((1,))
self.assertTrue('Value is not a number' in str(context.exception))
try:
points.check_number(1)
except ValueError:
self.fail("Points.check_number raised exception on int")
try:
points.check_number(10.5)
except ValueError:
self.fail("Points.check_number raised exception on float")
def test_validate(self):
value=2
def test_add(self):
self.assertEqual( (points.add(self.img,10) == self.img+10).all() ,True )
def test_multiply(self):
result=points.image_multiply(self.img, 20)
self.assertEqual( ( result != self.img *20 ).all() , True )
result=points.image_multiply(self.img, -20)
self.assertEqual( (result==0).all() , False )
def test_substract_images(self):
img3= self.img.copy()
try:
result=points.substract_images(self.img, img3)
except ValueError:
self.fail("Exception raised on same shape")
self.assertEqual( (result==0).all() ,True )
self.assertEqual( result.shape,img3.shape )
self.assertEqual( result.shape,self.img.shape )
with self.assertRaises(ValueError) as context:
points.substract_images(self.img, self.img2)
self.assertTrue('Images have different shape' in str(context.exception))
def test_gamma_correction(self):
result=points.gamma_correction(self.img,1)
self.assertEqual(( result ==1).all(), True )
result=points.gamma_correction(self.img,-1)
self.assertEqual(( result ==1).all(), True )
image=self.img.copy()*2
result=points.gamma_correction(image,2)
self.assertEqual(( result ==4).all(), True )
result=points.gamma_correction(image,2)
self.assertEqual(( result ==4).all(), True )
result=points.gamma_correction(image,1/2)
self.assertEqual( (result == 1).all(), True )
image=self.img*255
result=points.gamma_correction(image,1/2)
self.assertEqual( (result == 15).all(), True )
def test_binarize_image_exceptions(self):
image=self.img.copy()
image[:,:]=128
# check valid
valid= ["lower","upper","both","histeresis"]
for method in valid:
try:
points.binarize_image(image,10 , btype = method)
except ValueError:
self.fail(f"Exception raised on valid method {method}")
try:
points.binarize_image(image,20.7 , btype = method)
except ValueError:
self.fail(f"Exception raised on valid threshold")
with self.assertRaises(ValueError) as context:
points.binarize_image(image,None)
self.assertTrue('Value is not a number' in str(context.exception))
with self.assertRaises(ValueError) as context:
points.binarize_image(image,'a')
self.assertTrue('Value is not a number' in str(context.exception))
def test_binarize_image_lower(self):
image=self.img.copy()
image[:,:]=128
result=points.binarize_image(image,129)
self.assertTrue( (result==0).all())
result=points.binarize_image(image,128)
self.assertTrue( (result==0).all())
result=points.binarize_image(image,127)
self.assertTrue( (result==1).all())
image[:5,:5]=128
result=points.binarize_image(image,129)
self.assertEqual( np.count_nonzero(result !=0 ), 0)
image[:5,:5]=128
result=points.binarize_image(image,128)
self.assertEqual( np.count_nonzero(result !=0 ), 100)
image[:5,:5]=128
result=points.binarize_image(image,127)
self.assertEqual( np.count_nonzero(result !=0 ), 25)
def test_binarize_image_upper(self):
image=self.img.copy()
image[:,:]=128
result=points.binarize_image(image,129, btype='upper')
self.assertTrue( (result==0).all())
result=points.binarize_image(image,128)
self.assertTrue( (result==0).all())
result=points.binarize_image(image,127)
self.assertTrue( (result==1).all())
image[:5,:5]=128
result=points.binarize_image(image,129)
self.assertEqual( np.count_nonzero(result !=0 ), 0)
image[:5,:5]=128
result=points.binarize_image(image,128)
self.assertEqual( np.count_nonzero(result !=0 ), 100)
image[:5,:5]=128
result=points.binarize_image(image,127)
self.assertEqual( np.count_nonzero(result !=0 ), 25)
if __name__=='__main__':
unittest.main()
``` |
{
"source": "jmacinnis94/ixbrl-viewer",
"score": 2
} |
#### File: ixbrl-viewer/iXBRLViewerPlugin/localviewer.py
```python
from arelle.LocalViewer import LocalViewer
from arelle.webserver.bottle import static_file
from arelle.FileSource import archiveFilenameParts
import os, shutil
import logging
import zipfile, sys, traceback
from .iXBRLViewer import IXBRLViewerBuilder
VIEWER_BASENAME_SUFFIX = "_ixbrlview"
class iXBRLViewerLocalViewer(LocalViewer):
# plugin-specific local file handler
def getLocalFile(self, file, relpath, request):
_report, _sep, _file = file.partition("/")
if file == 'ixbrlviewer.js':
return static_file('ixbrlviewer.js', os.path.abspath(os.path.join(os.path.dirname(__file__), "viewer", "dist")))
elif _report.isnumeric(): # in reportsFolder folder
# check if file is in the current or parent directory (may bve
_fileDir = self.reportsFolders[int(_report)]
_fileExists = False
if os.path.exists(os.path.join(_fileDir, _file)):
_fileExists = True
elif "/" in _file and os.path.exists(os.path.join(_fileDir, os.path.filepart(_file))):
# xhtml in a subdirectory for output files may refer to an image file in parent directory
_fileExists = True
_file = os.path.basename(_file)
if not _fileExists:
self.cntlr.addToLog("http://localhost:{}/{}".format(self.port,file), messageCode="localViewer:fileNotFound",level=logging.DEBUG)
return static_file(_file, root=_fileDir, more_headers=self.noCacheHeaders) # extra_headers modification to py-bottle
return static_file(file, root="/") # probably can't get here unless path is wrong
localViewer = iXBRLViewerLocalViewer("iXBRL Viewer", os.path.dirname(__file__))
def launchLocalViewer(cntlr, modelXbrl):
from arelle import LocalViewer
try:
viewerBuilder = IXBRLViewerBuilder(cntlr.modelManager.modelXbrl)
iv = viewerBuilder.createViewer(scriptUrl="/ixbrlviewer.js")
# first check if source file was in an archive (e.g., taxonomy package)
_archiveFilenameParts = archiveFilenameParts(modelXbrl.modelDocument.filepath)
if _archiveFilenameParts is not None:
outDir = os.path.dirname(_archiveFilenameParts[0]) # it's a zip or package
else:
outDir = modelXbrl.modelDocument.filepathdir
_localhost = localViewer.init(cntlr, outDir)
# for IXDS, outPath must be a directory name, suffix is applied in saving files
if len(iv.files) > 1:
# save files in a separate directory from source files
_localhost += "/" + VIEWER_BASENAME_SUFFIX
outDir = os.path.join(outDir, VIEWER_BASENAME_SUFFIX)
os.makedirs(outDir, exist_ok=True)
iv.save(outDir) # no changes to html inline files so inter-file references still work
htmlFile = iv.files[0].filename
else:
iv.save(outDir, outBasenameSuffix=VIEWER_BASENAME_SUFFIX)
htmlFile = "{0[0]}{1}{0[1]}".format(os.path.splitext(modelXbrl.modelDocument.basename), VIEWER_BASENAME_SUFFIX)
import webbrowser
webbrowser.open(url="{}/{}".format(_localhost, htmlFile))
except Exception as ex:
modelXbrl.error("viewer:exception",
"Exception %(exception)s \sTraceback %(traceback)s",
modelObject=modelXbrl, exception=ex, traceback=traceback.format_tb(sys.exc_info()[2]))
```
#### File: ixbrl-viewer/samples/fetch-sample-files.py
```python
import json
import os
import re
import sys
from urllib.parse import urlparse
from urllib.request import urlretrieve
import hashlib
def sha256sum(filename):
h = hashlib.sha256()
b = bytearray(128*1024)
mv = memoryview(b)
with open(filename, 'rb', buffering=0) as f:
for n in iter(lambda : f.readinto(mv), 0):
h.update(mv[:n])
return h.hexdigest()
print("Checking for sample files/packages...")
basedir = os.path.dirname(os.path.realpath(__file__))
filename = os.path.join(basedir, "sample-files.list")
missing_checksums = []
with open(filename, "r") as f:
for l in f.readlines():
if re.match(r'^\s*(#.*)?$', l) is not None:
continue
m = re.match(r'^(\S*)\s+(\S+)\s*(?:\s+(\S+))?$',l)
if m is None:
print("Unexpected line: %s" % l)
sys.exit(1)
(subdir, url, fhash) = (m[1], m[2], m[3])
if subdir == '':
subdir = prev_subdir
prev_subdir = subdir
if '|' in url:
(url, filepart) = url.split('|')
else:
o = urlparse(url)
m = re.search(r'[^/]+$', o.path)
if m is None:
print("Could not get filename from: %s" % o.path)
sys.exit(1)
filepart = m[0]
target_dir = os.path.join(basedir, subdir)
target = os.path.join(target_dir, filepart)
if not os.path.exists(target):
if not os.path.exists(target_dir):
print("Creating %s" % target_dir)
os.makedirs(target_dir)
print("Downloading %s to %s" % (url, target))
urlretrieve(url, target)
actual_fhash = sha256sum(target)
if fhash is None:
missing_checksums.append("%s %s %s" % (subdir, url, actual_fhash))
else:
if actual_fhash != fhash:
print("Checksum does not match for %s (expected: %s, got: %s)" % (target, fhash, actual_fhash))
if len(missing_checksums) > 0:
print("\n".join(missing_checksums))
``` |
{
"source": "jmackereth/eaglepy",
"score": 2
} |
#### File: eaglepy/eaglepy/h5read.py
```python
import h5py
import os
import glob
import re
import numpy as np
from . import peano
import warnings
from scipy.integrate import quad
base_path = os.environ['EAGLE_BASE_PATH']
release = os.environ['EAGLE_ACCESS_TYPE']
class Snapshot:
""" Basic SnapShot superclass which finds the relevant files and gets relevant information
regarding the snapshot specified.
arguments:
run - the run (e.g. L0012N0188)
model - an EAGLE model (e.g. Ref)
tag - a tag string specifying a snapshot output (e.g. 028_z000p000)
history:
written - Mackereth (UoB) - 22/11/2019
"""
def __init__(self, run, model, tag, load_particles=False):
#store the snapshot identity info
self.run = run
self.model = model
self.tag = tag
if release == 'public':
self.simlabel = self.model+self.run
self.snaplabel = 'snapshot_'+self.tag
self.base_subfile = 'snap_'+self.tag
self.path = os.path.join(base_path, self.simlabel, self.snaplabel)
elif release == 'ARI':
self.snaplabel = 'snapshot_'+self.tag
self.base_subfile = 'snap_'+self.tag
self.path = os.path.join(base_path, self.run, self.model, 'data', self.snaplabel)
else:
raise Exception('private/custom data access is not yet implemented!')
if not os.path.exists(os.path.join(self.path, self.base_subfile+'.0.hdf5')):
raise Exception('could not see snapshot data in directory: '+self.path)
#get the files related to this snapshot and load some of their metadata
self.files = natural_sort(glob.glob(os.path.join(self.path, self.base_subfile+'*.hdf5')))
self.nfiles = len(self.files)
self.header_dict = dict(h5py.File(self.files[0], 'r')['/Header'].attrs.items())
self.abundance_dict = dict(h5py.File(self.files[0], 'r')['/Parameters/ChemicalElements'].attrs.items())
self.elements = ['Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen', 'Silicon', 'Sulphur', 'Magnesium', 'Iron']
self.solar_abundances = dict([(self.elements[i],self.abundance_dict['SolarAbundance_%s' % self.elements[i]]) for i in range(len(self.elements))])
self.BoxSize = self.header_dict['BoxSize']
self.HubbleParam = self.header_dict['HubbleParam']
self.Omega0, self.OmegaLambda, self.OmegaBaryon, self.a0 = self.header_dict['Omega0'], self.header_dict['OmegaLambda'], self.header_dict['OmegaBaryon'], self.header_dict['ExpansionFactor']
self.NumPartTotal = self.header_dict['NumPart_Total']
self.ParticleTypes = np.array([0,1,2,3,4,5])
self.ParticleTypePresent = self.NumPartTotal > 0
self.ParticleTypePresent_file = np.zeros((len(self.files),len(self.NumPartTotal)), dtype=bool)
for ii, file in enumerate(self.files):
head = dict(h5py.File(file, 'r')['/Header'].attrs.items())
self.ParticleTypePresent_file[ii, head['NumPart_ThisFile'] > 0] = True
self._ptypeind = {self.ParticleTypes[self.ParticleTypePresent][i]:i for i in range(len(self.ParticleTypes[self.ParticleTypePresent]))}
#get the Hash Table info for P-H key sorting
self.HashBits = dict(h5py.File(self.files[0], 'r')['/HashTable'].attrs.items())['HashBits']
self.HashGridSideLength = 2**self.HashBits
self.HashGridCellSize = self.BoxSize/self.HashGridSideLength
self.firstkeys = np.zeros((len(self.ParticleTypes[self.ParticleTypePresent]),self.nfiles))
self.lastkeys = np.zeros((len(self.ParticleTypes[self.ParticleTypePresent]),self.nfiles))
self.datasets = {}
for ii,parttype in enumerate(self.ParticleTypes[self.ParticleTypePresent]):
self.firstkeys[ii] = np.array(h5py.File(self.files[0], 'r')['/HashTable/PartType'+str(parttype)+'/FirstKeyInFile'])
self.lastkeys[ii] = np.array(h5py.File(self.files[0], 'r')['/HashTable/PartType'+str(parttype)+'/LastKeyInFile'])
# be sure we get a file with this parttype (only really an issue when the number of star particles is low)
ind = np.nonzero(h5py.File(self.files[0], 'r')['/HashTable/PartType'+str(parttype)+'/LastKeyInFile'][:])[0][0]
self.datasets['PartType'+str(parttype)] = list(h5py.File(self.files[ind], 'r')['/PartType'+str(parttype)].keys())
if load_particles:
self._get_coordinates()
def _get_coordinates(self):
""" Load all the coordinates of the available particles
"""
#load coordinates and velocities
coordinates = []
velocities = []
for ii,type in enumerate(self.ParticleTypes[self.ParticleTypePresent]):
#now load the coordinates in these files and save the indices for each particle type
thistypecoord, thistypevels = self._get_parttype_indices(type, self.files)
coordinates.append(thistypecoord)
velocities.append(thistypevels)
self.velocities = velocities
self.coordinates = coordinates
def _get_parttype_indices(self, parttype, files):
"""get the coordinates and indices for a given particle type in a given region"""
coords, velocities, indices = [], [], []
for ii,file in enumerate(files):
#check this particle type is present here
if not _particle_type_present(parttype, file):
return None, None
# load the file
thisfilecoords = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Coordinates'])
thisfilevels = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Velocity'])
#store the coordinates and the indices of these particles in the file
coords.append(thisfilecoords)
velocities.append(thisfilevels)
return np.concatenate(coords), np.concatenate(velocities)
def _get_coords_vels(self, parttype, files):
"""get the coordinates and velocities for all particles of a certain type"""
if not self.ParticleTypePresent[parttype]:
warnings.warn('Particle type is not present, returning empty arrays...')
return np.array([]), np.array([]), np.array([])
coords, velocities, indices = [], [], []
for file in files:
# load the file
thisfilecoords = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Coordinates'])
thisfilevels = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Velocity'])
#store the coordinates and the indices of these particles in the file
coords.append(thisfilecoords)
velocities.append(thisfilevels)
return np.concatenate(coords), np.concatenate(velocities)
def get_dataset(self, parttype, dataset, physical=False, cgs=False):
""" get the data for a given entry in the HDF5 file for the given region """
if not self.ParticleTypePresent[parttype]:
warnings.warn('Particle type is not present, returning empty arrays...')
return np.array([])
key = os.path.join('/PartType'+str(parttype),dataset)
if physical:
#find conversion factor
factor = self._conversion_factor(key, self.a0, self.HubbleParam, cgs=cgs)
elif not physical and cgs:
factor = h5py.File(self.files[0], 'r')[key].attrs['CGSConversionFactor']
else:
#else just multiply by 1!
factor = 1.
out = []
for ii,file in enumerate(self.files):
# load this file and get the particles
out.append(np.array(h5py.File(file, 'r')[key]) * factor)
return np.concatenate(out)
def _conversion_factor(self, key, a, h, cgs=False):
aexp_scale, h_scale = self._get_conversion_factor_exponents(key)
if cgs:
cgs_factor = h5py.File(self.files[0], 'r')[key].attrs['CGSConversionFactor']
else:
cgs_factor = 1.
return a**(aexp_scale)*h**(h_scale)*cgs_factor
def _get_conversion_factor_exponents(self, key):
aexp_scale = h5py.File(self.files[0], 'r')[key].attrs['aexp-scale-exponent']
h_scale = h5py.File(self.files[0], 'r')[key].attrs['h-scale-exponent']
return aexp_scale, h_scale
def _single_X_H(self,X,H,element):
solar = self.solar_abundances[element]
solarH = self.solar_abundances['Hydrogen']
return np.log10(X/H)-np.log10(solar/solarH)
def abundance_ratios(self,gas=False,smoothed=True):
""" Compute element abundance ratios for the region, returns a dict of [X/H] """
if smoothed:
e_key = 'SmoothedElementAbundance'
else:
e_key = 'ElementAbundance'
if gas:
parttype = 0
else:
parttype = 4
entries = []
H = self.get_dataset(parttype,os.path.join(e_key,'Hydrogen'))
for i in range(len(self.elements)):
if self.elements[i] == 'Hydrogen' or self.elements[i] == 'Sulphur':
continue
X = self.get_dataset(parttype,os.path.join(e_key,self.elements[i]))
entries.append((self.elements[i],self._single_X_H(X,H,self.elements[i])))
return dict(entries)
def t_lookback(self,a):
return a / (np.sqrt(self.Omega0 * a + self.OmegaLambda * (a ** 4)))
def z2age(self,z):
a = 1 / (1 + z)
t = np.array([quad(self.t_lookback, x, self.a0)[0] for x in a])
return (1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t
def a2age(self,a):
t = np.array([quad(self.t_lookback, x, self.a0)[0] for x in a])
return (1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t
def z2tau(self,z):
t_em = quad(self.t_lookback, 0., self.a0)[0]
t_em = (1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t_em
a = 1 / (1 + z)
t = np.array([quad(self.t_lookback, x, self.a0)[0] for x in a])
return t_em - ((1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t)
def a2tau(self,a):
t_em = quad(self.t_lookback, 0., self.a0)[0]
t_em = (1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t_em
t = np.array([quad(self.t_lookback, x, self.a0)[0] for x in a])
return t_em - ((1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t)
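# Example usage (editor's sketch; the run/model/tag values are illustrative and
# must match data available under EAGLE_BASE_PATH):
# snap = Snapshot('L0012N0188', 'Ref', '028_z000p000')
# star_pos = snap.get_dataset(4, 'Coordinates', physical=True)    # h-free physical units
# ages = snap.a2age(snap.get_dataset(4, 'StellarFormationTime'))  # stellar ages in Gyr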
class SnapshotRegion(Snapshot):
""" A class inheriting from SnapShot, which defines a region inside a larger simulation snapshot.
when initialised, this will read the files in that region, and get the indices of the particles inside the
desired region. The necessary datasets can then be loaded by using get_dataset.
arguments:
run - the run (e.g. L0012N0188)
model - an EAGLE model (e.g. Ref)
tag - a tag string specifying a snapshot output (e.g. 028_z000p000)
center - the center of the desired region
sidelength - the length of a side of the volume required
history:
written - Mackereth (UoB) - 22/11/2019
"""
def __init__(self, run, model, tag, center, sidelength, just_get_files=False):
#we want everything from SnapShot plus some extras
super().__init__(run, model, tag)
self.center = center
self.sidelength = sidelength
self.centered = False
self._index_region(self.center, self.sidelength, justfiles=just_get_files)
def _index_region(self, center, side_length, phgrid_n=70, justfiles=False):
""" Load a region defined by a central cordinate and a side length
arguments:
center - the [x,y,z] coordinate of the desired center (simulation units)
side_length - the desired side length (in the simulation units)
keyword arguments:
phgrid_n - the number of grid points along a side length to look for PH cells (default 70)
"""
#work out which files contain the desired region
grid = peano.coordinate_grid(center, side_length, self.BoxSize, n=phgrid_n)
keys = peano.get_unique_grid_keys(grid, self.HashGridCellSize, self.BoxSize, bits=self.HashBits)
particles_in_volume = self.ParticleTypes[self.ParticleTypePresent]
self.files_for_region = []
self.file_indices = []
coordinates = []
velocities = []
indices = []
for ii in self.ParticleTypes:
if not self.ParticleTypePresent[ii]:
continue
Nfiles = self._get_parttype_files(ii, keys)
if len(Nfiles) < 1:
#particle is not present in the region - remove from here
self.ParticleTypePresent[ii] = 0
continue
thisfiles = np.array(self.files)[Nfiles]
thisindices = Nfiles
self.files_for_region.append(thisfiles)
self.file_indices.append(Nfiles)
if justfiles:
continue
present = False
for file in thisfiles:
present += _particle_type_present(ii, file)
if present:
#now load the coordinates in these files and save the indices for each particle type
thistypecoord, thistypevels, thistypeindices = self._get_parttype_indices(ii, thisfiles, thisindices)
if thistypecoord is None:
self.ParticleTypePresent[ii] = 0
continue
coordinates.append(thistypecoord)
velocities.append(thistypevels)
indices.append(thistypeindices)
else:
self.ParticleTypePresent[ii] = 0
if not justfiles:
self.velocities = velocities
self.coordinates = coordinates
self.indices = indices
self.NumPart_ThisRegion = np.zeros(len(self.NumPartTotal),dtype=np.int64)
for ii,type in enumerate(self.ParticleTypes[self.ParticleTypePresent]):
self.NumPart_ThisRegion[type] = len(self.coordinates[ii])
def _get_parttype_indices(self, parttype, files, file_indices):
"""get the coordinates and indices for a given particle type in a given region"""
coords, velocities, indices = [], [], []
for ii,file in enumerate(files):
#check this particle type is present here
if not _particle_type_present(parttype, file):
return None, None, None
# load the file
thisfilecoords = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Coordinates'])
thisfilevels = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Velocity'])
if (np.array(self.center)+self.sidelength > self.BoxSize).any():
thisfilecoords = thisfilecoords - (self.center - self.BoxSize/2.)
thisfilecoords = np.mod(thisfilecoords,self.BoxSize)
thisfilecoords -= self.BoxSize/2.
thisfilecoords += self.center
# mask it to the region desired
mask = (np.fabs(thisfilecoords[:,0]-self.center[0]) < self.sidelength/2.) &\
(np.fabs(thisfilecoords[:,1]-self.center[1]) < self.sidelength/2.) &\
(np.fabs(thisfilecoords[:,2]-self.center[2]) < self.sidelength/2.)
#store the coordinates and the indices of these particles in the file
thisfileindices = np.where(mask)[0]
coords.append(thisfilecoords[mask])
velocities.append(thisfilevels[mask])
indices.append(thisfileindices)
return np.concatenate(coords), np.concatenate(velocities), indices
def get_dataset(self, parttype, dataset, physical=False, cgs=False):
""" get the data for a given entry in the HDF5 file for the given region """
if not self.ParticleTypePresent[parttype]:
warnings.warn('Particle type is not present, returning empty arrays...')
return np.array([])
key = os.path.join('/PartType'+str(parttype),dataset)
if physical:
#find conversion factor
factor = self._conversion_factor(key, self.a0, self.HubbleParam, cgs=cgs)
elif not physical and cgs:
factor = h5py.File(self.files[0], 'r')[key].attrs['CGSConversionFactor']
else:
#else just multiply by 1!
factor = 1.
out = []
ptypeind = self._ptypeind[parttype]
for ii,file in enumerate(self.files_for_region[ptypeind]):
if not _particle_type_present(parttype, file):
continue
# load this file and get the particles
out.append(np.array(h5py.File(file, 'r')[key])[self.indices[ptypeind][ii]] * factor)
if len(out) < 2:
return out[0]
return np.concatenate(out)
def _get_parttype_files(self, parttype, keys):
""" get the files containing this region for a given particle type """
Nfiles = []
ptypeind = self._ptypeind[parttype]
for i in range(len(keys)):
if len(np.where(self.firstkeys[ptypeind] < keys[i])[0]) < 1:
start = 0
else:
start = np.where(self.firstkeys[ptypeind] < keys[i])[0][-1]
if len(np.where(self.firstkeys[ptypeind] > keys[i])[0]) < 1:
end = len(self.firstkeys[ptypeind])-1
else:
end = np.where(self.firstkeys[ptypeind] > keys[i])[0][0]
Nfiles.extend(np.arange(start,end+1,1))
Nfiles = np.unique(Nfiles)
return Nfiles
def angular_momentum(self, parttype=1, percentile=10):
"""Compute the angular momentum of particles within some percentile of their
radii
"""
ptypeind = self._ptypeind[parttype]
pos, vel = self.coordinates[ptypeind], self.velocities[ptypeind]
radii = np.linalg.norm(self.coordinates[ptypeind], axis=1)
inside = radii < np.percentile(radii, percentile)
if parttype == 1:
mass= np.ones(len(pos))*self.header_dict['MassTable'][1]
else:
mass = self.get_dataset(parttype, 'Mass')
vec = np.cross(pos[inside],vel[inside]*mass[inside][:,np.newaxis])
tot = np.sum(vec, axis=0)
return tot/np.linalg.norm(tot)
def _transform(self,vector):
"""Build a transformation matrix"""
a = vector
b = np.matrix([0,0,1])
v = np.cross(a,b)
s = np.linalg.norm(v)
c = np.dot(a,b.T)
vx = np.matrix([[0,-v[0,2],v[0,1]],[v[0,2],0,-v[0,0]],[-v[0,1],v[0,0],0]])
transform = np.eye(3,3) + vx + (vx*vx)*(1/(1+c[0,0]))
return transform
def center_and_align(self, parttype=1, align_percentile=10, return_transform=False, use_transform=False, trans=None, verbose=False, centeronly=False):
"""Center and align the particles in the region either with a supplied transformation matrix
or by computation of the mean angular momentum of some range of particles (defined by some percentile)
of the radii
"""
ptypeind = self._ptypeind[parttype]
if not self.centered:
for i in range(len(self.coordinates)):
self.coordinates[i] -= np.array(self.center)
radii = np.linalg.norm(self.coordinates[ptypeind], axis=1)
inside = radii < np.percentile(radii, align_percentile)
self.bulkvel = np.median(self.velocities[ptypeind][inside], axis=0)
for i in range(len(self.velocities)):
self.velocities[i] -= np.array(self.bulkvel)
if centeronly:
return None
self.centered = True
if use_transform:
t = trans
else:
t = self._transform(self.angular_momentum(parttype=parttype, percentile=align_percentile))
if verbose:
print('Transforming Coordinates...')
for i in range(len(self.coordinates)):
self.coordinates[i] = np.einsum('ij,aj->ai', t, self.coordinates[i])
self.velocities[i] = np.einsum('ij,aj->ai', t, self.velocities[i])
if return_transform:
return t
def _single_X_H(self,X,H,element):
solar = self.solar_abundances[element]
solarH = self.solar_abundances['Hydrogen']
return np.log10(X/H)-np.log10(solar/solarH)
def abundance_ratios(self,gas=False,smoothed=True):
""" Compute element abundance ratios for the region, returns a dict of [X/H] """
if smoothed:
e_key = 'SmoothedElementAbundance'
else:
e_key = 'ElementAbundance'
if gas:
parttype = 0
else:
parttype = 4
entries = []
H = self.get_dataset(parttype,os.path.join(e_key,'Hydrogen'))
for i in range(len(self.elements)):
if self.elements[i] == 'Hydrogen' or self.elements[i] == 'Sulphur':
continue
X = self.get_dataset(parttype,os.path.join(e_key,self.elements[i]))
entries.append((self.elements[i],self._single_X_H(X,H,self.elements[i])))
return dict(entries)
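# Illustrative usage (added): stellar abundance ratios for the region could be
# obtained as e.g.
#     ratios = region.abundance_ratios(gas=False, smoothed=True)
#     o_h = ratios['Oxygen']   # [O/H] per star particle (assuming 'Oxygen' is in self.elements)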
class Subfind:
""" Basic Subfind superclass which finds the relevant files.
arguments:
run - the run (e.g. L0012N0188)
model - an EAGLE model (e.g. Ref)
tag - a tag string specifying a snapshot output (e.g. 028_z000p000)
history:
written - Mackereth (UoB) - 22/11/2019
"""
def __init__(self, run, model, tag):
#store the snapshot identity info
self.run = run
self.model = model
self.tag = tag
if release == 'public':
self.simlabel = self.model+self.run
self.snaplabel = 'groups_'+self.tag
self.base_subfile = 'eagle_subfind_tab_'+self.tag
self.path = os.path.join(base_path, self.simlabel, self.snaplabel)
elif release == 'ARI':
self.snaplabel = 'groups_'+self.tag
self.base_subfile = 'eagle_subfind_tab_'+self.tag
self.path = os.path.join(base_path, self.run, self.model, 'data', self.snaplabel)
else:
raise Exception('private/custom data access is not yet implemented!')
if not os.path.exists(os.path.join(self.path, self.base_subfile+'.0.hdf5')):
raise Exception('could not see snapshot data in directory: '+self.path)
#get the files related to this snapshot and load some of their metadata
self.files = natural_sort(glob.glob(os.path.join(self.path, self.base_subfile+'*.hdf5')))
self.nfiles = len(self.files)
self.header_dict = dict(h5py.File(self.files[0], 'r')['/Header'].attrs.items())
self.BoxSize = self.header_dict['BoxSize']
self.HubbleParam = self.header_dict['HubbleParam']
self.Omega0, self.OmegaLambda, self.OmegaBaryon, self.a0 = self.header_dict['Omega0'], self.header_dict['OmegaLambda'], self.header_dict['OmegaBaryon'], self.header_dict['ExpansionFactor']
self.datasets = {}
bases = ['FOF', 'Subhalo']
for base in bases:
self.datasets[base] = list(h5py.File(self.files[0], 'r')[base].keys())
def get_dataset(self, dataset, physical=False, cgs=False):
""" get the data for a given entry in the HDF5 files """
out = []
if physical:
#find conversion factor
factor = self._conversion_factor(dataset, self.a0, self.HubbleParam, cgs=cgs)
elif not physical and cgs:
factor = h5py.File(self.files[0], 'r')[dataset].attrs['CGSConversionFactor']
else:
#else just multiply by 1!
factor = 1
for file in self.files:
# load this file and get the particles
out.append(np.array(h5py.File(file, 'r')[dataset])[:] * factor)
return np.concatenate(out)
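# Illustrative usage (added): once the catalogue is loaded, any FOF/Subhalo array
# can be read by its full HDF5 path, e.g.
#     cat = Subfind('L0012N0188', 'Ref', '028_z000p000')
#     masses = cat.get_dataset('/Subhalo/Mass', physical=True)
# ('/Subhalo/Mass' is an assumed dataset name; anything listed in
# cat.datasets['Subhalo'] or cat.datasets['FOF'] works the same way.)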
def _conversion_factor(self, key, a, h, cgs=False):
aexp_scale, h_scale = self._get_conversion_factor_exponents(key)
if cgs:
cgs_factor = h5py.File(self.files[0], 'r')[key].attrs['CGSConversionFactor']
else:
cgs_factor = 1.
return a**(aexp_scale)*h**(h_scale)*cgs_factor
def _get_conversion_factor_exponents(self, key):
aexp_scale = h5py.File(self.files[0], 'r')[key].attrs['aexp-scale-exponent']
h_scale = h5py.File(self.files[0], 'r')[key].attrs['h-scale-exponent']
return aexp_scale, h_scale
def natural_sort(l):
"""natural sort using regex (adapted by <NAME> on StackOverflow
from http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html)"""
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
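# Example (added for clarity):
#     natural_sort(['snap.10.hdf5', 'snap.2.hdf5']) -> ['snap.2.hdf5', 'snap.10.hdf5']
# i.e. numeric chunks are compared by value rather than lexicographically.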
def _particle_type_present(type, file):
head = dict(h5py.File(file, 'r')['/Header'].attrs.items())
return head['NumPart_ThisFile'][type] > 0
``` |
{
"source": "jmackereth/seismestimates",
"score": 3
} |
#### File: asteroestimate/detections/noise.py
```python
import numpy as np
def kepler_noise_model(G, BP, RP, cadence):
"""
return the ppm/hr noise for Kepler
INPUT:
G - G band magnitude
BP - G_BP magnitude (not used for kepler!)
RP - G_RP magnitude (not used for kepler!)
cadence - the integration time
OUTPUT:
noise - the instrument noise in ppm/hour
HISTORY:
27/04/2020 - written - <NAME> (UoB)
"""
#kepler noise model (Gilliland+ 2010)... have to assume V ~ G
mag = G
c = 1.28 * 10**(0.4*(12-mag)+7)
sigma = 1e6/c*(c+9.5e5*(14/mag)**5)**0.5
return 2e-6*sigma**2*cadence
def tess_noise_model(G, BP, RP, cadence):
"""
return the ppm/hr noise for TESS
INPUT:
G - G band magnitude
BP - G_BP magnitude (not used for kepler!)
RP - G_RP magnitude (not used for kepler!)
cadence - the integration time
OUTPUT:
noise - the instrument noise in ppm
HISTORY:
27/04/2020 - written - <NAME> (UoB)
"""
#Stassun model
color = BP-RP
mag = G
integration = cadence/(60.) #cadence back to mins!
tessmag = mag - 0.00522555*color**3 + 0.0891337*color**2 - 0.633923*color + 0.0324473
#return get_oneSigmaNoise(integration, tessmag)
return noise_fit_27min(G)
def noise_fit_27min(G):
coeff= np.array([ 6.20408891e-06, 5.97152412e-05, 2.54251067e-04, -2.53740192e-03, -3.57921614e-02, 1.44013454e+00])
poly = np.poly1d(coeff)
return 10**poly(G)
def get_oneHourNoiseLnsigma(TessMag):
"""
from tessgi/ticgen
TESS photometric error estimate [ppm] based on
magnitude and Eq. on bottom of P24 of
arxiv.org/pdf/1706.00495.pdf
seems like a fit to curves in other papers...?
"""
F = 4.73508403525e-5
E = -0.0022308015894
D = 0.0395908321369
C = -0.285041632435
B = 0.850021465753
lnA = 3.29685004771
return (lnA + B * TessMag + C * TessMag**2 + D * TessMag**3 +
E * TessMag**4 + F * TessMag**5)
def get_oneSigmaNoise(exp_time, TessMag):
""" from tessgi/ticgen """
onesig = (np.exp(get_oneHourNoiseLnsigma(TessMag))/np.sqrt(exp_time / 60.))
if hasattr(onesig, '__iter__'):
onesig[onesig < 60] = 60.
elif onesig < 60:
onesig = 60.
return onesig
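# Minimal usage sketch (added; the magnitudes are made-up illustrative values and
# cadence is given in seconds, as implied by the conversions above):
if __name__ == '__main__':
G, BP, RP = 11.0, 11.3, 10.6
print('TESS noise estimate [ppm]:', tess_noise_model(G, BP, RP, cadence=27*60))
print('Kepler noise estimate:', kepler_noise_model(G, BP, RP, cadence=30*60))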
``` |
{
"source": "jmackie/activityio",
"score": 2
} |
#### File: activityio/tcx/_reading.py
```python
import re
from pandas import to_datetime
from activityio._types import ActivityData, special_columns
from activityio._util import drydoc, exceptions
from activityio._util.xml_reading import (
gen_nodes, recursive_text_extract, sans_ns)
CAP = re.compile(r'([A-Z]{1})')
# According to Garmin, all times are stored in UTC.
DATETIME_FMT = '%Y-%m-%dT%H:%M:%SZ'
# Despite what the schema says, there are files out
# in the wild with fractional seconds...
DATETIME_FMT_WITH_FRAC = '%Y-%m-%dT%H:%M:%S.%fZ'
COLUMN_SPEC = {
'altitude_meters': special_columns.Altitude,
'cadence': special_columns.Cadence,
'distance_meters': special_columns.Distance,
'longitude_degrees': special_columns.Longitude,
'latitude_degrees': special_columns.Latitude,
'speed': special_columns.Speed,
'watts': special_columns.Power,
}
def titlecase_to_undercase(string):
""" ColumnName --> column_name """
under = CAP.sub(lambda pattern: '_' + pattern.group(1).lower(), string)
return under.lstrip('_')
@drydoc.gen_records
def gen_records(file_path):
nodes = gen_nodes(file_path, ('Trackpoint',), with_root=True)
root = next(nodes)
if sans_ns(root.tag) != 'TrainingCenterDatabase':
raise exceptions.InvalidFileError('tcx')
trackpoints = nodes
for trkpt in trackpoints:
yield recursive_text_extract(trkpt)
def read_and_format(file_path):
data = ActivityData.from_records(gen_records(file_path))
times = data.pop('Time') # should always be there
data = data.astype('float64', copy=False) # try and make numeric
# Prettier column names!
data.columns = map(titlecase_to_undercase, data.columns)
try:
timestamps = to_datetime(times, format=DATETIME_FMT, utc=True)
except ValueError: # bad format, try with fractional seconds
timestamps = to_datetime(times, format=DATETIME_FMT_WITH_FRAC, utc=True)
timeoffsets = timestamps - timestamps[0]
data._finish_up(column_spec=COLUMN_SPEC,
start=timestamps[0], timeoffsets=timeoffsets)
return data
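# Illustrative usage (added; 'ride.tcx' is a placeholder path):
#     data = read_and_format('ride.tcx')
# returns an ActivityData frame indexed by time offset, with TitleCase column
# names converted to under_case and special columns mapped via COLUMN_SPEC.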
``` |
{
"source": "jmackraz/baker-house",
"score": 3
} |
#### File: baker-house/scripts/poke_iot.py
```python
from os import environ
import sys
import json
import boto3
thing_name = environ['BAKERHOUSE_IOT_THING']
state_json='{"input" : "directv", "volume" : 29}'
def poke(desired_state_json):
client=boto3.client('iot-data')
print("POKE with state:", desired_state_json)
## construct payload in json because we take json arg
#format_str='{{ "state": {{ "desired" : {} }} }}'
#payload=format_str.format(state)
desired_state = json.loads(desired_state_json)
doc = { 'state': { 'desired': desired_state}}
payload = json.dumps(doc)
print("payload:", payload)
client.update_thing_shadow(thingName=thing_name, payload=payload)
if __name__ == "__main__":
if len(sys.argv) == 1:
poke(state_json)
elif len(sys.argv) > 1:
poke(sys.argv[1])
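# For reference (added): with BAKERHOUSE_IOT_THING set in the environment, the
# shadow update payload built in poke() has the shape
#     {"state": {"desired": {"input": "directv", "volume": 29}}}
# so a custom desired state can be passed on the command line, e.g.
#     python poke_iot.py '{"volume": 15}'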
``` |
{
"source": "jmadamesila/TBCCpylinac",
"score": 3
} |
#### File: pylinac/core/mtf.py
```python
import warnings
import numpy as np
from scipy.interpolate import interp1d
from .decorators import value_accept
class MTF:
"""This class will calculate relative MTF"""
def __init__(self, lp_spacings, lp_maximums, lp_minimums):
"""
Parameters
----------
lp_spacings : sequence of floats
These are the physical spacings per unit distance. E.g. 0.1 line pairs/mm.
lp_maximums : sequence of floats
These are the maximum values of the sample ROIs.
lp_minimums : sequence of floats
These are the minimum values of the sample ROIs.
"""
self.spacings = lp_spacings
self.maximums = lp_maximums
self.minimums = lp_minimums
self.mtfs = {}
self.norm_mtfs = {}
for (spacing, max, min) in zip(lp_spacings, lp_maximums, lp_minimums):
self.mtfs[spacing] = (max - min) / (max + min)
# sort according to spacings
self.mtfs = {k: v for k, v in sorted(self.mtfs.items(), key=lambda x: x[0])}
for key, value in self.mtfs.items():
self.norm_mtfs[key] = value / self.mtfs[lp_spacings[0]] # normalize to first region
# check that the MTF drops monotonically by measuring the deltas between MTFs
# if any delta is positive it means the MTF rose at a subsequent (finer) spacing
max_delta = np.max(np.diff(list(self.norm_mtfs.values())))
if max_delta > 0:
warnings.warn("The MTF does not drop monotonically; be sure the ROIs are correctly aligned.")
@value_accept(x=(0, 100))
def relative_resolution(self, x=50):
"""Return the line pair value at the given rMTF resolution value.
Parameters
----------
x : float
The percentage of the rMTF to determine the line pair value. Must be between 0 and 100.
"""
f = interp1d(list(self.norm_mtfs.values()), list(self.norm_mtfs.keys()), fill_value='extrapolate')
mtf = f(x / 100)
if mtf > max(self.spacings):
warnings.warn(f"MTF resolution wasn't calculated for {x}% that was asked for. The value returned is an extrapolation. Use a higher % MTF to get a non-interpolated value.")
return float(mtf)
@classmethod
def from_high_contrast_diskset(cls, spacings, diskset):
"""Construct the MTF using high contrast disks from the ROI module."""
maximums = [roi.max for roi in diskset]
minimums = [roi.min for roi in diskset]
return cls(spacings, maximums, minimums)
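# Minimal usage sketch (added; the sample values are made-up):
#     mtf = MTF(lp_spacings=[0.1, 0.2, 0.4], lp_maximums=[200, 150, 110], lp_minimums=[50, 80, 95])
#     mtf.relative_resolution(50)   # line-pair value at 50% rMTF
# or, from high-contrast ROI disks: MTF.from_high_contrast_diskset(spacings, diskset)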
```
#### File: TBCCpylinac/pylinac/picketfence.py
```python
from collections.abc import Sequence  # Sequence moved out of 'collections' in Python 3.10
from functools import lru_cache
import os.path as osp
import io
from itertools import cycle
from tempfile import TemporaryDirectory
from typing import Union, Tuple, List
import argue
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
from numpy import ndarray  # use the public import path rather than numpy's private module
from pylinac.core.typing import NumberLike
from pylinac.core.utilities import open_path
from .core import image
from .core.geometry import Line, Rectangle, Point
from .core.io import get_url, retrieve_demo_file
from .core import pdf
from .core.profile import MultiProfile, SingleProfile
from .log_analyzer import load_log
from .settings import get_dicom_cmap
# possible orientations of the pickets.
UP_DOWN = 'Up-Down'
LEFT_RIGHT = 'Left-Right'
class PFDicomImage(image.LinacDicomImage):
"""A subclass of a DICOM image that checks for noise and inversion when instantiated. Can also adjust for EPID sag."""
def __init__(self, path: str, **kwargs):
super().__init__(path, **kwargs)
self._check_for_noise()
self.check_inversion_by_histogram()
def _check_for_noise(self):
"""Check if the image has extreme noise (dead pixel, etc) by comparing
min/max to the 0.5/99.5 percentiles and smoothing if need be."""
safety_stop = 5
while self._has_noise() and safety_stop > 0:
self.filter(size=3)
safety_stop -= 1
def _has_noise(self) -> bool:
"""Helper method to determine if there is spurious signal in the image."""
min = self.array.min()
max = self.array.max()
near_min, near_max = np.percentile(self.array, [0.5, 99.5])
max_is_extreme = max > near_max * 1.25
min_is_extreme = (min < near_min * 0.75) and (abs(min - near_min) > 0.1 * (near_max - near_min))
return max_is_extreme or min_is_extreme
def adjust_for_sag(self, sag, orientation):
"""Roll the image to adjust for EPID sag."""
direction = 'y' if orientation == UP_DOWN else 'x'
self.roll(direction, sag)
class PicketFence:
"""A class used for analyzing EPID images where radiation strips have been formed by the
MLCs. The strips are assumed to be parallel to one another and normal to the image edge;
i.e. a "left-right" or "up-down" orientation is assumed. Further work could follow up by accounting
for any angle.
Attributes
----------
pickets: :class:`~pylinac.picketfence.PicketHandler`
image: :class:`~pylinac.core.image.DicomImage`
Examples
--------
Run the demo::
>>> PicketFence.run_demo()
Load the demo image:
>>> pf = PicketFence.from_demo_image()
Load an image along with its machine log:
>>> pf_w_log = PicketFence('my/pf.dcm', log='my/log.bin')
Typical session:
>>> img_path = r"C:/QA/June/PF.dcm" # the EPID image
>>> mypf = PicketFence(img_path)
>>> mypf.analyze(tolerance=0.5, action_tolerance=0.3)
>>> print(mypf.results())
>>> mypf.plot_analyzed_image()
"""
def __init__(self, filename: str, filter: int=None, log: str=None, use_filename: bool=False):
"""
Parameters
----------
filename : str, None
Name of the file as a string. If None, image must be loaded later.
filter : int, None
If None (default), no filtering will be done to the image.
If an int, will perform median filtering over image of size ``filter``.
log : str
Path to a log file corresponding to the delivery. The expected fluence of the log file is
used to construct the pickets. MLC peaks are then compared to an absolute reference instead of
a fitted picket.
use_filename : bool
If False (default), no action will be performed.
If True, the filename will be searched for keywords that describe the gantry and/or collimator angle.
For example, if set to True and the file name was "PF_gantry45.dcm" the gantry would be interpreted as being at 45 degrees.
"""
if filename is not None:
self.image = PFDicomImage(filename, use_filenames=use_filename)
if isinstance(filter, int):
self.image.filter(size=filter)
if log is not None:
self._load_log(log)
else:
self._log_fits = None
self._is_analyzed = False
@classmethod
def from_url(cls, url: str, filter: int=None):
"""Instantiate from a URL."""
filename = get_url(url, progress_bar=True)
return cls(filename, filter=filter)
@classmethod
def from_demo_image(cls, filter: int=None):
"""Construct a PicketFence instance using the demo image."""
demo_file = retrieve_demo_file(url='EPID-PF-LR.dcm')
return cls(demo_file, filter=filter)
@classmethod
def from_multiple_images(cls, path_list: Sequence):
"""Load and superimpose multiple images and instantiate a Starshot object.
Parameters
----------
path_list : iterable
An iterable of path locations to the files to be loaded/combined.
"""
obj = cls.from_demo_image()
# save a combined image to a temporary dir, then load it back in as a PFDicomImage
with TemporaryDirectory() as tmp:
filename = osp.join(tmp, 'mydcm.dcm')
image.load_multiples(path_list, method='mean').save(filename)
obj.image = PFDicomImage(filename)
return obj
@property
def passed(self) -> bool:
"""Boolean specifying if all MLC positions were within tolerance."""
return self.pickets.passed
@property
def percent_passing(self) -> float:
"""Return the percentage of MLC positions under tolerance."""
num = 0
num_pass = 0
for picket in self.pickets:
num += len(picket.error_array)
num_pass += sum(picket.error_array < self.settings.tolerance)
pct_pass = 100 * num_pass / num
return pct_pass
@property
def max_error(self) -> float:
"""Return the maximum error found."""
return max(picket.max_error for picket in self.pickets)
@property
def max_std(self) -> float:
pos, vals, err, leaf_nums = self.pickets.error_hist()
return np.max(err)
@property
def max_error_picket(self) -> int:
"""Return the picket number where the maximum error occurred."""
return np.argmax([picket.max_error for picket in self.pickets])
@property
def max_error_leaf(self) -> int:
"""Return the leaf that had the maximum error."""
picket = self.pickets[self.max_error_picket]
return np.argmax(picket.error_array)
@property
@lru_cache()
def abs_median_error(self) -> float:
"""Return the median error found."""
return np.median(np.hstack([picket.error_array for picket in self.pickets]))
@property
def num_pickets(self) -> int:
"""Return the number of pickets determined."""
return len(self.pickets)
def _load_log(self, log: str):
"""Load a machine log that corresponds to the picket fence delivery.
This log determines the location of the pickets. The MLC peaks are then compared to the expected log pickets,
not a simple fit of the peaks."""
# load the log fluence image
mlog = load_log(log)
fl = mlog.fluence.expected.calc_map(equal_aspect=True)
fli = image.load(fl, dpi=254) # 254 pix/in => 1 pix/0.1mm (default fluence calc)
# equate them such that they're the same size & DPI
fluence_img, self.image = image.equate_images(fli, self.image)
# get picket fits from the modified fluence image
pf = PicketFence.from_demo_image()
pf.image = fluence_img
pf.analyze()
self._log_fits = cycle([p.fit for p in pf.pickets])
@staticmethod
def run_demo(tolerance: float=0.5, action_tolerance: float=None):
"""Run the Picket Fence demo using the demo image. See analyze() for parameter info."""
pf = PicketFence.from_demo_image()
pf.analyze(tolerance, action_tolerance=action_tolerance)
print(pf.results())
pf.plot_analyzed_image(leaf_error_subplot=True)
def analyze(self, tolerance: float=0.5, action_tolerance: float=None, hdmlc: bool=False, num_pickets: int=None,
sag_adjustment: Union[float, int]=0,
orientation: str=None, invert: bool=False):
"""Analyze the picket fence image.
Parameters
----------
tolerance : int, float
The tolerance of difference in mm between an MLC pair position and the
picket fit line.
action_tolerance : int, float, None
If None (default), no action tolerance is set or compared to.
If an int or float, the MLC pair measurement is also compared to this
tolerance. Must be lower than tolerance. This value is usually meant
to indicate that a physicist should take an "action" to reduce the error,
but should not stop treatment.
hdmlc : bool
If False (default), a standard (5/10mm leaves) Millennium MLC model is assumed.
If True, an HD (2.5/5mm leaves) Millennium is assumed.
num_pickets : int, None
.. versionadded:: 0.8
The number of pickets in the image. A helper parameter to limit the total number of pickets,
only needed if analysis is catching more pickets than there really are.
sag_adjustment : float, int
.. versionadded:: 0.8
The amount of shift in mm to apply to the image to correct for EPID sag.
For Up-Down picket images, positive moves the image down, negative up.
For Left-Right picket images, positive moves the image left, negative right.
orientation : None, str
.. versionadded: 1.6
If None (default), the orientation is automatically determined. If for some reason the determined
orientation is not correct, you can pass it directly using this parameter.
If passed a string with 'u' (e.g. 'up-down', 'u-d', 'up') it will set the orientation of the pickets as
going up-down. If passed a string with 'l' (e.g. 'left-right', 'lr', 'left') it will set it as going
left-right.
invert : bool
.. versionadded: 1.7
If False (default), the inversion of the image is automatically detected and used.
If True, the image inversion is reversed from the automatic detection. This is useful when runtime errors
are encountered.
"""
if action_tolerance is not None and tolerance < action_tolerance:
raise ValueError("Tolerance cannot be lower than the action tolerance")
# crop the images so that Elekta images don't fail. See #168
if not self._is_analyzed:
self.image.crop(pixels=2)
if invert:
self.image.invert()
"""Pre-analysis"""
self._orientation = orientation
self.settings = Settings(self.orientation, tolerance, action_tolerance, hdmlc, self.image, self._log_fits)
# adjust for sag
if sag_adjustment != 0:
sag_pixels = int(round(sag_adjustment * self.settings.dpmm))
self.image.adjust_for_sag(sag_pixels, self.orientation)
"""Analysis"""
self.pickets = PicketManager(self.image, self.settings, num_pickets)
self._is_analyzed = True
def plot_analyzed_image(self, guard_rails: bool=True, mlc_peaks: bool=True, overlay: bool=True,
leaf_error_subplot: bool=True, show: bool=True, picket: bool=True):
"""Plot the analyzed image.
Parameters
----------
guard_rails : bool
Do/don't plot the picket "guard rails" around the ideal picket
mlc_peaks : bool
Do/don't plot the MLC positions.
overlay : bool
Do/don't plot the alpha overlay of the leaf status.
leaf_error_subplot : bool
.. versionadded:: 1.0
If True, plots a linked leaf error subplot adjacent to the PF image plotting the average and standard
deviation of leaf error.
"""
if not picket:
# standalone figure used when only the leaf-error bar plot is wanted
fig = plt.figure(figsize=(16,8))
ax2 = fig.add_subplot(111)
else:
# plot the image
fig, ax = plt.subplots(figsize=self.settings.figure_size)
ax.imshow(self.image.array, cmap=get_dicom_cmap())
# generate a leaf error subplot if desired (drawn on the standalone axes only,
# since no separate axes is created when the picket image itself is plotted)
if leaf_error_subplot and not picket:
self._add_leaf_error_subplot(ax2)
# plot guard rails and mlc peaks as desired
if picket:
for p_num, picket in enumerate(self.pickets):
if guard_rails:
picket.add_guards_to_axes(ax.axes)
if mlc_peaks:
for idx, mlc_meas in enumerate(picket.mlc_meas):
mlc_meas.plot2axes(ax.axes, width=1.5)
# plot the overlay if desired.
if overlay:
o = Overlay(self.image, self.settings, self.pickets)
o.add_to_axes(ax)
if picket:
# plot CAX
ax.plot(self.image.center.x, self.image.center.y, 'r+', ms=12, markeredgewidth=3)
# tighten up the plot view
ax.set_xlim([0, self.image.shape[1]])
ax.set_ylim([0, self.image.shape[0]])
ax.axis('off')
if show:
plt.show()
def _add_leaf_error_subplot(self, ax: plt.Axes):
"""Add a bar subplot showing the leaf error."""
tol_line_height = [self.settings.tolerance, self.settings.tolerance]
tol_line_width = [0, max(self.image.shape)]
# # make the new axis
# divider = make_axes_locatable(ax)
# if self.settings.orientation == UP_DOWN:
# axtop = divider.append_axes('right', 2, pad=1, sharey=ax)
# else:
# axtop = divider.append_axes('bottom', 2, pad=1, sharex=ax)
axtop = ax
# get leaf positions, errors, standard deviation, and leaf numbers
pos, vals, err, leaf_nums = self.pickets.error_hist()
#self.max_std = np.max(err)
# plot the leaf errors as a bar plot
if self.settings.orientation == UP_DOWN:
axtop.barh(pos, vals, xerr=err, height=self.pickets[0].sample_width * 2, alpha=0.4, align='center')
# plot the tolerance line(s)
# TODO: replace .plot() calls with .axhline when mpld3 fixes functionality
axtop.plot(tol_line_height, tol_line_width, 'r-', linewidth=3)
if self.settings.action_tolerance is not None:
axtop.plot(tol_line_height, tol_line_width, 'y-', linewidth=3)
# reset xlims to comfortably include the max error or tolerance value
axtop.set_xlim([0, max(max(vals), self.settings.tolerance) + 0.1])
else:
axtop.bar(pos, vals, yerr=err, width=self.pickets[0].sample_width * 2, alpha=0.4, align='center')
axtop.plot(tol_line_width, tol_line_height,
'r-', linewidth=3)
if self.settings.action_tolerance is not None:
axtop.plot(tol_line_width, tol_line_height, 'y-', linewidth=3)
axtop.set_ylim([0, max(max(vals), self.settings.tolerance) + 0.1])
# add formatting to axis
axtop.grid(True)
axtop.set_title("Average Error (mm)")
def save_analyzed_image(self, filename: str, picket: bool=True, guard_rails: bool=True, mlc_peaks: bool=True, overlay: bool=True,
leaf_error_subplot: bool=False, **kwargs):
"""Save the analyzed figure to a file. See :meth:`~pylinac.picketfence.PicketFence.plot_analyzed_image()` for
further parameter info.
"""
self.plot_analyzed_image(guard_rails, mlc_peaks, overlay, leaf_error_subplot=leaf_error_subplot, picket=picket, show=False)
plt.savefig(filename, **kwargs)
if isinstance(filename, str):
print(f"Picket fence image saved to: {osp.abspath(filename)}")
def results(self) -> str:
"""Return results of analysis. Use with print()."""
pass_pct = self.percent_passing
offsets = ' '.join('{:.1f}'.format(pk.dist2cax) for pk in self.pickets)
# note: the line-to-line distances reported below assume at least 10 pickets were delivered
offset_list = list(map(float, offsets.split()))
string = f"Picket Fence Results: \n{pass_pct:2.1f}% " \
f"Passed\nMedian Error: {self.abs_median_error:2.3f}mm \n" \
f"Mean picket spacing: {self.pickets.mean_spacing:2.1f}mm \n" \
f"Picket offsets from CAX (mm): {offsets}\n" \
f"Max Error: {self.max_error:2.3f}mm on Picket: {self.max_error_picket}, Leaf: {self.max_error_leaf}\n" \
f"Distance between Line 1 and 2: {abs(round(offset_list[0]-offset_list[1],4))} mm\n" \
f"Distance between Line 2 and 3: {abs(round(offset_list[1] - offset_list[2], 4))} mm\n" \
f"Distance between Line 3 and 4: {abs(round(offset_list[2] - offset_list[3], 4))} mm\n" \
f"Distance between Line 4 and 5: {abs(round(offset_list[3] - offset_list[4], 4))} mm\n" \
f"Distance between Line 5 and 6: {abs(round(offset_list[4] - offset_list[5], 4))} mm\n" \
f"Distance between Line 6 and 7: {abs(round(offset_list[5] - offset_list[6], 4))} mm\n" \
f"Distance between Line 7 and 8: {abs(round(offset_list[6] - offset_list[7], 4))} mm\n" \
f"Distance between Line 8 and 9: {abs(round(offset_list[7] - offset_list[8], 4))} mm\n" \
f"Distance between Line 9 and 10: {abs(round(offset_list[8] - offset_list[9], 4))} mm\n"
return string
def publish_pdf(self, filename: str, notes: str=None, open_file: bool=False, metadata: dict=None):
"""Publish (print) a PDF containing the analysis, images, and quantitative results.
Parameters
----------
filename : (str, file-like object}
The file to write the results to.
notes : str, list of strings
Text; if str, prints single line.
If list of strings, each list item is printed on its own line.
open_file : bool
Whether to open the file using the default program after creation.
metadata : dict
Extra data to be passed and shown in the PDF. The key and value will be shown with a colon.
E.g. passing {'Author': 'James', 'Unit': 'TrueBeam'} would result in text in the PDF like:
--------------
Author: James
Unit: TrueBeam
--------------
"""
plt.ioff()
canvas = pdf.PylinacCanvas(filename, page_title="Picket Fence Analysis", metadata=metadata)
data = io.BytesIO()
self.save_analyzed_image(data, leaf_error_subplot=False)
canvas.add_image(data, location=(3, 5), dimensions=(15, 19))
text = [
'Picket Fence results:',
f'Magnification factor (SID/SAD): {self.image.metadata.RTImageSID/self.image.metadata.RadiationMachineSAD:2.2f}',
f'Tolerance (mm): {self.settings.tolerance}',
f'Leaves passing (%): {self.percent_passing:2.1f}',
f'Absolute median error (mm): {self.abs_median_error:2.3f}',
f'Mean picket spacing (mm): {self.pickets.mean_spacing:2.1f}',
f'Maximum error (mm): {self.max_error:2.3f} on Picket {self.max_error_picket}, Leaf {self.max_error_leaf}',
f'Maximum std (mm): {self.max_std:2.3f}',
]
text.append(f'Gantry Angle: {self.image.gantry_angle:2.2f}')
text.append(f'Collimator Angle: {self.image.collimator_angle:2.2f}')
canvas.add_text(text=text, location=(10, 25.5))
if notes is not None:
canvas.add_text(text="Notes:", location=(1, 5.5), font_size=14)
canvas.add_text(text=notes, location=(1, 5))
canvas.add_new_page()
data = io.BytesIO()
self.save_analyzed_image(data, leaf_error_subplot=False, guard_rails=False, mlc_peaks=False, overlay=False)
canvas.add_image(data, location=(3, 5), dimensions=(15, 19))
canvas.add_new_page()
data = io.BytesIO()
self.save_analyzed_image(data, picket=False, leaf_error_subplot=True, guard_rails=False, mlc_peaks=False, overlay=False, show=False)
canvas.add_image(data, location=(3, 5), dimensions=(15, 19))
canvas.finish()
if open_file:
open_path(filename)
@property
@lru_cache(maxsize=1)
def orientation(self) -> str:
"""The orientation of the image, either Up-Down or Left-Right."""
# if orientation was passed in, use it
if isinstance(self._orientation, str):
if 'u' in self._orientation.lower():
return UP_DOWN
elif 'l' in self._orientation.lower():
return LEFT_RIGHT
# replace any dead pixels with median value
temp_image = self.image.array.copy()
temp_image[temp_image < np.median(temp_image)] = np.median(temp_image)
# find "range" of 80 to 90th percentiles
row_sum = np.sum(temp_image, 0)
col_sum = np.sum(temp_image, 1)
row80, row90 = np.percentile(row_sum, [85, 95])
col80, col90 = np.percentile(col_sum, [85, 95])
row_range = row90 - row80
col_range = col90 - col80
# The true picket side will have a greater difference in
# percentiles than will the non-picket size.
if row_range < col_range:
orientation = LEFT_RIGHT
else:
orientation = UP_DOWN
return orientation
class Overlay:
"""Class for handling the "overlay" feature of the plot."""
def __init__(self, image, settings, pickets):
self.image = image
self.settings = settings
self.pickets = pickets
def add_to_axes(self, axes):
"""Add the overlay to the axes."""
rect_width = self.pickets[0].sample_width*2
for mlc_num, mlc in enumerate(sorted(self.pickets, key=lambda x: len(x.mlc_meas))[0].mlc_meas):
# get pass/fail status of all measurements across pickets for that MLC
if self.settings.action_tolerance is not None:
if all(picket.mlc_passed_action(mlc_num) for picket in self.pickets):
color = 'b'
elif all(picket.mlc_passed(mlc_num) for picket in self.pickets):
color = 'm'
else:
color = 'r'
elif all(picket.mlc_passed(mlc_num) for picket in self.pickets):
color = 'b'
else:
color = 'r'
# create a rectangle overlay
if self.settings.orientation == UP_DOWN:
r = Rectangle(self.image.shape[1], rect_width, center=(self.image.center.x, mlc.center.y))
else:
r = Rectangle(rect_width, self.image.shape[0], center=(mlc.center.x, self.image.center.y))
r.plot2axes(axes.axes, edgecolor='none', fill=True, alpha=0.1, facecolor=color)
class Settings:
"""Simple class to hold various settings and info for PF analysis/plotting."""
def __init__(self, orientation, tolerance, action_tolerance, hdmlc, image, log_fits):
self.orientation = orientation
self.tolerance = tolerance
self.action_tolerance = action_tolerance
self.hdmlc = hdmlc
self.image = image
self.dpmm = image.dpmm
self.mmpd = 1/image.dpmm
try:
self.image_center = image.cax
except AttributeError:
self.image_center = image.center
self.log_fits = log_fits
@property
def figure_size(self) -> Tuple[int, int]:
"""The size of the figure to draw; depends on the picket orientation."""
if self.orientation == UP_DOWN:
return (35, 25)
else:
return (25, 35)
@property
def small_leaf_width(self) -> int:
"""The width of a "small" leaf in pixels."""
leaf_width_mm = 5
leaf_width_pixels = leaf_width_mm * self.dpmm
if self.hdmlc:
leaf_width_pixels /= 2
return leaf_width_pixels
@property
def large_leaf_width(self) -> int:
"""The width of a "large" leaf in pixels."""
return self.small_leaf_width * 2
@property
def number_small_leaves(self) -> int:
"""The number of small leaves; depends on HDMLC status."""
return 40 if not self.hdmlc else 32
@property
def number_large_leaves(self) -> int:
"""The number of large leaves; depends on HDMLC status."""
return 20 if not self.hdmlc else 28
@property
@lru_cache()
def leaf_centers(self) -> np.ndarray:
"""Return a set of leaf centers perpendicular to the leaf motion based on the position of the CAX."""
# generate a set of leaf center points based on physical widths of large and small leaves
first_shift = self.large_leaf_width * (self.number_large_leaves / 2 - 1) + self.large_leaf_width * 0.75
second_shift = self.small_leaf_width * (self.number_small_leaves - 1) + self.large_leaf_width * 0.75
large_leaf_section = np.arange(self.number_large_leaves / 2) * self.large_leaf_width
small_leaf_section = (np.arange(self.number_small_leaves) * self.small_leaf_width) + first_shift
large_leaf_section2 = (np.arange(
self.number_large_leaves / 2) * self.large_leaf_width) + first_shift + second_shift
leaf_centers = np.concatenate((large_leaf_section, small_leaf_section, large_leaf_section2))
# now adjust them to align with the iso
if self.orientation == UP_DOWN:
leaf30_center = self.image_center.y - self.small_leaf_width / 2
edge = self.image.shape[0]
else:
leaf30_center = self.image_center.x - self.small_leaf_width / 2
edge = self.image.shape[1]
adjustment = leaf30_center - leaf_centers[29]
leaf_centers += adjustment
# only include values that are reasonable as values might extend past image (e.g. with small SID)
values_in_image = (leaf_centers > 0 + self.large_leaf_width / 2) & (
leaf_centers < edge - self.large_leaf_width / 2)
leaf_centers = leaf_centers[values_in_image]
return np.round(leaf_centers).astype(int)
class PicketManager:
"""Finds and handles the pickets of the image."""
def __init__(self, image, settings, num_pickets):
self.pickets = []
self.image = image
self.settings = settings
self.num_pickets = num_pickets
self.find_pickets()
def error_hist(self) -> Tuple[List, ...]:
"""Returns several lists of information about the MLC measurements. For use with plotting."""
# for each MLC, get the average and standard deviation of the error across all the pickets
error_means = []
error_stds = []
error_plot_positions = []
mlc_leaves = []
for mlc_num, mlc_meas in enumerate(sorted(self.pickets, key=lambda x: len(x.mlc_meas))[0].mlc_meas):
errors = []
for picket in self.pickets:
errors.append(picket.mlc_meas[mlc_num].error)
error_means.append(np.mean(errors))
error_stds.append(np.std(errors))
mlc_leaves.append(mlc_meas.leaf_pair)
if self.settings.orientation == UP_DOWN:
error_plot_positions.append(mlc_meas.center.y)
else:
error_plot_positions.append(mlc_meas.center.x)
#print(np.max(error_stds))
return error_plot_positions, error_means, error_stds, mlc_leaves
def find_pickets(self):
"""Find the pickets of the image."""
leaf_prof = self.image_mlc_inplane_mean_profile
peak_idxs = leaf_prof.find_peaks(min_distance=0.02, threshold=0.5, max_number=self.num_pickets)
peak_spacing = np.median(np.diff(np.sort(peak_idxs)))
if np.isnan(peak_spacing):
peak_spacing = 20
for peak_idx in peak_idxs:
self.pickets.append(Picket(self.image, self.settings, peak_idx, peak_spacing/2))
@property
def passed(self) -> bool:
"""Whether all the pickets passed tolerance."""
return all(picket.passed for picket in self)
def __getitem__(self, item):
return self.pickets[item]
def __len__(self):
return len(self.pickets)
@property
def image_mlc_inplane_mean_profile(self) -> MultiProfile:
"""A profile of the image along the MLC travel direction."""
if self.settings.orientation == UP_DOWN:
leaf_prof = np.mean(self.image, 0)
else:
leaf_prof = np.mean(self.image, 1)
return MultiProfile(leaf_prof)
@property
def mean_spacing(self) -> np.ndarray:
"""The average distance between pickets in mm."""
sorted_pickets = sorted(self.pickets, key=lambda x: x.dist2cax)
return np.mean([abs(sorted_pickets[idx].dist2cax - sorted_pickets[idx+1].dist2cax) for idx in range(len(sorted_pickets)-1)])
class Picket:
"""Holds picket information in a Picket Fence test.
Attributes
----------
mlc_meas : list
Holds :class:`~pylinac.picketfence.MLCMeas` objects.
"""
def __init__(self, image, settings, approximate_idx, spacing):
self.mlc_meas = []
self.image = image
self.settings = settings
self.approximate_idx = approximate_idx
self.spacing = spacing
self._get_mlc_positions()
def _get_mlc_positions(self):
"""Calculate the positions of all the MLC pairs."""
# for each MLC...
for mlc_num, mlc_center in enumerate(self.settings.leaf_centers):
# find the MLC peak
mlc_position = self.find_mlc_peak(mlc_center)
# add MLC measurement object
if mlc_position is not None:
self.add_mlc_meas(mlc_center, mlc_position)
# now add the picket fit to the measurement so it can calculate error, etc.
for idx, meas in enumerate(self.mlc_meas):
meas.fit = self.fit
def find_mlc_peak(self, mlc_center):
"""Determine the center of the picket."""
mlc_rows = np.arange(mlc_center - self.sample_width, mlc_center + self.sample_width + 1)
if self.settings.orientation == UP_DOWN:
pix_vals = np.median(self.picket_array[mlc_rows, :], axis=0)
else:
pix_vals = np.median(self.picket_array[:, mlc_rows], axis=1)
if max(pix_vals) > np.percentile(self.picket_array, 80):
prof = SingleProfile(pix_vals)
fw80mc = prof.fwxm_center(70, interpolate=True)
return fw80mc + self.approximate_idx - self.spacing
def add_mlc_meas(self, mlc_center, mlc_position):
"""Add an MLC measurement point."""
upper_point = mlc_center - self.sample_width / 2
lower_point = mlc_center + self.sample_width / 2
if self.settings.orientation == UP_DOWN:
meas = MLCMeas((mlc_position, upper_point), (mlc_position, lower_point), self.settings)
else:
meas = MLCMeas((upper_point, mlc_position), (lower_point, mlc_position), self.settings)
self.mlc_meas.append(meas)
@property
def sample_width(self) -> float:
"""The width to sample the MLC leaf (~40% of the leaf width)."""
return np.round(np.median(np.diff(self.settings.leaf_centers) * 2 / 5) / 2).astype(int)
@property
@lru_cache()
def picket_array(self) -> np.ndarray:
"""A slice of the whole image that contains the area around the picket."""
if self.settings.orientation == UP_DOWN:
left_edge = int(self.approximate_idx - self.spacing)
right_edge = int(self.approximate_idx + self.spacing)
# see #167 & #174
if left_edge < 0:
self.spacing += left_edge
left_edge = int(self.approximate_idx - self.spacing)
right_edge = int(self.approximate_idx + self.spacing)
array = self.image[:, left_edge:right_edge]
else:
top_edge = int(self.approximate_idx - self.spacing)
bottom_edge = int(self.approximate_idx + self.spacing)
# see #167 & #174
if top_edge < 0:
self.spacing += top_edge
top_edge = int(self.approximate_idx - self.spacing)
bottom_edge = int(self.approximate_idx + self.spacing)
array = self.image[top_edge:bottom_edge, :]
return array
@property
def abs_median_error(self) -> np.ndarray:
"""The absolute median error of the MLC measurements."""
return np.median(np.abs(self.error_array))
@property
def max_error(self) -> float:
"""The max error of the MLC measurements."""
return self.error_array.max()
@property
@lru_cache()
def error_array(self) -> np.ndarray:
"""An array containing the error values of all the measurements."""
return np.array([meas.error for meas in self.mlc_meas])
@property
def passed(self) -> bool:
"""Whether or not all the measurements passed."""
return all(meas.passed for meas in self.mlc_meas)
def mlc_passed(self, mlc) -> bool:
"""Return whether a specific MLC has passed tolerance."""
return self.mlc_meas[mlc].passed
def mlc_passed_action(self, mlc) -> bool:
"""Return whether a specific MLC has passed the action tolerance."""
if self.settings.action_tolerance is not None:
return self.mlc_meas[mlc].passed_action
else:
raise AttributeError("No action tolerance was specified")
@property
@lru_cache(maxsize=1)
def fit(self):
"""The fit of a polynomial to the MLC measurements."""
if self.settings.log_fits is not None:
return next(self.settings.log_fits)
x = np.array([mlc.point1.y for mlc in self.mlc_meas])
y = np.array([mlc.point1.x for mlc in self.mlc_meas])
if self.settings.orientation == UP_DOWN:
fit = np.polyfit(x, y, 1)
else:
fit = np.polyfit(y, x, 1)
return np.poly1d(fit)
@property
def dist2cax(self) -> float:
"""The distance from the CAX to the picket, in mm."""
center_fit = np.poly1d(self.fit)
if self.settings.orientation == UP_DOWN:
length = self.image.shape[0]
else:
length = self.image.shape[1]
x_data = np.arange(length)
y_data = center_fit(x_data)
idx = int(round(len(x_data) / 2))
if self.settings.orientation == UP_DOWN:
axis = 'x'
p1 = Point(y_data[idx], x_data[idx])
else:
axis = 'y'
p1 = Point(x_data[idx], y_data[idx])
return (getattr(self.image.center, axis) - getattr(p1, axis)) * self.settings.mmpd
@property
def left_guard(self):
"""The line representing the left side guard rail."""
l_fit = np.copy(self.fit)
l_fit[-1] += self.settings.tolerance / self.settings.mmpd
return np.poly1d(l_fit)
@property
def right_guard(self):
"""The line representing the right side guard rail."""
r_fit = np.copy(self.fit)
r_fit[-1] -= self.settings.tolerance / self.settings.mmpd
return np.poly1d(r_fit)
def add_guards_to_axes(self, axis: plt.Axes, color: str='g'):
"""Plot guard rails to the axis."""
if self.settings.orientation == UP_DOWN:
length = self.image.shape[0]
else:
length = self.image.shape[1]
x_data = np.arange(length)
left_y_data = self.left_guard(x_data)
right_y_data = self.right_guard(x_data)
if self.settings.orientation == UP_DOWN:
axis.plot(left_y_data, x_data, color=color)
axis.plot(right_y_data, x_data, color=color)
else:
axis.plot(x_data, left_y_data, color=color)
axis.plot(x_data, right_y_data, color=color)
class MLCMeas(Line):
"""Represents an MLC measurement."""
def __init__(self, point1, point2, settings):
super().__init__(point1, point2)
self.settings = settings
self.fit = None
def plot2axes(self, axes: plt.Axes, width: NumberLike=1):
"""Plot the measurement to the axes."""
super().plot2axes(axes, width, color=self.bg_color)
@property
def bg_color(self) -> str:
"""The color of the measurement when the PF image is plotted, based on pass/fail status."""
if not self.passed:
return 'r'
elif self.settings.action_tolerance is not None:
if self.passed_action:
return 'b'
else:
return 'm'
else:
return 'b'
@property
def error(self) -> float:
"""The error (difference) of the MLC measurement and the picket fit."""
if self.settings.orientation == UP_DOWN:
picket_pos = self.fit(self.center.y)
mlc_pos = self.center.x
else:
picket_pos = self.fit(self.center.x)
mlc_pos = self.center.y
return abs(mlc_pos - picket_pos) * self.settings.mmpd
@property
def passed(self) -> bool:
"""Whether the MLC measurement was under tolerance."""
return self.error < self.settings.tolerance
@property
def passed_action(self) -> bool:
"""Whether the MLC measurement was under the action level tolerance."""
if self.settings.action_tolerance is not None:
return self.error < self.settings.action_tolerance
@property
@lru_cache()
def leaf_pair(self) -> Tuple[int, int]:
"""The leaf pair that formed the MLC measurement.
Returns
-------
tuple : 2 elements which are the two leaf numbers
"""
leaves = [0, 0]
# get distance between MLC point and EPID center in *pixels*
if self.settings.orientation == UP_DOWN:
mlc_loc = self.center.y
epid_center = self.settings.image_center.y
else:
mlc_loc = self.center.x
epid_center = self.settings.image_center.x
mlc_dist = mlc_loc - epid_center
# determine leaf number based on if it's in/not in the "small leaf" region
small_region_extent = self.settings.small_leaf_width * self.settings.number_small_leaves / 2
# large leaf region
if not small_region_extent > mlc_dist > -small_region_extent:
if np.sign(mlc_dist) > 0:  # positive, meaning the leaf sits past the EPID centre
# offset MLC distance to the edge of the small leaf region
mlc_dist -= small_region_extent
# divide the MLC distance by the leaf width and convert to leaf number
leaf = int(round((abs(mlc_dist) + self.settings.large_leaf_width / 2) / self.settings.large_leaf_width))
starting_leaf = 14 if self.settings.hdmlc else 10 + 1
leaves[0] = starting_leaf - leaf
else:
# offset MLC distance to the edge of the small leaf region
mlc_dist += small_region_extent
# divide the MLC distance by the leaf width and convert to leaf number
leaf = int(round((abs(mlc_dist) + self.settings.large_leaf_width / 2) / self.settings.large_leaf_width))
starting_leaf = 46 if self.settings.hdmlc else 50
leaves[0] = starting_leaf + leaf
# small leaf region
else:
# divide the MLC distance by the leaf width and convert to leaf number
leaf = int(round((abs(mlc_dist) + self.settings.small_leaf_width / 2) / self.settings.small_leaf_width))
if np.sign(mlc_dist) > 0:
leaves[0] = 31 - leaf
else:
leaves[0] = 30 + leaf
# set opposite leaf using an offset
leaves[1] = 121 - leaves[0]
return leaves
``` |
{
"source": "jmadden4/flasky-and-friends",
"score": 3
} |
#### File: flasky-and-friends/app/summarizer.py
```python
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from collections import defaultdict
from string import punctuation
from heapq import nlargest
class FrequencySummarizer:
def __init__(self, min_cut = 0.1, max_cut = 0.9):
"""
Initialize the text summarizer.
Words that have a frequency term lower than min_cut
or higher than max_cut will be ignored.
"""
self._min_cut = min_cut
self._max_cut = max_cut
self._stopwords = set(stopwords.words('english') + list(punctuation))
def _compute_frequencies(self, word_sent):
"""
Compute the frequency of each of word.
Input:
word_sent, a list of sentences already tokenized.
Output:
freq, a dictionary where freq[w] is the frequency of w.
"""
freq = defaultdict(int)
for s in word_sent:
for word in s:
if word not in self._stopwords:
freq[word] += 1
# frequency normalization and filtering
m = float(max(freq.values()))
for w in list(freq.keys()):  # copy the keys so entries can be deleted while iterating
freq[w] /= m
if freq[w] >= self._max_cut or freq[w] <= self._min_cut:
del freq[w]
return freq
def summarize(self, text, n):
"""
Return a list of n sentences
which represent the summary of text.
"""
sents = sent_tokenize(text)
assert n <= len(sents)
word_sent = [word_tokenize(s.lower()) for s in sents]
self._freq = self._compute_frequencies(word_sent)
ranking = defaultdict(int)
for i,sent in enumerate(word_sent):
for w in sent:
if w in self._freq:
ranking[i] += self._freq[w]
sents_idx = self._rank(ranking, n)
return [sents[j] for j in sents_idx]
def _rank(self, ranking, n):
""" return the first n sentences with highest ranking """
return nlargest(n, ranking, key = ranking.get)
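# Minimal usage sketch (added; assumes the NLTK 'punkt' and 'stopwords' corpora
# have already been downloaded, e.g. via nltk.download()):
if __name__ == '__main__':
sample = ("Flask is a micro web framework written in Python. "
"It is classified as a microframework because it does not require particular tools or libraries. "
"Flask supports extensions that can add application features as if they were implemented in Flask itself.")
fs = FrequencySummarizer()
for sentence in fs.summarize(sample, 2):
print(sentence)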
``` |
{
"source": "jmadden4/PersonaBot",
"score": 3
} |
#### File: app/main/forms.py
```python
from flask_wtf import FlaskForm
from werkzeug.datastructures import MultiDict
from wtforms import StringField, TextAreaField, BooleanField, SelectField,\
SubmitField
from wtforms.validators import Required, Length, Email, Regexp
from wtforms import ValidationError
from flask import Flask, session
app = Flask(__name__)
class SomeForm(FlaskForm):
name = StringField('Enter a data point from a customer', validators=[Required()])
#email = StringField('Email', [DataRequired(), Email()])
submit = SubmitField('Submit')
def reset(self):
#data = MultiDict([ ('csrf', self.generate_csrf_token() ) ])
#data = self.generate_csrf_token()
data = self.generate_csrf_token()
self.process(data)
def generate_csrf_token(self):
# NOTE: some_random_string() is assumed to be provided elsewhere in the project
if '_csrf_token' not in session:
session['_csrf_token'] = some_random_string()
return session['_csrf_token']
```
#### File: app/main/views.py
```python
from . import main
from .forms import SomeForm
import json
import datetime
from flask import jsonify, redirect, render_template, request, url_for, make_response, abort, flash, session
from flask_restful import Resource, Api
from flask import Flask, request
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
@app.before_request
def csrf_protect():
if request.method == "POST":
token = session.pop('_csrf_token', None)
if not token or token != request.form.get('_csrf_token'):
abort(403)
def generate_csrf_token():
if '_csrf_token' not in session:
session['_csrf_token'] = some_random_string()
return session['_csrf_token']
@main.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
return response
question = ""
@main.route('/', methods=['GET', 'POST'])
@main.route('/index', methods=['GET', 'POST'])
#@main.route('/output', methods=['GET', 'POST'])
def index():
form = SomeForm()
if form.validate_on_submit():
#do nothing
app.jinja_env.globals['csrf_token'] = generate_csrf_token
form.reset()
# seriously do nothing
return render_template('index.html',form=form)
@main.route('/2', methods=['GET', 'POST'])
@main.route('/index2', methods=['GET', 'POST'])
def index2():
form = SomeForm()
#question = form.name.data
if request.method == 'POST':
question = form.name.data
print question
from PersonaChatBotResponse import response
input1=question
imgPath = url_for('static', filename='sharkDog.jpg')
resp1 = response(input1)
imgString = "<img src="+imgPath+" width=120px height=170px></img>"
formResponse = "<table class=""table"">\
<thead>\
<tr>\
<th>Input Value #1</th>\
<th>Input Value #2</th>\
<th>Persona Bot Output</th>\
</thead>\
</tr>\
<tbody>\
<tr>\
<td><h4>"+imgString+"</h4></td>\
<td><h4>"+question+"</h4></td>\
<td><h4>"+resp1+"</h4></td>\
</tr>\
</tbody>\
</table>"
#resp = "Response from Diskey Bot is: " + "<h3>" + respTemp2 + "</h3><br/>"+ "\n" + "\n" + "Input was: "+respTemp + "\n"
#return redirect(url_for('main.fetchFromChatBot', id=question))
#return jsonify(data={'question': question})
#/_fetchFromChatBot/<id>'
#return redirect(url_for('main.index', form=form))
#return render_template('index.html', form=form, question=question)
abort
return render_template('index.html', form=form)
#@main.route('/_SubmitForm')
#def submitForm():
# return render_template('index.html',form=form)
@main.route('/_askChatBot/<id>', methods=['GET', 'POST'])
def askChatBot(id):
time_stamp = datetime.datetime.now()
id = time_stamp
id_to_post = id
id = -1
question="hello mr bear?"
print id_to_post
print id
return jsonify(id_to_post)
@main.route('/_askChatBotRandom/<id>', methods=['GET', 'POST'])
def askChatBotRandom(id):
time_stamp = datetime.datetime.now()
id = time_stamp
id_to_post = id
#from diskeyChatBotModel import stemmer
id = -1
#id = time_stamp
#id_to_send =
question="no tech?"
print id_to_post
print id
return jsonify(id_to_post)
@main.route('/_fetchFromChatBot/<id>', methods=['GET', 'POST'])
def fetchFromChatBot(id):
import os, sys, random
from PersonaChatBotResponse import response
input1 = "Tablet"
input2 = "PC"
input3 = "Mobile"
imgPath1 = url_for('static', filename='sharkDog.jpg')
imgPath2 = url_for('static', filename='DiscoDayCare.jpg')
imgPath3 = url_for('static', filename='tiredTuckedIn.jpg')
combos = [0, 1, 2]
comboTemp = random.choice(combos)
combo = comboTemp
imgs = [imgPath1, imgPath2, imgPath3]
imgTemp = imgs[combo]
imgPath = imgTemp
imgString = "<img src="+imgPath+" width=140px height=170px></img>"
inputOptions = [input1, input2, input3]
resp1 = response(input1)
resp2 = response(input2)
resp3 = response(input3)
respInput = [input1, input2, input3]
#respTemp = random.choice(respInput)
respTemp = respInput[combo]
respTemp2 = response(respTemp)
responseTable = "<table class=""table"">\
<thead>\
<tr>\
<th>User Photo</th>\
<th>Variable Type</th>\
<th>Input Variable</th>\
<th>Persona Bot Output</th>\
</thead>\
</tr>\
<tbody>\
<tr>\
<td><h4>"+imgString+"</h4></td>\
<td><h4>Technology</h4></td>\
<td><h4>"+respTemp+"</h4></td>\
<td><h4>"+respTemp2+"</h4></td>\
</tr>\
</tbody>\
</table>"
#resp = "Response from Diskey Bot is: " + "<h3>" + respTemp2 + "</h3><br/>"+ "\n" + "\n" + "Input was: "+respTemp + "\n"
return responseTable
@main.route('/_fetchFromChatBotRandom/<id>', methods=['GET', 'POST'])
def fetchFromChatBotRandom(id):
import os, sys, random
#from diskeyChatBotModel import words
#os.system('python /home/joe/workspace/diskey-bot/diskeyChatBotModel.py')
#resp = words
#resp = "wowww"
#os.system('python /home/joe/workspace/diskey-bot/diskeyChatBotResponse.py')
from PersonaChatBotResponse import response
input1 = "shark"
input2 = "pup"
input3 = "sleepy"
imgPath1 = url_for('static', filename='sharkDog.jpg')
imgPath2 = url_for('static', filename='DiscoDayCare.jpg')
imgPath3 = url_for('static', filename='tiredTuckedIn.jpg')
combos = [0, 1, 2]
comboTemp = random.choice(combos)
combo = comboTemp
imgs = [imgPath1, imgPath2, imgPath3]
imgTemp = imgs[combo]
imgPath = imgTemp
imgString = "<img src="+imgPath+" width=120px height=170px></img>"
inputOptions = [input1, input2, input3]
resp1 = response(input1)
resp2 = response(input2)
resp3 = response(input3)
respInput = [input1, input2, input3]
#respTemp = random.choice(respInput)
respTemp = respInput[combo]
respTemp2 = response(respTemp)
responseTable = "<table class=""table"">\
<thead>\
<tr>\
<th>Input Value #1</th>\
<th>Input Value #2</th>\
<th>Dog Bot Output</th>\
</thead>\
</tr>\
<tbody>\
<tr>\
<td><h4>"+imgString+"</h4></td>\
<td><h4>"+respTemp+"</h4></td>\
<td><h4>"+respTemp2+"</h4></td>\
</tr>\
</tbody>\
</table>"
#resp = "Response from Diskey Bot is: " + "<h3>" + respTemp2 + "</h3><br/>"+ "\n" + "\n" + "Input was: "+respTemp + "\n"
return responseTable
``` |
{
"source": "jmadibekov/web-dev",
"score": 2
} |
#### File: hh_back/api/serializers.py
```python
from rest_framework import serializers
from .models import Company, Vacancy
class CompanySerializer(serializers.Serializer):
id = serializers.IntegerField(read_only=True)
name = serializers.CharField(required=True, allow_blank=False, max_length=200)
description = serializers.CharField(
required=False, allow_blank=True, max_length=500
)
city = serializers.CharField(required=True, allow_blank=False, max_length=200)
address = serializers.CharField(required=False, allow_blank=True, max_length=500)
def create(self, validated_data):
return Company.objects.create(**validated_data)
def update(self, instance, validated_data):
instance.name = validated_data.get("name", instance.name)
instance.description = validated_data.get("description", instance.description)
instance.city = validated_data.get("city", instance.city)
instance.address = validated_data.get("address", instance.address)
instance.save()
return instance
class VacancySerializer(serializers.ModelSerializer):
class Meta:
model = Vacancy
fields = "__all__"
``` |
{
"source": "jmadler/teletraan",
"score": 2
} |
#### File: deploy_board/webapp/host_views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import View
import logging
from helpers import environs_helper, agents_helper, autoscaling_groups_helper
from helpers import environ_hosts_helper, hosts_helper
from deploy_board.settings import IS_PINTEREST, CMDB_API_HOST, CMDB_INSTANCE_URL, CMDB_UI_HOST, PHOBOS_URL
from datetime import datetime
import pytz
import requests
import common
requests.packages.urllib3.disable_warnings()
log = logging.getLogger(__name__)
def get_agent_wrapper(request, hostname):
# gather the env name and stage info
agents = agents_helper.get_agents_by_host(request, hostname)
agent_wrappers = []
is_unreachable = False
for agent in agents:
agent_wrapper = {}
agent_wrapper["agent"] = agent
envId = agent['envId']
agent_env = environs_helper.get(request, envId)
agent_wrapper["env"] = agent_env
agent_wrapper["error"] = ""
if agent.get('lastErrno', 0) != 0:
agent_wrapper["error"] = agents_helper.get_agent_error(request, agent_env['envName'],
agent_env['stageName'], hostname)
if agent['state'] == 'UNREACHABLE':
is_unreachable = True
agent_wrappers.append(agent_wrapper)
return agent_wrappers, is_unreachable
# TODO: deprecate this
def get_asg_name(request, hosts):
if IS_PINTEREST:
for host in hosts:
if host and host.get('groupName'):
group_info = autoscaling_groups_helper.get_group_info(request, host.get('groupName'))
if group_info and group_info.get("launchInfo") and group_info.get("launchInfo")["asgStatus"] == "ENABLED":
return host.get('groupName')
return None
def get_show_terminate(hosts):
for host in hosts:
if host and host.get('state') and host.get('state') != 'PENDING_TERMINATE' and host.get('state') != 'TERMINATING' and host.get('state') != 'TERMINATED':
return True
return False
def get_host_id(hosts):
if hosts:
return hosts[0].get('hostId')
return None
def _get_cloud(json_obj):
try:
return json_obj.get('cloud', None).get('aws', None)
except:
return None
def get_host_details(host_id):
if not host_id:
return None
host_url = CMDB_API_HOST + CMDB_INSTANCE_URL + host_id
response = requests.get(host_url)
try:
instance = response.json()
except:
# the host not found in CMDB
return None
cloud_info = _get_cloud(instance)
if not cloud_info:
return None
launch_time = cloud_info.get('launchTime', 0)
launch_time = datetime.fromtimestamp(launch_time / 1000, pytz.timezone('America/Los_Angeles')).strftime("%Y-%m-%d %H:%M:%S")
availability_zone = cloud_info.get('placement', {}).get('availability_zone', None)
ami_id = cloud_info.get('image_id', None)
host_details = {
'Subnet Id': instance.get('subnet_id', None),
'State': instance.get('state', None),
'Security Groups': instance.get('security_groups', None),
'Availability Zone': availability_zone,
'Tags': instance['tags'],
'Launch Time': launch_time,
'AMI Id': ami_id,
}
if IS_PINTEREST and PHOBOS_URL:
host_ip = instance['config']['internal_address']
host_name = instance['config']['name']
if host_ip is not None:
phobos_link = PHOBOS_URL + host_name
host_details['Phobos Link'] = phobos_link
return host_details
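# Illustrative assumption (not taken from the original source): the CMDB
# response consumed by get_host_details() is expected to look roughly like
# the dict below; only the keys read above are shown, and the values are made up.
_EXAMPLE_CMDB_INSTANCE = {
    'cloud': {'aws': {'launchTime': 1500000000000,
                      'placement': {'availability_zone': 'us-east-1a'},
                      'image_id': 'ami-12345678'}},
    'subnet_id': 'subnet-1234',
    'state': 'running',
    'security_groups': ['sg-1234'],
    'tags': {'Name': 'example-host'},
    'config': {'internal_address': '10.0.0.1', 'name': 'example-host'},
}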
class GroupHostDetailView(View):
def get(self, request, groupname, hostname):
hosts = hosts_helper.get_hosts_by_name(request, hostname)
host_id = get_host_id(hosts)
asg = get_asg_name(request, hosts)
show_terminate = get_show_terminate(hosts)
show_warning_message = not show_terminate
agent_wrappers, is_unreachable = get_agent_wrapper(request, hostname)
host_details = get_host_details(host_id)
return render(request, 'hosts/host_details.html', {
'group_name': groupname,
'hostname': hostname,
'hosts': hosts,
'host_id': host_id,
'agent_wrappers': agent_wrappers,
'show_warning_message': show_warning_message,
'asg_group': asg,
'is_unreachable': is_unreachable,
'pinterest': IS_PINTEREST,
'host_information_url': CMDB_UI_HOST,
'host_details': host_details,
})
class HostDetailView(View):
def get(self, request, name, stage, hostname):
envs = environs_helper.get_all_env_stages(request, name)
stages, env = common.get_all_stages(envs, stage)
duplicate_stage = ''
for stage_name in stages:
if stage_name != stage:
hosts = environs_helper.get_env_capacity(request, name, stage_name, capacity_type="HOST")
if hostname in hosts:
duplicate_stage = stage_name
hosts = environ_hosts_helper.get_host_by_env_and_hostname(request, name, stage, hostname)
host_id = get_host_id(hosts)
show_terminate = get_show_terminate(hosts)
show_warning_message = not show_terminate
asg = get_asg_name(request, hosts)
is_protected = False
if asg:
is_protected = autoscaling_groups_helper.is_hosts_protected(request, asg, [host_id])
agent_wrappers, is_unreachable = get_agent_wrapper(request, hostname)
host_details = get_host_details(host_id)
return render(request, 'hosts/host_details.html', {
'env_name': name,
'stage_name': stage,
'hostname': hostname,
'hosts': hosts,
'host_id': host_id,
'agent_wrappers': agent_wrappers,
'show_terminate': show_terminate,
'show_warning_message': show_warning_message,
'show_force_terminate': IS_PINTEREST,
'asg_group': asg,
'is_unreachable': is_unreachable,
'pinterest': IS_PINTEREST,
'host_information_url': CMDB_UI_HOST,
'instance_protected': is_protected,
'host_details': host_details,
'duplicate_stage': duplicate_stage,
})
def hosts_list(request):
return render(request, 'hosts/hosts_landing.html', {
})
def hosts_show(request, build_id):
return HttpResponse("NOT IMPLEMENTED")
```
#### File: teletraan/deploy-sentinel/quickstart.py
```python
import os
import subprocess
import traceback
import time
import string
import random
import requests
requests.packages.urllib3.disable_warnings()
def main():
build_path = "file://%s/%s" % (os.path.dirname(os.path.realpath(__file__)),
"quickstart-build.tar.gz")
host_info_path = "file://%s/%s" % (os.path.dirname(os.path.realpath(__file__)),
"host_info")
build_dest_dir = '/tmp/quickstart-build.tar.gz'
host_info_dest_dir = '/tmp/deployd/host_info'
build_download_cmd = ['curl', '-ksS', build_path, '-o', build_dest_dir]
host_info_download_cmd = ['curl', '-ksS', host_info_path, '-o', host_info_dest_dir]
try:
# Publish build
process = subprocess.Popen(build_download_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = process.communicate()
if error:
print "Error: failed to publish build to /tmp directory.", error
return
# Make deployd directory if it doesn't yet exist
if not os.path.exists("/tmp/deployd"):
os.makedirs("/tmp/deployd")
# Copy over host_info file
process = subprocess.Popen(host_info_download_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = process.communicate()
if error:
print "Error: failed to publish host_info to /tmp directory.", error
return
publish_local_build("file://%s" % build_dest_dir)
except Exception as e:
print traceback.format_exc()
return None, e.message, 1
def gen_random_num(size=8, chars=string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def publish_local_build(build_path, build_name='deploy-sentinel', branch='master', commit=gen_random_num(32)):
build = {}
publish_build_url = "http://localhost:8080/v1/builds"
headers = {'Content-type': 'application/json'}
build['name'] = build_name
build['repo'] = 'local'
build['branch'] = branch
build['commit'] = commit
build['commitDate'] = int(round(time.time()))
build['artifactUrl'] = build_path
build['publishInfo'] = build_path
r = requests.post(publish_build_url, json=build, headers=headers)
if 200 <= r.status_code < 300:
print "Successfully published local deploy-sentinel build and host_info " \
"configuration file to local /tmp directory!"
else:
print "Error publishing local deploy-sentinel build. Status code = %s, response = %s" % (str(r.status_code),
str(r.text))
return build
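# Hedged usage note (added for illustration): publish_local_build() can also be
# called directly with any locally reachable tarball, e.g.
#
#   publish_local_build("file:///tmp/quickstart-build.tar.gz",
#                       build_name="deploy-sentinel", branch="master")
#
# The path above is a placeholder; the call assumes a Teletraan service is
# listening on http://localhost:8080 as in publish_build_url.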
if __name__ == "__main__":
main()
``` |
{
"source": "jmadtha/ce-cloudify-bora",
"score": 2
} |
#### File: nodecellar-config-plugin/nodecellar_config_plugin/tasks.py
```python
from cloudify.decorators import operation
from subprocess import call
@operation
def get_mongo_host_and_port(ctx, **kwargs):
"""
Gets the mongo ip address and port and stores them in a file to be sourced by the
nodecellar startup script
"""
mongo_ip_address = ctx.related.runtime_properties['ip_address']
mongo_port = ctx.related.runtime_properties['port']
ctx.logger.info("Mongo IP address is {} and port is {}".format(mongo_ip_address, mongo_port))
env_file_path = ctx.properties.get("env_file_path", "/tmp/mongo_host_and_port.sh")
ctx.logger.info("Writing file {}".format(env_file_path))
with open(env_file_path, 'w') as env_file:
env_file.write("export MONGO_PORT={}\n".format(mongo_port))
env_file.write("export MONGO_HOST={}\n".format(mongo_ip_address))
call(["chmod", "+x", env_file_path])
``` |
{
"source": "jmaenpaa/db2_helpers",
"score": 3
} |
#### File: src/db2_helpers/db2_helpers.py
```python
import base64
import collections
import os
import pickle
import stat
from pathlib import Path
from getpass import getpass
from hashlib import blake2b
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.fernet import Fernet
import ibm_db
_hdbc = None
_sqlerror = None
_sqlcode = None
_sqlstate = None
_default_environment = "dev"
_default_settings_location = Path("") # Location for dev_host_db.pickle files
_default_secret_key_location = Path.home() # Location of secret key file (user's home directory)
_secretkeyfile = _default_secret_key_location / ".db2_helpers.secret.key"
_default_secretkey = collections.OrderedDict([
("secret", None),
("locked", False),
("hash", ""),
("secrethash", "")
])
_default_settings = collections.OrderedDict([
("database", "sample"),
("hostname", "localhost"),
("protocol", "tcpip"),
("port", "50000"),
("security", "nossl"),
("servercert", "db2inst1.arm"),
("uid", "db2inst1"),
("pwd", "password"),
("environment", _default_environment),
("secrethash", "") # Hash of secret key used to encrypt password
])
_settings = _default_settings.copy()
_prompt_label = collections.OrderedDict([
("database", "database name"),
("hostname", "host name for database"),
("protocol", "protocol for database"),
("port", "port for tcpip connection"),
("servercert", "certificate file for database"),
("uid", "userid for database connection"),
("pwd", "password for database connection"),
])
def db_connect(settings: collections.OrderedDict = None) -> ibm_db.IBM_DBConnection or None:
"""Connect to Db2"""
global _hdbc
if _hdbc and db_connected():
return _hdbc
if not settings:
print("Settings not loaded")
_hdbc = None
return _hdbc
try:
if not settings["database"]:
print("Settings are incorrect")
_hdbc = None
return _hdbc
except KeyError:
print("Settings content is corrupted")
_hdbc = None
return _hdbc
if "security" in settings and settings["security"].upper() == "SSL":
dsn = (
"DRIVER={{IBM DB2 ODBC DRIVER}};"
"DATABASE={0};"
"HOSTNAME={1};"
"PORT={2};"
"PROTOCOL=TCPIP;"
"UID={3};"
"PWD={4};"
"SECURITY=SSL;SSLServerCertificate={5}").format(settings["database"],
settings["hostname"],
settings["port"],
settings["uid"],
settings["pwd"],
settings["servercert"])
else:
dsn = (
"DRIVER={{IBM DB2 ODBC DRIVER}};"
"DATABASE={0};"
"HOSTNAME={1};"
"PORT={2};"
"PROTOCOL=TCPIP;"
"UID={3};"
"PWD={4};").format(settings["database"],
settings["hostname"],
settings["port"],
settings["uid"],
settings["pwd"])
# Get a database handle (hdbc) for subsequent access to DB2
try:
_hdbc = ibm_db.connect(dsn, "", "")
except Exception as err:
print(str(err))
_hdbc = None
return _hdbc
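# Illustrative note (added for clarity): with the default settings above,
# db_connect() builds a non-SSL DSN of the form
#
#   DRIVER={IBM DB2 ODBC DRIVER};DATABASE=sample;HOSTNAME=localhost;
#   PORT=50000;PROTOCOL=TCPIP;UID=db2inst1;PWD=...;
#
# and, when settings["security"] == "ssl", it instead appends
# SECURITY=SSL;SSLServerCertificate=<servercert>.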
def db_connect_prompt(database=None, hostname=None) -> collections.OrderedDict or None:
"""Prompt for connection settings, do not actually connect"""
global _default_settings, _prompt_label
settings = _default_settings.copy()
if database:
settings["database"] = database
if hostname:
settings["hostname"] = hostname
print("Enter the database connection details (Enter a period '.' to cancel input")
for k in settings.keys():
if k in ["servercert", "hash", "secrethash", "environment"]:
pass
elif k == "pwd":
x = getpass("Enter password: ")
if x == ".":
return None
if x:
settings[k] = x
elif k == "security":
prompt_string = "Enter 'SSL' to use an encrypted connection[" + settings[k] + "]: "
x = input(prompt_string).lower() or settings[k]
if x == ".":
return None
m = "servercert"
settings[k] = x
if x == "ssl":
y = input("Enter the name of the .ARM file containing the server certificate["
+ settings[m] + "]: ") or settings[m]
if y == ".":
return None
z = Path(y)
if z.is_file() and os.access(y, os.R_OK):
settings[m] = y
else:
print("Unable to access file", z)
return None
else:
settings[m] = ""
else:
prompt_string = "Enter the " + _prompt_label[k] + "[" + settings[k] + "]: "
x = input(prompt_string)
if x == ".":
return None
if x:
settings[k] = x.lower()
return settings
def db_connected(hdbc=None) -> bool:
""" Return state of Db2 connection"""
global _hdbc
if hdbc:
return ibm_db.active(hdbc)
if _hdbc:
return ibm_db.active(_hdbc)
return False
def db_connection() -> ibm_db.IBM_DBConnection or None:
""" Return Db2 connection handle"""
global _hdbc
return _hdbc
# noinspection PyBroadException
def db_disconnect(hdbc=None):
"""Disconnect from the database"""
if hdbc:
use_hdbc = hdbc
else:
use_hdbc = _hdbc
try:
ibm_db.close(use_hdbc)
except Exception:
db_error(False)
# noinspection PyBroadException
def db_error(quiet):
"""Handle Db2 Errors"""
global _sqlerror, _sqlcode, _sqlstate
errmsg = ibm_db.stmt_errormsg().replace("\r", " ")
errmsg = errmsg[errmsg.rfind("]") + 1:].strip()
_sqlerror = errmsg
msg_start = errmsg.find("SQLSTATE=")
if msg_start != -1:
msg_end = errmsg.find(" ", msg_start)
if msg_end == -1:
msg_end = len(errmsg)
_sqlstate = errmsg[msg_start + 9:msg_end]
else:
_sqlstate = "0"
msg_start = errmsg.find("SQLCODE=")
if msg_start != -1:
msg_end = errmsg.find(" ", msg_start)
if msg_end == -1:
msg_end = len(errmsg)
_sqlcode = errmsg[msg_start + 8:msg_end]
try:
_sqlcode = int(_sqlcode)
except Exception:
pass
else:
_sqlcode = 0
if quiet:
return
print(errmsg)
def db_keys_get(password=<PASSWORD>, prompt=True) -> collections.OrderedDict:
"""Load saved secret key"""
global _secretkeyfile, _default_secretkey
passphrase = ""
try:
with open(_secretkeyfile, "rb") as f:
secretkey = pickle.load(f)
if secretkey["locked"]:
getit = True
if password:
passphrase = password
if secretkey["hash"] == blake2b(str.encode(passphrase)).hexdigest():
print("Secret key file is locked.")
print("Using supplied password for temporary unlock.")
else:
print("Secret key file is locked.")
print("Supplied unlock password does not match secret")
elif prompt:
print("Secret key file is locked.")
print("No secret password supplied")
attempts = 0
while getit:
attempts += 1
if attempts > 9:
getit = False
passphrase = getpass("Enter password: ")
if secretkey["hash"] == blake2b(str.encode(passphrase)).hexdigest():
getit = False
k = Fernet(password_to_key(passphrase))
secretkey["secret"] = k.decrypt(str.encode(secretkey["secret"])).decode()
secretkey["locked"] = False
except FileNotFoundError:
print("Secret key file does not exist, creating new one")
secretkey = _default_secretkey.copy()
secretkey = db_keys_set(secretkey, True)
return secretkey
# noinspection PyBroadException
def db_keys_lock(passphrase) -> bool:
"""Lock secret key with a pass phrase"""
global _secretkeyfile
try:
with open(_secretkeyfile, "rb") as f:
secretkey = pickle.load(f)
if secretkey["locked"]:
print("Secret key file is already locked")
return True
if passphrase:
usepass = passphrase
else:
usepass = getpass("Enter pass phrase: ")
usepass2 = getpass("Enter pass phrase again: ")
print("")
if usepass != usepass2:
print("Pass phrase mismatch, secret key still unlocked")
return False
if usepass:
k = Fernet(password_to_key(usepass))
secretkey["secret"] = k.encrypt(str.encode(secretkey["secret"])).decode()
secretkey["locked"] = True
secretkey["hash"] = blake2b(str.encode(usepass)).hexdigest()
db_keys_set(secretkey, False)
except Exception:
print("Error locking secret key content")
return False
print("Secret key successfully locked")
return True
# noinspection PyBroadException
def db_keys_set(secretkey: collections.OrderedDict, newkey=False) -> collections.OrderedDict:
"""Save secret key with option to generate a new one"""
global _secretkeyfile
global _default_secretkey
if newkey:
secret = Fernet.generate_key() # Create new secret
secrethash = blake2b(secret).hexdigest()
        secretkey = _default_secretkey.copy()  # copy so the module-level default is not mutated
secretkey["secret"] = secret.decode()
secretkey["locked"] = False
secretkey["hash"] = None
secretkey["secrethash"] = secrethash
try:
with open(_secretkeyfile, "wb") as f:
pickle.dump(secretkey, f)
except PermissionError:
print("Failed trying to write secret key file (permissions).")
return collections.OrderedDict()
except FileNotFoundError:
print("Failed trying to write secret key file (not found).")
return collections.OrderedDict()
try:
os.chmod(_secretkeyfile, stat.S_IRUSR | stat.S_IWUSR)
except PermissionError:
print("Failed setting permissions on secret key file.")
return collections.OrderedDict()
return secretkey
# noinspection PyBroadException
def db_keys_unlock(passphrase) -> bool:
"""Unlock secret key with pass phrase"""
global _secretkeyfile
try:
with open(_secretkeyfile, "rb") as f:
secretkey = pickle.load(f)
if not secretkey["locked"]:
print("Secret key file is already unlocked")
return True
if passphrase:
usepass = passphrase
else:
usepass = getpass("Enter pass phrase: ")
print("")
if usepass:
if secretkey["hash"] == blake2b(str.encode(usepass)).hexdigest():
k = Fernet(password_to_key(usepass))
secretkey["secret"] = k.decrypt(str.encode(secretkey["secret"])).decode()
secretkey["locked"] = False
db_keys_set(secretkey, False)
else:
print("Pass phrase did not match, secret key remains locked")
return False
except Exception:
print("Error locking secret key content")
return False
print("Secret key successfully unlocked")
return True
# noinspection PyBroadException
def db_load_settings(database, hostname, environment=_default_environment,
password=None) -> collections.OrderedDict or None:
"""Load saved settings"""
global _default_settings_location
keys = db_keys_get(password)
fname = _default_settings_location / str(
environment.lower() + "_" + hostname.lower() + "_" + database.lower() + ".pickle")
try:
with open(fname, "rb") as f:
settings = pickle.load(f)
if keys:
if settings["secrethash"] == keys["secrethash"]:
k = Fernet(str.encode(keys["secret"]))
settings["pwd"] = k.decrypt(str.encode(settings["pwd"])).decode()
else:
print("Saved settings are incorrect, wrong secret key")
return None
except Exception:
return None
return settings
def db_save_settings(settings: collections.OrderedDict, password=None) -> bool:
"""Save settings"""
global _default_secretkey
use_settings = settings.copy()
keys = db_keys_get(password)
if not keys or "secret" not in keys or not keys["secret"]:
print("Setting up new secret key file")
keys = db_keys_set(_default_secretkey, True)
use_settings["secrethash"] = keys["secrethash"]
k = Fernet(str.encode(keys["secret"]))
use_settings["pwd"] = k.encrypt(str.encode(use_settings["pwd"])).decode()
fname = _default_settings_location / str(
use_settings["environment"].lower() + "_" + use_settings["hostname"].lower() + "_" + use_settings[
"database"].lower() + ".pickle")
try:
with open(fname, "wb") as f:
pickle.dump(use_settings, f)
except PermissionError:
print("Failed trying to write credentials file.")
return False
try:
os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR)
except PermissionError:
print("Failed setting permissions on credentials file.")
return False
return True
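# Hedged usage sketch (not part of the original module): the round trip between
# db_save_settings() and db_load_settings(). It writes a pickle file in the
# current directory and a secret key file in the user's home directory; the
# values come from _default_settings and are placeholders.
def _example_settings_roundtrip():
    settings = _default_settings.copy()
    if db_save_settings(settings):
        return db_load_settings(settings["database"],
                                settings["hostname"],
                                settings["environment"])
    return None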
def db_show_settings(settings: collections.OrderedDict):
"""Show current connection settings"""
if settings:
print("Credentials for", settings["database"].upper(),
"on", settings["hostname"].upper(),
"for environment", settings["environment"].upper())
for k, v in settings.items():
if k == "pwd":
if settings[k]:
print("password: [not displayed]")
else:
print("password: [no password]")
else:
print(k + ":", v)
else:
print("Settings have not been loaded")
def password_to_key(passphrase):
"""Convert passphrase to Fernet compatible key"""
password = <PASSWORD>(passphrase)
salt = b'<PASSWORD>' # Use fixed salt, don't store the result
# noinspection PyArgumentList
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=100000,
)
key = base64.urlsafe_b64encode(kdf.derive(password))
return key
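# Hedged example (added for illustration): password_to_key() returns a urlsafe
# base64 key that Fernet accepts, and the fixed salt means the same passphrase
# always yields the same key. The passphrase and payload below are placeholders.
def _example_fernet_from_passphrase(passphrase="example pass phrase"):
    f = Fernet(password_to_key(passphrase))
    token = f.encrypt(b"example payload")
    return f.decrypt(token)  # -> b"example payload"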
def table_list(schema, allow_views=False) -> []:
"""Get list of tables in schema"""
global _hdbc
temp_list = []
if allow_views:
sqlcat = """select distinct tabname
from syscat.tables
where tabschema = ?
order by tabname;"""
else:
sqlcat = """select distinct tabname
from syscat.tables
where tabschema = ?
and type = 'T'
order by tabname;"""
try:
stmtcat = ibm_db.prepare(_hdbc, sqlcat)
parameters = (str(schema.upper()),)
if ibm_db.execute(stmtcat, parameters):
cat_row = ibm_db.fetch_assoc(stmtcat)
while cat_row:
# export_table(str(schema.upper()), cat_row["TABNAME"])
temp_list.append(cat_row["TABNAME"])
cat_row = ibm_db.fetch_assoc(stmtcat)
ibm_db.free_stmt(stmtcat)
except Exception as err:
print(err)
db_error(False)
return None
return temp_list
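# Hedged usage sketch (not part of the original module): listing tables once a
# connection is open. The database, host and schema names are placeholders.
def _example_list_tables():
    settings = db_load_settings("sample", "localhost")
    if settings and db_connect(settings):
        try:
            return table_list("db2inst1")
        finally:
            db_disconnect()
    return []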
```
#### File: src/db_commands/db_commands.py
```python
import sys
import click
from db2_helpers import db_connect, db_connect_prompt, db_connected, db_disconnect, \
db_load_settings, db_save_settings, db_show_settings, db_keys_lock, db_keys_unlock
# --------------------------------------------------
# Defaults for this program
# --------------------------------------------------
_default_schema = "db2inst1"
_default_file_location = "./db"
_default_folder_mask = 0o775
# --------------------------------------------------
# Main Function
# --------------------------------------------------
@click.command()
@click.option("--action", type=click.Choice(["verify", "lock", "unlock", "reset"], case_sensitive=False),
help="Credentials action", default="verify", show_default=True)
@click.option("--database", "-D", help="Database Name", default="sample",
envvar="DB_DATABASE", show_default=True)
@click.option("--hostname", "--host", "-H", help="Database Host Name", default="localhost",
envvar="DB_HOSTNAME", show_default=True)
@click.option("--environment", "-E", help="Environment (dev/test/prod)", default="dev",
envvar="DB_ENVIRONMENT", show_default=True)
@click.option("--password", "--pwd", "-P", help="Pass phrase for secret key (not database)", default=None)
@click.option("--show/--no-show", "-S", help="Show credentials", is_flag=True, default=False, show_default=True)
def db_credentials(action, database, hostname, environment, password, show):
"""Connect to Db2 and save credentials
Connection to Db2 is verified from prompted settings and saved.
To protect passwords, a secret key is generated and stored in a file
in the user's home directory. The secret key in this file can itself
be encrypted/decrypted using the lock/unlock actions with a password.
"""
use_database = database.lower()
use_hostname = hostname.lower()
use_environment = environment.lower()
if action == "lock":
db_keys_lock(password)
elif action == "unlock":
db_keys_unlock(password)
elif action == "reset":
print("Reset requested, enter new credentials")
settings = db_connect_prompt(use_database, use_hostname)
settings["environment"] = use_environment
db_connect(settings)
if db_connected():
print("Connection successful with new credentials")
if db_save_settings(settings, password):
print("Credentials have been saved")
else:
print("Credentials have not been saved")
if show:
db_show_settings(settings)
elif action == "verify":
settings = db_load_settings(use_database, use_hostname, use_environment, password)
if settings:
db_connect(settings)
if db_connected():
print("Connection credentials are correct")
doit = False
else:
print("Current credentials are incorrect, enter new credentials")
doit = True
else:
print("No saved credentials for", use_database.upper(),
"on", use_hostname.upper(),
"for environment", use_environment.upper())
print("Enter credentials")
settings = db_connect_prompt(use_database, use_hostname)
if settings:
doit = True
else:
print("Connection attempt cancelled at user request")
sys.exit(1)
if doit:
db_connect(settings)
if db_connected():
print("Connection successful with new credentials")
if db_save_settings(settings, password):
print("Credentials have been saved")
else:
print("Credentials have not been saved")
if show:
db_show_settings(settings)
# --------------------------------------------------
# Clean up
# --------------------------------------------------
db_disconnect()
else:
print("Unexpected action")
sys.exit(1)
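# Illustrative invocations (added for clarity; they assume the command is
# installed as a console script named db_credentials):
#
#   db_credentials --action verify -D sample -H localhost -E dev
#   db_credentials --action lock -P "my pass phrase"
#   db_credentials --action reset --show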
``` |
{
"source": "jma/flask-wiki-old",
"score": 3
} |
#### File: flask-wiki-old/flask_wiki/__init__.py
```python
from flask import current_app
import os
from .views import blueprint
from werkzeug.middleware.shared_data import SharedDataMiddleware
from . import config
class Wiki(object):
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
"""Flask application initialization."""
self.init_config(app)
app.register_blueprint(
blueprint,
url_prefix=app.config.get('WIKI_URL_PREFIX')
)
app.add_url_rule(
            app.config.get('WIKI_URL_PREFIX') + '/files/<filename>',
'uploaded_files', build_only=True)
app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {app.config.get(
'WIKI_URL_PREFIX') + '/files': app.config['WIKI_UPLOAD_FOLDER']})
app.extensions['flask-wiki'] = self
def init_config(self, app):
"""Initialize configuration."""
for k in dir(config):
if k.startswith('WIKI_'):
app.config.setdefault(k, getattr(config, k))
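# Hedged usage sketch (standard Flask extension pattern, not in the original
# file): the extension can be attached at construction time or later.
#
#   from flask import Flask
#   app = Flask(__name__)
#   Wiki(app)          # or: wiki = Wiki(); wiki.init_app(app)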
```
#### File: flask-wiki-old/flask_wiki/views.py
```python
import glob
import os
from functools import wraps
from babel import Locale
from flask import (Blueprint, abort, current_app, flash, jsonify, redirect,
render_template, request, url_for)
from flask.wrappers import Response
from flask_babelex import gettext as _
from werkzeug.utils import secure_filename
from .api import Processor, current_wiki, get_wiki
from .forms import EditorForm
blueprint = Blueprint(
'wiki',
__name__,
template_folder='templates',
static_folder='static'
)
# PERMISSIONS
# ===========
def can_read_permission(func):
"""Check Reading Permission."""
@wraps(func)
def decorated_view(*args, **kwargs):
permission = current_app.config.get('WIKI_READ_VIEW_PERMISSION')()
if isinstance(permission, bool):
if not permission:
abort(403)
return func(*args, **kwargs)
return permission
return decorated_view
def can_edit_permission(func):
"""Check Edition Permission."""
@wraps(func)
def decorated_view(*args, **kwargs):
permission = current_app.config.get('WIKI_EDIT_VIEW_PERMISSION')()
if isinstance(permission, bool):
if not permission:
abort(403)
return func(*args, **kwargs)
return permission
return decorated_view
# FILTERS
# =======
@blueprint.app_template_filter()
def prune_url(path):
return path.replace(
current_app.config.get('WIKI_URL_PREFIX'),
'').strip('/')
@blueprint.app_template_filter()
def translate_ln(ln):
return Locale(current_wiki.current_language).languages.get(ln)
@blueprint.app_template_filter()
def edit_path_list(path):
ln = path.split('_')[-1]
base_path = path
if ln in current_wiki.languages:
base_path = path.rsplit('_', 1)[0]
return list(
filter(
lambda v: v['path'] != path,
[dict(ln=ln, path='_'.join((base_path, ln)))
for ln in current_wiki.languages]))
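# Illustrative example (not in the original source): with
# current_wiki.languages == ['en', 'fr'], edit_path_list('home_en') returns
# [{'ln': 'fr', 'path': 'home_fr'}], i.e. the edit targets for the other
# translations of the page.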
# PROCESSORS
# ==========
@blueprint.context_processor
def permission_processor():
return dict(
can_edit_wiki=current_app.config.get('WIKI_EDIT_UI_PERMISSION')(),
can_read_wiki= current_app.config.get('WIKI_READ_UI_PERMISSION')()
)
# MISCS
# =====
@blueprint.before_request
def setWiki():
get_wiki()
def allowed_file(filename):
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
# ROUTES
# ======
@blueprint.route('/')
@can_read_permission
def index():
return redirect(url_for('wiki.page', url=current_app.config.get('WIKI_HOME')))
@blueprint.route('/<path:url>/')
@can_read_permission
def page(url):
page = current_wiki.get_or_404(url)
return render_template(
current_app.config.get('WIKI_PAGE_TEMPLATE'),
page=page)
@blueprint.route('/edit/<path:url>/', methods=['GET', 'POST'])
@can_edit_permission
def edit(url):
page = current_wiki.get(url)
form = EditorForm(obj=page)
if form.validate_on_submit():
if not page:
page = current_wiki.get_bare(url)
form.populate_obj(page)
page.save()
flash(_('Saved'), category='success')
return redirect(url_for('wiki.page', url=url))
return render_template(
current_app.config.get('WIKI_EDITOR_TEMPLATE'),
form=form, page=page, path=url)
@blueprint.route('/preview/', methods=['POST'])
@can_edit_permission
def preview():
data = {}
processor = Processor(request.form['body'])
data['html'], data['body'], data['meta'], data['toc'] = processor.process()
return data['html']
@blueprint.route('/files', methods=['GET', 'POST'])
@can_read_permission
def files():
if request.method == 'POST' and current_app.config['WIKI_EDIT_UI_PERMISSION']():
# check if the post request has the file part
if 'file' not in request.files:
flash(_('No file part'))
return redirect(request.url)
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
flash(_('No selected file'))
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
output_filename = os.path.join(
current_app.config['WIKI_UPLOAD_FOLDER'], filename)
if os.path.isfile(output_filename):
flash(_('File already exists'), category='danger')
else:
file.save(output_filename)
if request.method == 'POST' and not current_app.config['WIKI_EDIT_UI_PERMISSION']():
flash(_('You do not have the permission to add files.'))
files = [os.path.basename(f) for f in sorted(glob.glob(
'/'.join([current_app.config.get('WIKI_UPLOAD_FOLDER'), '*'])), key=os.path.getmtime)]
return render_template(
current_app.config.get('WIKI_FILES_TEMPLATE'),
files=files)
@blueprint.route('/search', methods=['GET'])
@can_read_permission
def search():
query = request.args.get('q', '')
results = current_wiki.search(query)
return render_template(
current_app.config.get('WIKI_SEARCH_TEMPLATE'),
results=results, query=query)
@blueprint.errorhandler(404)
def not_found(error):
return render_template(
current_app.config.get('WIKI_NOT_FOUND_TEMPLATE')), 404
@blueprint.errorhandler(403)
def forbidden(error):
return render_template(
current_app.config.get('WIKI_FORBIDDEN_TEMPLATE')), 403
``` |
{
"source": "JMagers/chanpy",
"score": 2
} |
#### File: chanpy/tests/test_channel.py
```python
import asyncio
import threading
import time
import unittest
import chanpy as c
from chanpy import _buffers, chan, transducers as xf
from chanpy._channel import Promise, create_flag, FlagHandler
def b_list(ch):
return list(ch.to_iter())
async def a_list(ch):
return await c.to_list(ch).get()
class TestAsync(unittest.TestCase):
def test_thread_put_to_async_get_without_wait(self):
def putter(ch):
ch.b_put('success')
async def main():
ch = chan()
threading.Thread(target=putter, args=[ch]).start()
return await ch.get()
self.assertEqual(asyncio.run(main()), 'success')
def test_thread_get_to_async_put_after_wait(self):
result = None
def getter(ch):
nonlocal result
result = ch.b_get()
async def main():
ch = chan()
getter_thread = threading.Thread(target=getter, args=[ch])
getter_thread.start()
self.assertIs(await ch.put('success'), True)
getter_thread.join()
self.assertEqual(result, 'success')
asyncio.run(main())
def test_async_only_transfer(self):
async def getter(ch):
return await ch.get()
async def main():
ch = chan()
get_ch = c.go(getter(ch))
self.assertIs(await ch.put('success'), True)
self.assertEqual(await get_ch.get(), 'success')
asyncio.run(main())
def test_go_from_different_thread(self):
def getter_thread(ch):
async def getter():
return await ch.get()
return c.go(getter()).b_get()
async def main():
ch = chan()
thread_result_ch = c.thread(lambda: getter_thread(ch))
self.assertIs(await ch.put('success'), True)
self.assertEqual(await thread_result_ch.get(), 'success')
asyncio.run(main())
def test_go_coroutine_never_awaited(self):
""" Test that no 'coroutine was not awaited' warning is raised
The warning could be raised if the coroutine was added to the loop
indirectly.
Example:
# If 'go' used a wrapper coroutine around 'coro' then 'coro' may
# never be added to the loop. This is because there is no guarantee
# that the wrapper coroutine will ever run and thus call await on
# 'coro'.
#
# The following 'go' implementation would fail if wrapper never
# ends up running:
def go(coro):
ch = chan(1)
async def wrapper():
ret = await coro # I may never run
if ret is not None:
await ch.put(ret)
ch.close()
asyncio.run_coroutine_threadsafe(wrapper(), get_loop())
"""
def thread():
async def coro():
pass
c.go(coro())
async def main():
c.thread(thread).b_get()
# Assert does NOT warn
with self.assertRaises(AssertionError):
with self.assertWarns(RuntimeWarning):
asyncio.run(main())
def test_alt_get_no_wait(self):
get_ch, put_ch = chan(), chan()
async def putter():
await get_ch.put('success')
async def main():
c.go(putter())
await asyncio.sleep(0.1)
return await c.alt([put_ch, 'noSend'], get_ch, priority=True)
self.assertEqual(asyncio.run(main()), ('success', get_ch))
def test_alt_put_after_wait(self):
get_ch, put_ch = chan(), chan()
async def putter():
await asyncio.sleep(0.1)
await put_ch.get()
async def main():
c.go(putter())
return await c.alt([put_ch, 'success'], get_ch, priority=True)
self.assertEqual(asyncio.run(main()), (True, put_ch))
def test_alt_timeout(self):
async def main():
start_time = time.time()
timeout_ch = c.timeout(100)
self.assertEqual(await c.alt(chan(), timeout_ch),
(None, timeout_ch))
elapsed_secs = time.time() - start_time
self.assertIs(0.05 < elapsed_secs < 0.15, True)
asyncio.run(main())
def test_alt_default_when_available(self):
async def main():
ch = chan(1)
await ch.put('success')
self.assertEqual(await c.alt(ch, default='ignore me'),
('success', ch))
asyncio.run(main())
def test_alt_default_when_unavailable(self):
async def main():
ch = chan()
self.assertEqual(await c.alt(ch, default='success'),
('success', 'default'))
asyncio.run(main())
def test_successful_cancel_get(self):
async def main():
ch = chan()
get_future = ch.get()
self.assertIs(get_future.cancelled(), False)
self.assertIs(get_future.cancel(), True)
self.assertIs(get_future.cancelled(), True)
self.assertIs(ch.offer('reject me'), False)
asyncio.run(main())
def test_successful_cancel_put(self):
async def main():
ch = chan()
put_future = ch.put('cancel me')
self.assertIs(put_future.cancelled(), False)
self.assertIs(put_future.cancel(), True)
self.assertIs(put_future.cancelled(), True)
self.assertIsNone(ch.poll())
asyncio.run(main())
def test_successful_cancel_alt(self):
async def main():
ch = chan()
alt_future = c.alt(ch, priority=True)
self.assertIs(alt_future.cancelled(), False)
self.assertIs(alt_future.cancel(), True)
self.assertIs(alt_future.cancelled(), True)
self.assertIs(ch.offer('reject me'), False)
asyncio.run(main())
def test_unsuccessful_cancel_get(self):
async def main():
ch = chan()
get_future = ch.get()
self.assertIs(await ch.put('success'), True)
# cancel() will end up calling set_result() since
# set_result_threadsafe() callback won't have been called yet
self.assertIs(get_future.cancel(), False)
self.assertEqual(get_future.result(), 'success')
asyncio.run(main())
def test_unsuccessful_cancel_put(self):
async def main():
ch = chan()
put_future = ch.put('val')
self.assertEqual(await ch.get(), 'val')
# cancel() will end up calling set_result() since
# set_result_threadsafe() callback won't have been called yet
self.assertIs(put_future.cancel(), False)
self.assertIs(put_future.result(), True)
asyncio.run(main())
def test_unsuccessful_cancel_alt(self):
async def main():
success_ch, fail_ch = chan(), chan()
alt_future = c.alt(fail_ch, success_ch)
self.assertIs(await success_ch.put('success'), True)
# cancel() will end up calling set_result() since
# set_result_threadsafe() callback won't have been called yet
self.assertIs(alt_future.cancel(), False)
self.assertEqual(alt_future.result(), ('success', success_ch))
asyncio.run(main())
class AbstractTestBufferedBlocking:
def test_unsuccessful_blocking_put_none(self):
with self.assertRaises(TypeError):
self.chan(1).b_put(None)
def test_successful_blocking_get(self):
ch = self.chan(1)
threading.Thread(target=ch.b_put, args=['success']).start()
self.assertEqual(ch.b_get(), 'success')
def test_successful_blocking_put(self):
self.assertIs(self.chan(1).b_put('success'), True)
def test_blocking_get_closed_empty_buffer(self):
ch = self.chan(1)
ch.close()
self.assertIsNone(ch.b_get())
def test_blocking_get_closed_full_buffer(self):
ch = self.chan(1)
ch.b_put('success')
ch.close()
self.assertEqual(ch.b_get(), 'success')
def test_blocking_put_closed_empty_buffer(self):
ch = self.chan(1)
ch.close()
self.assertIs(ch.b_put('failure'), False)
def test_blocking_put_closed_full_buffer(self):
ch = self.chan(1)
ch.b_put('fill buffer')
ch.close()
self.assertIs(ch.b_put('failure'), False)
def test_close_while_blocking_get(self):
ch = self.chan(1)
def thread():
time.sleep(0.1)
ch.close()
threading.Thread(target=thread).start()
self.assertIsNone(ch.b_get())
def test_close_while_blocking_put(self):
ch = self.chan(1)
ch.b_put('fill buffer')
def thread():
time.sleep(0.1)
ch.close()
ch.b_get()
threading.Thread(target=thread).start()
self.assertIs(ch.b_put('success'), True)
self.assertEqual(ch.b_get(), 'success')
self.assertIsNone(ch.b_get())
def test_iter(self):
ch = self.chan(2)
ch.b_put('one')
ch.b_put('two')
ch.close()
self.assertEqual(b_list(ch), ['one', 'two'])
class TestBufferedBlockingChan(unittest.TestCase,
AbstractTestBufferedBlocking):
@staticmethod
def chan(n):
return c.chan(c.buffer(n))
class AbstractTestXform:
def test_xform_map(self):
async def main():
ch = self.chan(1, xf.map(lambda x: x + 1))
c.onto_chan(ch, [0, 1, 2])
self.assertEqual(await a_list(ch), [1, 2, 3])
asyncio.run(main())
def test_xform_filter(self):
async def main():
ch = self.chan(1, xf.filter(lambda x: x % 2 == 0))
c.onto_chan(ch, [0, 1, 2])
self.assertEqual(await a_list(ch), [0, 2])
asyncio.run(main())
def test_xform_early_termination(self):
async def main():
ch = self.chan(1, xf.take(2))
c.onto_chan(ch, [1, 2, 3, 4])
self.assertEqual(await a_list(ch), [1, 2])
asyncio.run(main())
def test_xform_early_termination_works_after_close(self):
async def main():
ch = self.chan(1, xf.take_while(lambda x: x != 2))
for i in range(4):
ch.f_put(i)
ch.close()
self.assertEqual(await a_list(ch), [0, 1])
self.assertEqual(len(ch._puts), 0)
asyncio.run(main())
def test_xform_successful_overfilled_buffer(self):
ch = self.chan(1, xf.cat)
ch.b_put([1, 2, 3])
ch.close()
self.assertEqual(b_list(ch), [1, 2, 3])
def test_xform_unsuccessful_offer_overfilled_buffer(self):
ch = self.chan(1, xf.cat)
ch.b_put([1, 2])
self.assertIs(ch.offer([1]), False)
def test_unsuccessful_transformation_to_none(self):
ch = self.chan(1, xf.map(lambda _: None))
with self.assertRaises(AssertionError):
ch.b_put('failure')
def test_close_flushes_xform_buffer(self):
ch = self.chan(3, xf.partition_all(2))
for i in range(3):
ch.b_put(i)
ch.close()
self.assertEqual(b_list(ch), [(0, 1), (2,)])
def test_close_does_not_flush_xform_with_pending_puts(self):
ch = self.chan(1, xf.partition_all(2))
for i in range(3):
ch.f_put(i)
ch.close()
self.assertEqual(b_list(ch), [(0, 1), (2,)])
def test_xform_ex_handler_non_none_return(self):
def handler(e):
if isinstance(e, ZeroDivisionError):
return 'zero'
ch = self.chan(3, xf.map(lambda x: 12 // x), handler)
ch.b_put(-1)
ch.b_put(0)
ch.b_put(2)
ch.close()
self.assertEqual(b_list(ch), [-12, 'zero', 6])
def test_xform_ex_handler_none_return(self):
ch = self.chan(3, xf.map(lambda x: 12 // x), lambda _: None)
ch.b_put(-1)
ch.b_put(0)
ch.b_put(2)
ch.close()
self.assertEqual(b_list(ch), [-12, 6])
class TestXformBufferedChan(unittest.TestCase, AbstractTestXform):
@staticmethod
def chan(n, xform, ex_handler=None):
return c.chan(c.buffer(n), xform, ex_handler)
class AbstractTestBufferedNonblocking:
def test_unsuccessful_offer_none(self):
with self.assertRaises(TypeError):
self.chan(1).offer(None)
def test_successful_poll(self):
ch = self.chan(1)
threading.Thread(target=ch.b_put, args=['success']).start()
time.sleep(0.1)
self.assertEqual(ch.poll(), 'success')
def test_successful_offer(self):
ch = self.chan(1)
def thread():
time.sleep(0.1)
ch.offer('success')
threading.Thread(target=thread).start()
self.assertEqual(ch.b_get(), 'success')
def test_unsuccessful_poll(self):
self.assertIsNone(self.chan(1).poll())
    def test_unsuccessful_offer(self):
ch = self.chan(1)
ch.b_put('fill buffer')
self.assertIs(ch.offer('failure'), False)
def test_poll_closed_empty_buffer(self):
ch = self.chan(1)
ch.close()
self.assertIsNone(ch.poll())
def test_poll_closed_full_buffer(self):
ch = self.chan(1)
ch.b_put('success')
ch.close()
self.assertEqual(ch.poll(), 'success')
def test_offer_closed_empty_buffer(self):
ch = self.chan(1)
ch.close()
self.assertIs(ch.offer('failure'), False)
    def test_offer_closed_full_buffer(self):
ch = self.chan(1)
ch.b_put('fill buffer')
ch.close()
self.assertIs(ch.offer('failure'), False)
class TestBufferedNonBlockingChan(unittest.TestCase,
AbstractTestBufferedNonblocking):
@staticmethod
def chan(n):
return chan(c.buffer(n))
class TestChan(unittest.TestCase):
def test_ValueError_nonpositive_buffer(self):
with self.assertRaises(ValueError):
chan(0)
class AbstractTestUnbufferedBlocking:
def test_unsuccessful_blocking_put_none(self):
with self.assertRaises(TypeError):
self.chan().b_put(None)
def test_blocking_get_first(self):
ch = self.chan()
def thread():
time.sleep(0.1)
ch.b_put('success')
threading.Thread(target=thread).start()
self.assertEqual(ch.b_get(), 'success')
def test_blocking_put_first(self):
ch = self.chan()
def thread():
time.sleep(0.1)
ch.b_get()
threading.Thread(target=thread).start()
self.assertIs(ch.b_put('success'), True)
def test_put_blocks_until_get(self):
status = 'failure'
ch = self.chan()
def thread():
nonlocal status
time.sleep(0.1)
status = 'success'
ch.b_get()
threading.Thread(target=thread).start()
ch.b_put(1)
self.assertEqual(status, 'success')
def test_blocking_get_after_close(self):
ch = self.chan()
ch.close()
self.assertIsNone(ch.b_get())
def test_blocking_put_after_close(self):
ch = self.chan()
ch.close()
self.assertIs(ch.b_put('failure'), False)
def test_close_while_blocking_get(self):
ch = self.chan()
def thread():
time.sleep(0.1)
ch.close()
threading.Thread(target=thread).start()
self.assertIsNone(ch.b_get())
def test_close_while_blocking_put(self):
ch = self.chan()
def thread():
time.sleep(0.1)
ch.close()
ch.b_get()
threading.Thread(target=thread).start()
self.assertIs(ch.b_put('success'), True)
self.assertIsNone(ch.b_get())
def test_iter(self):
ch = self.chan()
ch.f_put('one')
ch.f_put('two')
ch.close()
self.assertEqual(b_list(ch), ['one', 'two'])
def test_xform_exception(self):
with self.assertRaises(TypeError):
self.chan(None, xf.cat)
def test_ex_handler_exception(self):
with self.assertRaises(TypeError):
self.chan(ex_handler=xf.identity)
class TestUnbufferedBlockingChan(unittest.TestCase,
AbstractTestUnbufferedBlocking):
@staticmethod
def chan():
return chan()
class AbstractTestUnbufferedNonblocking:
def test_unsuccessful_offer_none(self):
with self.assertRaises(TypeError):
self.chan().offer(None)
def test_successful_poll(self):
ch = self.chan()
threading.Thread(target=ch.b_put, args=['success']).start()
time.sleep(0.1)
self.assertEqual(ch.poll(), 'success')
def test_successful_offer(self):
ch = self.chan()
def thread():
time.sleep(0.1)
ch.offer('success')
threading.Thread(target=thread).start()
self.assertEqual(ch.b_get(), 'success')
def test_unsuccessful_poll(self):
self.assertIsNone(self.chan().poll())
def test_unsuccessful_offer(self):
self.assertIs(self.chan().offer('failure'), False)
def test_poll_after_close(self):
ch = self.chan()
ch.close()
self.assertIsNone(ch.poll())
def test_offer_after_close(self):
ch = self.chan()
ch.close()
self.assertIs(ch.offer('failure'), False)
class TestUnbufferedNonblockingChan(unittest.TestCase,
AbstractTestUnbufferedNonblocking):
@staticmethod
def chan():
return chan()
class TestPromiseChan(unittest.TestCase):
def test_multiple_gets(self):
ch = c.promise_chan()
self.assertIs(ch.b_put('success'), True)
self.assertEqual(ch.b_get(), 'success')
self.assertEqual(ch.b_get(), 'success')
def test_multiple_puts(self):
ch = c.promise_chan()
self.assertIs(ch.b_put('success'), True)
self.assertIs(ch.b_put('drop me'), True)
def test_after_close(self):
ch = c.promise_chan()
ch.b_put('success')
ch.close()
self.assertIs(ch.b_put('failure'), False)
self.assertIs(ch.b_put('failure'), False)
self.assertEqual(ch.b_get(), 'success')
self.assertEqual(ch.b_get(), 'success')
def test_xform_filter(self):
ch = c.promise_chan(xf.filter(lambda x: x > 0))
self.assertIs(ch.b_put(-1), True)
self.assertIs(ch.b_put(1), True)
self.assertIs(ch.b_put(2), True)
self.assertEqual(ch.b_get(), 1)
self.assertEqual(ch.b_get(), 1)
def test_xform_complete_flush(self):
ch = c.promise_chan(xf.partition_all(3))
self.assertIs(ch.b_put(1), True)
self.assertIs(ch.b_put(2), True)
self.assertIsNone(ch.poll())
ch.close()
self.assertEqual(ch.b_get(), (1, 2))
self.assertEqual(ch.b_get(), (1, 2))
self.assertIs(ch.b_put('drop me'), False)
def test_xform_with_reduced_return(self):
ch = c.promise_chan(xf.take(1))
self.assertIs(ch.b_put('success'), True)
self.assertIs(ch.b_put('failure'), False)
self.assertEqual(ch.b_get(), 'success')
self.assertEqual(ch.b_get(), 'success')
class AbstractTestAlt:
def _confirm_chs_not_closed(self, *chs):
for ch in chs:
ch.f_put('notClosed')
self.assertEqual(ch.b_get(), 'notClosed')
def test_no_operations(self):
with self.assertRaises(ValueError):
c.b_alt()
def test_single_successful_get_on_initial_request(self):
ch = self.chan()
ch.f_put('success')
ch.f_put('notClosed')
self.assertEqual(c.b_alt(ch), ('success', ch))
self.assertEqual(ch.b_get(), 'notClosed')
def test_single_successful_get_on_wait(self):
ch = self.chan()
def thread():
time.sleep(0.1)
ch.f_put('success')
ch.f_put('notClosed')
threading.Thread(target=thread).start()
self.assertEqual(c.b_alt(ch), ('success', ch))
self.assertEqual(ch.b_get(), 'notClosed')
def test_single_successful_put_on_initial_request(self):
ch = self.chan()
def thread():
time.sleep(0.1)
ch.b_put(c.b_alt([ch, 'success']))
threading.Thread(target=thread).start()
self.assertEqual(ch.b_get(), 'success')
self.assertEqual(ch.b_get(), (True, ch))
def test_get_put_same_channel(self):
ch = self.chan()
with self.assertRaises(ValueError):
c.b_alt(ch, [ch, 'success'], priority=True)
class AbstractTestUnbufferedAlt(AbstractTestAlt):
def test_single_successful_put_on_wait(self):
ch = self.chan()
def thread():
ch.b_put(c.b_alt([ch, 'success']))
threading.Thread(target=thread).start()
time.sleep(0.1)
self.assertEqual(ch.b_get(), 'success')
self.assertEqual(ch.b_get(), (True, ch))
def test_multiple_successful_get_on_initial_request(self):
successGetCh = self.chan()
cancelGetCh = self.chan()
cancelPutCh = self.chan()
successGetCh.f_put('success')
time.sleep(0.1)
self.assertEqual(c.b_alt(cancelGetCh,
successGetCh,
[cancelPutCh, 'noSend'],
priority=True),
('success', successGetCh))
self._confirm_chs_not_closed(successGetCh, cancelGetCh, cancelPutCh)
def test_multiple_successful_get_on_wait(self):
successGetCh = self.chan()
cancelGetCh = self.chan()
cancelPutCh = self.chan()
def thread():
time.sleep(0.1)
successGetCh.b_put('success')
threading.Thread(target=thread).start()
self.assertEqual(c.b_alt(cancelGetCh,
successGetCh,
[cancelPutCh, 'noSend'],
priority=True),
('success', successGetCh))
self._confirm_chs_not_closed(successGetCh, cancelGetCh, cancelPutCh)
    def test_multiple_successful_put_on_initial_request(self):
successPutCh = self.chan()
cancelGetCh = self.chan()
cancelPutCh = self.chan()
def thread():
time.sleep(0.1)
successPutCh.b_put(c.b_alt(cancelGetCh,
[successPutCh, 'success'],
[cancelPutCh, 'noSend'],
priority=True))
threading.Thread(target=thread).start()
self.assertEqual(successPutCh.b_get(), 'success')
self.assertEqual(successPutCh.b_get(), (True, successPutCh))
self._confirm_chs_not_closed(cancelGetCh, successPutCh, cancelPutCh)
def test_multiple_successful_put_on_wait(self):
successPutCh = self.chan()
cancelGetCh = self.chan()
cancelPutCh = self.chan()
def thread():
successPutCh.b_put(c.b_alt(cancelGetCh,
[successPutCh, 'success'],
[cancelPutCh, 'noSend'],
priority=True))
threading.Thread(target=thread).start()
time.sleep(0.1)
self.assertEqual(successPutCh.b_get(), 'success')
self.assertEqual(successPutCh.b_get(), (True, successPutCh))
self._confirm_chs_not_closed(cancelGetCh, successPutCh, cancelPutCh)
def test_close_before_get(self):
closedGetCh = self.chan()
cancelPutCh = self.chan()
cancelGetCh = self.chan()
closedGetCh.close()
self.assertEqual(c.b_alt([cancelPutCh, 'noSend'],
closedGetCh,
cancelGetCh,
priority=True),
(None, closedGetCh))
self.assertIsNone(closedGetCh.b_get())
self._confirm_chs_not_closed(cancelPutCh, cancelGetCh)
def test_close_before_put(self):
closedPutCh = self.chan()
cancelPutCh = self.chan()
cancelGetCh = self.chan()
closedPutCh.close()
self.assertEqual(c.b_alt(cancelGetCh,
[closedPutCh, 'noSend'],
[cancelPutCh, 'noSend'],
priority=True),
(False, closedPutCh))
self.assertIsNone(closedPutCh.b_get())
self._confirm_chs_not_closed(cancelPutCh, cancelGetCh)
def test_close_while_waiting_get(self):
closeGetCh = self.chan()
cancelGetCh = self.chan()
cancelPutCh = self.chan()
def thread():
time.sleep(0.1)
closeGetCh.close()
threading.Thread(target=thread).start()
self.assertEqual(c.b_alt(cancelGetCh,
closeGetCh,
[cancelPutCh, 'noSend'],
priority=True),
(None, closeGetCh))
self.assertIsNone(closeGetCh.b_get())
self._confirm_chs_not_closed(cancelPutCh, cancelGetCh)
def test_close_while_waiting_put(self):
closePutCh = self.chan()
cancelGetCh = self.chan()
cancelPutCh = self.chan()
def thread():
time.sleep(0.1)
closePutCh.close()
closePutCh.b_get()
threading.Thread(target=thread).start()
self.assertEqual(c.b_alt(cancelGetCh,
[closePutCh, 'success'],
[cancelPutCh, 'noSend'],
priority=True),
(True, closePutCh))
self.assertIsNone(closePutCh.b_get())
self._confirm_chs_not_closed(cancelPutCh, cancelGetCh)
def test_double_b_alt_successful_transfer(self):
ch = self.chan()
def thread():
ch.b_put(c.b_alt([ch, 'success']))
threading.Thread(target=thread).start()
self.assertEqual(c.b_alt(ch), ('success', ch))
self.assertEqual(ch.b_get(), (True, ch))
def test_taker_not_removed_from_queue_when_put_handler_inactive(self):
ch = self.chan()
get_result = None
def set_result(result):
nonlocal get_result
get_result = result
# Enqueue taker
ch.f_get(set_result)
# Put to channel with inactive handler
flag = create_flag()
flag['is_active'] = False
handler = FlagHandler(flag, lambda _: None)
# ch._p_put() must return None so alt() knows this operation remains uncommitted
self.assertIs(ch._p_put(handler, 'do not commit'), None)
# Send to taker
self.assertIs(ch.offer('success'), True)
self.assertEqual(get_result, 'success')
def test_putter_not_removed_from_queue_when_get_handler_inactive(self):
ch = self.chan()
put_result = None
def set_result(result):
nonlocal put_result
put_result = result
# Enqueue putter
ch.f_put('success', set_result)
# Get from channel with inactive handler
flag = create_flag()
flag['is_active'] = False
handler = FlagHandler(flag, lambda _: None)
# ch._p_get() must return None so alt() knows this operation remains uncommitted
self.assertIs(ch._p_get(handler), None)
# Get from putter
self.assertEqual(ch.poll(), 'success')
self.assertIs(put_result, True)
class AbstractTestBufferedAlt(AbstractTestAlt):
def test_single_successful_put_on_wait(self):
ch = self.chan(1)
ch.b_put('fill buffer')
def thread():
ch.b_put(c.b_alt([ch, 'success']))
threading.Thread(target=thread).start()
time.sleep(0.1)
self.assertEqual(ch.b_get(), 'fill buffer')
self.assertEqual(ch.b_get(), 'success')
self.assertEqual(ch.b_get(), (True, ch))
def test_multiple_successful_get_on_initial_request(self):
successGetCh = self.chan(1)
successGetCh.b_put('success')
cancelGetCh = self.chan(1)
cancelPutCh = self.chan(1)
cancelPutCh.b_put('fill buffer')
self.assertEqual(c.b_alt(cancelGetCh,
successGetCh,
[cancelPutCh, 'noSend'],
priority=True),
('success', successGetCh))
def test_multiple_successful_get_on_wait(self):
successGetCh = self.chan(1)
cancelGetCh = self.chan(1)
cancelPutCh = self.chan(1)
cancelPutCh.b_put('fill buffer')
def thread():
time.sleep(0.1)
successGetCh.b_put('success')
threading.Thread(target=thread).start()
self.assertEqual(c.b_alt(cancelGetCh,
successGetCh,
[cancelPutCh, 'noSend'],
priority=True),
('success', successGetCh))
    def test_multiple_successful_put_on_initial_request(self):
successPutCh = self.chan(1)
cancelGetCh = self.chan(1)
cancelPutCh = self.chan(1)
cancelPutCh.b_put('fill buffer')
altValue = c.b_alt(cancelGetCh,
[cancelPutCh, 'noSend'],
[successPutCh, 'success'],
priority=True)
self.assertEqual(altValue, (True, successPutCh))
self.assertEqual(successPutCh.b_get(), 'success')
def test_multiple_successful_put_on_wait(self):
successPutCh = self.chan(1)
successPutCh.b_put('fill buffer')
cancelGetCh = self.chan(1)
cancelPutCh = self.chan(1)
cancelPutCh.b_put('fill buffer')
def thread():
successPutCh.b_put(c.b_alt(cancelGetCh,
[successPutCh, 'success'],
[cancelPutCh, 'noSend'],
priority=True))
threading.Thread(target=thread).start()
time.sleep(0.1)
self.assertEqual(successPutCh.b_get(), 'fill buffer')
self.assertEqual(successPutCh.b_get(), 'success')
self.assertEqual(successPutCh.b_get(), (True, successPutCh))
def test_close_before_get(self):
closedGetCh = self.chan(1)
cancelPutCh = self.chan(1)
cancelPutCh.b_put('fill buffer')
cancelGetCh = self.chan(1)
closedGetCh.close()
self.assertEqual(c.b_alt([cancelPutCh, 'noSend'],
closedGetCh,
cancelGetCh,
priority=True),
(None, closedGetCh))
self.assertIsNone(closedGetCh.b_get())
def test_close_before_put(self):
closedPutCh = self.chan(1)
cancelPutCh = self.chan(1)
cancelPutCh.b_put('fill buffer')
cancelGetCh = self.chan(1)
closedPutCh.close()
self.assertEqual(c.b_alt(cancelGetCh,
[closedPutCh, 'noSend'],
[cancelPutCh, 'noSend'],
priority=True),
(False, closedPutCh))
self.assertIsNone(closedPutCh.b_get())
def test_close_while_waiting_get(self):
closeGetCh = self.chan(1)
cancelGetCh = self.chan(1)
cancelPutCh = self.chan(1)
cancelPutCh.b_put('fill buffer')
def thread():
time.sleep(0.1)
closeGetCh.close()
threading.Thread(target=thread).start()
self.assertEqual(c.b_alt(cancelGetCh,
closeGetCh,
[cancelPutCh, 'noSend'],
priority=True),
(None, closeGetCh))
self.assertIsNone(closeGetCh.b_get())
def test_close_while_waiting_put(self):
closePutCh = self.chan(1)
closePutCh.b_put('fill buffer')
cancelGetCh = self.chan(1)
cancelPutCh = self.chan(1)
cancelPutCh.b_put('fill buffer')
def thread():
time.sleep(0.1)
closePutCh.close()
closePutCh.b_get()
threading.Thread(target=thread).start()
self.assertEqual(c.b_alt(cancelGetCh,
[closePutCh, 'success'],
[cancelPutCh, 'noSend'],
priority=True),
(True, closePutCh))
self.assertEqual(closePutCh.b_get(), 'success')
self.assertIsNone(closePutCh.b_get())
def test_double_b_alt_successful_transfer(self):
ch = self.chan(1)
self.assertEqual(c.b_alt([ch, 'success']), (True, ch))
self.assertEqual(c.b_alt(ch), ('success', ch))
def test_xform_state_is_not_modified_when_canceled(self):
xformCh = self.chan(1, xf.take(2))
xformCh.b_put('firstTake')
ch = self.chan()
def thread():
time.sleep(0.1)
ch.b_put('altValue')
threading.Thread(target=thread).start()
self.assertEqual(c.b_alt(ch, [xformCh, 'do not modify xform state'],
priority=True),
('altValue', ch))
xformCh.f_put('secondTake')
xformCh.f_put('dropMe')
self.assertEqual(b_list(xformCh), ['firstTake', 'secondTake'])
def test_put_does_not_add_to_buffer_when_handler_inactive(self):
ch = self.chan(1)
# Put to channel with inactive handler
flag = create_flag()
flag['is_active'] = False
handler = FlagHandler(flag, lambda _: None)
# ch._p_put() must return None so alt() knows this operation remains uncommitted
self.assertIs(ch._p_put(handler, 'do not commit'), None)
# Prove buffer is empty
self.assertIs(ch.poll(), None)
def test_get_does_not_remove_from_buffer_when_handler_inactive(self):
ch = self.chan(1)
ch.offer('success')
# Get from channel with inactive handler
flag = create_flag()
flag['is_active'] = False
handler = FlagHandler(flag, lambda _: None)
# ch._p_get() must return None so alt() knows this operation remains uncommitted
self.assertIs(ch._p_get(handler), None)
# Prove value still in buffer
self.assertIs(ch.poll(), 'success')
class TestUnbufferedAltChan(unittest.TestCase, AbstractTestUnbufferedAlt):
@staticmethod
def chan():
return chan()
class TestBufferedAltChan(unittest.TestCase, AbstractTestBufferedAlt):
@staticmethod
def chan(n=1, xform=xf.identity):
return chan(c.buffer(n), xform)
class TestAltThreads(unittest.TestCase):
def test_b_alt_default_when_available(self):
ch = chan(1)
ch.b_put('success')
self.assertEqual(c.b_alt(ch, default='ignore me'), ('success', ch))
def test_b_alt_default_when_unavailable(self):
ch = chan()
self.assertEqual(c.b_alt(ch, default='success'),
('success', 'default'))
class TestFPut(unittest.TestCase):
def setUp(self):
c.set_loop(asyncio.new_event_loop())
def tearDown(self):
c.get_loop().close()
c.set_loop(None)
def test_return_true_if_buffer_not_full(self):
self.assertIs(chan(1).f_put('val'), True)
def test_returns_true_if_buffer_full_not_closed(self):
self.assertIs(chan().f_put('val'), True)
def test_return_false_if_closed(self):
ch = chan()
ch.close()
self.assertIs(ch.f_put('val'), False)
def test_cb_called_if_buffer_full(self):
ch = chan()
prom = Promise()
ch.f_put('val', prom.deliver)
self.assertEqual(ch.b_get(), 'val')
self.assertIs(prom.deref(), True)
def test_cb_called_on_caller_if_buffer_not_full(self):
prom = Promise()
chan(1).f_put('val',
lambda x: prom.deliver([x, threading.get_ident()]))
self.assertEqual(prom.deref(), [True, threading.get_ident()])
class TestFGet(unittest.TestCase):
def setUp(self):
c.set_loop(asyncio.new_event_loop())
def tearDown(self):
c.get_loop().close()
c.set_loop(None)
def test_return_none_if_buffer_not_empty(self):
ch = chan(1)
ch.b_put('val')
self.assertIsNone(ch.f_get(xf.identity))
def test_return_none_if_buffer_empty(self):
self.assertIsNone(chan().f_get(xf.identity))
def test_return_none_if_closed(self):
ch = chan()
ch.close()
self.assertIsNone(ch.f_get(xf.identity))
def test_cb_called_if_buffer_empty(self):
prom = Promise()
ch = chan()
ch.f_get(prom.deliver)
ch.b_put('val')
self.assertEqual(prom.deref(), 'val')
def test_cb_called_on_caller_if_buffer_not_empty(self):
prom = Promise()
ch = chan(1)
ch.b_put('val')
ch.f_get(lambda x: prom.deliver([x, threading.get_ident()]))
self.assertEqual(prom.deref(), ['val', threading.get_ident()])
class TestDroppingBuffer(unittest.TestCase):
def test_put_does_not_block(self):
ch = chan(c.dropping_buffer(1))
ch.b_put('keep')
ch.b_put('drop')
self.assertIs(ch.b_put('drop'), True)
def test_buffer_keeps_oldest_n_elements(self):
ch = chan(c.dropping_buffer(2))
ch.b_put('keep1')
ch.b_put('keep2')
ch.b_put('drop')
ch.close()
self.assertEqual(b_list(ch), ['keep1', 'keep2'])
def test_buffer_does_not_overfill_with_xform(self):
ch = chan(c.dropping_buffer(2), xf.cat)
ch.b_put([1, 2, 3, 4])
ch.close()
self.assertEqual(b_list(ch), [1, 2])
def test_is_unblocking_buffer(self):
self.assertIs(c.is_unblocking_buffer(c.dropping_buffer(1)), True)
class TestSlidingBuffer(unittest.TestCase):
def test_put_does_not_block(self):
ch = chan(c.sliding_buffer(1))
ch.b_put('drop')
ch.b_put('drop')
self.assertIs(ch.b_put('keep'), True)
def test_buffer_keeps_newest_n_elements(self):
ch = chan(c.sliding_buffer(2))
ch.b_put('drop')
ch.b_put('keep1')
ch.b_put('keep2')
ch.close()
self.assertEqual(b_list(ch), ['keep1', 'keep2'])
def test_buffer_does_not_overfill_with_xform(self):
ch = chan(c.sliding_buffer(2), xf.cat)
ch.b_put([1, 2, 3, 4])
ch.close()
self.assertEqual(b_list(ch), [3, 4])
def test_is_unblocking_buffer(self):
self.assertIs(c.is_unblocking_buffer(c.sliding_buffer(1)), True)
class TestPromiseBuffer(unittest.TestCase):
def test_is_unblocking_buffer(self):
self.assertIs(c.is_unblocking_buffer(_buffers.PromiseBuffer()), True)
if __name__ == '__main__':
unittest.main()
```
#### File: chanpy/tests/test_core.py
```python
import asyncio
import threading
import time
import unittest
import chanpy as c
from chanpy import chan
from chanpy import transducers as xf
from concurrent.futures import ThreadPoolExecutor
async def a_list(ch):
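    """Drain a channel into a list (asyncio helper used throughout these tests)."""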
return await c.to_list(ch).get()
class TestThreadCall(unittest.TestCase):
def setUp(self):
c.set_loop(asyncio.new_event_loop())
def tearDown(self):
c.get_loop().close()
c.set_loop(None)
def test_non_none_return_value(self):
def thread():
return 'success'
ch = c.thread(thread)
self.assertEqual(ch.b_get(), 'success')
self.assertIsNone(ch.b_get())
def test_none_return_value(self):
def thread():
return None
ch = c.thread(thread)
self.assertIsNone(ch.b_get())
def test_executor(self):
def thread():
time.sleep(0.1)
return threading.current_thread().name
executor = ThreadPoolExecutor(max_workers=1,
thread_name_prefix='executor')
thread_name = c.thread(thread, executor).b_get()
self.assertTrue(thread_name.startswith('executor'))
class TestMultAsyncio(unittest.TestCase):
def test_tap(self):
async def main():
src, dest = chan(), chan()
m = c.mult(src)
m.tap(dest)
await src.put('success')
self.assertEqual(await dest.get(), 'success')
src.close()
asyncio.run(main())
def test_untap(self):
async def main():
src, dest1, dest2 = chan(), chan(), chan()
m = c.mult(src)
m.tap(dest1)
m.tap(dest2)
await src.put('item1')
await dest1.get()
await dest2.get()
m.untap(dest2)
await src.put('item2')
await dest1.get()
await asyncio.sleep(0.1)
self.assertIsNone(dest2.poll())
src.close()
asyncio.run(main())
def test_untap_all(self):
async def main():
src, dest1, dest2 = chan(), chan(), chan()
m = c.mult(src)
m.tap(dest1)
m.tap(dest2)
await src.put('item')
await dest1.get()
await dest2.get()
m.untap_all()
self.assertIs(await src.put('dropMe'), True)
await asyncio.sleep(0.1)
self.assertIsNone(dest1.poll())
self.assertIsNone(dest2.poll())
asyncio.run(main())
def test_untap_nonexistent_tap(self):
async def main():
src = chan()
m = c.mult(src)
self.assertIsNone(m.untap(chan()))
src.close()
asyncio.run(main())
def test_mult_blocks_until_all_taps_accept(self):
async def main():
src, dest1, dest2 = chan(), chan(), chan()
m = c.mult(src)
m.tap(dest1)
m.tap(dest2)
await src.put('item')
await dest1.get()
await asyncio.sleep(0.1)
self.assertIs(src.offer('failure'), False)
await dest2.get()
src.close()
asyncio.run(main())
def test_only_correct_taps_close(self):
async def main():
src, close_dest, no_close_dest = chan(), chan(1), chan(1)
m = c.mult(src)
m.tap(close_dest)
m.tap(no_close_dest, close=False)
src.close()
await asyncio.sleep(0.1)
self.assertIs(await close_dest.put('closed'), False)
self.assertIs(await no_close_dest.put('not closed'), True)
asyncio.run(main())
def test_tap_closes_when_added_after_mult_closes(self):
async def main():
src_ch, tap_ch = chan(), chan()
m = c.mult(src_ch)
src_ch.close()
await asyncio.sleep(0.1)
m.tap(tap_ch)
self.assertIsNone(await tap_ch.get())
asyncio.run(main())
class TestMultThread(unittest.TestCase):
def test_tap(self):
def thread(src, dest):
m = c.mult(src)
m.tap(dest)
src.b_put('success')
async def main():
src, dest = chan(), chan()
c.thread(lambda: thread(src, dest))
self.assertEqual(await dest.get(), 'success')
src.close()
asyncio.run(main())
def test_untap(self):
def thread(src, dest1, dest2):
m = c.mult(src)
m.tap(dest1)
m.tap(dest2)
src.b_put('item1')
dest1.b_get()
dest2.b_get()
m.untap(dest2)
src.b_put('item2')
dest1.b_get()
async def main():
src, dest1, dest2 = chan(), chan(), chan()
c.thread(lambda: thread(src, dest1, dest2))
await asyncio.sleep(0.1)
self.assertIsNone(dest2.poll())
src.close()
asyncio.run(main())
def test_untap_all(self):
def thread(src, dest1, dest2):
m = c.mult(src)
m.tap(dest1)
m.tap(dest2)
src.b_put('item')
dest1.b_get()
dest2.b_get()
m.untap_all()
async def main():
src, dest1, dest2 = chan(), chan(), chan()
c.thread(lambda: thread(src, dest1, dest2))
await asyncio.sleep(0.1)
self.assertIs(await src.put('dropMe'), True)
await asyncio.sleep(0.1)
self.assertIsNone(dest1.poll())
self.assertIsNone(dest2.poll())
asyncio.run(main())
def test_untap_nonexistent_tap(self):
def thread(src, complete):
m = c.mult(src)
m.untap(chan())
src.close()
complete.close()
async def main():
src, complete = chan(), chan()
c.thread(lambda: thread(src, complete))
self.assertIsNone(await complete.get())
asyncio.run(main())
def test_mult_blocks_until_all_taps_accept(self):
def thread(src, dest1, dest2, complete):
m = c.mult(src)
m.tap(dest1)
m.tap(dest2)
src.b_put('item')
dest1.b_get()
time.sleep(0.1)
self.assertIs(src.offer('failure'), False)
dest2.b_get()
src.close()
complete.close()
async def main():
src, dest1, dest2, complete = chan(), chan(), chan(), chan()
c.thread(lambda: thread(src, dest1, dest2, complete))
self.assertIsNone(await complete.get())
asyncio.run(main())
def test_only_correct_taps_close(self):
def thread(src, close_dest, open_dest):
m = c.mult(src)
m.tap(close_dest)
m.tap(open_dest, close=False)
src.close()
async def main():
src, close_dest, open_dest = chan(), chan(1), chan(1)
c.thread(lambda: thread(src, close_dest, open_dest))
await asyncio.sleep(0.1)
self.assertIs(await close_dest.put('closed'), False)
self.assertIs(await open_dest.put('not closed'), True)
asyncio.run(main())
def test_tap_closes_when_added_after_mult_closes(self):
def thread(src_ch, tap_ch):
m = c.mult(src_ch)
src_ch.close()
time.sleep(0.1)
m.tap(tap_ch)
async def main():
src_ch, tap_ch = chan(), chan()
c.thread(lambda: thread(src_ch, tap_ch))
self.assertIsNone(await tap_ch.get())
asyncio.run(main())
class TestPubAsyncio(unittest.TestCase):
def test_sub(self):
async def main():
from_ch = chan(1)
a1_ch, a2_ch, b1_ch, b2_ch = chan(), chan(), chan(), chan()
p = c.pub(from_ch, lambda x: x[0])
p.sub('a', a1_ch)
p.sub('a', a2_ch)
p.sub('b', b1_ch)
p.sub('b', b2_ch)
await from_ch.put('apple')
self.assertEqual(await a1_ch.get(), 'apple')
self.assertEqual(await a2_ch.get(), 'apple')
await from_ch.put('bat')
self.assertEqual(await b1_ch.get(), 'bat')
self.assertEqual(await b2_ch.get(), 'bat')
await from_ch.put('ant')
self.assertEqual(await a1_ch.get(), 'ant')
self.assertEqual(await a2_ch.get(), 'ant')
await from_ch.put('bear')
self.assertEqual(await b1_ch.get(), 'bear')
self.assertEqual(await b2_ch.get(), 'bear')
asyncio.run(main())
def test_unsub(self):
async def main():
from_ch = chan(1)
a1_ch, a2_ch, b_ch = chan(), chan(), chan()
p = c.pub(from_ch, lambda x: x[0])
p.sub('a', a1_ch)
p.sub('a', a2_ch)
p.sub('b', b_ch)
p.unsub('a', a2_ch)
await from_ch.put('apple')
self.assertEqual(await a1_ch.get(), 'apple')
await from_ch.put('bat')
self.assertEqual(await b_ch.get(), 'bat')
await asyncio.sleep(0.1)
self.assertIsNone(a2_ch.poll())
p.sub('a', a2_ch)
            await from_ch.put('air')
self.assertEqual(await a2_ch.get(), 'air')
asyncio.run(main())
def test_unsub_nonexistent_topic(self):
async def main():
from_ch, to_ch = chan(1), chan()
p = c.pub(from_ch, xf.identity)
p.sub('a', to_ch)
p.unsub('b', to_ch)
await from_ch.put('a')
self.assertEqual(await to_ch.get(), 'a')
asyncio.run(main())
def test_unsub_nonexistent_ch(self):
async def main():
from_ch, to_ch = chan(1), chan()
p = c.pub(from_ch, xf.identity)
p.sub('a', to_ch)
p.unsub('b', chan())
await from_ch.put('a')
self.assertEqual(await to_ch.get(), 'a')
asyncio.run(main())
def test_unsub_all(self):
async def main():
from_ch, a_ch, b_ch = chan(2), chan(), chan()
p = c.pub(from_ch, lambda x: x[0])
p.sub('a', a_ch)
p.sub('b', b_ch)
p.unsub_all()
await from_ch.put('apple')
await from_ch.put('bat')
await asyncio.sleep(0.1)
self.assertIsNone(from_ch.poll())
self.assertIsNone(a_ch.poll())
self.assertIsNone(b_ch.poll())
p.sub('a', a_ch)
await from_ch.put('air')
self.assertEqual(await a_ch.get(), 'air')
asyncio.run(main())
def test_unsub_all_topic(self):
async def main():
from_ch = chan(2)
a1_ch, a2_ch, b_ch = chan(), chan(), chan()
p = c.pub(from_ch, lambda x: x[0])
p.sub('a', a1_ch)
p.sub('a', a2_ch)
p.sub('b', b_ch)
p.unsub_all('a')
await from_ch.put('apple')
await from_ch.put('bat')
await asyncio.sleep(0.1)
self.assertIsNone(a1_ch.poll())
self.assertIsNone(a2_ch.poll())
self.assertEqual(b_ch.poll(), 'bat')
self.assertIsNone(from_ch.poll())
p.sub('a', a1_ch)
await from_ch.put('air')
self.assertEqual(await a1_ch.get(), 'air')
asyncio.run(main())
def test_only_correct_subs_get_closed(self):
async def main():
from_ch, close_ch, open_ch = chan(1), chan(1), chan(1)
p = c.pub(from_ch, xf.identity)
p.sub('close', close_ch)
p.sub('open', open_ch, close=False)
from_ch.close()
await asyncio.sleep(0.1)
self.assertIs(await close_ch.put('fail'), False)
self.assertIs(await open_ch.put('success'), True)
asyncio.run(main())
def test_buf_fn(self):
async def main():
from_ch = chan()
a_ch, b_ch = chan(), chan()
p = c.pub(from_ch, lambda x: x[0],
lambda x: None if x == 'a' else 2)
p.sub('a', a_ch)
p.sub('b', b_ch)
await from_ch.put('a1')
await from_ch.put('a2')
await asyncio.sleep(0.1)
self.assertIs(from_ch.offer('a fail'), False)
self.assertEqual(await a_ch.get(), 'a1')
self.assertEqual(await a_ch.get(), 'a2')
await asyncio.sleep(0.1)
self.assertIsNone(a_ch.poll())
await from_ch.put('b1')
await from_ch.put('b2')
await from_ch.put('b3')
await from_ch.put('b4')
await asyncio.sleep(0.1)
self.assertIs(from_ch.offer('b fail'), False)
self.assertEqual(await b_ch.get(), 'b1')
self.assertEqual(await b_ch.get(), 'b2')
self.assertEqual(await b_ch.get(), 'b3')
self.assertEqual(await b_ch.get(), 'b4')
await asyncio.sleep(0.1)
self.assertIsNone(b_ch.poll())
asyncio.run(main())
class TestMixAsyncio(unittest.TestCase):
def test_toggle_exceptions(self):
async def main():
ch = chan()
m = c.mix(ch)
with self.assertRaises(ValueError):
m.toggle({'not a channel': {}})
with self.assertRaises(ValueError):
m.toggle({ch: {'invalid option': True}})
with self.assertRaises(ValueError):
m.toggle({ch: {'solo': 'not a boolean'}})
with self.assertRaises(ValueError):
m.toggle({ch: {'pause': 'not a boolean'}})
with self.assertRaises(ValueError):
m.toggle({ch: {'mute': 'not a boolean'}})
asyncio.run(main())
def test_solo_mode_exception(self):
async def main():
m = c.mix(chan())
with self.assertRaises(ValueError):
m.solo_mode('invalid mode')
asyncio.run(main())
def test_admix(self):
async def main():
from_ch1, from_ch2, to_ch = chan(), chan(), chan(1)
m = c.mix(to_ch)
m.admix(from_ch1)
await from_ch1.put('from_ch1')
self.assertEqual(await to_ch.get(), 'from_ch1')
m.admix(from_ch2)
await from_ch1.put('from_ch1 again')
self.assertEqual(await to_ch.get(), 'from_ch1 again')
await from_ch2.put('from_ch2')
self.assertEqual(await to_ch.get(), 'from_ch2')
asyncio.run(main())
def test_unmix(self):
async def main():
from_ch1, from_ch2, to_ch = chan(1), chan(1), chan(1)
m = c.mix(to_ch)
m.admix(from_ch1)
await from_ch1.put('from_ch1')
self.assertEqual(await to_ch.get(), 'from_ch1')
m.admix(from_ch2)
m.unmix(from_ch1)
await from_ch2.put('from_ch2')
self.assertEqual(await to_ch.get(), 'from_ch2')
await from_ch1.put('remain in from_ch1')
await asyncio.sleep(0.1)
self.assertIsNone(to_ch.poll())
self.assertEqual(await from_ch1.get(), 'remain in from_ch1')
asyncio.run(main())
def test_unmix_all(self):
async def main():
from_ch1, from_ch2, to_ch = chan(1), chan(1), chan(1)
m = c.mix(to_ch)
m.admix(from_ch1)
m.admix(from_ch2)
await from_ch1.put('from_ch1')
self.assertEqual(await to_ch.get(), 'from_ch1')
await from_ch2.put('from_ch2')
self.assertEqual(await to_ch.get(), 'from_ch2')
m.unmix_all()
await asyncio.sleep(0.1)
await from_ch1.put('ignore from_ch1 item')
await from_ch2.put('ignore from_ch2 item')
await asyncio.sleep(0.1)
self.assertIsNone(to_ch.poll())
asyncio.run(main())
def test_mute(self):
async def main():
unmuted_ch, muted_ch = chan(), chan()
to_ch = chan(1)
m = c.mix(to_ch)
m.toggle({unmuted_ch: {'mute': False},
muted_ch: {'mute': True}})
await unmuted_ch.put('not muted')
self.assertEqual(await to_ch.get(), 'not muted')
await muted_ch.put('mute me')
self.assertIsNone(to_ch.poll())
m.toggle({unmuted_ch: {'mute': True},
muted_ch: {'mute': False}})
await muted_ch.put('the mute can now talk')
self.assertEqual(await to_ch.get(), 'the mute can now talk')
await unmuted_ch.put('i made a deal with Ursula')
self.assertIsNone(to_ch.poll())
asyncio.run(main())
def test_pause(self):
async def main():
unpaused_ch, paused_ch, to_ch = chan(1), chan(1), chan(1)
m = c.mix(to_ch)
m.toggle({unpaused_ch: {'pause': False},
paused_ch: {'pause': True}})
await unpaused_ch.put('not paused')
self.assertEqual(await to_ch.get(), 'not paused')
await paused_ch.put('remain in paused_ch')
await asyncio.sleep(0.1)
self.assertEqual(await paused_ch.get(), 'remain in paused_ch')
m.toggle({unpaused_ch: {'pause': True},
paused_ch: {'pause': False}})
await paused_ch.put('no longer paused')
self.assertEqual(await to_ch.get(), 'no longer paused')
await unpaused_ch.put('paused now')
await asyncio.sleep(0.1)
self.assertEqual(await unpaused_ch.get(), 'paused now')
asyncio.run(main())
def test_pause_dominates_mute(self):
async def main():
from_ch, to_ch = chan(1), chan(1)
m = c.mix(to_ch)
m.toggle({from_ch: {'pause': True, 'mute': True}})
await from_ch.put('stay in from_ch')
await asyncio.sleep(0.1)
self.assertEqual(await from_ch.get(), 'stay in from_ch')
asyncio.run(main())
def test_solo_dominates_pause_and_mute(self):
async def main():
from_ch, to_ch = chan(), chan(1)
m = c.mix(to_ch)
m.toggle({from_ch: {'solo': True, 'pause': True, 'mute': True}})
await from_ch.put('success')
self.assertEqual(await to_ch.get(), 'success')
asyncio.run(main())
def test_solomode_mute(self):
async def main():
solo_ch1, solo_ch2, non_solo_ch = chan(), chan(), chan()
to_ch = chan(1)
m = c.mix(to_ch)
m.solo_mode('mute')
m.toggle({solo_ch1: {'solo': True},
solo_ch2: {'solo': True},
non_solo_ch: {}})
await solo_ch1.put('solo_ch1 not muted')
self.assertEqual(await to_ch.get(), 'solo_ch1 not muted')
await solo_ch2.put('solo_ch2 not muted')
self.assertEqual(await to_ch.get(), 'solo_ch2 not muted')
await non_solo_ch.put('drop me')
await asyncio.sleep(0.1)
self.assertIsNone(to_ch.poll())
m.toggle({solo_ch1: {'solo': False},
solo_ch2: {'solo': False}})
await asyncio.sleep(0.1)
await solo_ch1.put('solo_ch1 still not muted')
self.assertEqual(await to_ch.get(), 'solo_ch1 still not muted')
await solo_ch2.put('solo_ch2 still not muted')
self.assertEqual(await to_ch.get(), 'solo_ch2 still not muted')
await non_solo_ch.put('non_solo_ch not muted')
self.assertEqual(await to_ch.get(), 'non_solo_ch not muted')
asyncio.run(main())
def test_solomode_pause(self):
async def main():
to_ch = chan(1)
solo_ch1, solo_ch2, non_solo_ch = chan(1), chan(1), chan(1)
m = c.mix(to_ch)
m.solo_mode('pause')
m.toggle({solo_ch1: {'solo': True},
solo_ch2: {'solo': True},
non_solo_ch: {}})
await solo_ch1.put('solo_ch1 not paused')
self.assertEqual(await to_ch.get(), 'solo_ch1 not paused')
await solo_ch2.put('solo_ch2 not paused')
self.assertEqual(await to_ch.get(), 'solo_ch2 not paused')
await non_solo_ch.put('stay in non_solo_ch')
await asyncio.sleep(0.1)
self.assertEqual(await non_solo_ch.get(), 'stay in non_solo_ch')
m.toggle({solo_ch1: {'solo': False},
solo_ch2: {'solo': False}})
await asyncio.sleep(0.1)
await solo_ch1.put('solo_ch1 still not paused')
self.assertEqual(await to_ch.get(), 'solo_ch1 still not paused')
await solo_ch2.put('solo_ch2 still not paused')
self.assertEqual(await to_ch.get(), 'solo_ch2 still not paused')
await non_solo_ch.put('non_solo_ch not paused')
self.assertEqual(await to_ch.get(), 'non_solo_ch not paused')
asyncio.run(main())
def test_admix_unmix_toggle_do_not_interrupt_put(self):
async def main():
to_ch, from_ch = chan(), chan(1)
admix_ch, unmix_ch, pause_ch = chan(1), chan(1), chan(1)
m = c.mix(to_ch)
m.toggle({from_ch: {}, unmix_ch: {}})
# Start waiting put to to_ch
await from_ch.put('successful transfer')
await asyncio.sleep(0.1)
# Apply operations while mix is waiting on to_ch
m.admix(admix_ch)
m.unmix(unmix_ch)
m.toggle({pause_ch: {'pause': True}})
# Confirm state is correct
self.assertEqual(await to_ch.get(), 'successful transfer')
await admix_ch.put('admix_ch added')
self.assertEqual(await to_ch.get(), 'admix_ch added')
await unmix_ch.put('unmix_ch removed')
await asyncio.sleep(0.1)
self.assertEqual(await unmix_ch.get(), 'unmix_ch removed')
await pause_ch.put('pause_ch paused')
await asyncio.sleep(0.1)
self.assertEqual(await pause_ch.get(), 'pause_ch paused')
asyncio.run(main())
def test_to_ch_does_not_close_when_from_chs_do(self):
async def main():
from_ch, to_ch = chan(), chan(1)
m = c.mix(to_ch)
m.admix(from_ch)
from_ch.close()
await asyncio.sleep(0.1)
self.assertIs(await to_ch.put('success'), True)
asyncio.run(main())
def test_mix_consumes_only_one_after_to_ch_closes(self):
async def main():
from_ch, to_ch = chan(1), chan()
m = c.mix(to_ch)
m.admix(from_ch)
await asyncio.sleep(0.1)
to_ch.close()
await from_ch.put('mix consumes me')
await from_ch.put('mix ignores me')
await asyncio.sleep(0.1)
self.assertEqual(await from_ch.get(), 'mix ignores me')
asyncio.run(main())
class TestPipe(unittest.TestCase):
def test_pipe_copy(self):
async def main():
src, dest = chan(), chan()
c.pipe(src, dest)
src.f_put(1)
src.f_put(2)
src.close()
self.assertEqual(await a_list(dest), [1, 2])
asyncio.run(main())
def test_pipe_close_dest(self):
async def main():
src, dest = chan(), chan()
c.pipe(src, dest)
src.close()
self.assertIsNone(await dest.get())
asyncio.run(main())
def test_return_value_is_dest(self):
async def main():
src, dest = chan(), chan()
src.close()
self.assertIs(c.pipe(src, dest), dest)
asyncio.run(main())
def test_pipe_no_close_dest(self):
async def main():
src, dest = chan(), chan(1)
c.pipe(src, dest, close=False)
src.close()
await asyncio.sleep(0.1)
            await dest.put('success')
self.assertEqual(await dest.get(), 'success')
asyncio.run(main())
def test_stop_consuming_when_dest_closes(self):
async def main():
src, dest = chan(3), chan(1)
c.onto_chan(src, ['intoDest1', 'intoDest2', 'dropMe'], close=False)
c.pipe(src, dest)
await asyncio.sleep(0.1)
dest.close()
self.assertEqual(await dest.get(), 'intoDest1')
self.assertEqual(await dest.get(), 'intoDest2')
self.assertIsNone(await dest.get())
await asyncio.sleep(0.1)
self.assertIsNone(src.poll())
asyncio.run(main())
class TestPipeline(unittest.TestCase):
def _test_output(self, mode):
def f(x):
time.sleep(0.2)
return str(x)
async def main():
xform = xf.map(f)
start_time = time.time()
to_ch = chan(5)
finished_ch = c.pipeline(5, to_ch, xform, c.to_chan(range(5)),
mode=mode)
self.assertIs(await finished_ch.get(), None)
elapsed_time = time.time() - start_time
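            # five 0.2s mapping tasks spread across 5 pipeline workers should
            # finish in roughly one batch (~0.2s), hence the 0.1-0.3s window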
self.assertTrue(0.1 < elapsed_time < 0.3)
self.assertEqual(await a_list(to_ch), ['0', '1', '2', '3', '4'])
asyncio.run(main())
def _test_to_ch_not_closed(self, mode):
async def main():
to_ch = chan(5)
c.pipeline(5, to_ch, xf.map(str), c.to_chan(range(5)),
close=False, mode=mode)
for i in range(5):
self.assertEqual(await to_ch.get(), str(i))
self.assertIs(await to_ch.put('success'), True)
to_ch.close()
self.assertEqual(await to_ch.get(), 'success')
self.assertIs(await to_ch.get(), None)
asyncio.run(main())
def _test_stop_consuming_from_ch(self, mode):
async def main():
to_ch = chan(5, xf.take(5))
from_ch = c.to_chan(range(20))
c.pipeline(5, to_ch, xf.identity, from_ch, mode=mode)
await asyncio.sleep(0.1)
self.assertEqual(await a_list(to_ch), [0, 1, 2, 3, 4])
self.assertTrue(len(await a_list(from_ch)) > 5)
asyncio.run(main())
def _test_output_with_chunksize(self, mode):
async def main():
to_ch = chan(5)
finished_ch = c.pipeline(5, to_ch, xf.map(str), c.to_chan(range(5)),
mode=mode, chunksize=2)
self.assertIs(await finished_ch.get(), None)
self.assertEqual(await a_list(to_ch), ['0', '1', '2', '3', '4'])
asyncio.run(main())
def _test_ex_handler(self, mode):
def f(x):
if x == 1:
raise ValueError
return str(x)
def ex_handler(e):
if isinstance(e, ValueError):
return 'ex_handler value'
async def main():
to_ch = chan(2)
c.pipeline(1, to_ch, xf.map(f), c.to_chan([1, 2]),
ex_handler=ex_handler, mode=mode)
self.assertEqual(await a_list(to_ch), ['ex_handler value', '2'])
asyncio.run(main())
def test_invalid_mode(self):
with self.assertRaises(ValueError):
c.pipeline(1, chan(), xf.identity, chan(), mode='invalid')
def test_thread_output(self):
self._test_output('thread')
def test_thread_to_ch_not_closed(self):
self._test_to_ch_not_closed('thread')
def test_thread_stop_consuming_from_ch(self):
self._test_stop_consuming_from_ch('thread')
def test_thread_output_with_chunksize(self):
self._test_output_with_chunksize('thread')
def test_thread_ex_handler(self):
self._test_ex_handler('thread')
def test_process_output(self):
self._test_output('process')
def test_process_to_ch_not_closed(self):
self._test_to_ch_not_closed('process')
def test_process_stop_consuming_from_ch(self):
self._test_stop_consuming_from_ch('process')
    def test_process_output_with_chunksize(self):
self._test_output_with_chunksize('process')
def test_process_ex_handler(self):
self._test_ex_handler('process')
class TestPipelineAsync(unittest.TestCase):
def test_pipeline_async(self):
def thread(val, result_ch):
result_ch.b_put(val)
time.sleep(0.2)
result_ch.b_put(str(val))
result_ch.close()
def af(val, result_ch):
threading.Thread(target=thread, args=[val, result_ch]).start()
async def main():
to_ch = chan(8)
start_time = time.time()
finished_ch = c.pipeline_async(2, to_ch, af, c.to_chan([1, 2, 3, 4]))
self.assertIs(await finished_ch.get(), None)
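            # with async parallelism 2 and four 0.2s jobs, draining should take
            # about two batches (~0.4s)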
self.assertTrue(0.3 < time.time() - start_time < 0.5)
self.assertEqual(await a_list(to_ch),
[1, '1', 2, '2', 3, '3', 4, '4'])
asyncio.run(main())
def test_pipeline_async_no_close(self):
def af(_, result_ch):
result_ch.close()
async def main():
to_ch = chan(1)
finished_ch = c.pipeline_async(2, to_ch, af,
c.to_chan([1, 2, 3, 4]),
close=False)
self.assertIs(await finished_ch.get(), None)
await to_ch.put('success')
to_ch.close()
self.assertEqual(await to_ch.get(), 'success')
self.assertIs(await to_ch.get(), None)
asyncio.run(main())
class TestReduce(unittest.TestCase):
def test_empty_ch(self):
async def main():
ch = chan()
ch.close()
result_ch = c.reduce(lambda: None, 'init', ch)
self.assertEqual(await result_ch.get(), 'init')
asyncio.run(main())
def test_non_empty_ch(self):
async def main():
in_ch = c.to_chan(range(4))
result_ch = c.reduce(lambda x, y: x + y, 100, in_ch)
self.assertEqual(await result_ch.get(), 106)
asyncio.run(main())
def test_no_init_non_empty(self):
async def main():
in_ch = c.to_chan(range(4))
result_ch = c.reduce(xf.multi_arity(lambda: 100,
xf.identity,
lambda x, y: x + y),
in_ch)
self.assertEqual(await result_ch.get(), 106)
asyncio.run(main())
def test_no_init_empty(self):
async def main():
in_ch = chan()
in_ch.close()
result_ch = c.reduce(xf.multi_arity(lambda: 100,
xf.identity,
lambda x, y: x + y),
in_ch)
self.assertEqual(await result_ch.get(), 100)
asyncio.run(main())
def test_no_init_no_zero_arity(self):
async def main():
in_ch = c.to_chan(range(4))
with self.assertRaises(TypeError):
c.reduce(xf.multi_arity(None,
xf.identity,
lambda x, y: x + y),
in_ch)
asyncio.run(main())
def test_reduced(self):
async def main():
in_ch = c.to_chan(range(4))
def rf(result, val):
if val == 2:
return xf.reduced(result + 2)
return result + val
result_ch = c.reduce(rf, 100, in_ch)
self.assertEqual(await result_ch.get(), 103)
asyncio.run(main())
class TestTransduce(unittest.TestCase):
def test_xform_is_flushed_on_completion(self):
async def main():
ch = c.to_chan([1, 2, 3])
def rf(result, val=None):
if val is None:
return result
result.append(val)
return result
result_ch = c.transduce(xf.partition_all(2), rf, [], ch)
self.assertEqual(await result_ch.get(), [(1, 2), (3,)])
asyncio.run(main())
def test_xform_early_termination(self):
async def main():
ch = c.to_chan([1, 2, 3])
def rf(result, val=None):
if val is None:
return result
result.append(val)
return result
result_ch = c.transduce(xf.take(2), rf, [], ch)
self.assertEqual(await result_ch.get(), [1, 2])
asyncio.run(main())
def test_no_init_non_empty(self):
async def main():
in_ch = c.to_chan(range(4))
result_ch = c.transduce(xf.filter(lambda x: x % 2 == 0),
xf.multi_arity(lambda: 100,
xf.identity,
lambda x, y: x + y),
in_ch)
self.assertEqual(await result_ch.get(), 102)
asyncio.run(main())
def test_no_init_empty(self):
async def main():
in_ch = chan()
in_ch.close()
result_ch = c.transduce(xf.filter(lambda x: x % 2 == 0),
xf.multi_arity(lambda: 100,
xf.identity,
lambda x, y: x + y),
in_ch)
self.assertEqual(await result_ch.get(), 100)
asyncio.run(main())
def test_no_init_no_zero_arity(self):
async def main():
in_ch = c.to_chan(range(4))
with self.assertRaises(TypeError):
c.transduce(xf.filter(lambda x: x % 2 == 0),
xf.multi_arity(None,
xf.identity,
lambda x, y: x + y),
in_ch)
asyncio.run(main())
class TestMerge(unittest.TestCase):
def test_merge_unbuffered(self):
async def main():
src1, src2 = chan(), chan()
m = c.merge([src1, src2], 2)
await src1.put('src1')
await src2.put('src2')
src1.close()
src2.close()
self.assertEqual([x async for x in m], ['src1', 'src2'])
asyncio.run(main())
class TestMap(unittest.TestCase):
def test_map_unbuffered(self):
async def main():
letter_ch = c.to_chan(['a', 'b', 'c', 'd', 'e'])
number_ch = c.to_chan(['1', '2', '3'])
result_ch = c.map(lambda x, y: x + y, [letter_ch, number_ch])
self.assertEqual(await a_list(result_ch), ['a1', 'b2', 'c3'])
self.assertEqual(await a_list(letter_ch), ['e'])
asyncio.run(main())
def test_map_buffered(self):
async def main():
letter_ch = c.to_chan(['a', 'b', 'c', 'd', 'e'])
number_ch = c.to_chan(['1', '2', '3'])
result_ch = c.map(lambda x, y: x + y,
[letter_ch, number_ch],
c.sliding_buffer(2))
await asyncio.sleep(0.1)
self.assertEqual(await a_list(result_ch), ['b2', 'c3'])
self.assertEqual(await a_list(letter_ch), ['e'])
asyncio.run(main())
class TestSplit(unittest.TestCase):
def test_chans_close_with_closed_source(self):
async def main():
src_ch = chan()
src_ch.close()
t_ch, f_ch = c.split(lambda _: True, src_ch)
self.assertIsNone(await t_ch.get())
self.assertIsNone(await f_ch.get())
asyncio.run(main())
def test_true_false_chans(self):
async def main():
t_ch, f_ch = c.split(lambda x: x % 2 == 0,
c.to_chan([1, 2, 3, 4]))
self.assertEqual(await f_ch.get(), 1)
self.assertEqual(await t_ch.get(), 2)
self.assertEqual(await f_ch.get(), 3)
self.assertEqual(await t_ch.get(), 4)
self.assertIsNone(await f_ch.get())
self.assertIsNone(await t_ch.get())
asyncio.run(main())
def test_bufs(self):
async def main():
t_ch, f_ch = c.split(lambda x: x % 2 == 0,
c.to_chan([1, 2, 3, 4, 5]),
2, 3)
self.assertEqual(await a_list(t_ch), [2, 4])
self.assertEqual(await a_list(f_ch), [1, 3, 5])
asyncio.run(main())
if __name__ == '__main__':
unittest.main()
```
#### File: chanpy/tests/test_transducers.py
```python
import unittest
from chanpy import transducers as xf
sum_rf = xf.multi_arity(lambda: 0, xf.identity, lambda x, y: x + y)
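# sum_rf is a complete reducing function: zero arity -> init (0), single
# arity -> completion (identity), two arity -> step (addition)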
class TestPartitionAll(unittest.TestCase):
def test_partition_every(self):
xform = xf.partition_all(1)
self.assertEqual(list(xf.xiter(xform, range(3))), [(0,), (1,), (2,)])
def test_partition_pos(self):
xform = xf.partition_all(3)
self.assertEqual(list(xf.xiter(xform, range(6))),
[(0, 1, 2), (3, 4, 5)])
def test_partition_empty(self):
xform = xf.partition_all(1)
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_partition_n_fraction(self):
with self.assertRaises(ValueError):
xf.partition_all(1.5)
def test_partition_n_zero(self):
with self.assertRaises(ValueError):
xf.partition_all(0)
def test_partition_n_neg(self):
with self.assertRaises(ValueError):
xf.partition_all(-1)
def test_partition_step_fraction(self):
with self.assertRaises(ValueError):
xf.partition_all(1, 1.5)
def test_partition_step_zero(self):
with self.assertRaises(ValueError):
xf.partition_all(1, 0)
def test_partition_step_neg(self):
with self.assertRaises(ValueError):
xf.partition_all(1, -1)
def test_reduced_without_step(self):
xform = xf.comp(xf.partition_all(1), xf.take(2))
self.assertEqual(list(xf.xiter(xform, range(12))), [(0,), (1,)])
def test_reduced_with_step(self):
xform = xf.comp(xf.partition_all(2, 1), xf.take(1))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3])), [(1, 2)])
def test_arity_zero(self):
self.assertEqual(xf.partition_all(1)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.partition_all(3)
self.assertEqual(list(xf.xiter(xform, range(5))), [(0, 1, 2), (3, 4)])
def test_partition_with_smaller_step(self):
xform = xf.partition_all(3, 1)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4, 5])),
[(1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5), (5,)])
def test_partition_with_smaller_step_reduced_during_complete(self):
xform = xf.comp(xf.partition_all(3, 1), xf.take(4))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4, 5])),
[(1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5)])
def test_partition_with_larger_step(self):
xform = xf.partition_all(2, 4)
self.assertEqual(list(xf.xiter(xform, range(1, 10))),
[(1, 2), (5, 6), (9,)])
class TestPartition(unittest.TestCase):
def test_no_pad(self):
xform = xf.partition(2)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4, 5])),
[(1, 2), (3, 4)])
def test_no_pad_empty(self):
xform = xf.partition(2)
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_n_fraction(self):
with self.assertRaises(ValueError):
xf.partition(1.5)
def test_n_zero(self):
with self.assertRaises(ValueError):
xf.partition(0)
def test_n_neg(self):
with self.assertRaises(ValueError):
xf.partition(-1)
def test_step_pos(self):
xform = xf.partition(2, 1)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3])), [(1, 2), (2, 3)])
def test_step_fraction(self):
with self.assertRaises(ValueError):
xf.partition(1, 1.5)
def test_step_zero(self):
with self.assertRaises(ValueError):
xf.partition(1, 0)
def test_step_neg(self):
with self.assertRaises(ValueError):
xf.partition(1, -1)
def test_pad_not_iter(self):
with self.assertRaises(TypeError):
xform = xf.partition(2, 2, 1)
list(xf.xiter(xform, [1, 2, 3]))
def test_pad_with_iterator(self):
xform = xf.partition(3, 3, iter(['pad', 'pad']))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4])),
[(1, 2, 3), (4, 'pad', 'pad')])
def test_pad_too_small(self):
xform = xf.partition(3, 3, ['pad'])
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4])),
[(1, 2, 3), (4, 'pad')])
def test_pad_too_large(self):
xform = xf.partition(3, 3, ['pad'] * 5)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4])),
[(1, 2, 3), (4, 'pad', 'pad')])
def test_reduced_last_element(self):
xform = xf.comp(xf.partition(3, 3, ['pad', 'pad']), xf.take(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4])),
[(1, 2, 3), (4, 'pad', 'pad')])
def test_complete(self):
xform = xf.comp(xf.partition(2), xf.partition_all(10))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4])),
[((1, 2), (3, 4),)])
class TestTake(unittest.TestCase):
def test_take_pos(self):
taken = list(xf.xiter(xf.take(2), [1, 2, 3, 4]))
self.assertEqual(taken, [1, 2])
def test_take_too_many(self):
taken = list(xf.xiter(xf.take(10), [1, 2, 3, 4]))
self.assertEqual(taken, [1, 2, 3, 4])
def test_take_zero(self):
taken = list(xf.xiter(xf.take(0), [1, 2, 3, 4]))
self.assertEqual(taken, [])
def test_take_neg(self):
taken = list(xf.xiter(xf.take(-1), [1, 2, 3, 4]))
self.assertEqual(taken, [])
def test_arity_zero(self):
self.assertEqual(xf.take(1)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.take(3), xf.partition_all(2))
taken = list(xf.xiter(xform, [1, 2, 3, 4]))
self.assertEqual(list(taken), [(1, 2), (3,)])
class TestTakeLast(unittest.TestCase):
def test_n_too_large(self):
xform = xf.take_last(5)
self.assertEqual(list(xf.xiter(xform, [1, 2])), [1, 2])
def test_n_too_small(self):
xform = xf.take_last(2)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4, 5])), [4, 5])
def test_n_fraction(self):
xform = xf.take_last(2.5)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4, 5])), [4, 5])
def test_n_zero(self):
xform = xf.take_last(0)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3])), [])
def test_n_neg(self):
xform = xf.take_last(-1)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3])), [])
def test_empty(self):
xform = xf.take_last(2)
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_reduced(self):
xform = xf.comp(xf.take_last(4), xf.take(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4, 5, 6, 7])), [4, 5])
def test_arity_zero(self):
self.assertEqual(xf.take_last(1)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.take_last(3), xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4])), [(2, 3), (4,)])
class TestTakeWhile(unittest.TestCase):
def test_take_some(self):
taken = list(xf.xiter(xf.take_while(lambda x: x < 3), [1, 2, 3, 4]))
self.assertEqual(taken, [1, 2])
def test_take_none(self):
taken = list(xf.xiter(xf.take_while(lambda x: x < 0), [1, 2, 3, 4]))
self.assertEqual(taken, [])
def test_pred_ignored_after_first_drop(self):
xform = xf.take_while(lambda x: x < 0)
taken = list(xf.xiter(xform, [-1, -2, 3, -4, -5]))
self.assertEqual(taken, [-1, -2])
def test_arity_zero(self):
self.assertEqual(xf.take_while(None)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.take_while(lambda x: x < 4), xf.partition_all(2))
dropped = list(xf.xiter(xform, [1, 2, 3, 4, 5]))
self.assertEqual(list(dropped), [(1, 2), (3,)])
class TestTakeNth(unittest.TestCase):
def test_take_every(self):
xform = xf.take_nth(1)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4])), [1, 2, 3, 4])
def test_take_few(self):
xform = xf.take_nth(3)
self.assertEqual(list(xf.xiter(xform, range(12))), [0, 3, 6, 9])
def test_empty(self):
xform = xf.take_nth(1)
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_take_fraction(self):
with self.assertRaises(ValueError):
xf.take_nth(1.5)
def test_take_zero(self):
with self.assertRaises(ValueError):
xf.take_nth(0)
    def test_take_neg(self):
with self.assertRaises(ValueError):
xf.take_nth(-1)
def test_reduced(self):
xform = xf.comp(xf.take_nth(3), xf.take(2))
self.assertEqual(list(xf.xiter(xform, range(12))), [0, 3])
def test_arity_zero(self):
self.assertEqual(xf.take_nth(1)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.take_nth(1), xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3])), [(1, 2), (3,)])
class TestDrop(unittest.TestCase):
def test_drop_pos(self):
dropped = list(xf.xiter(xf.drop(2), [1, 2, 3, 4]))
self.assertEqual(dropped, [3, 4])
def test_drop_too_many(self):
dropped = list(xf.xiter(xf.drop(10), [1, 2, 3, 4]))
self.assertEqual(dropped, [])
def test_drop_zero(self):
dropped = list(xf.xiter(xf.drop(0), [1, 2, 3, 4]))
self.assertEqual(dropped, [1, 2, 3, 4])
def test_drop_neg(self):
dropped = list(xf.xiter(xf.drop(-1), [1, 2, 3, 4]))
self.assertEqual(dropped, [1, 2, 3, 4])
def test_reduced(self):
xform = xf.comp(xf.drop(2), xf.take(2))
dropped = list(xf.xiter(xform, range(8)))
self.assertEqual(dropped, [2, 3])
def test_arity_zero(self):
self.assertEqual(xf.drop(1)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.drop(2), xf.partition_all(2))
dropped = list(xf.xiter(xform, [1, 2, 3, 4, 5]))
self.assertEqual(list(dropped), [(3, 4), (5,)])
class TestDropLast(unittest.TestCase):
def test_n_too_large(self):
xform = xf.drop_last(5)
self.assertEqual(list(xf.xiter(xform, [1, 2])), [])
def test_n_too_small(self):
xform = xf.drop_last(2)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4, 5])), [1, 2, 3])
def test_n_fraction(self):
xform = xf.drop_last(2.5)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4, 5])), [1, 2, 3])
def test_n_zero(self):
xform = xf.drop_last(0)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3])), [1, 2, 3])
def test_n_neg(self):
xform = xf.drop_last(-1)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3])), [1, 2, 3])
def test_empty(self):
xform = xf.drop_last(2)
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_reduced(self):
xform = xf.comp(xf.drop_last(4), xf.take(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4, 5, 6, 7])), [1, 2])
def test_arity_zero(self):
self.assertEqual(xf.drop_last(1)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.drop_last(1), xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4])), [(1, 2), (3,)])
class TestDropWhile(unittest.TestCase):
def test_drop_some(self):
dropped = list(xf.xiter(xf.drop_while(lambda x: x < 3), [1, 2, 3, 4]))
self.assertEqual(dropped, [3, 4])
def test_drop_none(self):
dropped = list(xf.xiter(xf.drop_while(lambda x: x < 0), [1, 2, 3, 4]))
self.assertEqual(dropped, [1, 2, 3, 4])
def test_pred_ignored_after_first_take(self):
dropped = list(xf.xiter(xf.drop_while(lambda x: x < 3),
[1, 2, 3, -4, -5]))
self.assertEqual(dropped, [3, -4, -5])
def test_reduced(self):
xform = xf.comp(xf.drop_while(lambda x: x < 3), xf.take(2))
dropped = list(xf.xiter(xform, range(8)))
self.assertEqual(list(dropped), [3, 4])
def test_arity_zero(self):
self.assertEqual(xf.drop_while(None)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.drop_while(lambda x: x < 3), xf.partition_all(2))
dropped = list(xf.xiter(xform, range(8)))
self.assertEqual(list(dropped), [(3, 4), (5, 6), (7,)])
class TestMap(unittest.TestCase):
def test_map_some(self):
xform = xf.map(lambda x: x * 2)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3])), [2, 4, 6])
def test_map_none(self):
xform = xf.map(None)
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_reduced(self):
xform = xf.comp(xf.map(lambda x: x * 2), xf.take(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4])), [2, 4])
def test_arity_zero(self):
self.assertEqual(xf.map(None)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.map(lambda x: x * 2), xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3])), [(2, 4), (6,)])
class TestMapIndexed(unittest.TestCase):
def test_map_some(self):
xform = xf.map_indexed(lambda i, x: {i: x})
self.assertEqual(list(xf.xiter(xform, ['zero', 'one', 'two'])),
[{0: 'zero'}, {1: 'one'}, {2: 'two'}])
def test_map_empty(self):
xform = xf.map_indexed(None)
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_reduced(self):
xform = xf.comp(xf.map_indexed(lambda i, x: {i: x}), xf.take(2))
self.assertEqual(list(xf.xiter(xform, ['zero', 'one', '_', '_'])),
[{0: 'zero'}, {1: 'one'}])
def test_arity_zero(self):
self.assertEqual(xf.map_indexed(None)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.map_indexed(lambda i, x: {i: x}),
xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, ['zero', 'one', 'two'])),
[({0: 'zero'}, {1: 'one'}), ({2: 'two'},)])
class TestFilter(unittest.TestCase):
def test_filter_some(self):
xform = xf.filter(lambda x: x % 2 == 0)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4])), [2, 4])
def test_filter_none(self):
xform = xf.filter(lambda x: x % 2 == 0)
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_reduced(self):
xform = xf.comp(xf.filter(lambda x: x % 2 == 0), xf.take(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4, 5, 6])), [2, 4])
def test_arity_zero(self):
self.assertEqual(xf.filter(None)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.filter(lambda x: x % 2 == 0), xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [2, 4, 5, 6])), [(2, 4), (6,)])
class TestFilterIndexed(unittest.TestCase):
def even_i_pos_v(self, index, val):
return index % 2 == 0 and val > 0
def test_not_empty(self):
xform = xf.filter_indexed(self.even_i_pos_v)
self.assertEqual(list(xf.xiter(xform, [-1, 2, 3, 4, 5])), [3, 5])
def test_empty(self):
xform = xf.filter_indexed(lambda i, v: True)
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_reduced(self):
xform = xf.comp(xf.filter_indexed(self.even_i_pos_v), xf.take(1))
self.assertEqual(list(xf.xiter(xform, [-1, 2, 3, 4, 5])), [3])
def test_arity_zero(self):
self.assertEqual(xf.filter_indexed(None)(lambda: 'success')(),
'success')
def test_complete(self):
xform = xf.comp(xf.filter_indexed(self.even_i_pos_v),
xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [-1, 2, 3, 4, 5, 6, 7])),
[(3, 5), (7,)])
class TestRemove(unittest.TestCase):
def test_remove_some(self):
xform = xf.remove(lambda x: x % 2 == 0)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4])), [1, 3])
def test_remove_none(self):
xform = xf.remove(lambda x: x % 2 == 0)
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_reduced(self):
xform = xf.comp(xf.remove(lambda x: x % 2 == 0), xf.take(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4, 5])), [1, 3])
def test_arity_zero(self):
self.assertEqual(xf.remove(None)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.remove(lambda x: x % 2 == 0), xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 5])), [(1, 3), (5,)])
class TestRemoveIndexed(unittest.TestCase):
def even_i_pos_v(self, index, val):
return index % 2 == 0 and val > 0
def test_not_empty(self):
xform = xf.remove_indexed(self.even_i_pos_v)
self.assertEqual(list(xf.xiter(xform, [1, -2, 3, 4, -5])), [-2, 4, -5])
def test_empty(self):
xform = xf.remove_indexed(lambda i, v: False)
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_reduced(self):
xform = xf.comp(xf.remove_indexed(self.even_i_pos_v), xf.take(1))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4])), [2])
def test_arity_zero(self):
self.assertEqual(xf.remove_indexed(None)(lambda: 'success')(),
'success')
def test_complete(self):
xform = xf.comp(xf.remove_indexed(self.even_i_pos_v),
xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, -3, 4, 5])),
[(2, -3), (4,)])
class TestKeep(unittest.TestCase):
def test_keep_some(self):
xform = xf.keep(lambda x: x if x % 2 == 0 else None)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4])), [2, 4])
def test_keep_empty(self):
xform = xf.keep(lambda x: x if x % 2 == 0 else None)
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_reduced(self):
xform = xf.comp(xf.keep(lambda x: x if x % 2 == 0 else None),
xf.take(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4, 5, 6])), [2, 4])
def test_arity_zero(self):
self.assertEqual(xf.keep(None)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.keep(lambda x: x if x % 2 == 0 else None),
xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [2, 4, 5, 6])), [(2, 4), (6,)])
class TestKeepIndexed(unittest.TestCase):
@staticmethod
def even_set(i, x):
return {x} if x % 2 == 0 else None
def test_keep_some(self):
xform = xf.keep_indexed(self.even_set)
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4])), [{2}, {4}])
def test_keep_empty(self):
xform = xf.keep_indexed(None)
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_reduced(self):
xform = xf.comp(xf.keep_indexed(self.even_set), xf.take(2))
self.assertEqual(list(xf.xiter(xform, [2, 3, 4, 5, 6])), [{2}, {4}])
def test_arity_zero(self):
self.assertEqual(xf.keep_indexed(None)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.keep_indexed(self.even_set), xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [2, 4, 5, 6])),
[({2}, {4}), ({6},)])
class TestCat(unittest.TestCase):
def test_cat_some(self):
self.assertEqual(list(xf.xiter(xf.cat, [[1, 2, 3], [4, 5]])),
[1, 2, 3, 4, 5])
def test_cat_none(self):
self.assertEqual(list(xf.xiter(xf.cat, [])), [])
def test_reduced(self):
xform = xf.comp(xf.cat, xf.take(2))
self.assertEqual(list(xf.xiter(xform, [[1, 2], [3]])), [1, 2])
def test_arity_zero(self):
self.assertEqual(xf.cat(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.cat, xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [[1, 2], [3]])), [(1, 2), (3,)])
class TestMapcat(unittest.TestCase):
def test_mapcat_some(self):
xform = xf.mapcat(lambda x: [x, x * 2])
self.assertEqual(list(xf.xiter(xform, [1, 4, 16])),
[1, 2, 4, 8, 16, 32])
def test_mapcat_none(self):
xform = xf.mapcat(lambda x: [x, x * 2])
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_reduced(self):
xform = xf.comp(xf.mapcat(lambda x: [x, x * 2]), xf.take(3))
self.assertEqual(list(xf.xiter(xform, [1, 4, 16])), [1, 2, 4])
def test_arity_zero(self):
self.assertEqual(xf.mapcat(None)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.mapcat(lambda x: [x, x * 2, x * 3]),
xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [1])), [(1, 2), (3,)])
class TestDistinct(unittest.TestCase):
def test_remove_duplicates(self):
self.assertEqual(list(xf.xiter(xf.distinct, [1, 2, 3, 2, 1, 3, 4, 5])),
[1, 2, 3, 4, 5])
def test_none(self):
self.assertEqual(list(xf.xiter(xf.distinct, [])), [])
def test_reduced(self):
xform = xf.comp(xf.distinct, xf.take(2))
self.assertEqual(list(xf.xiter(xform, [1, 1, 2, 3, 4, 5])), [1, 2])
def test_arity_zero(self):
self.assertEqual(xf.distinct(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.distinct, xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 1, 2, 3, 3])),
[(1, 2), (3,)])
class TestDedupe(unittest.TestCase):
def test_remove_duplicates(self):
self.assertEqual(list(xf.xiter(xf.dedupe, [1, 1, 1, 2, 2, 3, 2, 3])),
[1, 2, 3, 2, 3])
def test_none(self):
self.assertEqual(list(xf.xiter(xf.dedupe, [])), [])
def test_reduced(self):
xform = xf.comp(xf.dedupe, xf.take(2))
self.assertEqual(list(xf.xiter(xform, [1, 1, 2, 2, 3, 4])), [1, 2])
def test_arity_zero(self):
self.assertEqual(xf.dedupe(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.dedupe, xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 2, 3])), [(1, 2), (3,)])
class TestPartitionBy(unittest.TestCase):
def test_partition_some(self):
xform = xf.partition_by(lambda x: x % 2 == 0)
self.assertEqual(list(xf.xiter(xform, [1, 3, 5, 2, 4, 8, 9])),
[(1, 3, 5), (2, 4, 8), (9,)])
def test_partition_none(self):
xform = xf.partition_by(None)
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_reduced(self):
xform = xf.comp(xf.partition_by(lambda x: x % 2 == 0), xf.take(2))
self.assertEqual(list(xf.xiter(xform, [1, 3, 2, 4, 5, 7])),
[(1, 3), (2, 4)])
def test_arity_zero(self):
self.assertEqual(xf.partition_by(None)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.partition_by(lambda x: x % 2 == 0), xf.take(2))
self.assertEqual(list(xf.xiter(xform, [2, 4, 6, 1, 3, 5, 8])),
[(2, 4, 6), (1, 3, 5)])
class TestReductions(unittest.TestCase):
def test_reductions_some(self):
xform = xf.reductions(lambda x, y: x + y, 1)
self.assertEqual(list(xf.xiter(xform, [2, 3])), [1, 3, 6])
def test_reductions_init_only(self):
xform = xf.reductions(lambda x, y: x + y, 'success')
self.assertEqual(list(xf.xiter(xform, [])), ['success'])
def test_reductions_init_only_complete(self):
xform = xf.comp(xf.reductions(lambda x, y: x + y, [1, 2, 3]),
xf.cat,
xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [])), [(1, 2), (3,)])
def test_reductions_init_only_reduced(self):
xform = xf.comp(xf.reductions(lambda x, y: x + y, 'success'),
xf.take(1))
self.assertEqual(list(xf.xiter(xform, [])), ['success'])
def test_reductions_no_init(self):
xform = xf.reductions(xf.multi_arity(lambda: 100, None, sum_rf))
self.assertEqual(list(xf.xiter(xform, [1, 2])), [100, 101, 103])
def test_reductions_reduced(self):
xform = xf.comp(xf.reductions(lambda x, y: x + y, 1), xf.take(3))
self.assertEqual(list(xf.xiter(xform, [2, 3, 4, 5])), [1, 3, 6])
def test_arity_zero(self):
self.assertEqual(xf.reductions(xf.identity, 1)(lambda: 'success')(),
'success')
def test_complete(self):
xform = xf.comp(xf.reductions(lambda x, y: x + y, 1),
xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [2, 3])), [(1, 3), (6,)])
class TestInterpose(unittest.TestCase):
def test_interpose_some(self):
xform = xf.interpose('s')
self.assertEqual(list(xf.xiter(xform, [1, 2, 3])), [1, 's', 2, 's', 3])
def test_interpose_empty(self):
xform = xf.interpose('s')
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_reduced(self):
xform = xf.comp(xf.interpose('s'), xf.take(4))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3])), [1, 's', 2, 's'])
def test_arity_zero(self):
self.assertEqual(xf.interpose('s')(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.interpose('s'), xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [1, 2])), [(1, 's'), (2,)])
class TestReplace(unittest.TestCase):
def test_replace_some(self):
xform = xf.replace({1: 'one', 2: 'two'})
self.assertEqual(list(xf.xiter(xform, [1, 2, 3])), ['one', 'two', 3])
def test_empty(self):
xform = xf.replace({1: 'one'})
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_reduced(self):
xform = xf.comp(xf.replace({1: 'one'}), xf.take(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4])), ['one', 2])
def test_arity_zero(self):
self.assertEqual(xf.replace({})(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.replace({1: 'one'}), xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3])), [('one', 2), (3,)])
class TestRandomSample(unittest.TestCase):
def test_1(self):
xform = xf.random_sample(1)
self.assertEqual(list(xf.xiter(xform, range(100))), list(range(100)))
def test_0(self):
xform = xf.random_sample(0)
self.assertEqual(list(xf.xiter(xform, range(100))), [])
def test_neg(self):
xform = xf.random_sample(-1)
self.assertEqual(list(xf.xiter(xform, range(100))), [])
def test_gt_1(self):
xform = xf.random_sample(2)
self.assertEqual(list(xf.xiter(xform, range(100))), list(range(100)))
def test_fraction(self):
xform = xf.random_sample(0.5)
vals = set(range(1000))
results = set(xf.xiter(xform, vals))
self.assertTrue(results.issubset(vals))
self.assertTrue(0 < len(results) < 1000) # Very unlikely to be false
def test_empty(self):
xform = xf.random_sample(1)
self.assertEqual(list(xf.xiter(xform, [])), [])
def test_reduced(self):
xform = xf.comp(xf.random_sample(1), xf.take(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3, 4])), [1, 2])
def test_arity_zero(self):
self.assertEqual(xf.random_sample(1)(lambda: 'success')(), 'success')
def test_complete(self):
xform = xf.comp(xf.random_sample(1), xf.partition_all(2))
self.assertEqual(list(xf.xiter(xform, [1, 2, 3])), [(1, 2), (3,)])
class TestCompleting(unittest.TestCase):
def test_default_cf(self):
rf = xf.completing(xf.multi_arity(lambda: 0, None, lambda x, y: x + y))
self.assertEqual(rf(), 0)
self.assertEqual(rf(1, 2), 3)
self.assertEqual(rf('success'), 'success')
def test_custom_cf(self):
rf = xf.completing(xf.multi_arity(lambda: 0, None, lambda x, y: x + y),
str)
self.assertEqual(rf(), 0)
self.assertEqual(rf(1, 2), 3)
self.assertEqual(rf(100), '100')
class TestIreduce(unittest.TestCase):
def test_some_no_init(self):
result = xf.ireduce(sum_rf, [1, 2, 3, 8])
self.assertEqual(result, 14)
def test_empty_no_init(self):
result = xf.ireduce(sum_rf, [])
self.assertEqual(result, 0)
def test_no_init_no_arity_zero(self):
with self.assertRaises(TypeError):
            xf.ireduce(lambda x, y: x + y, [])
class TestItransduce(unittest.TestCase):
def test_itransduce_some(self):
result = xf.itransduce(xf.filter(lambda x: x % 2 == 0),
sum_rf,
1,
[2, 3, 8])
self.assertEqual(result, 11)
def test_itransduce_some_no_init(self):
result = xf.itransduce(xf.filter(lambda x: x % 2 == 0),
sum_rf,
[1, 2, 3, 8])
self.assertEqual(result, 10)
def test_itransduce_empty_no_init(self):
result = xf.itransduce(xf.filter(lambda x: x % 2 == 0), sum_rf, [])
self.assertEqual(result, 0)
def test_itransduce_empty_no_init_no_arity_zero(self):
with self.assertRaises(TypeError):
xf.itransduce(xf.filter(lambda x: x % 2 == 0),
lambda x, y: x + y,
[])
def test_itransduce_init_only(self):
result = xf.itransduce(xf.filter(None), xf.identity, 1, [])
self.assertEqual(result, 1)
def test_itransduce_init_only_complete(self):
def xform(rf):
return lambda result: rf(result + 100)
result = xf.itransduce(xform, xf.identity, 1, [])
self.assertEqual(result, 101)
def test_itransduce_reduced(self):
result = xf.itransduce(xf.take(2), sum_rf, 1, [2, 3, 100])
self.assertEqual(result, 6)
def test_complete(self):
result = xf.itransduce(xf.partition_all(2), xf.append, [1, 2, 3])
self.assertEqual(result, [(1, 2), (3,)])
class TestInto(unittest.TestCase):
def test_into(self):
appendable = [1, 2]
xform = xf.map(lambda x: x + 1)
self.assertIs(xf.into(appendable, xform, [3, 4]), appendable)
self.assertEqual(appendable, [1, 2, 4, 5])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jmage-rs/koadic",
"score": 3
} |
#### File: core/commands/exit.py
```python
DESCRIPTION = "exits the program"
def autocomplete(shell, line, text, state):
return None
def help(shell):
pass
def convert_to_parsable(obj):
if isinstance(obj, dict):
new_obj = {}
for key in obj:
if isinstance(key, tuple):
new_obj['/'.join(key)] = obj[key]
elif isinstance(key, str):
new_obj[key] = obj[key]
elif isinstance(obj, list):
new_obj = []
for val in obj:
if isinstance(val, tuple):
new_obj.append('/'.join(val))
elif isinstance(val, str):
new_obj.append(val)
else:
new_obj = []
return new_obj
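# Illustrative example (not in the original source): tuple keys and values are
# flattened with '/', e.g. convert_to_parsable({('DOMAIN', 'user'): 'hash'})
# returns {'DOMAIN/user': 'hash'}.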
def execute(shell, cmd):
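    """Serializes creds, domain info, jobs, and session state to restore.json, then exits."""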
restore_map = {}
restore_map['creds'] = convert_to_parsable(shell.creds)
restore_map['creds_keys'] = convert_to_parsable(shell.creds_keys)
restore_map['domain_info'] = convert_to_parsable(shell.domain_info)
restore_map['jobs'] = []
for j in shell.jobs:
new_j = {}
new_j['results'] = j.results
new_j['id'] = j.id
new_j['session_id'] = -1
new_j['completed'] = j.completed
new_j['ip'] = j.ip
new_j['name'] = j.name
restore_map['jobs'].append(new_j)
restore_map['sessions'] = []
for s in [vars(session) for stager in shell.stagers for session in stager.sessions]:
new_s = dict(s)
try:
new_s.pop('stager')
new_s.pop('shell')
        except KeyError:
pass
new_s['status'] = 0
restore_map['sessions'].append(new_s)
    import json
    with open('restore.json', 'w') as restore:
        restore.write(json.dumps(restore_map) + "\n")
import sys
sys.exit(0)
``` |
{
"source": "jmaggio14/imagepypelines_image",
"score": 3
} |
#### File: imagepypelines_image/imagepypelines_image/Resize.py
```python
from .util import dtype_type_check,\
interpolation_type_check,\
channel_type_check,\
get_cv2_interp_type
from .imports import import_opencv
from .blocks import ImageBlock
cv2 = import_opencv()
import numpy as np
import imagepypelines as ip
class Resize(ImageBlock):
"""splits images into separate component channels
Attributes:
w_scale_type(str): type of scaling used for image width, either
"proportional" or "absolute"
h_scale_type(str): type of scaling used for image height, either
"proportional" or "absolute"
h_param(int,float): vertical scale or absolute height to resize
image to
        w_param(int,float): horizontal scale or absolute width to resize
            image to
interp(str): interpolation type for resizing. One of
'nearest', 'linear', 'area', 'cubic', 'lanczos4'
Default Enforcement:
1) image
type: np.ndarray
shapes: [(None,None,None),(None,None)]
notes: image must be ordered [height,width,channels]
Batch Size:
"each"
"""
def __init__(self, h=None, w=None, scale_h=None, scale_w=None, interp='nearest'):
"""Instantiates the object
Args:
            w(None,int): width to scale image to, must be None if scale_w is
                defined
            h(None,int): height to scale image to, must be None if scale_h is
                defined
scale_h(None,float): vertical scale for the image, must be None
if 'h' is defined
scale_w(None,float): horizontal scale for the image, must be None
if 'w' is defined
interp(str): interpolation type for image scaling, must be one of:
'nearest', 'linear', 'area', 'cubic', 'lanczos4'
"""
super().__init__(order="HWC")
# make sure either h or scale_h is defined
if (h is None) and (scale_h is None):
raise ValueError("'h' or 'scale_h' must be defined")
# make sure either w or scale_w is defined
if (w is None) and (scale_w is None):
raise ValueError("'w' or 'scale_w' must be defined")
# make sure only h or scale_h is defined
if (not h is None) and (not scale_h is None):
raise ValueError("only 'h' or 'scale_h' can be defined")
# make sure only w or scale_w is defined
if (not w is None) and (not scale_w is None):
raise ValueError("only 'w' or 'scale_w' can be defined")
# set w instance variables
if w is None:
self.w_scale_type = 'proportional'
self.w_param = scale_w
else:
self.w_scale_type = 'absolute'
self.w_param = w
# set h instance variables
if h is None:
self.h_scale_type = 'proportional'
self.h_param = scale_h
else:
self.h_scale_type = 'absolute'
self.h_param = h
self.__cv2_interp = get_cv2_interp_type(interp)
self.interp = interp
self.enforce('image', np.ndarray, [(None,None,None),(None,None)])
def process(self, image):
"""Resizes the image to the specified dimensions
Args:
image(np.ndarray): image to resize, must be shaped
[height,width,channels]
Returns:
np.ndarray: resized image
"""
# get h dimension
if self.h_scale_type == "proportional":
            new_h = int(round(self.h_param * image.shape[0]))
else:
new_h = self.h_param
# get w dimension
if self.w_scale_type == "proportional":
            new_w = int(round(self.w_param * image.shape[1]))
else:
new_w = self.w_param
        return cv2.resize(image, (new_w, new_h), interpolation=self.__cv2_interp)
# END
```
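A minimal usage sketch for the block above, calling `process()` directly for illustration (in normal use the block would sit inside an imagepypelines pipeline; direct invocation is an assumption here):

```python
import numpy as np

# Hypothetical standalone use of the Resize block defined above.
resize = Resize(h=240, w=320, interp='linear')
img = np.zeros((480, 640, 3), dtype=np.uint8)   # dummy HWC image
small = resize.process(img)
print(small.shape)  # expected: (240, 320, 3)
```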
#### File: imagepypelines_image/imagepypelines_image/util.py
```python
from .constants import CV2_INTERPOLATION_TYPES, NUMPY_TYPES
from imagepypelines import BlockError
from .imports import import_opencv
cv2 = import_opencv()
"""
Helper functions that contain canned tests or checks that we will run
frequently
"""
INTERPS = {
'nearest':cv2.INTER_NEAREST,
'linear':cv2.INTER_LINEAR,
'area':cv2.INTER_AREA,
'cubic':cv2.INTER_CUBIC,
'lanczos4':cv2.INTER_LANCZOS4,
}
################################################################################
def interpolation_type_check(interp):
"""
checks to see if the interpolation type is one of the acceptable
values specified in opencv, otherwise raises a BlockError
"""
if interp not in CV2_INTERPOLATION_TYPES:
raise BlockError("Invalid interpolation type")
return True
################################################################################
def dtype_type_check(dtype):
"""
    checks to see if the given dtype is one of the acceptable numpy types,
    otherwise raises a BlockError
"""
if dtype not in NUMPY_TYPES:
raise BlockError("Invalid Numpy type")
return True
################################################################################
def channel_type_check(channel_type):
"""checks if the channel_type is one of ("channels_first","channels_last"),
otherwise raises a BlockError"""
if channel_type not in ("channels_first","channels_last"):
raise BlockError("invalid channel type, must be one of ('channels_first','channels_last')")
################################################################################
def get_cv2_interp_type(interp):
"""fetches the cv2 constant associated with the string interpolation type"""
if interp in INTERPS:
return INTERPS[interp]
raise RuntimeError(f"no interpolation type {interp}, must be one of f{INTERPS.keys()}")
# END
``` |
{
"source": "jmaggio14/jefftools",
"score": 2
} |
#### File: imagepypelines/core/imports.py
```python
import sys
from ..Logger import MASTER_LOGGER
def import_tensorflow():
"""Direct tensorflow imports are discouraged for imagepypelines developers
because it is not automatically installed alongside imagepypelines, and
therefore may cause confusing errors to users.
This function will check if tensorflow is installed and import it if
possible. If tensorflow is not importable, it will print out installation
instructions.
Returns:
module: module reference to tensorflow
"""
try:
import tensorflow as tf
except ImportError:
MASTER_LOGGER.error("tensorflow must be installed!")
MASTER_LOGGER.error("'pip install tensorflow --user' (for CPU only)")
MASTER_LOGGER.error("'pip install tensorflow-gpu --user' (for CPU+GPU)")
MASTER_LOGGER.error("see README: https://github.com/jmaggio14/imagepypelines")
sys.exit(1)
return tf
def import_opencv():
"""Direct opencv imports are discouraged for imagepypelines developers
because it is not automatically installed alongside imagepypelines, and
therefore may cause confusing errors to users.
This function will check if opencv is installed and import it if
possible. If opencv is not importable, it will print out installation
instructions.
Returns:
module: module reference to opencv
"""
try:
import cv2
except ImportError:
MASTER_LOGGER.error("imagepypelines requires opencv to be installed separately!")
MASTER_LOGGER.error("see README: https://github.com/jmaggio14/imagepypelines")
sys.exit(1)
return cv2
```
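A short sketch of how the guarded import helpers above are meant to be used (the import path `imagepypelines.core.imports` is taken from the file header and assumed to be importable as shown):

```python
# Hypothetical usage: fetch OpenCV through the guarded helper instead of
# importing cv2 directly. If OpenCV is missing, the helper logs install
# instructions and exits rather than raising a confusing ImportError.
from imagepypelines.core.imports import import_opencv

cv2 = import_opencv()
print(cv2.__version__)
```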
#### File: imagepypelines/core/pipeline_tools.py
```python
from .block_subclasses import FuncBlock
################################################################################
def blockify(kwargs={},
batch_type="each",
types=None,
shapes=None,
containers=None):
"""decorator which converts a normal function into a un-trainable
block which can be added to a pipeline. The function can still be used
as normal after blockification (the __call__ method is setup such that
unfettered access to the function is permitted)
Args:
        kwargs: hardcoded keyword arguments for the function; these arguments
            will not have to be passed in by the caller. Defaults to {}
types(:obj:`dict`,None): Dictionary of input types. If arg doesn't
exist as a key, or if the value is None, then no checking is
done. If not provided, then will default to args as keys, None
as values.
shapes(:obj:`dict`,None): Dictionary of input shapes. If arg doesn't
exist as a key, or if the value is None, then no checking is
done. If not provided, then will default to args as keys, None
as values.
containers(:obj:`dict`,None): Dictionary of input containers. If arg
doesn't exist as a key, or if the value is None, then no
checking is done. If not provided, then will default to args as
keys, None as values.
*if batch_type is "each", then the container is irrelevant and can
be safely ignored!*
batch_type(str, int): the type of the batch processing for your
process function. Either "all" or "each". `all` means that all
argument data will be passed into to your function at once,
`each` means that each argument datum will be passed in
individually
Example:
>>> import imagepypelines as ip
>>>
>>> @ip.blockify( kwargs=dict(value=10) )
>>> def add_value(datum, value):
... return datum + value
>>>
>>> type(add_value)
<class 'FuncBlock'>
"""
def _blockify(func):
return FuncBlock(func,
kwargs,
batch_type=batch_type,
types=types,
shapes=shapes,
containers=containers)
return _blockify
```
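As the docstring notes, the decorated function stays callable after blockification. A small sketch, assuming the pass-through `__call__` behaviour described above:

```python
import imagepypelines as ip

@ip.blockify(kwargs=dict(value=10))
def add_value(datum, value):
    return datum + value

# The object is now a FuncBlock usable in a pipeline, but per the docstring
# it can still be invoked like a plain function (the exact pass-through call
# signature is an assumption here).
print(type(add_value).__name__)   # FuncBlock
print(add_value(5, value=10))     # expected: 15
```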
#### File: jefftools/imagepypelines/__init__.py
```python
import time
from uuid import uuid4
import os
import pkg_resources
import sys
init_time = time.time()
"""unix time initiatization time for this imagepypelines session"""
session_uuid = uuid4().hex
"""a universally unique id for this imagepypelines session"""
# ----------- Setup the Root ImagePypelines Logger ---------------
# import the master logger
from .Logger import MASTER_LOGGER, get_logger, ImagepypelinesLogger
# import master logger convienence function
# NOTE: import logging constants our users can modify to change color behavior
# ---------- import imagepypelines ----------
from .version_info import *
from .core import *
# ---------- import plugins ----------
from collections import OrderedDict
LOADED_PLUGINS = OrderedDict()
"""module level OrderedDict that contains the all loaded modules in the order in
which they were loaded"""
# define a function to load all the plugins so it's easier to keep the namespace
# clean
def load_plugins():
"""Load all installed plugins to the imagepypelines namespace"""
# load in all installed python packages with our plugin entry_point
required_objects = []
plugins = {
entry_point.name: entry_point.load()
for entry_point
in pkg_resources.iter_entry_points('imagepypelines.plugins')
}
for plugin_name in sorted( plugins.keys() ):
ip_module = sys.modules[__name__]
plugin_module = plugins[plugin_name]
# check that the module has the required objects
for req in required_objects:
if not hasattr(plugin_module, req):
raise PluginError(
"Plugin '%s' doesn't meet requirements" % plugin_name)
elif not callable( getattr(plugin_module, req) ):
raise PluginError(
"Plugin '%s' doesn't meet requirements" % plugin_name)
MASTER_LOGGER.warning(
"loading plugin '{0}' - it will be available as imagepypelines.{0}"\
.format(plugin_name))
# add the plugin to the current namespace
setattr(ip_module, plugin_name, plugin_module)
# add the plugin name to a global list for debugging
LOADED_PLUGINS[plugin_name] = plugin_module
# load all of our plugins
load_plugins()
# define a function to check if a plugin is loaded
def require(plugin_name):
"""check to make sure the given plugin is loaded and raise an error if it
is not in the imagepypelines namespace
"""
import sys
ip_module = sys.modules[__name__]
if not hasattr(ip_module, plugin_name):
raise PluginError('unable to find required plugin "%s"' % plugin_name)
# ---------- delete namespace pollutants ----------
del pkg_resources, os, uuid4, time, OrderedDict, sys
``` |
{
"source": "jmaggio14/physops",
"score": 3
} |
#### File: jmaggio14/physops/definitions.py
```python
import numpy as np
import physops
def one(x_grid,y_grid):
wavefront = np.ones(x_grid.shape)
return wavefront
def cyl(x_grid,y_grid,diameter):
radii = np.sqrt(x_grid**2 + y_grid**2)
wavefront = np.zeros(x_grid.shape)
wavefront[np.where(radii < diameter/2)] = 1
return wavefront
#
# def rect(x_grid,y_grid,width,height):
# tmp = np.zeros(x_grid.shape)
```
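A quick sketch exercising the aperture definitions above on a sample grid (pure NumPy; `cyl` is the function defined in this file):

```python
import numpy as np

x = np.linspace(-500, 500, 1000)
y = np.linspace(-500, 500, 1000)
x_grid, y_grid = np.meshgrid(x, y)

aperture = cyl(x_grid, y_grid, diameter=200)  # 1 inside the disk, 0 outside
print(aperture.shape)        # (1000, 1000)
print(int(aperture.sum()))   # roughly pi * 100**2 samples inside the aperture
```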
#### File: jmaggio14/physops/Wavefront.py
```python
import numpy as np
import physops
import scipy
class Wavefront(np.ndarray):
def __new__(cls,definition=None,title="wavefront",wavelength=None,size=(1000,1000),x_range=None,y_range=None,definition_kwargs={}):
        if x_range is None:
            x_range = np.linspace(-size[0]//2, size[0]//2, size[0])
        if y_range is None:
            y_range = np.linspace(-size[1]//2, size[1]//2, size[1])
        if wavelength is None:
            wavelength = physops.DEFAULT_WAVELENGTH
x_grid,y_grid = np.meshgrid(x_range,y_range)
wavefront = np.asarray(definition(x_grid,y_grid,**definition_kwargs),np.complex128)
obj = np.asarray(wavefront).view(cls)
obj.x_grid = x_grid
obj.y_grid = y_grid
obj.wavelength = wavelength
obj.definition = definition
obj.definition_kwargs = definition_kwargs
obj.title = title
return obj
def __array_finalize__(self,obj):
if obj is None:
return None
self.x_grid = getattr(obj,"x_grid",None)
self.y_grid = getattr(obj,"y_grid",None)
self.wavelength = getattr(obj,"wavelength",None)
self.definition = getattr(obj,"definition",None)
self.definition_kwargs = getattr(obj,"definition_kwargs",None)
self.title = getattr(obj,"title",None)
def __array_wrap__(self,out_arr,context=None):
return np.ndarray.__array_wrap__(self,out_arr,context)
def __gt__(self,z):
"""
        overloaded operator to propagate the wavefront using the
        Rayleigh-Sommerfeld impulse response
"""
if isinstance(z,(int,float)):
impulse_response = physops.rayleighSommerfeld(x_grid=self.x_grid,y_grid=self.y_grid,z=z,wavelength=self.wavelength)
new_wavefront = scipy.signal.convolve2d(self,impulse_response,mode="same")
new_wavefront.title += ">{0}".format(z)
return new_wavefront
def __rrshift__(self,z):
if isinstance(z,(int,float)):
            impulse_response = physops.fresnel(x_grid=self.x_grid, y_grid=self.y_grid, z=z, wavelength=self.wavelength)
new_wavefront = scipy.signal.convolve2d(self,impulse_response,mode="same")
new_wavefront.title += ">>{0}".format(z)
return new_wavefront
# def __or__(self,z):
#
``` |
{
"source": "jmagic/magic_dxlink_configurator",
"score": 3
} |
#### File: magic_dxlink_configurator/scripts/auto_update.py
```python
from requests import get as r_get
from distutils.version import StrictVersion
from threading import Thread
from pydispatch import dispatcher
class AutoUpdate(Thread):
def __init__(self, server_url="http://magicsoftware.ornear.com", program_name="", program_version=""):
Thread.__init__(self)
self.server_url = server_url
self.program_name = program_name # "Magic Amino Configurator"
self.program_version = program_version # "v0.0.3"
def run(self):
"""Checks on line for updates"""
try:
update_url = f'{self.server_url}/updates/{"_".join(self.program_name.lower().split())}/current_version'
print('update url: ', update_url)
webpage = r_get(update_url)
# webpage.raise_for_status()
online_version = webpage.text
if StrictVersion(online_version[1:]) > StrictVersion(self.program_version[1:]):
# print('requires update')
url = f'{self.server_url}/updates/{"_".join(self.program_name.lower().split())}'
self.send(message='update', url=url)
else:
# print('no update')
# self.send(message='no update')
return
except Exception as error:
print("Error in update_check: ", error)
return
def send(self, message, url=None):
"""Sends the result of the check"""
dispatcher.send(signal="Software Update", message=message, url=url)
def main():
test = AutoUpdate(server_url="http://magicsoftware.ornear.com", program_name="Magic DXLink Configurator", program_version="v0.0.1")
test.start()
test.join()
if __name__ == '__main__':
main()
```
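For reference, the update URL construction and version comparison used above reduce to the following (note that `distutils.version.StrictVersion` is deprecated in newer Pythons; `packaging.version` is the usual replacement, mentioned here only as an aside):

```python
from distutils.version import StrictVersion

server_url = "http://magicsoftware.ornear.com"
program_name = "Magic DXLink Configurator"

update_url = f'{server_url}/updates/{"_".join(program_name.lower().split())}/current_version'
print(update_url)
# http://magicsoftware.ornear.com/updates/magic_dxlink_configurator/current_version

# Leading "v" is stripped before comparing, as in run() above.
print(StrictVersion("0.0.3") > StrictVersion("0.0.1"))  # True -> update available
```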
#### File: magic_dxlink_configurator/scripts/telnet_class.py
```python
import telnetlib
from pydispatch import dispatcher
from threading import Thread
import datetime
import subprocess
# import serial
# import io
import time
from dataclasses import dataclass, field
@dataclass
class MSEValues:
    report_time: datetime.datetime = field(default_factory=datetime.datetime.now)
mse: list = field(default_factory=list)
obj: object = None
class Telnetjobs(Thread):
def __init__(self, parent, queue):
Thread.__init__(self)
self.queue = queue
self.parent = parent
def run(self):
while True:
# gets the job from the queue
job = self.queue.get()
getattr(self, job[0])(job)
# send a signal to the queue that the job is done
self.queue.task_done()
########################################################################
def establish_telnet(self, ip_address):
"""Creates the telnet instance"""
telnet_session = telnetlib.Telnet(ip_address, 23, 5)
telnet_session.set_option_negotiation_callback(self.call_back)
return telnet_session
def call_back(self, sock, cmd, opt):
""" Turns on server side echoing"""
if opt == telnetlib.ECHO and cmd in (telnetlib.WILL, telnetlib.WONT):
sock.sendall(telnetlib.IAC + telnetlib.DO + telnetlib.ECHO)
def get_config_info(self, job):
"""Gets serial number, firmware from device"""
obj = job[1]
self.set_status(obj, "Connecting")
try:
telnet_session = self.establish_telnet(obj.ip_address)
telnet_session.read_until(b'Welcome to', int(job[2]))
intro = telnet_session.read_until(b'>', int(job[2])).split()
obj.model = intro[0].decode()
obj.firmware = intro[1].decode()
telnet_session.write(b'get sn \r')
telnet_session.read_until(b'Number:', int(job[2]))
obj.serial = telnet_session.read_until(b'>', int(job[2])).split()[0].decode()
telnet_session.write(b'get device \r')
telnet_session.read_until(b'Value:', int(job[2]))
obj.device = telnet_session.read_until(b'>', int(job[2])).split()[0].decode()
telnet_session.write(b'get ip \r')
telnet_session.read_until(b'HostName:', int(job[2]))
ip_host = telnet_session.read_until(b'Type:').decode().split()
if len(ip_host) == 1:
obj.hostname = ''
else:
obj.hostname = ' '.join(ip_host[:-1])
ip_type = telnet_session.read_until(b'IP').split()
if ip_type[0] == b"Static":
obj.ip_type = "s"
if ip_type[0] == b"DHCP":
obj.ip_type = "d"
ip_subnet = telnet_session.read_until(b'Gateway').split()
obj.subnet = ip_subnet[-2].decode()
ip_gateway = telnet_session.read_until(b'MAC').split()
obj.gateway = ip_gateway[-2].decode()
ip_mac = telnet_session.read_until(b'>', int(job[2])).split()
obj.mac_address = ip_mac[1].decode()
self.get_connection(obj, telnet_session, int(job[2]))
telnet_session.write(b'exit\r')
telnet_session.close()
self.set_status(obj, "Success")
        except Exception as error:
self.error_processing(obj, error)
def reset_factory(self, job):
"""Sets unit to factory defaults"""
obj = job[1]
self.set_status(obj, "Connecting")
try:
telnet_session = self.establish_telnet(obj.ip_address)
telnet_session.read_until(b'>', int(job[2]))
telnet_session.write(b'reset factory\r')
telnet_session.read_until(b'>', int(job[2]))
telnet_session.close()
self.set_status(obj, "Success")
except IOError as error:
self.error_processing(obj, error)
def set_watchdog(self, job):
"""Enable or disables watchdog"""
obj = job[1]
enable = job[3]
self.set_status(obj, "Connecting")
try:
telnet_session = self.establish_telnet(obj.ip_address)
telnet_session.read_until(b'>', int(job[2]))
if enable:
telnet_session.write(b'WD ON\r')
else:
telnet_session.write(b'WD OFF\r')
telnet_session.read_until(b'>', int(job[2]))
telnet_session.write(b'reboot \r')
telnet_session.read_until(b'Rebooting....', int(job[2]))
telnet_session.close()
self.set_status(obj, "Success")
except IOError as error:
self.error_processing(obj, error)
def reboot(self, job):
obj = job[1]
self.set_status(obj, "Connecting")
try:
telnet_session = self.establish_telnet(obj.ip_address)
telnet_session.read_until(b'>', int(job[2]))
telnet_session.write(b'reboot\r')
telnet_session.read_until(b'Rebooting....', int(job[2]))
telnet_session.close()
self.set_status(obj, "Success")
except Exception as error:
self.error_processing(obj, error)
def set_device_config(self, job):
# print job
obj = job[1]
delay = int(job[2])
setdhcp = job[3]
hostname = job[4]
# ip_org = job[5]
ip_new = job[6]
subnet = job[7]
gateway = job[8]
conn_type = job[9]
master_number = job[10]
master = job[11]
device = job[12]
master_user = job[13]
master_password = job[14]
self.set_status(obj, "Connecting")
try:
if setdhcp:
telnet_session = self.establish_telnet(obj.ip_address)
telnet_session.read_until(b'>', delay)
telnet_session.write(b'set ip \r')
telnet_session.read_until(b'Name:', delay)
telnet_session.write(hostname.encode('ascii') + b'\r')
telnet_session.read_until(b'Enter:', delay)
telnet_session.write(b'd\r')
telnet_session.read_until(b'Enter', delay)
telnet_session.write(b'y\r')
telnet_session.read_until(b'>', delay)
else:
telnet_session = self.establish_telnet(obj.ip_address)
telnet_session.read_until(b'>', delay)
telnet_session.write(b'set ip \r')
telnet_session.read_until(b'Name:', delay)
telnet_session.write(hostname.encode('ascii') + b'\r')
telnet_session.read_until(b'Enter:', delay)
telnet_session.write(b's\r')
telnet_session.read_until(b'Address:', delay)
telnet_session.write(ip_new.encode('ascii') + b'\r')
telnet_session.read_until(b'Mask:', delay)
telnet_session.write(subnet.encode('ascii') + b'\r')
telnet_session.read_until(b'IP:', delay)
telnet_session.write(gateway.encode('ascii') + b'\r')
telnet_session.read_until(b'Enter ->', delay)
telnet_session.write(b'y\r')
telnet_session.read_until(b'settings.', delay)
telnet_session.read_until(b'>', delay)
if conn_type == "TCP" or conn_type == "UDP":
telnet_session.write(b'set connection\r')
telnet_session.read_until(b'Enter:', delay)
if conn_type == "TCP":
telnet_session.write(b't\r')
else:
telnet_session.write(b'u\r')
telnet_session.read_until(b'URL:', delay)
telnet_session.write(master.encode('ascii') + b'\r')
telnet_session.read_until(b'Port:', delay)
telnet_session.write(b'\r')
telnet_session.read_until(b'User:', delay)
if master_user == '':
telnet_session.write(b'\r')
else:
telnet_session.write(master_user.encode('ascii') + b'\r')
telnet_session.read_until(b'Password:', delay)
if master_password == '':
telnet_session.write(b'\r')
telnet_session.read_until(b'Password:', delay)
telnet_session.write(b'\r')
else:
telnet_session.write(master_password.encode('ascii') + b'\r')
telnet_session.read_until(b'Password:', delay)
telnet_session.write(master_password.encode('ascii') + b'\r')
telnet_session.read_until(b'Enter ->', delay)
telnet_session.write(b'y\r')
telnet_session.read_until(b'written.', delay)
telnet_session.read_until(b'>', delay)
telnet_session.write('set device {}'.format(device).encode('ascii') + b'\r')
telnet_session.read_until(b'device', delay)
telnet_session.read_until(b'>', delay)
telnet_session.write(b'reboot \r')
telnet_session.read_until(b'Rebooting....', delay)
telnet_session.close()
if conn_type == "AUTO":
telnet_session.write(b'set connection\r')
telnet_session.read_until(b'Enter:', delay)
telnet_session.write(b'a\r')
telnet_session.read_until(b'Number:', delay)
telnet_session.write(master_number.encode('ascii') + b'\r')
telnet_session.read_until(b'Port:', delay)
telnet_session.write(b'\r')
telnet_session.read_until(b'User:', delay)
if master_user == '':
telnet_session.write(b'\r')
else:
telnet_session.write(master_user.encode('ascii') + b'\r')
telnet_session.read_until(b'Password:', delay)
if master_password == '':
telnet_session.write(b'\r')
telnet_session.read_until(b'Password:', delay)
telnet_session.write(b'\r')
else:
telnet_session.write(master_password.encode('ascii') + b'\r')
telnet_session.read_until(b'Password:', delay)
telnet_session.write(master_password.encode('ascii') + b'\r')
telnet_session.read_until(b'Enter ->', delay)
telnet_session.write(b'y\r')
telnet_session.read_until(b'written.', delay)
telnet_session.read_until(b'>', delay)
telnet_session.write('set device {}'.format(device).encode('ascii') + b'\r')
telnet_session.read_until(b'device', delay)
telnet_session.read_until(b'>', delay)
telnet_session.write(b'reboot\r')
telnet_session.read_until(b'Rebooting....', delay)
telnet_session.close()
if conn_type == "NDP":
telnet_session.write(b'set connection\r')
telnet_session.read_until(b'Enter:', delay)
telnet_session.write(b'n\r')
telnet_session.read_until(b'Port:', delay)
telnet_session.write(b'\r')
telnet_session.read_until(b'User:', delay)
if master_user == '':
telnet_session.write(b'\r')
else:
telnet_session.write(master_user.encode('ascii') + b'\r')
telnet_session.read_until(b'Password:', delay)
if master_password == '':
telnet_session.write(b'\r')
telnet_session.read_until(b'Password:', delay)
telnet_session.write(b'\r')
else:
telnet_session.write(master_password.encode('ascii') + b'\r')
telnet_session.read_until(b'Password:', delay)
telnet_session.write(master_password.encode('ascii') + b'\r')
telnet_session.read_until(b'Enter ->', delay)
telnet_session.write(b'y\r')
telnet_session.read_until(b'written.', delay)
telnet_session.read_until(b'>', delay)
                telnet_session.write('set device {}'.format(device).encode('ascii') + b'\r')
telnet_session.read_until(b'device', delay)
telnet_session.read_until(b'>', delay)
telnet_session.write(b'reboot\r')
telnet_session.read_until(b'Rebooting....', delay)
telnet_session.close()
self.set_status(obj, "Success")
except Exception as error:
self.error_processing(obj, error)
def factory_av(self, job):
"""Sets unit audio visual to factory defaults"""
obj = job[1]
self.set_status(obj, "Connecting")
try:
telnet_session = self.establish_telnet(obj.ip_address)
telnet_session.read_until(b'>', int(job[2]))
self.get_connection(obj, telnet_session, int(job[2]))
command = f"send_command {obj.device}:1:{obj.system},\"\'FACTORYAV\'\""
telnet_session.write(command.encode('ascii') + b'\r')
telnet_session.read_until(b'Sending', int(job[2]))
result_raw = telnet_session.read_until(b'>', int(job[2]))
if result_raw.split()[0] != b'command:':
raise Exception('Command not sent')
telnet_session.write(b'reboot \r')
telnet_session.read_until(b'Rebooting....', int(job[2]))
telnet_session.close()
self.set_status(obj, "Success")
except Exception as error:
self.error_processing(obj, error)
# def get_dipswitch(self, job):
# """Gets the dipswitch values"""
# obj = job[1]
# self.set_status(obj, "Connecting")
# try:
# telnet_session = self.establish_telnet(obj.ip_address)
# telnet_session.read_until(b'>', int(job[2]))
# telnet_session.write(b'dipswitch\r')
# telnet_session.read_until(b'=', int(job[2]))
# result = telnet_session.read_until(b'>', int(job[2]))
# for idx, item in enumerate(result.split()):
# if item == 'ON':
# obj.dipswitch[idx] = 1
# elif item == 'OFF':
# obj.dipswitch[idx] = 0
# else:
# obj.dipswitch[idx] = 2 # error
# self.set_status(obj, "Success")
# except Exception as error:
# self.error_processing(obj, error)
def multiple_send_command(self, job):
"""Sends multiple commands in a single session"""
obj = job[1]
command_list = job[3]
if obj.device == " ":
device = 0
else:
device = obj.device
if obj.system == " ":
system = 0
else:
system = obj.system
self.set_status(obj, "Connecting")
self.notify_send_command_window(obj)
# try:
telnet_session = self.establish_telnet(obj.ip_address)
telnet_session.read_until(b'>', int(job[2]))
total = len(command_list)
count = 0
error = 0
for command in command_list:
count += 1
output = f"send_command {device}:{command[1]}:{system},\"\'{command[0]}\'\""
telnet_session.write(output.encode('ascii') + b"\r")
result_raw = telnet_session.read_until(b'>', int(job[2]))
if result_raw.split()[0] != b'command:':
dispatcher.send(
signal="send_command result",
sender=((True, 'Sending ' + result_raw.decode()[:-1])))
self.set_status(
obj, ('Sent ' + str(count) + ' of ' + str(total)))
self.notify_send_command_window(obj)
else:
error += 1
dispatcher.send(signal="send_command result",
sender=((False, 'Failed to send command')))
telnet_session.close()
if not error:
self.set_status(obj, 'Success')
self.notify_send_command_window(obj)
else:
self.set_status(obj, 'Failed')
self.notify_send_command_window(obj)
# except Exception as error:
# self.error_processing(obj, error)
# self.notify_send_command_window(obj)
def send_command(self, job):
obj = job[1]
command_sent = job[3]
self.set_status(obj, "Connecting")
# try:
telnet_session = self.establish_telnet(obj.ip_address)
telnet_session.read_until(b'>', int(job[2]))
# self.get_connection(obj, telnet_session, int(job[2]))
command = command_sent.encode('ascii') + b"\r"
# print command
telnet_session.write(command)
telnet_session.read_until(b'Sending', int(job[2]))
result_raw = telnet_session.read_until(b'>', int(job[2]))
# print result_raw.split()
if result_raw.split()[0] != b'command:':
raise Exception('Command not sent')
else:
dispatcher.send(signal="send_command result",
sender=(('Sending ' + str(result_raw)[:-1])))
telnet_session.close()
self.set_status(obj, "Success")
self.notify_send_command_window(obj)
# except Exception as error:
# self.error_processing(obj, error)
def turn_on_leds(self, job):
"""Turns on LEDs"""
obj = job[1]
self.set_status(obj, "Connecting")
try:
telnet_session = self.establish_telnet(obj.ip_address)
telnet_session.read_until(b'>', int(job[2]))
telnet_session.write(b'led on \r')
telnet_session.read_until(b'ON', int(job[2]))
telnet_session.close()
self.set_status(obj, "Success")
except Exception as error:
self.error_processing(obj, error)
def turn_off_leds(self, job):
"""Turns off leds"""
obj = job[1]
self.set_status(obj, "Connecting")
try:
telnet_session = self.establish_telnet(obj.ip_address)
telnet_session.read_until(b'>', int(job[2]))
telnet_session.write(b'led off \r')
telnet_session.read_until(b'OFF', int(job[2]))
telnet_session.close()
self.set_status(obj, "Success")
except Exception as error:
self.error_processing(obj, error)
def get_dxlink_mse(self, job):
"""Gathers MSE values"""
# print('in get dxlink mse')
obj = job[1]
self.set_status(obj, "Connecting")
try:
telnet_session = self.establish_telnet(obj.ip_address)
telnet_session.read_until(b'>', 2)
# telnet_session.read_very_eager()
self.set_status(obj, "MSE")
while obj.mac_address in self.parent.mse_active_list:
my_values = MSEValues(obj=obj)
telnet_session.write(b'show vs100 stats \r')
telnet_session.read_until(b'MSE(db)')
stats = telnet_session.read_until(b'VS100').split()
for i in range(len(stats)):
if stats[i] == b"ChA:":
my_values.mse.append(int(stats[i + 1][:-1].decode()))
my_values.mse.append(int(stats[i + 3][:-1].decode()))
my_values.mse.append(int(stats[i + 5][:-1].decode()))
my_values.mse.append(int(stats[i + 7].decode()))
if my_values.mse != []:
dispatcher.send(signal="Incoming MSE", data=my_values)
telnet_session.read_until(b'>', 2)
self.set_status(obj, "Success")
except Exception as error:
time.sleep(2) # wait for gui to start
# print('Telnet MSE error: ', error)
dispatcher.send(signal="MSE error", sender=obj.mac_address)
self.set_status(obj, "Failed")
def ping(self, job):
"""Ping devices constantly for troubleshooting"""
obj = job[1]
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        # universal_newlines=True so readline() yields str for the comparisons below
        ping = subprocess.Popen(['ping', obj.ip_address, '-t'], shell=False,
                                stdout=subprocess.PIPE, startupinfo=startupinfo,
                                universal_newlines=True)
while self.parent.ping_active:
for line in iter(ping.stdout.readline, ''):
result = line.rstrip()
if len(result) < 10:
continue
if result == '':
continue
elif result == '\n':
continue
elif result[:7] == 'Pinging':
continue
elif result.split()[-1] == 'unreachable.' or result == 'Request timed out.':
success = 'No'
ms_delay = "N/A"
data = (obj, [datetime.datetime.now(), ms_delay, success])
if self.parent.ping_active:
dispatcher.send(signal="Incoming Ping", sender=data)
elif result.split()[-1][:3] == 'TTL':
temp = result.split()[-2]
ms_delay = ''.join([str(s) for s in temp if s.isdigit()])
success = 'Yes'
data = (obj, [datetime.datetime.now(), ms_delay, success])
if self.parent.ping_active:
dispatcher.send(signal="Incoming Ping", sender=data)
else:
success = 'No'
ms_delay = "N/A"
data = (obj, [datetime.datetime.now(), ms_delay, success])
if self.parent.ping_active:
dispatcher.send(signal="Incoming Ping", sender=data)
if not self.parent.ping_active:
break
ping.kill()
def get_connection(self, obj, session, timeout):
""" Function to get connection information """
session.write(b'get connection \r')
session.read_until(b'Mode:', timeout)
connection_info = session.read_until(b'>', timeout).split()
if connection_info[0] == b'NDP' or connection_info[0] == b'AUTO':
if connection_info[7] == b'(n/a)' or connection_info[3] == b'(not':
obj.master = 'not connected'
obj.system = '0'
else:
obj.master = connection_info[6].decode()
obj.system = connection_info[3].decode()
if connection_info[0] == b'TCP' or connection_info[0] == b'UDP':
if connection_info[8] == b'(n/a)':
obj.master = 'not connected'
obj.system = '0'
else:
obj.master = connection_info[7].decode()
obj.system = connection_info[4].decode()
def set_status(self, obj, status):
"""Updates progress in main"""
data = (obj, status)
dispatcher.send(signal="Status Update", sender=data)
def notify_send_command_window(self, obj):
"""updates send_command window"""
dispatcher.send(signal="Update Window", sender=obj)
def error_processing(self, obj, error):
"""Send notification of error to main"""
if str(error) == 'Not an AMX device':
data = (obj, 'Warning, not a recognized dxlink device')
else:
data = (obj, str(error))
dispatcher.send(signal="Collect Errors", sender=data)
``` |
{
"source": "jmagine/rf-selection",
"score": 2
} |
#### File: rf-selection/rf/spec_utils.py
```python
import matplotlib.pyplot as plt
import numpy as np
import struct
import time
from scipy import signal
'''[Global vars]------------------------------------------------------------'''
FILE_IQ_SAMP = 'rx.dat'
#FILE_IQ_SAMP = 'cap_30000000_2415000000.0_48.0.dat'
FREQ_SAMP = 25e6
N_SAMP = 3000
PLOT = True
t_start = time.time()
'''[read_binary]---------------------------------------------------------------
Read binary IQ samples from file
filename - path to file
n_samples - number of samples to read
return - array containing complex IQ samples
----------------------------------------------------------------------------'''
def read_binary(filename, n_samples):
print('[%f][rb] start' % (time.time() - t_start))
s = np.fromfile(filename, count=n_samples, dtype=np.complex64)
print('[%f][rb] complete' % (time.time() - t_start))
return s
'''[spectrogram]---------------------------------------------------------------
Return a spectrogram of IQ samples
samples - IQ samples
fs - sampling frequency
return - frequencies, times, power array indexed over freqs and times
----------------------------------------------------------------------------'''
def spectrogram(samples, fs):
print('[%f][sg] start' % (time.time() - t_start))
f,t,sxx = signal.spectrogram(samples, fs=fs, return_onesided=False)
print('[%f][sg] complete' % (time.time() - t_start))
return f, t, sxx
'''[analyze]-------------------------------------------------------------------
Analysis function for spectrum
f - frequencies
t - times
sxx - powers indexed by freqs and times
----------------------------------------------------------------------------'''
def analyze(f, t, sxx, plot):
print('[%f][a ] start' % (time.time() - t_start))
sxx_binary = sxx.copy()
sxx_max = sxx.copy()
thresh = np.percentile(sxx, 95)
print(thresh)
#find min/max values in each time instance
print(np.argmax(sxx, axis=0))
print(np.max(sxx, axis=0))
print(np.argmin(sxx, axis=0))
print(np.min(sxx, axis=0))
#TODO redundant
#determine min and max freqs for each time step
for f_i in range(len(sxx)):
max_val = -1e9
max_t = -1
for t_i in range(len(sxx[f_i])):
if sxx[f_i][t_i] > max_val:
max_val = sxx[f_i][t_i]
max_t = t[t_i]
if max_val > thresh:
print("f: %E max_t: %E max_val: %E" % (f[f_i], max_t, max_val))
for i in range(len(sxx[f_i])):
sxx_binary[f_i][i] = 1
for i in range(len(sxx[f_i])):
sxx_max[f_i][i] = max_val
print('[%f][a ] complete' % (time.time() - t_start))
#plot spectrogram
if plot:
plt.figure()
plt.pcolormesh(np.fft.fftshift(f), t, np.transpose(np.fft.fftshift(sxx, axes=0)))
plt.ylabel("Time")
plt.xlabel("Freq")
plt.figure()
plt.pcolormesh(np.fft.fftshift(f), t, np.transpose(np.fft.fftshift(sxx_binary, axes=0)))
plt.ylabel("Time")
plt.xlabel("Freq")
plt.figure()
plt.pcolormesh(np.fft.fftshift(f), t, np.transpose(np.fft.fftshift(sxx_max, axes=0)))
plt.ylabel("Time")
plt.xlabel("Freq")
plt.show()
s = read_binary(FILE_IQ_SAMP, N_SAMP)
#TODO add in a filter step where only blocks of samples are returned?
#s = filter_samples(s)
f, t, sxx = spectrogram(s, FREQ_SAMP)
analyze(f, t, sxx, PLOT)
```
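The occupancy test in `analyze()` reduces to comparing each frequency bin's peak power against a global percentile threshold. A self-contained sketch of that idea on synthetic data:

```python
import numpy as np

rng = np.random.default_rng(0)
sxx = rng.random((8, 16))             # fake power: 8 freq bins x 16 time steps
sxx[3] += 5.0                         # inject an "occupied" frequency bin

thresh = np.percentile(sxx, 95)
occupied = sxx.max(axis=1) > thresh   # per-bin: does any time step exceed it?
print(occupied)                       # only bin 3 should be flagged
```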
#### File: rf-selection/sim/env.py
```python
import math
import numpy as np
import random
from scipy.spatial import Voronoi, voronoi_plot_2d, KDTree
import sys
import time
import drone
import utils
import vis
#TODO keep track of where broadcasts are occuring
#TODO radio propagation model
#TODO point of interest model
BANDWIDTH = 1.0
class env():
def __init__(self, n_drones, p_bounds, M, F, v_max):
self.p_bounds = p_bounds
self.n_drones = n_drones
self.M = M
self.F = F
self.v_max = v_max
self.drn = []
#self.poi = []
#self.poi_active = []
self.tx = {}
self.bs = []
self.t = 0
def setup(self):
#self.bs = [bs.base_station([0,0])] #set up for multiple base stations in future work
#generate or load in situation, including drone positions, pois, freqs
'''
for i in range(self.n_drones):
x = random.uniform(self.p_bounds[0][0], self.p_bounds[0][1])
y = random.uniform(self.p_bounds[1][0], self.p_bounds[1][1])
self.drn.append(drone.drone(i, [x, y], 1))
#self.g.add_node(len(self.bs) + i, p=self.drn[i].pos)
'''
#for i in range(N_POI):
# self.poi.append(poi.poi([random.uniform(self.p_bounds[0][0], self.p_bounds[0][1]), random.uniform(self.p_bounds[1][0], self.p_bounds[1][1])], random.randint(0, 500), 500))
#sort pois by start time
#self.poi.sort(key=lambda x: x.t_start)
#random.seed(1)
self.gt = np.zeros((self.n_drones, 2))
'''
self.gt[0][0] = -200
self.gt[0][1] = -200
self.gt[1][0] = 100
self.gt[1][1] = -100
self.gt[2][0] = -100
self.gt[2][1] = 100
self.gt[3][0] = 100
self.gt[3][1] = 100
'''
'''
for i in range(self.n_drones):
#self.gt[i][0] = random.uniform(self.p_bounds[0][0], self.p_bounds[0][1])
#self.gt[i][1] = random.uniform(self.p_bounds[1][0], self.p_bounds[1][1])
self.gt[i][0] = np.clip(random.gauss(0, 150), self.p_bounds[0][0], self.p_bounds[0][1])
self.gt[i][1] = np.clip(random.gauss(0, 150), self.p_bounds[1][0], self.p_bounds[1][1])
'''
#line
self.gt[0][0] = 400
self.gt[0][1] = -400
self.gt[1][0] = 400
self.gt[1][1] = -300
self.gt[2][0] = 400
self.gt[2][1] = -200
self.gt[3][0] = 400
self.gt[3][1] = 400
#square
self.gt[4][0] = -400
self.gt[4][1] = 400
self.gt[5][0] = -400
self.gt[5][1] = 300
self.gt[6][0] = -300
self.gt[6][1] = 300
self.gt[7][0] = -300
self.gt[7][1] = 400
#for k in range(self.n_drones):
# print("\\addplot[color=green,mark=square] coordinates{(%.2f,%.2f)};" % (self.gt[k][0], self.gt[k][1]))
#'''
#drone trajectory init
self.init_q = np.zeros((self.n_drones, self.M, 2))
self.init_p = np.zeros((self.n_drones, self.M))
self.init_q[0][0][0] = -200
self.init_q[0][0][1] = -300
self.init_q[1][0][0] = -200
self.init_q[1][0][1] = -275
self.init_q[2][0][0] = -200
self.init_q[2][0][1] = -250
self.init_q[3][0][0] = -200
self.init_q[3][0][1] = -225
self.init_q[4][0][0] = -400
self.init_q[4][0][1] = -200
self.init_q[5][0][0] = -375
self.init_q[5][0][1] = -200
self.init_q[6][0][0] = -325
self.init_q[6][0][1] = -200
self.init_q[7][0][0] = -350
self.init_q[7][0][1] = -200
'''
self.init_q[0][0][0] = 400
self.init_q[0][0][1] = -400
self.init_q[1][0][0] = 300
self.init_q[1][0][1] = -400
self.init_q[2][0][0] = 200
self.init_q[2][0][1] = -400
self.init_q[3][0][0] = 100
self.init_q[3][0][1] = -400
self.init_q[4][0][0] = -400
self.init_q[4][0][1] = -400
self.init_q[5][0][0] = -300
self.init_q[5][0][1] = -400
self.init_q[6][0][0] = -200
self.init_q[6][0][1] = -400
self.init_q[7][0][0] = -100
self.init_q[7][0][1] = -400
'''
for i in range(self.n_drones):
#src = [random.uniform(self.p_bounds[0][0], self.p_bounds[0][1]),
# random.uniform(self.p_bounds[0][0], self.p_bounds[0][1])]
src = [self.init_q[i][0][0], self.init_q[i][0][1]]
#dest = [random.uniform(self.p_bounds[0][0], self.p_bounds[0][1]), random.uniform(self.p_bounds[0][0], self.p_bounds[0][1])]
dest = [self.gt[i][0], self.gt[i][1]]
self.traj_line(i, src, dest)
'''
self.init_q[i][0][0] = random.uniform(self.p_bounds[0][0], self.p_bounds[0][1])
self.init_q[i][0][1] = random.uniform(self.p_bounds[1][0], self.p_bounds[1][1])
dist = utils.dist(self.gt[i], self.init_q[i][0])
x_step = (self.gt[i][0] - self.init_q[i][0][0]) * self.v_max / dist
y_step = (self.gt[i][1] - self.init_q[i][0][1]) * self.v_max / dist
for n in range(self.M):
if n < dist / self.v_max:
self.init_q[i][n][0] = self.init_q[i][0][0] + x_step * n
self.init_q[i][n][1] = self.init_q[i][0][1] + y_step * n
else:
self.init_q[i][n][0] = self.gt[i][0]
self.init_q[i][n][1] = self.gt[i][1]
'''
#drone power init
for n in range(self.M):
for k in range(self.n_drones):
self.init_p[k][n] = 100
'''
dist = utils.dist(self.init_q[k][n], self.gt[k])
if dist > 0:
self.init_p[k][n] = min(1, 1.0 / dist)
else:
self.init_p[k][n] = 1
'''
#print(self.init_p, self.init_q)
print(self.gt)
def traj_line(self, i, src, dest):
#self.init_q[i][0][0] = src[0]
#self.init_q[i][0][1] = src[1]
dist = utils.dist(self.gt[i], self.init_q[i][0])
x_step = (dest[0] - src[0]) * self.v_max / dist
y_step = (dest[1] - src[1]) * self.v_max / dist
for n in range(self.M):
if n < dist / self.v_max:
self.init_q[i][n][0] = src[0] + x_step * n
self.init_q[i][n][1] = src[1] + y_step * n
else:
self.init_q[i][n][0] = dest[0]
self.init_q[i][n][1] = dest[1]
def tick(self):
t_start = time.time()
#update positions of all drones
for d in self.drn:
d.tick()
#every few ticks, print heartbeat
if self.t % 5 == 0:
print('[tick] t: %.3f' % (time.time() - t_start))
self.t += 1
'''
#evaluate capacity of network
#evaluate data sent through network
#check for poi expiration if applicable
#check for any new pois
while len(self.poi) > 0 and self.t >= self.poi[0].t_start:
#print(self.poi[0].t_start, self.t)
self.poi_active.append(self.poi[0])
del self.poi[0] #TODO replace with something more efficient than this if slow
#run assignment and network topology generation on new poi set
#every few ticks, update voronoi partitioning
if self.t % 5 == 0:
vor_points = [d.pos for d in self.drn] + [b.pos for b in self.bs]
vor, self.centroids = self.cvt(vor_points)
self.centroid_kdt = KDTree(self.centroids)
#plot the voronoi partitioning
for d in self.drn:
dist, idx = self.centroid_kdt.query(d.pos)
d.set_pos_ref(self.centroid_kdt.data[idx])
plotter.plot_vor(self.drn, self.centroids, vor)
print('[tick] t: %.3f' % (time.time() - t_start))
#print(self.poi)
#print(self.poi_active)
'''
#TODO move to utils
def cvt(self, vor_points):
t_start = time.time()
#mirror the points across bounds so the bounds become voronoi edges
points_center = vor_points
points_left = np.copy(points_center)
points_right = np.copy(points_center)
points_down = np.copy(points_center)
points_up = np.copy(points_center)
points_left[:, 0] = self.p_bounds[0][0] - (points_left[:, 0] - self.p_bounds[0][0])
points_right[:, 0] = self.p_bounds[0][1] + (self.p_bounds[0][1] - points_right[:, 0])
points_down[:, 1] = self.p_bounds[1][0] - (points_down[:, 1] - self.p_bounds[1][0])
points_up[:, 1] = self.p_bounds[1][1] + (self.p_bounds[1][1] - points_up[:, 1])
points = np.append(points_center,
np.append(np.append(points_left,
points_right,
axis=0),
np.append(points_down,
points_up,
axis=0),
axis=0),
axis=0)
vor = Voronoi(points)
# Filter regions and select corresponding points
regions = []
points_to_filter = [] # we'll need to gather points too
ind = np.arange(np.array(points).shape[0])
ind = np.expand_dims(ind, axis= 1)
for i,region in enumerate(vor.regions): # enumerate the regions
if not region: # nicer to skip the empty region altogether
continue
flag = True
for index in region:
if index == -1:
flag = False
break
else:
x = vor.vertices[index, 0]
y = vor.vertices[index, 1]
if not(self.p_bounds[0][0] - 0.01 <= x and x <= self.p_bounds[0][1] + 0.01 and
self.p_bounds[1][0] - 0.01 <= y and y <= self.p_bounds[1][1] + 0.01):
#print("out of bound")
flag = False
break
if flag:
regions.append(region)
# find the point which lies inside
points_to_filter.append(vor.points[vor.point_region == i][0,:])
vor.filtered_points = np.array([vor.point_region[:vor.npoints//5]])
vor.filtered_regions = regions
centroids = []
for region in vor.filtered_regions:
vertices = vor.vertices[region + [region[0]], :]
A = 0
C_x = 0
C_y = 0
for i in range(0, len(vertices) - 1):
s = (vertices[i, 0] * vertices[i + 1, 1] - vertices[i + 1, 0] * vertices[i, 1])
A += s
C_x += (vertices[i, 0] + vertices[i + 1, 0]) * s
C_y += (vertices[i, 1] + vertices[i + 1, 1]) * s
A *= 0.5
C_x *= (1.0 / (6.0 * A))
C_y *= (1.0 / (6.0 * A))
centroids.append([C_x, C_y])
print("t: %d cvt t: %.3f" % (self.t, time.time() - t_start)) #, centroids, len(vor.filtered_regions))
return vor, np.array(centroids)
#print diagnostics for current situation
def print(self):
print("[env] drones: %d" % (len(self.drn)))
def capacity(self, freq, pos_tx, pos_rx, pw_tx=20, noise=-90):
noise_total = noise
pw_rx = utils.power_fspl(pw_tx, freq, utils.dist(pos_tx, pos_rx))
#cross interference
if tuple(freq) in self.tx:
for pos_noise in self.tx[tuple(freq)]:
noise_total += utils.power_fspl(pw_tx, freq, utils.dist(pos_noise, pos_rx))
return BANDWIDTH * math.log2(1 + pw_rx / noise_total)
```
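The `capacity()` method above is a Shannon-capacity estimate, C = B * log2(1 + SNR), with interference from co-channel transmitters folded into the noise term. A standalone sketch of the same formula (the dBm-to-linear conversion is an assumption about the intended units of `utils.power_fspl`, which is not shown here):

```python
import math

BANDWIDTH = 1.0  # normalized, as in env.py

def fspl_db(freq_hz, dist_m):
    # Free-space path loss in dB (standard formula, not taken from the repo).
    return 20 * math.log10(dist_m) + 20 * math.log10(freq_hz) - 147.55

pw_tx_dbm = 20.0
noise_dbm = -90.0
pw_rx_dbm = pw_tx_dbm - fspl_db(2.4e9, 100.0)

snr_linear = 10 ** ((pw_rx_dbm - noise_dbm) / 10)
print(BANDWIDTH * math.log2(1 + snr_linear))  # bandwidth-normalized capacity
```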
#### File: rf-selection/sim/pid.py
```python
import time
import matplotlib.animation as anim
import matplotlib.pyplot as plt
import threading
import math
import numpy as np
'''[Global Vars]------------------------------------------------------------'''
ORIGIN_X = 0.0
ORIGIN_Y = 0.0
C_R = 10
#plt.autoscale(enable=True, axis="both")
fig = plt.figure()
ax = fig.add_subplot(2,1,1)
ax2 = fig.add_subplot(2,1,2)
scat = ax.scatter([], [])
ax.set_xlim([-1 * C_R - 1, C_R + 1])
ax.set_ylim([-1 * C_R - 1, C_R + 1])
scat.set_facecolors(['g', 'r'])
scat.set_sizes([31, 31])
prev_time = time.time()
vel = np.array([0.0, 0.0])
errors = [0, 1]
error_plot, = ax2.plot([i for i in range(len(errors))], errors, color="g")
class drone():
def __init__(self, p, vel):
self.pos = np.array(p)
self.v = np.array(vel)
self.prev_error = np.zeros((2))
self.integral = np.zeros((2))
self.dt = 0.01
self.kp = 0.8 * 2.0
self.ki = 0
self.kd = 0
#self.ki = 2.0 * self.kp / 2.0
#self.kd = self.kp * 2.0 / 8.0
#self.ki = 2 * self.kp / 1.0
#self.kd = self.kp * 0.01 / 8
def callback(self):
pass
def run(self, ref_pos, vx=None, vy=None):
self.pos += self.v
#print(self.integral)
if vx:
self.v[0] = vx
if vy:
self.v[1] = vy
#compute PID output
error = ref_pos - self.pos
self.integral = self.integral * 0.99 + error * self.dt
'''
for i in range(2):
if self.integral[i] > 1:
self.integral[i] = 1
elif self.integral[i] < -1:
self.integral[i] = -1
'''
#print(self.integral)
derivative = (error - self.prev_error) / self.dt
for i in range(2):
if derivative[i] > 0.1:
derivative[i] = 0.1
elif derivative[i] < -0.1:
derivative[i] = -0.1
self.prev_error = error
pid_output = (self.kp * error) + (self.ki * self.integral) + (self.kd * derivative)
print(self.pos, pid_output, self.kp * error, self.ki * self.integral, self.kd * derivative)
#print(error[0])
#errors.append(error[0])
return pid_output
d = drone([ORIGIN_X + C_R, ORIGIN_Y], [0.0, 0.0])
def dist(x1, y1, x2, y2):
return ((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1))**(1/2)
def dist(p1, p2):
assert len(p1) == len(p2)
dims = len(p1)
total = 0
for i in range(dims):
total += (p2[i] - p1[i]) * (p2[i] - p1[i])
return (total)**(1/2)
#def pid_angle(x, y, ref_x, ref_y, d):
# return math.atan(-1 * (C_R - dist(x, y, ORIGIN_X, ORIGIN_Y)) / d) + math.atan((y - ORIGIN_Y) / (x - ORIGIN_X)) + math.pi / 2
def ref(t):
return np.array([ORIGIN_X + C_R * math.cos(t), ORIGIN_Y + C_R * math.sin(t)])
def update(i):
global prev_time, vel
#update reference point position
curr_time = time.time()
ref_point = ref(i / 25.0)
#ref_x = ref_point[0]
#ref_y = ref_point[1]
out = d.run(ref_point)
for i in range(2):
if out[i] > 10 or out[i] < -10:
out = out * 10 / out[i]
#print(d.pos, out)
d.v = out
while time.time() - prev_time < d.dt:
time.sleep(d.dt / 10)
prev_time = time.time()
#print the desired angle of drone
#pid_ang = pid_angle(d.x, d.y, ref_point[0], ref_point[1], 0.05)
#print(math.cos(pid_ang), math.sin(pid_ang))
#d.run(math.cos(pid_ang), math.sin(pid_ang))
scat.set_offsets([[ref_point[0], ref_point[1]], [d.pos[0], d.pos[1]]])
errors.append(dist(ref_point, d.pos))
error_plot.set_xdata([i for i in range(len(errors))])
error_plot.set_ydata(errors)
ax2.set_xlim([-1, len(errors) + 1])
    ax2.set_ylim([0, max(errors)])
def main():
    d = drone([ORIGIN_X + C_R, ORIGIN_Y], [0.0, 0.0])
if __name__ == '__main__':
#main()
a = anim.FuncAnimation(fig, update, range(1000), interval=1, blit=False, repeat=False)
plt.show()
```
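The controller in `run()` is a standard discrete PID with a leaky integral and a clamped derivative. A one-dimensional reduction of the same update, kept deliberately minimal:

```python
kp, ki, kd, dt = 0.8 * 2.0, 0.0, 0.0, 0.01   # gains as set in drone.__init__
integral, prev_error = 0.0, 0.0

def pid_step(ref, pos):
    """One PID update mirroring drone.run(), 1-D and without output limits."""
    global integral, prev_error
    error = ref - pos
    integral = integral * 0.99 + error * dt          # leaky accumulation
    derivative = (error - prev_error) / dt
    prev_error = error
    return kp * error + ki * integral + kd * derivative

print(pid_step(1.0, 0.0))   # with ki = kd = 0 this is just kp * error = 1.6
```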
#### File: rf-selection/sim/poi.py
```python
import numpy as np
class poi():
def __init__(self, p, t_start, t_window):
self.pos = np.array(p)
self.t_start = t_start
self.t_window = t_window
self.data = 100
``` |
{
"source": "jmagnusson/nav-requests",
"score": 3
} |
#### File: nav/wrappers/json.py
```python
import collections
import datetime
import decimal
from json import JSONDecodeError # noqa
import json as json_impl
class JsonExtendedEncoder(json_impl.JSONEncoder):
"""
Needed for the json module to understand what to do with the types we
use in this project.
"""
def default(self, obj):
if isinstance(obj, decimal.Decimal):
return float(obj)
elif isinstance(
obj,
(
datetime.time,
datetime.date,
datetime.datetime,
),
):
return str(obj)
# Handle case where zeep returns the undocumented _raw_elements key
        # which is of type `collections.deque`. As the name suggests, this
# object contains raw elements, which json will be unable to process,
# therefore we iterate over the object and return a string
# representation of it.
elif isinstance(obj, collections.deque):
return str([x for x in obj])
return super().default(obj)
def dump(*args, **kw):
if 'cls' not in kw:
kw['cls'] = JsonExtendedEncoder
return json_impl.dump(*args, **kw)
def dumps(*args, **kw):
if 'cls' not in kw:
kw['cls'] = JsonExtendedEncoder
return json_impl.dumps(*args, **kw)
load = json_impl.load
loads = json_impl.loads
```
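A usage sketch for the wrapper module above (the import path follows the file header `nav/wrappers/json.py` and is assumed to be importable as shown):

```python
import datetime
import decimal

from nav.wrappers import json

payload = {
    "amount": decimal.Decimal("12.50"),
    "when": datetime.datetime(2020, 1, 1, 12, 0),
}
# Decimal is emitted as a float and datetime as its string form,
# courtesy of JsonExtendedEncoder.
print(json.dumps(payload))
# {"amount": 12.5, "when": "2020-01-01 12:00:00"}
```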
#### File: nav-requests/tests/test_base.py
```python
import os
import re
import subprocess as subp
import lxml.etree
import pytest
import responses
import nav
BASE_URL = 'http://navtest:7080/DynamicsNAV/WS/CRONUS-Company-Ltd/'
PAGE_READMULTIPLE_RESPONSE_DATA = """
<Soap:Envelope xmlns:Soap="http://schemas.xmlsoap.org/soap/envelope/">
<Soap:Body>
<ReadMultiple_Result xmlns="urn:microsoft-dynamics-schemas/page/customerlist">
<ReadMultiple_Result>
<CustomerList>
<No>123</No>
<Name>Customer #1</Name>
</CustomerList>
<CustomerList>
<No>456</No>
<Name>Customer #2</Name>
</CustomerList>
</ReadMultiple_Result>
</ReadMultiple_Result>
</Soap:Body>
</Soap:Envelope>
"""
PAGE_CREATEMULTIPLE_RESPONSE_DATA = """
<Soap:Envelope xmlns:Soap="http://schemas.xmlsoap.org/soap/envelope/">
<Soap:Body>
<CreateMultiple_Result xmlns="urn:microsoft-dynamics-schemas/page/customerlist">
<CustomerList_List>
<CustomerList>
<No>234567</No>
<Name>Happy Customer Inc</Name>
</CustomerList>
<CustomerList>
<No>345678</No>
<Name>Cool Guy Inc</Name>
</CustomerList>
</CustomerList_List>
</CreateMultiple_Result>
</Soap:Body>
</Soap:Envelope>
"""
CODEUNIT_RESPONSE_DATA = """
<Soap:Envelope xmlns:Soap="http://schemas.xmlsoap.org/soap/envelope/">
<Soap:Body>
<HelloWorld_Result xmlns="urn:microsoft-dynamics-schemas/codeunit/IntegrationEntry">
<return_value>true</return_value>
<oGreeting>Test greeting</oGreeting>
</HelloWorld_Result>
</Soap:Body>
</Soap:Envelope>
"""
def dummy_request_callback(request):
if '/Codeunit' in request.url:
data = CODEUNIT_RESPONSE_DATA
elif '/Page/' in request.url:
if nav.CreateMultiple in request.headers['SOAPAction']:
data = PAGE_CREATEMULTIPLE_RESPONSE_DATA
else:
data = PAGE_READMULTIPLE_RESPONSE_DATA
else:
raise RuntimeError
return (200, {}, data)
@pytest.fixture
def add_responses():
with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
rsps.add(
responses.GET,
re.compile(BASE_URL + 'Page/CustomerList'),
body=open(os.path.join(
os.path.dirname(__file__),
'wsdl/page-CustomerList.xml',
)).read(),
content_type='application/xml',
)
rsps.add(
responses.GET,
re.compile(BASE_URL + 'Codeunit/IntegrationEntry'),
body=open(os.path.join(
os.path.dirname(__file__),
'wsdl/codeunit-IntegrationEntry.xml',
)).read(),
content_type='application/xml',
)
rsps.add_callback(
responses.POST,
re.compile(BASE_URL + '(Page|Codeunit)/.+'),
callback=dummy_request_callback,
content_type='application/xml'
)
yield
@pytest.mark.usefixtures('add_responses')
def test_nav_class():
nv = nav.NAV(BASE_URL, 'x', 'y')
data = nv.page('CustomerList', nav.ReadMultiple)
assert data[0]['No'] == '123'
assert data[1]['No'] == '456'
data = nv.codeunit(
'IntegrationEntry',
'HelloWorld',
func_args=dict(
iName='DISCARDED',
oGreeting='TEST'
),
)
assert data['oGreeting'] == 'Test greeting'
@pytest.mark.usefixtures('add_responses')
def test_nav_class_service_cache():
nv = nav.NAV(BASE_URL, 'x', 'y')
assert len(nv._service_cache) == 0
nv.page('CustomerList', nav.ReadMultiple)
assert len(nv._service_cache) == 1
nv.page('CustomerList', nav.ReadMultiple)
assert len(nv._service_cache) == 1
nv.page('CustomerList', nav.CreateMultiple, entries=[{}])
assert len(nv._service_cache) == 1 # Still same WS endpoint
nv.codeunit(
'IntegrationEntry',
'HelloWorld',
func_args=dict(
iName='DISCARDED',
oGreeting='TEST'
),
)
assert len(nv._service_cache) == 2
@pytest.mark.usefixtures('add_responses')
def test_service():
srvc = nav.service(BASE_URL, 'x', 'y', 'Codeunit', 'IntegrationEntry')
data = srvc.HelloWorld(iName='DISCARDED', oGreeting='TEST')
assert data['oGreeting'] == 'Test greeting'
@pytest.mark.usefixtures('add_responses')
def test_meta():
data = nav.meta(BASE_URL, 'x', 'y', 'Codeunit', 'IntegrationEntry')
data_bytes = lxml.etree.tostring(data)
assert b'HelloWorld_Result' in data_bytes
@pytest.mark.usefixtures('add_responses')
def test_codeunit_HelloWorld():
data = nav.codeunit(
BASE_URL,
'x',
'y',
'IntegrationEntry',
'HelloWorld',
func_args=dict(iName='DISCARDED', oGreeting='TEST'),
)
assert data['oGreeting'] == 'Test greeting'
@pytest.mark.usefixtures('add_responses')
def test_page_ReadMultiple():
data = nav.page(BASE_URL, 'x', 'y', 'CustomerList', nav.ReadMultiple)
assert data[0]['No'] == '123'
assert data[1]['No'] == '456'
@pytest.mark.usefixtures('add_responses')
def test_nav_class_read_multiple():
nv = nav.NAV(BASE_URL, 'x', 'y')
data1 = nv.page('CustomerList', nav.ReadMultiple)
data2 = nv.read_multiple('CustomerList')
assert data1 == data2
@pytest.mark.usefixtures('add_responses')
def test_page_CreateMultiple():
data = nav.page(
BASE_URL,
'x',
'y',
'CustomerList',
nav.CreateMultiple,
entries=[
{'No': 'DISCARDED', 'Name': 'DISCARDED'},
{'No': 'DISCARDED', 'Name': 'DISCARDED'},
],
)
assert data[0]['No'] == '234567'
assert data[1]['No'] == '345678'
@pytest.mark.usefixtures('add_responses')
def test_nav_class_create_multiple():
nv = nav.NAV(BASE_URL, 'x', 'y')
data1 = nav.page(
BASE_URL,
'x',
'y',
'CustomerList',
nav.CreateMultiple,
entries=[{}],
)
data2 = nv.create_multiple(
'CustomerList',
entries=[{}],
)
assert data1 == data2
def test_entry_point_runnable():
proc = subp.run(['nav'], stdout=subp.PIPE)
assert b'{interact,meta,codeunit,page}' in proc.stdout
``` |
{
"source": "jmagnusson/prodapi",
"score": 2
} |
#### File: prodapi/prodapi/applications.py
```python
from typing import Iterable, List, Optional
from fastapi import APIRouter, FastAPI
from starlette.middleware.cors import CORSMiddleware
from . import routes
from .sec import FastAPISecurity
__all__ = ("ProdAPI",)
class ProdAPI(FastAPI):
def __init__(
self,
*,
http_path_prefix: str = "",
title: str = "ProdAPI",
openapi_url: Optional[str] = "/openapi.json",
docs_url: Optional[str] = "/docs",
redoc_url: Optional[str] = "/redoc",
**kw,
):
if http_path_prefix.endswith("/"):
raise RuntimeError("`http_path_prefix` must not end with a slash")
super().__init__(
title=title,
openapi_url=self._prefixed_path(http_path_prefix, openapi_url),
docs_url=self._prefixed_path(http_path_prefix, docs_url),
redoc_url=self._prefixed_path(http_path_prefix, redoc_url),
**kw,
)
self.http_path_prefix = http_path_prefix
def include_router(self, router: APIRouter, **kw):
prefix = kw.pop("prefix", "")
super().include_router(
router, prefix=self._prefixed_path(self.http_path_prefix, prefix), **kw
)
def with_health_routes(
self,
*,
liveness_url: str = routes.health.DEFAULT_LIVENESS_URL,
readiness_url: str = routes.health.DEFAULT_READINESS_URL,
alive_checks: Iterable[routes.health.HealthCheckCallback] = (),
ready_checks: Iterable[routes.health.HealthCheckCallback] = (),
alive_tags: Optional[List[str]] = None,
ready_tags: Optional[List[str]] = None,
) -> "ProdAPI":
router = routes.health.make_router(
liveness_url=liveness_url,
readiness_url=readiness_url,
alive_checks=alive_checks,
ready_checks=ready_checks,
alive_tags=alive_tags,
ready_tags=ready_tags,
)
self.include_router(router)
return self
def with_user_routes(
self, security: FastAPISecurity, *, user_details_url: str = "/users/me"
) -> "ProdAPI":
self.include_router(
routes.user.make_router(security, user_details_url=user_details_url)
)
return self
def with_basic_cors(
self, *, expose_headers: Optional[Iterable[str]] = None
) -> "ProdAPI":
self.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
expose_headers=expose_headers,
)
return self
@staticmethod
def _prefixed_path(prefix, path) -> str:
if not path.startswith("/"):
path = f"/{path}"
p = f"{prefix}{path}"
if p.endswith("/"):
p = p[0:-1]
return p
```
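A minimal wiring sketch using the helpers defined above (assuming `ProdAPI` is exported from the top-level `prodapi` package; the security-dependent user routes are left out):

```python
from prodapi import ProdAPI

app = (
    ProdAPI(http_path_prefix="/api")
    .with_health_routes()   # liveness/readiness routes with their default URLs
    .with_basic_cors()
)
# With the prefix, the interactive docs live at /api/docs and the schema
# at /api/openapi.json.
```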
#### File: tests/examples/test_app1.py
```python
from pathlib import Path
import pytest
import requests
from .helpers import run_example_app
try:
import uvicorn
except ImportError:
uvicorn = None
app1_path = Path("./examples/app1")
pytestmark = [
pytest.mark.skipif(not app1_path.exists(), reason="app1 example couldn't be found"),
pytest.mark.skipif(uvicorn is None, reason="`uvicorn` isn't installed"),
pytest.mark.slow,
]
basic_auth_env = {
"BASIC_AUTH_CREDENTIALS": '[{"username": "user1", "password": "<PASSWORD>"}]'
}
def test_users_me_basic_auth_anonymous():
with run_example_app("examples.app1:app", env=basic_auth_env) as base_url:
resp = requests.get(f"{base_url}/users/me")
assert resp.status_code == 200
data = resp.json()
assert data["auth"] == {
"subject": "anonymous",
"auth_method": "none",
"issuer": None,
"audience": [],
"issued_at": None,
"expires_at": None,
"scopes": [],
"permissions": [],
}
def test_users_me_basic_auth_authenticated():
with run_example_app("examples.app1:app", env=basic_auth_env) as base_url:
resp = requests.get(f"{base_url}/users/me", auth=("user1", "test"))
assert resp.status_code == 200
data = resp.json()
assert data["auth"] == {
"subject": "user1",
"auth_method": "basic_auth",
"issuer": None,
"audience": [],
"issued_at": None,
"expires_at": None,
"scopes": [],
"permissions": [],
}
def test_user_permissions_basic_auth_authenticated():
with run_example_app(
"examples.app1:app",
env={**basic_auth_env, "PERMISSION_OVERRIDES": '{"user1": ["*"]}'},
) as base_url:
resp = requests.get(f"{base_url}/users/me", auth=("user1", "test"))
assert resp.status_code == 200
data = resp.json()
assert set(data["auth"]["permissions"]) == {"products:create", "products:list"}
def test_create_product_unauthenticated():
with run_example_app("examples.app1:app", env=basic_auth_env) as base_url:
resp = requests.post(f"{base_url}/products")
assert resp.status_code == 401
data = resp.json()
assert data == {"detail": "Could not validate credentials"}
def test_create_product_authenticated():
with run_example_app(
"examples.app1:app",
env={**basic_auth_env, "PERMISSION_OVERRIDES": '{"user1": ["*"]}'},
) as base_url:
resp = requests.post(
f"{base_url}/products", auth=("user1", "test"), json={"name": "T-shirt"}
)
assert resp.status_code == 201
data = resp.json()
assert data == {"name": "T-shirt"}
def test_list_products_authenticated():
with run_example_app(
"examples.app1:app",
env={**basic_auth_env, "PERMISSION_OVERRIDES": '{"user1": ["*"]}'},
) as base_url:
url = f"{base_url}/products"
auth = ("user1", "test")
resp = requests.post(url, auth=auth, json={"name": "T-shirt"})
assert resp.status_code == 201
resp = requests.post(url, auth=auth, json={"name": "T-shirt"})
assert resp.status_code == 201
# resp = requests.get(url, auth=auth)
# assert resp.status_code == 200
# assert resp.json() == []
# # assert resp.json() == [{"name": "T-shirt"}]
```
#### File: tests/integration/test_info.py
```python
import re
from prodapi import __version__, info
def test_version():
assert __version__ == info.version
assert re.match(r"^\d+\.\d+\.\d+$", info.version)
def test_name():
assert info.name == "prodapi"
def test_environment():
assert info.environment == "dev"
def test_node():
assert isinstance(info.node, str)
assert len(info.node) > 0
``` |
{
"source": "jmagnusson/sentry-46elks",
"score": 2
} |
#### File: sentry-46elks/sentry_46elks/plugin.py
```python
from __future__ import unicode_literals
import re
import requests
import sentry_46elks
from django import forms
from django.utils.translation import ugettext_lazy as _
from sentry.conf import settings
from sentry.plugins.bases.notify import NotificationPlugin
NOTSET = object()
class Sentry46ElksConfigurationForm(forms.Form):
api_endpoint = forms.CharField(
label=_('API Endpoint'),
required=True,
help_text=_('API URL used for sending the texts'),
initial='https://api.46elks.com/a1/SMS',
)
api_username = forms.CharField(
label=_('API username'),
required=True,
widget=forms.TextInput(attrs={'class': 'span6'}),
)
api_password = forms.CharField(
label=_('API password'),
required=True,
widget=forms.PasswordInput(
render_value=True,
attrs={'class': 'span6'},
),
)
sender = forms.CharField(
label=_('SMS Sender'),
required=True,
help_text=_('The number/name of the sender'),
widget=forms.TextInput(attrs={'placeholder': 'e.g. +46701234567'}),
)
receivers = forms.CharField(
label=_('SMS Receivers'),
required=True,
help_text=_('Recipient(s) phone numbers separated by commas '
'or line breaks'),
widget=forms.Textarea(
attrs={'placeholder': 'e.g. +46701234567, +46709876543'}
),
)
def clean_receivers(self):
data = self.cleaned_data['receivers']
phones = set(filter(bool, re.split(r'\s*,\s*|\s+', data)))
msg_tmpl = '{0} is not a valid phone number.'
for phone in phones:
if not re.match(r'^\+\d{10,}$', phone):
raise forms.ValidationError(msg_tmpl.format(phone))
return ','.join(phones)
def clean(self):
# TODO: Ping 46elks and check credentials (?)
return self.cleaned_data
class Sentry46ElksPlugin(NotificationPlugin):
author = '<NAME>'
author_url = 'https://github.com/jmagnusson'
version = sentry_46elks.__version__
description = 'A plugin for Sentry which sends SMS notifications via ' \
'46elks SMS API'
resource_links = (
('Documentation',
'https://github.com/jmagnusson/sentry-46elks/blob/master/README.md'),
('Bug Tracker',
'https://github.com/jmagnusson/sentry-46elks/issues'),
('Source',
'https://github.com/jmagnusson/sentry-46elks'),
('46elks',
'http://www.46elks.com/'),
)
slug = '46elks'
title = _('46elks (SMS)')
conf_title = title
conf_key = '46elks'
project_conf_form = Sentry46ElksConfigurationForm
def __init__(self, min_level=NOTSET, *args, **kwargs):
super(Sentry46ElksPlugin, self).__init__(*args, **kwargs)
if min_level is NOTSET:
min_level = getattr(settings, 'SMS_LEVEL')
self.min_level = min_level
def is_configured(self, request, project, **kwargs):
        fields = ('api_endpoint', 'api_username', 'api_password', 'sender',
'receivers')
return all([self.get_option(o, project) for o in fields])
def get_send_to(self, *args, **kwargs):
# This doesn't depend on email permission... stuff.
return True
def notify_users(self, group, event):
project = group.project
error_level = event.get_level_display()
error = event.error().splitlines()
error = error[0] if len(error) else ''
body = 'Sentry [{0}] {1}: {2}'.format(project.name, error_level, error)
body = body[:160] # Truncate to 160 characters
endpoint = self.get_option('api_endpoint', project)
auth = (self.get_option('api_username', project),
self.get_option('api_password', project))
sender = self.get_option('sender', project)
receivers = self.get_option('receivers', project).split(',')
for receiver in receivers:
try:
requests.post(endpoint, auth=auth, data={
'from': sender,
'to': receiver,
'message': body,
})
except Exception as e:
# TODO: Handle
raise e
def get_option(self, key, *args, **kwargs):
super_ = super(Sentry46ElksPlugin, self)
value = super_.get_option(key, *args, **kwargs)
if value is None and key in ('min_level', ):
value = getattr(self, key)
return value
``` |
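For reference, the receiver parsing in `clean_receivers` above boils down to the following; this is a standalone sketch with made-up input, not part of the plugin:
```python
import re

# Split on commas and/or whitespace, drop empties, de-duplicate, then require
# an international format: '+' followed by at least ten digits.
raw = "+46701234567, +46709876543\n+46701234567"
phones = set(filter(bool, re.split(r'\s*,\s*|\s+', raw)))
assert phones == {"+46701234567", "+46709876543"}
assert all(re.match(r'^\+\d{10,}$', p) for p in phones)
```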
{
"source": "jmagnusson/sqlalchemy",
"score": 2
} |
#### File: dialects/mysql/json.py
```python
from __future__ import absolute_import
import json
from ...sql import elements
from ... import types as sqltypes
from ... import util
class JSON(sqltypes.JSON):
"""MySQL JSON type.
MySQL supports JSON as of version 5.7. Note that MariaDB does **not**
support JSON at the time of this writing.
The :class:`.mysql.JSON` type supports persistence of JSON values
as well as the core index operations provided by :class:`.types.JSON`
datatype, by adapting the operations to render the ``JSON_EXTRACT``
function at the database level.
.. versionadded:: 1.1
"""
@util.memoized_property
def _str_impl(self):
return sqltypes.String(convert_unicode=True)
def bind_processor(self, dialect):
string_process = self._str_impl.bind_processor(dialect)
json_serializer = dialect._json_serializer or json.dumps
def process(value):
if value is self.NULL:
value = None
elif isinstance(value, elements.Null) or (
value is None and self.none_as_null
):
return None
serialized = json_serializer(value)
if string_process:
serialized = string_process(serialized)
return serialized
return process
def result_processor(self, dialect, coltype):
string_process = self._str_impl.result_processor(dialect, coltype)
json_deserializer = dialect._json_deserializer or json.loads
def process(value):
if value is None:
return None
if string_process:
value = string_process(value)
return json_deserializer(value)
return process
class JSONIndexType(sqltypes.JSON.JSONIndexType):
def bind_processor(self, dialect):
def process(value):
if isinstance(value, int):
return "$[%s]" % value
else:
return '$."%s"' % value
return process
class JSONPathType(sqltypes.JSON.JSONPathType):
def bind_processor(self, dialect):
def process(value):
return "$%s" % (
"".join([
"[%s]" % elem if isinstance(elem, int)
else '."%s"' % elem for elem in value
])
)
return process
``` |
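To see the behaviour described in the docstring, here is a minimal usage sketch (the table and column names are made up) showing that index access on the column is rendered through `JSON_EXTRACT` by the MySQL dialect:
```python
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy.dialects.mysql import JSON

metadata = sa.MetaData()
documents = sa.Table(
    "documents", metadata,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("payload", JSON),
)

# Index access on the JSON column compiles to the JSON_EXTRACT function
# when rendered against the MySQL dialect.
expr = documents.c.payload["status"]
print(expr.compile(dialect=mysql.dialect()))
# roughly: JSON_EXTRACT(documents.payload, %(payload_1)s)
```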
{
"source": "jmaguiar/PyroNear",
"score": 2
} |
#### File: pyronear/models/resnet.py
```python
from torchvision.models.resnet import BasicBlock, Bottleneck, ResNet, model_urls as imagenet_urls
from torchvision.models.utils import load_state_dict_from_url
from .utils import cnn_model
__all__ = ['resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://srv-file6.gofile.io/download/5WANbz/resnet18-binary-classification.pth',
'resnet34': 'https://srv-file7.gofile.io/download/ay3i9I/resnet34-binary-classification.pth'
}
model_cut = -2
def _resnet(arch, block, layers, pretrained=False, progress=True,
imagenet_pretrained=False, num_classes=1, lin_features=512,
dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
# Model creation
base_model = ResNet(block, layers, num_classes=num_classes, **kwargs)
# Imagenet pretraining
if imagenet_pretrained:
if pretrained:
raise ValueError('imagenet_pretrained cannot be set to True if pretrained=True')
state_dict = load_state_dict_from_url(imagenet_urls[arch],
progress=progress)
# Remove FC params from dict
for key in ('fc.weight', 'fc.bias'):
state_dict.pop(key, None)
missing, unexpected = base_model.load_state_dict(state_dict, strict=False)
if any(unexpected) or any(not elt.startswith('fc.') for elt in missing):
raise KeyError(f"Missing parameters: {missing}\nUnexpected parameters: {unexpected}")
# Cut at last conv layers
model = cnn_model(base_model, model_cut, base_model.fc.in_features, num_classes,
lin_features, dropout_prob, bn_final=bn_final, concat_pool=concat_pool)
# Parameter loading
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
r"""ResNet-18 model for image classification from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
num_classes (int, optional): number of output classes
lin_features (Union[int, list<int>], optional): number of nodes in intermediate layers of model's head
dropout_prob (float, optional): dropout probability of head FC layers
bn_final (bool, optional): should a batch norm be added after the last layer
concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
**kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
imagenet_pretrained, num_classes, lin_features, dropout_prob,
bn_final, concat_pool, **kwargs)
def resnet34(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
r"""ResNet-34 model for image classification from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
num_classes (int, optional): number of output classes
lin_features (Union[int, list<int>], optional): number of nodes in intermediate layers of model's head
dropout_prob (float, optional): dropout probability of head FC layers
bn_final (bool, optional): should a batch norm be added after the last layer
concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
**kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
imagenet_pretrained, num_classes, lin_features, dropout_prob,
bn_final, concat_pool, **kwargs)
def resnet50(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
r"""ResNet-50 model for image classification from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
num_classes (int, optional): number of output classes
lin_features (Union[int, list<int>], optional): number of nodes in intermediate layers of model's head
dropout_prob (float, optional): dropout probability of head FC layers
bn_final (bool, optional): should a batch norm be added after the last layer
concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
**kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
imagenet_pretrained, num_classes, lin_features, dropout_prob,
bn_final, concat_pool, **kwargs)
def resnet101(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
r"""ResNet-101 model for image classification from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
num_classes (int, optional): number of output classes
lin_features (Union[int, list<int>], optional): number of nodes in intermediate layers of model's head
dropout_prob (float, optional): dropout probability of head FC layers
bn_final (bool, optional): should a batch norm be added after the last layer
concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
**kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
imagenet_pretrained, num_classes, lin_features, dropout_prob,
bn_final, concat_pool, **kwargs)
def resnet152(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
r"""ResNet-152 model for image classification from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
num_classes (int, optional): number of output classes
lin_features (Union[int, list<int>], optional): number of nodes in intermediate layers of model's head
dropout_prob (float, optional): dropout probability of head FC layers
bn_final (bool, optional): should a batch norm be added after the last layer
concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
**kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
imagenet_pretrained, num_classes, lin_features, dropout_prob,
bn_final, concat_pool, **kwargs)
def resnext50_32x4d(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
num_classes (int, optional): number of output classes
lin_features (Union[int, list<int>], optional): number of nodes in intermediate layers of model's head
dropout_prob (float, optional): dropout probability of head FC layers
bn_final (bool, optional): should a batch norm be added after the last layer
concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
**kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress,
imagenet_pretrained, num_classes, lin_features, dropout_prob,
bn_final, concat_pool, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
num_classes (int, optional): number of output classes
lin_features (Union[int, list<int>], optional): number of nodes in intermediate layers of model's head
dropout_prob (float, optional): dropout probability of head FC layers
bn_final (bool, optional): should a batch norm be added after the last layer
concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
**kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress,
imagenet_pretrained, num_classes, lin_features, dropout_prob,
bn_final, concat_pool, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
num_classes (int, optional): number of output classes
lin_features (Union[int, list<int>], optional): number of nodes in intermediate layers of model's head
dropout_prob (float, optional): dropout probability of head FC layers
bn_final (bool, optional): should a batch norm be added after the last layer
concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
**kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress,
imagenet_pretrained, num_classes, lin_features, dropout_prob,
bn_final, concat_pool, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
num_classes (int, optional): number of output classes
lin_features (Union[int, list<int>], optional): number of nodes in intermediate layers of model's head
dropout_prob (float, optional): dropout probability of head FC layers
bn_final (bool, optional): should a batch norm be added after the last layer
concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
**kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress,
imagenet_pretrained, num_classes, lin_features, dropout_prob,
bn_final, concat_pool, **kwargs)
``` |
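A minimal usage sketch of the factories above (the import path follows the file location shown; the output shape assumes the default single-logit head built by `cnn_model`, and downloading ImageNet weights requires network access):
```python
import torch
from pyronear.models.resnet import resnet18

# Binary fire classifier: ImageNet-pretrained conv layers, freshly initialised head.
model = resnet18(imagenet_pretrained=True, num_classes=1, lin_features=512)
model.eval()
with torch.no_grad():
    logits = model(torch.rand(2, 3, 224, 224))  # expected shape: (2, 1)
```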
{
"source": "jmaguire/gaia_gps_downloader",
"score": 3
} |
#### File: jmaguire/gaia_gps_downloader/gaia.py
```python
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import urllib.request
import time
import os
GAIA_URL = "https://www.gaiagps.com/datasummary/photos/"
GAIA_PHOTO_API = "https://www.gaiagps.com/api/objects/photo/"
def save_images(images_to_parse):
if not images_to_parse:
print('All images processed')
return
else:
url = images_to_parse[0]['url']
name = images_to_parse[0]['name']
print('Processing image', name)
driver.get(url)
#time.sleep(2)
urllib.request.urlretrieve(driver.current_url, 'gaia/' + name + ".jpg")
save_images(images_to_parse[1:])
def get_images(count = 0):
images = driver.find_elements_by_xpath(
"//a[starts-with(@href, '" + GAIA_PHOTO_API + "')]")
    if not images:
        print('No images on page')
        return
else:
image_count = len(images)
images_to_parse = [
{
'url': images[i].get_attribute('href').replace('/1000/','/full/'),
'name': str(i + count)
}
for i in range(image_count)
]
driver.switch_to.window(window_after)
time.sleep(2)
save_images(images_to_parse)
driver.switch_to.window(window_before)
time.sleep(2)
next_button = driver.find_element_by_link_text('Next')
if 'disabled' in next_button.find_element_by_xpath('..').get_attribute("class"):
print("We're done folks!")
return
print("Next Page!")
next_button.click()
time.sleep(1)
get_images(count + image_count)
driver = webdriver.Firefox()
driver.get(GAIA_URL)
email_value = input('Email: ')
password_value = input('Password: ')
email_field = driver.find_element_by_id("login-email")
password_field = driver.find_element_by_id("login-password")
email_field.send_keys(email_value)
password_field.send_keys(password_value)
driver.find_element_by_xpath("//button[@type='submit']").click()
time.sleep(2)
window_before = driver.window_handles[0]
driver.execute_script("window.open('" + GAIA_URL + "', 'new window')")
window_after = driver.window_handles[1]
driver.switch_to.window(window_before)
time.sleep(2)
if not os.path.exists('gaia'):
os.makedirs('gaia')
time.sleep(2)
get_images()
``` |
{
"source": "Jmahaja1/genieparser",
"score": 3
} |
#### File: junos/tests/test_show_ldp.py
```python
import unittest
from unittest.mock import Mock
# ATS
from pyats.topology import Device
# Metaparser
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.libs.parser.junos.show_ldp import (ShowLDPSession,
ShowLdpNeighbor,
ShowLdpSessionIpaddressDetail,
ShowLdpDatabaseSessionIpaddress,
ShowLDPInterface,ShowLDPInterfaceDetail,
ShowLDPOverview)
# =================================
# Unit test for 'show ldp session'
# =================================
class TestShowLDPSession(unittest.TestCase):
'''unit test for "show ldp session'''
device = Device(name='aDevice')
maxDiff = None
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
'ldp-session-information': {
'ldp-session': [{
'ldp-neighbor-address': '10.34.2.250',
'ldp-session-state': 'Operational',
'ldp-connection-state': 'Open',
'ldp-remaining-time': '26',
'ldp-session-adv-mode': 'DU'
}]
}
}
golden_output = {
'execute.return_value':
'''
Address State Connection Hold time Adv. Mode
10.34.2.250 Operational Open 26 DU
'''
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowLDPSession(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowLDPSession(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output)
# ===============================================
# Unit test for 'show ldp interface {interface}'
# ===============================================
class TestShowLDPInterface(unittest.TestCase):
'''unit test for "show ldp interface {interface}'''
device = Device(name='aDevice')
maxDiff = None
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"ldp-interface-information": {
"ldp-interface": {
"interface-name": "ge-0/0/0.0",
"ldp-interface-local-address": "10.169.14.157",
"ldp-label-space-id": "10.169.14.240:0",
"ldp-neighbor-count": "1",
"ldp-next-hello": "3"
}
}
}
golden_output = {
'execute.return_value':
'''
show ldp interface ge-0/0/0.0
Interface Address Label space ID Nbr Next
count hello
ge-0/0/0.0 10.169.14.157 10.169.14.240:0 1 3
'''
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowLDPInterface(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(interface='ge-0/0/0.0')
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowLDPInterface(device=self.device)
parsed_output = obj.parse(interface='ge-0/0/0.0')
self.assertEqual(parsed_output, self.golden_parsed_output)
# =====================================================
# Unit test for 'show ldp interface {interface} detail'
# =====================================================
class TestShowLDPInterfaceDetail(unittest.TestCase):
'''unit test for "show ldp interface {interface} detail'''
device = Device(name='aDevice')
maxDiff = None
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"ldp-interface-information": {
"ldp-interface": {
"interface-name": "ge-0/0/0.0",
"ldp-interface-local-address": "10.169.14.157",
"ldp-label-space-id": "10.169.14.240:0",
"ldp-neighbor-count": "1",
"ldp-next-hello": "1",
"ldp-transport-address": "10.169.14.240",
"ldp-hello-interval": "5",
"ldp-holdtime": "15",
}
}
}
golden_output = {
'execute.return_value':
'''
show ldp interface ge-0/0/0.0 detail
Interface Address Label space ID Nbr Next
count hello
ge-0/0/0.0 10.169.14.157 10.169.14.240:0 1 1
Hello interval: 5, Hold time: 15, Transport address: 10.169.14.240
'''
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowLDPInterfaceDetail(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(interface='ge-0/0/0.0')
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowLDPInterfaceDetail(device=self.device)
parsed_output = obj.parse(interface='ge-0/0/0.0')
self.assertEqual(parsed_output, self.golden_parsed_output)
# =================================
# Unit test for 'show ldp neighbor'
# =================================
class TestShowLdpNeighbor(unittest.TestCase):
'''unit test for "show ldp neighbor '''
device = Device(name='aDevice')
maxDiff = None
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
'ldp-neighbor-information':
{'ldp-neighbor': [
{'interface-name': 'ge-0/0/0.0',
'ldp-label-space-id': '10.34.2.250:0',
'ldp-neighbor-address': '10.169.14.158',
'ldp-remaining-time': '14'
}
]
}
}
golden_output = {
'execute.return_value':
'''
show ldp neighbor
Address Interface Label space ID Hold time
10.169.14.158 ge-0/0/0.0 10.34.2.250:0 14
'''
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowLdpNeighbor(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowLdpNeighbor(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output)
# =================================
# Unit test for 'show ldp database session ipaddress'
# =================================
class TestShowLdpDatabaseSessionIpaddress(unittest.TestCase):
'''unit test for "show ldp database session ipaddress'''
device = Device(name='aDevice')
maxDiff = None
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"ldp-database-information": {
"ldp-database": [
{
"ldp-binding": [
{
"ldp-label": "3",
"ldp-prefix": "10.34.2.250/32"
},
{
"ldp-label": "16",
"ldp-prefix": "10.169.14.240/32"
}
],
"ldp-database-type": "Input label database",
"ldp-label-received": "2",
"ldp-session-id": "10.169.14.240:0--10.34.2.250:0"
},
{
"ldp-binding": [
{
"ldp-label": "16",
"ldp-prefix": "10.34.2.250/32"
},
{
"ldp-label": "3",
"ldp-prefix": "10.169.14.240/32"
}
],
"ldp-database-type": "Output label database",
"ldp-label-advertised": "2",
"ldp-session-id": "10.169.14.240:0--10.34.2.250:0"
}
]
}
}
golden_output = {
'execute.return_value':
'''
show ldp database 10.34.2.250
Input label database, 10.169.14.240:0--10.34.2.250:0
Labels received: 2
Label Prefix
3 10.34.2.250/32
16 10.169.14.240/32
Output label database, 10.169.14.240:0--10.34.2.250:0
Labels advertised: 2
Label Prefix
16 10.34.2.250/32
3 10.169.14.240/32
'''
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowLdpDatabaseSessionIpaddress(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowLdpDatabaseSessionIpaddress(device=self.device)
parsed_output = obj.parse(ipaddress='10.34.2.250')
self.assertEqual(parsed_output, self.golden_parsed_output)
# ===============================================
# Unit test for 'show ldp interface {interface}'
# ===============================================
class TestShowLDPInterface(unittest.TestCase):
'''unit test for "show ldp interface {interface}'''
device = Device(name='aDevice')
maxDiff = None
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"ldp-interface-information": {
"ldp-interface": {
"interface-name": "ge-0/0/0.0",
"ldp-interface-local-address": "10.1.2.2",
"ldp-label-space-id": "10.204.14.100:0",
"ldp-neighbor-count": "1",
"ldp-next-hello": "3"
}
}
}
golden_output = {
'execute.return_value':
'''
show ldp interface ge-0/0/0.0
Interface Address Label space ID Nbr Next
count hello
ge-0/0/0.0 10.1.2.2 10.204.14.100:0 1 3
'''
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowLDPInterface(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(interface='ge-0/0/0.0')
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowLDPInterface(device=self.device)
parsed_output = obj.parse(interface='ge-0/0/0.0')
self.assertEqual(parsed_output, self.golden_parsed_output)
# =====================================================
# Unit test for 'show ldp interface {interface} detail'
# =====================================================
class TestShowLDPInterfaceDetail(unittest.TestCase):
'''unit test for "show ldp interface {interface} detail'''
device = Device(name='aDevice')
maxDiff = None
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"ldp-interface-information": {
"ldp-interface": {
"interface-name": "ge-0/0/0.0",
"ldp-interface-local-address": "10.1.2.2",
"ldp-label-space-id": "10.204.14.100:0",
"ldp-neighbor-count": "1",
"ldp-next-hello": "1",
"ldp-transport-address": "10.204.14.100",
"ldp-hello-interval": "5",
"ldp-holdtime": "15",
}
}
}
golden_output = {
'execute.return_value':
'''
show ldp interface ge-0/0/0.0 detail
Interface Address Label space ID Nbr Next
count hello
ge-0/0/0.0 10.1.2.2 10.204.14.100:0 1 1
Hello interval: 5, Hold time: 15, Transport address: 10.204.14.100
'''
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowLDPInterfaceDetail(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(interface='ge-0/0/0.0')
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowLDPInterfaceDetail(device=self.device)
parsed_output = obj.parse(interface='ge-0/0/0.0')
self.assertEqual(parsed_output, self.golden_parsed_output)
# =================================
# Unit test for 'show ldp overview'
# =================================
class TestShowLDPOverview(unittest.TestCase):
'''unit test for "show ldp overview'''
device = Device(name='aDevice')
maxDiff = None
empty_output = {'execute.return_value': ''}
golden_output = {'execute.return_value': '''
show ldp overview
Instance: master
Reference count: 2
Router ID: 10.204.14.100
LDP inet: enabled
Transport preference: IPv4
Message id: 4
Configuration sequence: 1
Deaggregate: disabled
Explicit null: disabled
IPv6 tunneling: disabled
Strict targeted hellos: disabled
Loopback if added: no
Route preference: 9
Unicast transit LSP chaining: disabled
P2MP transit LSP chaining: disabled
Transit LSP statistics based on route statistics: disabled
LDP route acknowledgement: enabled
BGP export: enabled
LDP mtu discovery: disabled
LDP SR Mapping Client: disabled
Capabilities enabled: none
Egress FEC capabilities enabled: entropy-label-capability
Downstream unsolicited Sessions:
Operational: 1
Retention: liberal
Control: ordered
Auto targeted sessions:
Auto targeted: disabled
Dynamic tunnel session count: 0
P2MP:
Recursive route: disabled
No rsvp tunneling: disabled
Timers:
Keepalive interval: 10, Keepalive timeout: 30
Link hello interval: 5, Link hello hold time: 15
Targeted hello interval: 15, Targeted hello hold time: 45
Label withdraw delay: 60, Make before break timeout: 30
Make before break switchover delay: 3
Link protection timeout: 120
Graceful restart:
Restart: disabled, Helper: enabled, Restart in process: false
Reconnect time: 60000, Max neighbor reconnect time: 120000
Recovery time: 160000, Max neighbor recovery time: 240000
Traffic Engineering:
Bgp igp: disabled
Both ribs: disabled
Mpls forwarding: disabled
IGP:
Tracking igp metric: disabled
Sync session up delay: 10
Session protection:
Session protection: disabled
Session protection timeout: 0
Interface addresses advertising:
10.1.2.2
LDP Job:
Read job time quantum: 1000, Write job time quantum: 1000
Read job loop quantum: 100, Write job loop quantum: 100
Backup inbound read job time quantum: 1000, Backup outbound read job time quantum: 1000
Backup inbound read job loop quantum: 100, Backup outbound read job loop quantum: 100
Label allocation:
Current number of LDP labels allocated: 1
Total number of LDP labels allocated: 1
Total number of LDP labels freed: 0
Total number of LDP label allocation failure: 0
Current number of labels allocated by all protocols: 0
'''}
golden_parsed_output = {
'ldp-overview-information': {
'ldp-overview': {
'ldp-auto-targeted-session': {
'ldp-auto-targeted-dyn-tun-ses-count': 0,
'ldp-auto-targeted-session-enabled': 'disabled'
},
'ldp-bgp-export': 'enabled',
'ldp-configuration-sequence': 1,
'ldp-deaggregate': 'disabled',
'ldp-explicit-null': 'disabled',
'ldp-gr-overview': {
'ldp-gr-helper': 'enabled',
'ldp-gr-max-neighbor-reconnect-time': 120000,
'ldp-gr-max-neighbor-recovery-time': 240000,
'ldp-gr-reconnect-time': 60000,
'ldp-gr-recovery-time': 160000,
'ldp-gr-restart': 'disabled',
'ldp-gr-restarting': 'false'
},
'ldp-igp-overview': {
'ldp-igp-sync-session-up-delay': 10,
'ldp-tracking-igp-metric': 'disabled'
},
'ldp-inet': 'enabled',
'ldp-instance-capability': {
'ldp-capability': 'none'
},
'ldp-instance-egress-fec-capability': {
'ldp-egress-fec-capability': 'entropy-label-capability'
},
'ldp-instance-name': 'master',
'ldp-interface-address': {
'interface-address': '10.1.2.2'
},
'ldp-ipv6-tunneling': 'disabled',
'ldp-job-overview': {
'ldp-inbound-read-job-loop-quantum': 100,
'ldp-inbound-read-job-time-quantum': 1000,
'ldp-outbound-read-job-loop-quantum': 100,
'ldp-outbound-read-job-time-quantum': 1000,
'ldp-read-job-loop-quantum': 100,
'ldp-read-job-time-quantum': 1000,
'ldp-write-job-loop-quantum': 100,
'ldp-write-job-time-quantum': 1000
},
'ldp-label-allocation': {
'ldp-global-label-current-allocs': 0,
'ldp-label-alloc-failure': 0,
'ldp-label-current-allocs': 1,
'ldp-label-total-allocs': 1,
'ldp-label-total-frees': 0
},
'ldp-loopback-if-added': 'no',
'ldp-message-id': 4,
'ldp-mtu-discovery': 'disabled',
'ldp-p2mp': {
'ldp-p2mp-no-rsvp-tunneling-enabled': 'disabled',
'ldp-p2mp-recursive-route-enabled': 'disabled'
},
'ldp-p2mp-transit-lsp-chaining': 'disabled',
'ldp-reference-count': 2,
'ldp-route-acknowledgement': 'enabled',
'ldp-route-preference': 9,
'ldp-router-id': '10.204.14.100',
'ldp-session-count': {
'ldp-control-mode': 'ordered',
'ldp-retention-mode': 'liberal',
'ldp-session-operational': 1
},
'ldp-session-protect-overview': {
'ldp-session-protect': 'disabled',
'ldp-session-protect-timeout': 0
},
'ldp-sr-mapping-client': 'disabled',
'ldp-strict-targeted-hellos': 'disabled',
'ldp-te-overview': {
'ldp-te-bgp-igp': 'disabled',
'ldp-te-both-ribs': 'disabled',
'ldp-te-mpls-forwarding': 'disabled'
},
'ldp-timer-overview': {
'ldp-instance-keepalive-interval': 10,
'ldp-instance-keepalive-timeout': 30,
'ldp-instance-label-withdraw-delay': 60,
'ldp-instance-link-hello-hold-time': 15,
'ldp-instance-link-hello-interval': 5,
'ldp-instance-link-protection-timeout': 120,
'ldp-instance-make-before-break-switchover-delay': 3,
'ldp-instance-make-before-break-timeout': 30,
'ldp-instance-targeted-hello-hold-time': 45,
'ldp-instance-targeted-hello-interval': 15
},
'ldp-transit-lsp-route-stats': 'disabled',
'ldp-transport-preference': 'IPv4',
'ldp-unicast-transit-lsp-chaining': 'disabled'
}
}
}
golden_output_2 = {'execute.return_value': '''
show ldp overview
Instance: master
Router ID: 10.204.14.100
Message id: 345
Configuration sequence: 1
Deaggregate: disabled
Explicit null: disabled
IPv6 tunneling: disabled
Strict targeted hellos: disabled
Loopback if added: no
Route preference: 9
Unicast transit LSP chaining: disabled
P2MP transit LSP chaining: disabled
Transit LSP statistics based on route statistics: disabled
Capabilities enabled: none
Protocol modes:
Distribution: unsolicited
Retention: liberal
Control: ordered
Sessions:
Operational: 1
Timers:
Keepalive interval: 10, Keepalive timeout: 30
Link hello interval: 5, Link hello hold time: 15
Targeted hello interval: 15, Targeted hello hold time: 45
Label withdraw delay: 60
Graceful restart:
Restart: enabled, Helper: enabled, Restart in process: false
Reconnect time: 60000, Max neighbor reconnect time: 120000
Recovery time: 160000, Max neighbor recovery time: 240000
Traffic Engineering:
Bgp igp: disabled
Both ribs: disabled
Mpls forwarding: disabled
IGP:
Tracking igp metric: disabled
Sync session up delay: 10
Session protection:
Session protection: disabled
Session protecton timeout: 0
Interface addresses advertising:
10.1.2.2
'''}
golden_parsed_output_2 = {
'ldp-overview-information': {
'ldp-overview': {
'ldp-configuration-sequence': 1,
'ldp-deaggregate': 'disabled',
'ldp-explicit-null': 'disabled',
'ldp-gr-overview': {
'ldp-gr-helper': 'enabled',
'ldp-gr-max-neighbor-reconnect-time': 120000,
'ldp-gr-max-neighbor-recovery-time': 240000,
'ldp-gr-reconnect-time': 60000,
'ldp-gr-recovery-time': 160000,
'ldp-gr-restart': 'enabled',
'ldp-gr-restarting': 'false'
},
'ldp-igp-overview': {
'ldp-igp-sync-session-up-delay': 10,
'ldp-tracking-igp-metric': 'disabled'
},
'ldp-instance-capability': {
'ldp-capability': 'none'
},
'ldp-instance-name': 'master',
'ldp-interface-address': {
'interface-address': '10.1.2.2'
},
'ldp-ipv6-tunneling': 'disabled',
'ldp-loopback-if-added': 'no',
'ldp-message-id': 345,
'ldp-p2mp-transit-lsp-chaining': 'disabled',
'ldp-protocol-modes': {
'ldp-control-mode': 'ordered',
'ldp-distribution-mode': 'unsolicited',
'ldp-retention-mode': 'liberal'
},
'ldp-route-preference': 9,
'ldp-router-id': '10.204.14.100',
'ldp-session-count': {
'ldp-session-operational': 1
},
'ldp-session-protect-overview': {
'ldp-session-protect': 'disabled',
'ldp-session-protect-timeout': 0
},
'ldp-strict-targeted-hellos': 'disabled',
'ldp-te-overview': {
'ldp-te-bgp-igp': 'disabled',
'ldp-te-both-ribs': 'disabled',
'ldp-te-mpls-forwarding': 'disabled'
},
'ldp-timer-overview': {
'ldp-instance-keepalive-interval': 10,
'ldp-instance-keepalive-timeout': 30,
'ldp-instance-label-withdraw-delay': 60,
'ldp-instance-link-hello-hold-time': 15,
'ldp-instance-link-hello-interval': 5,
'ldp-instance-targeted-hello-hold-time': 45,
'ldp-instance-targeted-hello-interval': 15
},
'ldp-transit-lsp-route-stats': 'disabled',
'ldp-unicast-transit-lsp-chaining': 'disabled'
}
}
}
golden_output_3 = {'execute.return_value': '''
show ldp overview
Instance: master
Reference count: 2
Router ID: 10.204.14.100
LDP inet: enabled
Transport preference: IPv4
Message id: 4
Configuration sequence: 1
Deaggregate: disabled
Explicit null: disabled
IPv6 tunneling: disabled
Strict targeted hellos: disabled
Loopback if added: no
Route preference: 9
Unicast transit LSP chaining: disabled
P2MP transit LSP chaining: disabled
Transit LSP statistics based on route statistics: disabled
LDP route acknowledgement: enabled
BGP export: enabled
LDP mtu discovery: disabled
LDP SR Mapping Client: disabled
Capabilities enabled: none
Egress FEC capabilities enabled: entropy-label-capability
Downstream unsolicited Sessions:
Operational: 1
Retention: liberal
Control: ordered
Auto targeted sessions:
Auto targeted: disabled
Dynamic tunnel session count: 0
P2MP:
Recursive route: disabled
No rsvp tunneling: disabled
Timers:
Keepalive interval: 10, Keepalive timeout: 30
Link hello interval: 5, Link hello hold time: 15
Targeted hello interval: 15, Targeted hello hold time: 45
Label withdraw delay: 60, Make before break timeout: 30
Make before break switchover delay: 3
Link protection timeout: 120
Graceful restart:
Restart: disabled, Helper: enabled, Restart in process: false
Reconnect time: 60000, Max neighbor reconnect time: 120000
Recovery time: 160000, Max neighbor recovery time: 240000
Traffic Engineering:
Bgp igp: disabled
Both ribs: disabled
Mpls forwarding: disabled
IGP:
Tracking igp metric: disabled
Sync session up delay: 10
Session protection:
Session protection: disabled
Session protection timeout: 0
Interface addresses advertising:
10.1.2.2
LDP Job:
Read job time quantum: 1000, Write job time quantum: 1000
Read job loop quantum: 100, Write job loop quantum: 100
Backup inbound read job time quantum: 1000, Backup outbound read job time quantum: 1000
Backup inbound read job loop quantum: 100, Backup outbound read job loop quantum: 100
Label allocation:
Current number of LDP labels allocated: 1
Total number of LDP labels allocated: 1
Total number of LDP labels freed: 0
Total number of LDP label allocation failure: 0
Current number of labels allocated by all protocols: 0
'''}
golden_parsed_output_3 = {
'ldp-overview-information': {
'ldp-overview': {
'ldp-auto-targeted-session': {
'ldp-auto-targeted-dyn-tun-ses-count': 0,
'ldp-auto-targeted-session-enabled': 'disabled'
},
'ldp-bgp-export': 'enabled',
'ldp-configuration-sequence': 1,
'ldp-deaggregate': 'disabled',
'ldp-explicit-null': 'disabled',
'ldp-gr-overview': {
'ldp-gr-helper': 'enabled',
'ldp-gr-max-neighbor-reconnect-time': 120000,
'ldp-gr-max-neighbor-recovery-time': 240000,
'ldp-gr-reconnect-time': 60000,
'ldp-gr-recovery-time': 160000,
'ldp-gr-restart': 'disabled',
'ldp-gr-restarting': 'false'
},
'ldp-igp-overview': {
'ldp-igp-sync-session-up-delay': 10,
'ldp-tracking-igp-metric': 'disabled'
},
'ldp-inet': 'enabled',
'ldp-instance-capability': {
'ldp-capability': 'none'
},
'ldp-instance-egress-fec-capability': {
'ldp-egress-fec-capability': 'entropy-label-capability'
},
'ldp-instance-name': 'master',
'ldp-interface-address': {
'interface-address': '10.1.2.2'
},
'ldp-ipv6-tunneling': 'disabled',
'ldp-job-overview': {
'ldp-inbound-read-job-loop-quantum': 100,
'ldp-inbound-read-job-time-quantum': 1000,
'ldp-outbound-read-job-loop-quantum': 100,
'ldp-outbound-read-job-time-quantum': 1000,
'ldp-read-job-loop-quantum': 100,
'ldp-read-job-time-quantum': 1000,
'ldp-write-job-loop-quantum': 100,
'ldp-write-job-time-quantum': 1000
},
'ldp-label-allocation': {
'ldp-global-label-current-allocs': 0,
'ldp-label-alloc-failure': 0,
'ldp-label-current-allocs': 1,
'ldp-label-total-allocs': 1,
'ldp-label-total-frees': 0
},
'ldp-loopback-if-added': 'no',
'ldp-message-id': 4,
'ldp-mtu-discovery': 'disabled',
'ldp-p2mp': {
'ldp-p2mp-no-rsvp-tunneling-enabled': 'disabled',
'ldp-p2mp-recursive-route-enabled': 'disabled'
},
'ldp-p2mp-transit-lsp-chaining': 'disabled',
'ldp-reference-count': 2,
'ldp-route-acknowledgement': 'enabled',
'ldp-route-preference': 9,
'ldp-router-id': '10.204.14.100',
'ldp-session-count': {
'ldp-control-mode': 'ordered',
'ldp-retention-mode': 'liberal',
'ldp-session-operational': 1
},
'ldp-session-protect-overview': {
'ldp-session-protect': 'disabled',
'ldp-session-protect-timeout': 0
},
'ldp-sr-mapping-client': 'disabled',
'ldp-strict-targeted-hellos': 'disabled',
'ldp-te-overview': {
'ldp-te-bgp-igp': 'disabled',
'ldp-te-both-ribs': 'disabled',
'ldp-te-mpls-forwarding': 'disabled'
},
'ldp-timer-overview': {
'ldp-instance-keepalive-interval': 10,
'ldp-instance-keepalive-timeout': 30,
'ldp-instance-label-withdraw-delay': 60,
'ldp-instance-link-hello-hold-time': 15,
'ldp-instance-link-hello-interval': 5,
'ldp-instance-link-protection-timeout': 120,
'ldp-instance-make-before-break-switchover-delay': 3,
'ldp-instance-make-before-break-timeout': 30,
'ldp-instance-targeted-hello-hold-time': 45,
'ldp-instance-targeted-hello-interval': 15
},
'ldp-transit-lsp-route-stats': 'disabled',
'ldp-transport-preference': 'IPv4',
'ldp-unicast-transit-lsp-chaining': 'disabled'
}
}
}
golden_output_4 = {'execute.return_value': '''
show ldp overview
Instance: master
Reference count: 2
Router ID: 10.204.14.100
LDP inet: enabled
Transport preference: IPv4
Message id: 4
Configuration sequence: 1
Deaggregate: disabled
Explicit null: disabled
IPv6 tunneling: disabled
Strict targeted hellos: disabled
Loopback if added: no
Route preference: 9
Unicast transit LSP chaining: disabled
P2MP transit LSP chaining: disabled
Transit LSP statistics based on route statistics: disabled
LDP route acknowledgement: enabled
BGP export: enabled
LDP mtu discovery: disabled
LDP SR Mapping Client: disabled
Capabilities enabled: none
Egress FEC capabilities enabled: entropy-label-capability
Downstream unsolicited Sessions:
Nonexistent: 1
Retention: liberal
Control: ordered
Auto targeted sessions:
Auto targeted: disabled
Dynamic tunnel session count: 0
P2MP:
Recursive route: disabled
No rsvp tunneling: disabled
Timers:
Keepalive interval: 10, Keepalive timeout: 30
Link hello interval: 5, Link hello hold time: 15
Targeted hello interval: 15, Targeted hello hold time: 45
Label withdraw delay: 60, Make before break timeout: 30
Make before break switchover delay: 3
Link protection timeout: 120
Graceful restart:
Restart: disabled, Helper: enabled, Restart in process: false
Reconnect time: 60000, Max neighbor reconnect time: 120000
Recovery time: 160000, Max neighbor recovery time: 240000
Traffic Engineering:
Bgp igp: disabled
Both ribs: disabled
Mpls forwarding: disabled
IGP:
Tracking igp metric: disabled
Sync session up delay: 10
Session protection:
Session protection: disabled
Session protection timeout: 0
Interface addresses advertising:
10.1.2.2
LDP Job:
Read job time quantum: 1000, Write job time quantum: 1000
Read job loop quantum: 100, Write job loop quantum: 100
Backup inbound read job time quantum: 1000, Backup outbound read job time quantum: 1000
Backup inbound read job loop quantum: 100, Backup outbound read job loop quantum: 100
Label allocation:
Current number of LDP labels allocated: 0
Total number of LDP labels allocated: 0
Total number of LDP labels freed: 0
Total number of LDP label allocation failure: 0
Current number of labels allocated by all protocols: 0
'''}
golden_parsed_output_4 = {
'ldp-overview-information': {
'ldp-overview': {
'ldp-auto-targeted-session': {
'ldp-auto-targeted-dyn-tun-ses-count': 0,
'ldp-auto-targeted-session-enabled': 'disabled'
},
'ldp-bgp-export': 'enabled',
'ldp-configuration-sequence': 1,
'ldp-deaggregate': 'disabled',
'ldp-explicit-null': 'disabled',
'ldp-gr-overview': {
'ldp-gr-helper': 'enabled',
'ldp-gr-max-neighbor-reconnect-time': 120000,
'ldp-gr-max-neighbor-recovery-time': 240000,
'ldp-gr-reconnect-time': 60000,
'ldp-gr-recovery-time': 160000,
'ldp-gr-restart': 'disabled',
'ldp-gr-restarting': 'false'
},
'ldp-igp-overview': {
'ldp-igp-sync-session-up-delay': 10,
'ldp-tracking-igp-metric': 'disabled'
},
'ldp-inet': 'enabled',
'ldp-instance-capability': {
'ldp-capability': 'none'
},
'ldp-instance-egress-fec-capability': {
'ldp-egress-fec-capability': 'entropy-label-capability'
},
'ldp-instance-name': 'master',
'ldp-interface-address': {
'interface-address': '10.1.2.2'
},
'ldp-ipv6-tunneling': 'disabled',
'ldp-job-overview': {
'ldp-inbound-read-job-loop-quantum': 100,
'ldp-inbound-read-job-time-quantum': 1000,
'ldp-outbound-read-job-loop-quantum': 100,
'ldp-outbound-read-job-time-quantum': 1000,
'ldp-read-job-loop-quantum': 100,
'ldp-read-job-time-quantum': 1000,
'ldp-write-job-loop-quantum': 100,
'ldp-write-job-time-quantum': 1000
},
'ldp-label-allocation': {
'ldp-global-label-current-allocs': 0,
'ldp-label-alloc-failure': 0,
'ldp-label-current-allocs': 0,
'ldp-label-total-allocs': 0,
'ldp-label-total-frees': 0
},
'ldp-loopback-if-added': 'no',
'ldp-message-id': 4,
'ldp-mtu-discovery': 'disabled',
'ldp-p2mp': {
'ldp-p2mp-no-rsvp-tunneling-enabled': 'disabled',
'ldp-p2mp-recursive-route-enabled': 'disabled'
},
'ldp-p2mp-transit-lsp-chaining': 'disabled',
'ldp-reference-count': 2,
'ldp-route-acknowledgement': 'enabled',
'ldp-route-preference': 9,
'ldp-router-id': '10.204.14.100',
'ldp-session-count': {
'ldp-control-mode': 'ordered',
'ldp-retention-mode': 'liberal',
'ldp-session-nonexistent': 1
},
'ldp-session-protect-overview': {
'ldp-session-protect': 'disabled',
'ldp-session-protect-timeout': 0
},
'ldp-sr-mapping-client': 'disabled',
'ldp-strict-targeted-hellos': 'disabled',
'ldp-te-overview': {
'ldp-te-bgp-igp': 'disabled',
'ldp-te-both-ribs': 'disabled',
'ldp-te-mpls-forwarding': 'disabled'
},
'ldp-timer-overview': {
'ldp-instance-keepalive-interval': 10,
'ldp-instance-keepalive-timeout': 30,
'ldp-instance-label-withdraw-delay': 60,
'ldp-instance-link-hello-hold-time': 15,
'ldp-instance-link-hello-interval': 5,
'ldp-instance-link-protection-timeout': 120,
'ldp-instance-make-before-break-switchover-delay': 3,
'ldp-instance-make-before-break-timeout': 30,
'ldp-instance-targeted-hello-hold-time': 45,
'ldp-instance-targeted-hello-interval': 15
},
'ldp-transit-lsp-route-stats': 'disabled',
'ldp-transport-preference': 'IPv4',
'ldp-unicast-transit-lsp-chaining': 'disabled'
}
}
}
golden_output_5 = {'execute.return_value': '''
show ldp overview
Instance: master
Router ID: 10.204.1.100
Message id: 4
Configuration sequence: 1
Deaggregate: disabled
Explicit null: disabled
IPv6 tunneling: disabled
Strict targeted hellos: disabled
Loopback if added: no
Route preference: 9
Unicast transit LSP chaining: disabled
P2MP transit LSP chaining: disabled
Transit LSP statistics based on route statistics: disabled
Capabilities enabled: none
Protocol modes:
Distribution: unsolicited
Retention: liberal
Control: ordered
Sessions:
Connecting: 1
Timers:
Keepalive interval: 10, Keepalive timeout: 30
Link hello interval: 5, Link hello hold time: 15
Targeted hello interval: 15, Targeted hello hold time: 45
Label withdraw delay: 60
Graceful restart:
Restart: enabled, Helper: enabled, Restart in process: false
Reconnect time: 60000, Max neighbor reconnect time: 120000
Recovery time: 160000, Max neighbor recovery time: 240000
Traffic Engineering:
Bgp igp: disabled
Both ribs: disabled
Mpls forwarding: disabled
IGP:
Tracking igp metric: disabled
Sync session up delay: 10
Session protection:
Session protection: disabled
Session protecton timeout: 0
Interface addresses advertising:
10.1.2.2
'''}
golden_parsed_output_5 = {
'ldp-overview-information': {
'ldp-overview': {
'ldp-configuration-sequence': 1,
'ldp-deaggregate': 'disabled',
'ldp-explicit-null': 'disabled',
'ldp-gr-overview': {
'ldp-gr-helper': 'enabled',
'ldp-gr-max-neighbor-reconnect-time': 120000,
'ldp-gr-max-neighbor-recovery-time': 240000,
'ldp-gr-reconnect-time': 60000,
'ldp-gr-recovery-time': 160000,
'ldp-gr-restart': 'enabled',
'ldp-gr-restarting': 'false'
},
'ldp-igp-overview': {
'ldp-igp-sync-session-up-delay': 10,
'ldp-tracking-igp-metric': 'disabled'
},
'ldp-instance-capability': {
'ldp-capability': 'none'
},
'ldp-instance-name': 'master',
'ldp-interface-address': {
'interface-address': '10.1.2.2'
},
'ldp-ipv6-tunneling': 'disabled',
'ldp-loopback-if-added': 'no',
'ldp-message-id': 4,
'ldp-p2mp-transit-lsp-chaining': 'disabled',
'ldp-protocol-modes': {
'ldp-control-mode': 'ordered',
'ldp-distribution-mode': 'unsolicited',
'ldp-retention-mode': 'liberal'
},
'ldp-route-preference': 9,
'ldp-router-id': '10.204.1.100',
'ldp-session-count': {
'ldp-session-connecting': 1
},
'ldp-session-protect-overview': {
'ldp-session-protect': 'disabled',
'ldp-session-protect-timeout': 0
},
'ldp-strict-targeted-hellos': 'disabled',
'ldp-te-overview': {
'ldp-te-bgp-igp': 'disabled',
'ldp-te-both-ribs': 'disabled',
'ldp-te-mpls-forwarding': 'disabled'
},
'ldp-timer-overview': {
'ldp-instance-keepalive-interval': 10,
'ldp-instance-keepalive-timeout': 30,
'ldp-instance-label-withdraw-delay': 60,
'ldp-instance-link-hello-hold-time': 15,
'ldp-instance-link-hello-interval': 5,
'ldp-instance-targeted-hello-hold-time': 45,
'ldp-instance-targeted-hello-interval': 15
},
'ldp-transit-lsp-route-stats': 'disabled',
'ldp-unicast-transit-lsp-chaining': 'disabled'
}
}
}
golden_parsed_output_6 = {
"ldp-overview-information": {
"ldp-overview": {
"ldp-auto-targeted-session": {
"ldp-auto-targeted-dyn-tun-ses-count": 0,
"ldp-auto-targeted-session-enabled": "disabled"
},
"ldp-bgp-export": "enabled",
"ldp-configuration-sequence": 2,
"ldp-control-mode": "ordered",
"ldp-deaggregate": "disabled",
"ldp-explicit-null": "disabled",
"ldp-gr-overview": {
"ldp-gr-helper": "enabled",
"ldp-gr-max-neighbor-reconnect-time": 120000,
"ldp-gr-max-neighbor-recovery-time": 240000,
"ldp-gr-reconnect-time": 60000,
"ldp-gr-recovery-time": 160000,
"ldp-gr-restart": "enabled",
"ldp-gr-restarting": "false"
},
"ldp-igp-overview": {
"ldp-igp-sync-session-up-delay": 10,
"ldp-tracking-igp-metric": "disabled"
},
"ldp-inet": "enabled",
"ldp-instance-capability": {
"ldp-capability": "none"
},
"ldp-instance-egress-fec-capability": {
"ldp-egress-fec-capability": "entropy-label-capability"
},
"ldp-instance-name": "master",
"ldp-interface-address": {
"interface-address": "10.169.14.157"
},
"ldp-ipv6-tunneling": "disabled",
"ldp-job-overview": {
"ldp-inbound-read-job-loop-quantum": 100,
"ldp-inbound-read-job-time-quantum": 1000,
"ldp-outbound-read-job-loop-quantum": 100,
"ldp-outbound-read-job-time-quantum": 1000,
"ldp-read-job-loop-quantum": 100,
"ldp-read-job-time-quantum": 1000,
"ldp-write-job-loop-quantum": 100,
"ldp-write-job-time-quantum": 1000
},
"ldp-label-allocation": {
"ldp-global-label-current-allocs": 0,
"ldp-label-alloc-failure": 0,
"ldp-label-current-allocs": 3,
"ldp-label-total-allocs": 7,
"ldp-label-total-frees": 4
},
"ldp-loopback-if-added": "no",
"ldp-message-id": 10,
"ldp-mtu-discovery": "disabled",
"ldp-p2mp": {
"ldp-p2mp-no-rsvp-tunneling-enabled": "disabled",
"ldp-p2mp-recursive-route-enabled": "disabled"
},
"ldp-p2mp-transit-lsp-chaining": "disabled",
"ldp-reference-count": 3,
"ldp-retention-mode": "liberal",
"ldp-route-acknowledgement": "enabled",
"ldp-route-preference": 9,
"ldp-router-id": "10.169.14.240",
"ldp-session-count": {
"ldp-control-mode": "ordered",
"ldp-retention-mode": "liberal",
"ldp-session-nonexistent": 1
},
"ldp-session-operational": 1,
"ldp-session-protect-overview": {
"ldp-session-protect": "disabled",
"ldp-session-protect-timeout": 0
},
"ldp-sr-mapping-client": "disabled",
"ldp-strict-targeted-hellos": "disabled",
"ldp-te-overview": {
"ldp-te-bgp-igp": "disabled",
"ldp-te-both-ribs": "disabled",
"ldp-te-mpls-forwarding": "disabled"
},
"ldp-timer-overview": {
"ldp-instance-keepalive-interval": 10,
"ldp-instance-keepalive-timeout": 30,
"ldp-instance-label-withdraw-delay": 60,
"ldp-instance-link-hello-hold-time": 15,
"ldp-instance-link-hello-interval": 5,
"ldp-instance-link-protection-timeout": 120,
"ldp-instance-make-before-break-switchover-delay": 3,
"ldp-instance-make-before-break-timeout": 30,
"ldp-instance-targeted-hello-hold-time": 45,
"ldp-instance-targeted-hello-interval": 15
},
"ldp-transit-lsp-route-stats": "disabled",
"ldp-transport-preference": "IPv4",
"ldp-unicast-transit-lsp-chaining": "disabled"
}
}
}
golden_output_6 = {'execute.return_value': '''
show ldp overview
Instance: master
Reference count: 3
Router ID: 10.169.14.240
LDP inet: enabled
Transport preference: IPv4
Message id: 10
Configuration sequence: 2
Deaggregate: disabled
Explicit null: disabled
IPv6 tunneling: disabled
Strict targeted hellos: disabled
Loopback if added: no
Route preference: 9
Unicast transit LSP chaining: disabled
P2MP transit LSP chaining: disabled
Transit LSP statistics based on route statistics: disabled
LDP route acknowledgement: enabled
BGP export: enabled
LDP mtu discovery: disabled
LDP SR Mapping Client: disabled
Capabilities enabled: none
Egress FEC capabilities enabled: entropy-label-capability
Downstream unsolicited Sessions:
Nonexistent: 1
Retention: liberal
Control: ordered
Operational: 1
Retention: liberal
Control: ordered
Auto targeted sessions:
Auto targeted: disabled
Dynamic tunnel session count: 0
P2MP:
Recursive route: disabled
No rsvp tunneling: disabled
Timers:
Keepalive interval: 10, Keepalive timeout: 30
Link hello interval: 5, Link hello hold time: 15
Targeted hello interval: 15, Targeted hello hold time: 45
Label withdraw delay: 60, Make before break timeout: 30
Make before break switchover delay: 3
Link protection timeout: 120
Graceful restart:
Restart: enabled, Helper: enabled, Restart in process: false
Reconnect time: 60000, Max neighbor reconnect time: 120000
Recovery time: 160000, Max neighbor recovery time: 240000
Traffic Engineering:
Bgp igp: disabled
Both ribs: disabled
Mpls forwarding: disabled
IGP:
Tracking igp metric: disabled
Sync session up delay: 10
Session protection:
Session protection: disabled
Session protection timeout: 0
Interface addresses advertising:
10.169.14.121
10.169.14.157
LDP Job:
Read job time quantum: 1000, Write job time quantum: 1000
Read job loop quantum: 100, Write job loop quantum: 100
Backup inbound read job time quantum: 1000, Backup outbound read job time quantum: 1000
Backup inbound read job loop quantum: 100, Backup outbound read job loop quantum: 100
Label allocation:
Current number of LDP labels allocated: 3
Total number of LDP labels allocated: 7
Total number of LDP labels freed: 4
Total number of LDP label allocation failure: 0
Current number of labels allocated by all protocols: 0
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowLDPOverview(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowLDPOverview(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output)
def test_golden_2(self):
self.device = Mock(**self.golden_output_2)
obj = ShowLDPOverview(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_2)
def test_golden_3(self):
self.device = Mock(**self.golden_output_3)
obj = ShowLDPOverview(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_3)
def test_golden_4(self):
self.device = Mock(**self.golden_output_4)
obj = ShowLDPOverview(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_4)
def test_golden_5(self):
self.device = Mock(**self.golden_output_5)
obj = ShowLDPOverview(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_5)
def test_golden_6(self):
self.device = Mock(**self.golden_output_6)
obj = ShowLDPOverview(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_6)
# =================================
# Unit test for 'show ldp session {ipaddress} detail'
# =================================
class TestShowLDPSessionIpaddressDetail(unittest.TestCase):
    '''Unit test for "show ldp session {ipaddress} detail"'''
device = Device(name='aDevice')
maxDiff = None
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"ldp-session-information": {
"ldp-session": {
"ldp-connection-state": "Open",
"ldp-graceful-restart-local": "disabled",
"ldp-graceful-restart-remote": "disabled",
"ldp-holdtime": "30",
"ldp-keepalive-interval": "10",
"ldp-keepalive-time": "3",
"ldp-local-address": "10.34.2.250",
"ldp-local-helper-mode": "enabled",
"ldp-local-label-adv-mode": "Downstream unsolicited",
"ldp-local-maximum-reconnect": "120000",
"ldp-local-maximum-recovery": "240000",
"ldp-mtu-discovery": "disabled",
"ldp-neg-label-adv-mode": "Downstream unsolicited",
"ldp-neighbor-address": "10.169.14.240",
"ldp-neighbor-count": "1",
"ldp-neighbor-types": {
"ldp-neighbor-type": "discovered"
},
"ldp-remaining-time": "23",
"ldp-remote-address": "10.169.14.240",
"ldp-remote-helper-mode": "enabled",
"ldp-remote-label-adv-mode": "Downstream unsolicited",
"ldp-retry-interval": "1",
"ldp-session-address": {
"interface-address": "10.169.14.157"
},
"ldp-session-capabilities-advertised": {
"ldp-capability": "none"
},
"ldp-session-capabilities-received": {
"ldp-capability": "none"
},
"ldp-session-flags": {
"ldp-session-flag": "none"
},
"ldp-session-id": "10.34.2.250:0--10.169.14.240:0",
"ldp-session-max-pdu": "4096",
"ldp-session-nsr-state": "Not in sync",
"ldp-session-protection": {
"ldp-session-protection-state": "disabled"
},
"ldp-session-role": "Passive",
"ldp-session-state": "Operational",
"ldp-up-time": "00:00:47"
}
}
}
golden_output = {
'execute.return_value':
'''
show ldp session 10.169.14.240 detail
Address: 10.169.14.240, State: Operational, Connection: Open, Hold time: 23
Session ID: 10.34.2.250:0--10.169.14.240:0
Next keepalive in 3 seconds
Passive, Maximum PDU: 4096, Hold time: 30, Neighbor count: 1
Neighbor types: discovered
Keepalive interval: 10, Connect retry interval: 1
Local address: 10.34.2.250, Remote address: 10.169.14.240
Up for 00:00:47
Capabilities advertised: none
Capabilities received: none
Protection: disabled
Session flags: none
Local - Restart: disabled, Helper mode: enabled
Remote - Restart: disabled, Helper mode: enabled
Local maximum neighbor reconnect time: 120000 msec
Local maximum neighbor recovery time: 240000 msec
Local Label Advertisement mode: Downstream unsolicited
Remote Label Advertisement mode: Downstream unsolicited
Negotiated Label Advertisement mode: Downstream unsolicited
MTU discovery: disabled
Nonstop routing state: Not in sync
Next-hop addresses received:
10.169.14.157
'''
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowLdpSessionIpaddressDetail(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(ipaddress='10.169.14.240')
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowLdpSessionIpaddressDetail(device=self.device)
parsed_output = obj.parse(ipaddress='10.169.14.240')
self.assertEqual(parsed_output, self.golden_parsed_output)
if __name__ == '__main__':
unittest.main()
``` |
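The golden tests above all rely on the same `Mock(**{'execute.return_value': ...})` trick to stand in for a real device. A minimal standalone sketch of that pattern (the import and the canned CLI text below are illustrative, not copied from the test module):
```python
from unittest.mock import Mock
# Keyword arguments with dotted names are routed through configure_mock, so
# fake_device.execute(...) returns the canned CLI text regardless of the command passed.
fake_device = Mock(**{'execute.return_value': 'show ldp overview\nInstance: master\n'})
output = fake_device.execute('show ldp overview')
assert output.startswith('show ldp overview')
# A parser constructed with device=fake_device parses this string, and the test
# compares the resulting dict against a hand-written golden_parsed_output dict.
```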
{
"source": "jmahlik/learning-flask",
"score": 3
} |
#### File: src/flaskr/form.py
```python
import os
from flask import (
Blueprint,
flash,
render_template,
request,
)
bp = Blueprint("form", __name__, url_prefix="/form")
@bp.route("/form", methods=("GET", "POST"))
def form():
if request.method == "POST":
        image_name = request.form.get("image")
        error = None
        if not image_name:
            error = "image is required."
        if error is None:
            image = os.path.join("/", "static", "images", f"{image_name}.jpg")
            return render_template("form/image.html", image=image)
flash(error)
return render_template("form/form.html")
``` |
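A rough sketch of how the route above could be exercised with Flask's test client; `create_app` and the template contents are assumptions about the rest of the tutorial app, not something shown in this file:
```python
# Hypothetical usage sketch: assumes an application factory create_app() that registers
# the `form` blueprint above, and that form/image.html renders the `image` variable.
from flaskr import create_app
app = create_app()
client = app.test_client()
# GET renders the empty form template
assert client.get("/form/form").status_code == 200
# POST with an image name builds /static/images/<name>.jpg and renders form/image.html
resp = client.post("/form/form", data={"image": "cat"})
assert b"/static/images/cat.jpg" in resp.data
```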
{
"source": "jmahlik/sagemaker-experiments",
"score": 2
} |
#### File: src/smexperiments/trial.py
```python
from smexperiments import api_types, _base_types, trial_component, _utils, tracker
import time
class Trial(_base_types.Record):
"""
An execution of a data-science workflow with an experiment.
Consists of a list of trial component objects, which document individual activities within the workflow.
Examples:
.. code-block:: python
from smexperiments import trial, experiment, tracker
my_experiment = experiment.Experiment.create(experiment_name='AutoML')
my_trial = trial.Trial.create('AutoML')
my_tracker = tracker.Tracker.create()
# log hyper parameter of learning rate
my_tracker.log_parameter('learning_rate', 0.01)
my_trial.add_trial_component(my_tracker)
for trial_component in my_trial.list_trial_components():
print(trial_component)
my_trial.remove_trial_component(my_tracker)
my_trial.delete()
Attributes:
trial_name (str): The name of the trial.
experiment_name (str): The name of the trial's experiment.
tags (List[dict[str, str]]): A list of tags to associate with the trial.
"""
trial_name = None
experiment_name = None
tags = None
_boto_create_method = "create_trial"
_boto_load_method = "describe_trial"
_boto_delete_method = "delete_trial"
_boto_update_method = "update_trial"
_boto_update_members = ["trial_name", "display_name"]
_boto_delete_members = ["trial_name"]
MAX_DELETE_ALL_ATTEMPTS = 3
@classmethod
def _boto_ignore(cls):
return super(Trial, cls)._boto_ignore() + ["CreatedBy"]
def save(self):
"""Save the state of this Trial to SageMaker.
Returns:
dict: Update trial response.
"""
return self._invoke_api(self._boto_update_method, self._boto_update_members)
def delete(self):
"""Delete this Trial from SageMaker.
Requires that this Trial contains no TrialComponents. Individual TrialComponents can be removed by
calling :meth:`~smexperiments.trial.Trial.remove_trial_component`.
Returns:
dict: Delete trial response.
"""
return self._invoke_api(self._boto_delete_method, self._boto_delete_members)
@classmethod
def load(cls, trial_name, sagemaker_boto_client=None):
"""Load an existing trial and return a ``Trial`` object.
Args:
trial_name: (str): Name of the Trial.
sagemaker_boto_client (SageMaker.Client, optional): Boto3 client for SageMaker.
If not supplied, a default boto3 client will be created and used.
Returns:
smexperiments.trial.Trial: A SageMaker ``Trial`` object
"""
return super(Trial, cls)._construct(
cls._boto_load_method, trial_name=trial_name, sagemaker_boto_client=sagemaker_boto_client
)
@classmethod
def create(cls, experiment_name, trial_name=None, sagemaker_boto_client=None, trial_components=None, tags=None):
"""Create a new trial and return a ``Trial`` object.
Args:
experiment_name: (str): Name of the experiment to create this trial in.
trial_name: (str, optional): Name of the Trial. If not specified, an auto-generated name will be used.
sagemaker_boto_client (SageMaker.Client, optional): Boto3 client for SageMaker.
If not supplied, a default boto3 client will be created and used.
trial_components (list): A list of trial component names, trial components, or trial component trackers.
tags (List[dict[str, str]]): A list of tags to associate with the trial.
Returns:
smexperiments.trial.Trial: A SageMaker ``Trial`` object
"""
trial_name = trial_name or _utils.name("Trial")
trial = super(Trial, cls)._construct(
cls._boto_create_method,
trial_name=trial_name,
experiment_name=experiment_name,
tags=tags,
sagemaker_boto_client=sagemaker_boto_client,
)
if trial_components:
for tc in trial_components:
trial.add_trial_component(tc)
return trial
@classmethod
def list(
cls,
experiment_name=None,
trial_component_name=None,
created_before=None,
created_after=None,
sort_by=None,
sort_order=None,
sagemaker_boto_client=None,
):
"""List all trials matching the specified criteria.
Args:
experiment_name (str, optional): Name of the experiment. If specified, only trials in
the experiment will be returned.
trial_component_name (str, optional): Name of the trial component. If specified, only
trials with this trial component name will be returned.
created_before (datetime.datetime, optional): Return trials created before this instant.
created_after (datetime.datetime, optional): Return trials created after this instant.
sort_by (str, optional): Which property to sort results by. One of 'Name',
'CreationTime'.
sort_order (str, optional): One of 'Ascending', or 'Descending'.
sagemaker_boto_client (SageMaker.Client, optional): Boto3 client for SageMaker.
If not supplied, a default boto3 client will be created and used.
Returns:
collections.Iterator[smexperiments.trial.TrialSummary]: An iterator over trials
matching the specified criteria.
"""
return super(Trial, cls)._list(
"list_trials",
api_types.TrialSummary.from_boto,
"TrialSummaries",
experiment_name=experiment_name,
trial_component_name=trial_component_name,
created_before=created_before,
created_after=created_after,
sort_by=sort_by,
sort_order=sort_order,
sagemaker_boto_client=sagemaker_boto_client,
)
@classmethod
def search(
cls,
search_expression=None,
sort_by=None,
sort_order=None,
max_results=None,
sagemaker_boto_client=None,
):
"""
        Search trials. Returns SearchResults in the account matching the search criteria.
Args:
search_expression: (dict, optional): A Boolean conditional statement. Resource objects
must satisfy this condition to be included in search results. You must provide at
least one subexpression, filter, or nested filter.
sort_by (str, optional): The name of the resource property used to sort the SearchResults.
The default is LastModifiedTime
sort_order (str, optional): How SearchResults are ordered. Valid values are Ascending or
                Descending. The default is Descending.
max_results (int, optional): The maximum number of results to return in a SearchResponse.
sagemaker_boto_client (SageMaker.Client, optional): Boto3 client for SageMaker. If not
supplied, a default boto3 client will be used.
Returns:
collections.Iterator[SearchResult] : An iterator over search results matching the search criteria.
"""
return super(Trial, cls)._search(
search_resource="ExperimentTrial",
search_item_factory=api_types.TrialSearchResult.from_boto,
search_expression=None if search_expression is None else search_expression.to_boto(),
sort_by=sort_by,
sort_order=sort_order,
max_results=max_results,
sagemaker_boto_client=sagemaker_boto_client,
)
def add_trial_component(self, tc):
"""Add the specified trial component to this ``Trial``.
A trial component may belong to many trials and a trial may have many trial components.
Args:
tc (str or Tracker or TrialComponent or TrialComponentSummary): The trial component to
add. Can be one of a Tracker instance, a TrialComponent instance, or a string containing
the name of the trial component to add.
"""
if isinstance(tc, tracker.Tracker):
trial_component_name = tc.trial_component.trial_component_name
elif isinstance(tc, trial_component.TrialComponent):
trial_component_name = tc.trial_component_name
elif isinstance(tc, api_types.TrialComponentSummary):
trial_component_name = tc.trial_component_name
else:
trial_component_name = str(tc)
self.sagemaker_boto_client.associate_trial_component(
TrialName=self.trial_name, TrialComponentName=trial_component_name
)
def remove_trial_component(self, tc):
"""Remove the specified trial component from this trial.
Args:
tc (str or Tracker or TrialComponent or TrialComponentSummary): The trial component to
remove. Can be one of a Tracker instance, a TrialComponent instance, or a string
containing the name of the trial component to remove.
"""
if isinstance(tc, tracker.Tracker):
trial_component_name = tc.trial_component.trial_component_name
elif isinstance(tc, trial_component.TrialComponent):
trial_component_name = tc.trial_component_name
elif isinstance(tc, api_types.TrialComponentSummary):
trial_component_name = tc.trial_component_name
else:
trial_component_name = str(tc)
self.sagemaker_boto_client.disassociate_trial_component(
TrialName=self.trial_name, TrialComponentName=trial_component_name
)
def list_trial_components(
self, created_before=None, created_after=None, sort_by=None, sort_order=None, max_results=None, next_token=None
):
"""List trial components in this trial matching the specified criteria.
Args:
created_before (datetime.datetime, optional): Return trials created before this instant.
created_after (datetime.datetime, optional): Return trials created after this instant.
sort_by (str, optional): Which property to sort results by. One of 'Name',
'CreationTime'.
sort_order (str, optional): One of 'Ascending', or 'Descending'.
max_results (int, optional): maximum number of trial components to retrieve
next_token (str, optional): token for next page of results
Returns:
collections.Iterator[smexperiments.api_types.TrialComponentSummary] : An iterator over
trials matching the criteria.
"""
return trial_component.TrialComponent.list(
trial_name=self.trial_name,
created_before=created_before,
created_after=created_after,
sort_by=sort_by,
sort_order=sort_order,
max_results=max_results,
next_token=next_token,
sagemaker_boto_client=self.sagemaker_boto_client,
)
def delete_all(self, action):
"""
        Force-delete the trial and all trial components associated with it.
        Args:
            action (str): pass the string '--force' to confirm deletion of the trial and all associated trial components.
"""
if action != "--force":
raise ValueError(
"Must confirm with string '--force' in order to delete the trial and " "associated trial components."
)
delete_attempt_count = 0
last_exception = None
while True:
if delete_attempt_count == self.MAX_DELETE_ALL_ATTEMPTS:
raise Exception("Failed to delete, please try again.") from last_exception
try:
for trial_component_summary in self.list_trial_components():
tc = trial_component.TrialComponent.load(
sagemaker_boto_client=self.sagemaker_boto_client,
trial_component_name=trial_component_summary.trial_component_name,
)
tc.delete(force_disassociate=True)
# to prevent throttling
time.sleep(1.2)
self.delete()
break
except Exception as ex:
last_exception = ex
finally:
delete_attempt_count = delete_attempt_count + 1
``` |
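A condensed usage sketch of the API defined above; the experiment and trial names are made up, and default AWS credentials plus the boto3 SageMaker client the library creates are assumed:
```python
from smexperiments import experiment, trial
# Create an experiment and a trial inside it (names are illustrative).
exp = experiment.Experiment.create(experiment_name="demo-experiment")
demo_trial = trial.Trial.create(experiment_name="demo-experiment", trial_name="demo-trial")
# Iterate the trial components currently associated with the trial.
for tc_summary in demo_trial.list_trial_components():
    print(tc_summary.trial_component_name)
# delete_all requires the literal string '--force'; it deletes the trial and every associated
# trial component, retrying up to Trial.MAX_DELETE_ALL_ATTEMPTS times as implemented above.
demo_trial.delete_all("--force")
```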
{
"source": "jmahmud/MyDjangoAngularSeed",
"score": 3
} |
#### File: MyDjangoAngularProject/restapi/models.py
```python
from django.db import models
# Create your models here.
class Album(models.Model):
name = models.CharField(max_length=200)
    def __str__(self):
return self.name
class Photo(models.Model):
url = models.CharField(max_length=200)
name = models.CharField(max_length=200)
album = models.ForeignKey(Album)
    def __str__(self):
return self.name
``` |
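A short sketch of how the two models relate through the foreign key; record values are illustrative and assume migrations have been applied:
```python
album = Album.objects.create(name="Holiday 2014")
photo = Photo.objects.create(url="http://example.com/1.jpg", name="Beach", album=album)
assert photo.album.name == "Holiday 2014"       # forward access through the ForeignKey
assert list(album.photo_set.all()) == [photo]   # Django's default reverse accessor
```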
{
"source": "jmaidens/Codility",
"score": 3
} |
#### File: jmaidens/Codility/Fish.py
```python
def solution(A, B):
N = len(A)
survivor_count = 0
fish_downstream = []
for i in range(N):
if B[i] == 1:
fish_downstream.append(A[i])
if B[i] == 0:
# if there are no fish left downstream then fish i survives
if len(fish_downstream) == 0:
survivor_count += 1
# otherwise, it swims downstream eating the downstream fish until
# it either gets eaten or reaches the end and survives
else:
while len(fish_downstream) > 0 and fish_downstream[-1] < A[i]:
fish_downstream.pop()
if len(fish_downstream) == 0:
survivor_count += 1
# all the fish in the fish_downstream list that have not
# been eaten after all the fish swimming upstream pass also survive
survivor_count = survivor_count + len(fish_downstream)
return survivor_count
```
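A worked example with the standard input for this task makes the stack behaviour concrete (it assumes `solution` from Fish.py above is in scope):
```python
# A = sizes, B = directions (0 = swimming upstream, 1 = swimming downstream).
# Fish 1 (size 3) flows downstream and eats fish 2 and 3; fish 4 (size 5) then eats fish 1,
# so only fish 0 (size 4) and fish 4 (size 5) survive.
assert solution([4, 3, 2, 1, 5], [0, 1, 0, 0, 0]) == 2
```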
#### File: jmaidens/Codility/PermMissingElem.py
```python
def solution(A):
N = len(A)
# The array with the missing entry added should sum to (N+2)(N+1)/2
# so the missing entry can be determined by subtracting the sum of
# the entries of A
    missing_element = (N+2)*(N+1)//2 - sum(A)
return missing_element
``` |
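A quick worked example of the sum trick (again assuming `solution` above is in scope):
```python
# A = [2, 3, 1, 5] has N = 4 elements drawn from 1..5.
# The full sum is (N+2)*(N+1)//2 = 15 and sum(A) = 11, so the missing element is 4.
assert solution([2, 3, 1, 5]) == 4
```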
{
"source": "Jmaihuire/MTH_9821",
"score": 3
} |
#### File: Jmaihuire/MTH_9821/NMF_Binomial_Tree.py
```python
from __future__ import division
import numpy as np
from Option import *
from NMF_Black_Scholes import *
import copy
import matplotlib.pyplot as plt
class BinomialTree:
def __init__(self, N, r, Op):
'''
Generates the binomial tree framework for a certain option
:param N: Time steps
:param r: risk free interest rate in units
:param Op: An Option class instance
'''
self.tstep = N
self.riskfree = r
dt = Op.maturity / N
self.u = np.exp(Op.vol * np.sqrt(dt))
self.d = 1 / self.u
self.p = (np.exp((r - Op.div_rate) * dt) - self.d) / (self.u - self.d)
def Binomial_Tree_Pricing(Op, BMT, Greek = False):
'''
Calculate the risk neutral price of a certain option with general Binomial Tree model.
:param Op: An instance from class Option
:param BMT: An binomial tree instance with step N, rate r, upfactor u and downfactor d
:return: The risk-neutral binomial tree price (by taking risk neutral expectation)
'''
S0, K, T, q = Op.spot, Op.strike, Op.maturity, Op.div_rate
N, r, u, d, p = BMT.tstep, BMT.riskfree, BMT.u, BMT.d, BMT.p
dt = T / N
# p = (np.exp((r - q) * dt) - d) / (u - d) # risk neutral probability for the stock price to go up
fv = Op.finalvalue(N, u, d)
for j in range(N - 1, -1, -1):
for i in range(0, j + 1):
if Op.ae == 'EU':
fv[i] = np.exp(-r * dt) * (p * fv[i] + (1 - p) * fv[i + 1])
elif Op.ae == 'AM':
Sc = S0 * u ** (j - i) * d ** i
if Op.cp == 'C':
fv[i] = max(np.exp(-r * dt) * (p * fv[i] + (1 - p) * fv[i + 1]), Sc - K)
elif Op.cp == 'P':
fv[i] = max(np.exp(-r * dt) * (p * fv[i] + (1 - p) * fv[i + 1]), K - Sc)
elif Op.ae == 'DNO': #Down and Out options
B = Op.Barrier
Sc = S0 * u ** (j - i) * d ** i
if Op.cp == 'C':
fv[i] = np.exp(-r * dt) * (p * fv[i] + (1 - p) * fv[i + 1]) * int(Sc > B)
elif Op.cp == 'P':
fv[i] = np.exp(-r * dt) * (p * fv[i] + (1 - p) * fv[i + 1]) * int(Sc > B)
if j == 2:
fv2 = [float(fv[0]), float(fv[1]), float(fv[2])]
fs2 = [S0 * u ** 2, S0 * u * d, S0 * d ** 2]
if j == 1:
fv1 = [float(fv[0]), float(fv[1])]
fs1 = [S0 * u, S0 * d]
if not Greek:
return fv[0][0]
else:
Delta = (fv1[0] - fv1[1]) / (fs1[0] - fs1[1])
Gamma = ((fv2[0] - fv2[1]) / (fs2[0] - fs2[1]) - (fv2[1] - fv2[2]) / (fs2[1] - fs2[2])) / ((fs2[0] - fs2[2]) / 2)
Theta = (fv2[1] - fv[0][0]) / (2 * dt)
return (fv[0][0], Delta, Gamma, Theta)
def Avg_Binomial_Tree_Pricing(Op, BMT, Greek = False):
'''
Calculate the risk neutral price of a certain option with Average Binomial Tree model.
:param Op: An instance from class Option
:param BMT: An binomial tree instance with step N, rate r, upfactor u and downfactor d
:return: The risk-neutral binomial tree price (by taking risk neutral expectation)
'''
BMT_1 = BinomialTree(BMT.tstep + 1, BMT.riskfree, Op)
if Greek:
old = Binomial_Tree_Pricing(Op, BMT, Greek)
new = Binomial_Tree_Pricing(Op, BMT_1, Greek)
avgbmt_tuple = tuple([(old[i] + new[i]) / 2 for i in range(4)])
return avgbmt_tuple
else:
return (Binomial_Tree_Pricing(Op, BMT) + Binomial_Tree_Pricing(Op, BMT_1)) / 2
def Binomial_Black_Scholes(Op, BMT, Greek = False):
'''
Calculate the risk neutral price of a certain option with Binomial Black Scholes model.
:param Op: An instance from class Option
:param BMT: An binomial tree instance with step N, rate r, upfactor u and downfactor d
:param Greek: A boolean variable whether the output should contain greeks
:return:
'''
S0, K, T, q = Op.spot, Op.strike, Op.maturity, Op.div_rate
N, r, u, d, p = BMT.tstep, BMT.riskfree, BMT.u, BMT.d, BMT.p
dt = T / N
# Generate the first step option value using BS formula
fv = np.zeros([N, 1])
Op.maturity = dt
Op.spot = u ** (N - 1) * S0
fv[0] = Black_Scholes_Pricing(Op, r)
for i in range(1, N):
Op.spot *= d / u
if Op.ae == 'EU':
fv[i] = Black_Scholes_Pricing(Op, r)
if Op.ae == 'AM':
if Op.cp == 'C':
fv[i] = max(Black_Scholes_Pricing(Op, r), Op.spot - K)
elif Op.cp == 'P':
fv[i] = max(Black_Scholes_Pricing(Op, r), K - Op.spot)
# print fv
for j in range(N - 2, -1, -1):
for i in range(0, j + 1):
if Op.ae == 'EU':
fv[i] = np.exp(-r * dt) * (p * fv[i] + (1 - p) * fv[i + 1])
if Op.ae == 'AM':
Sc = S0 * u ** (j - i) * d ** i
if Op.cp == 'C':
fv[i] = max(np.exp(-r * dt) * (p * fv[i] + (1 - p) * fv[i + 1]), Sc - K)
elif Op.cp == 'P':
fv[i] = max(np.exp(-r * dt) * (p * fv[i] + (1 - p) * fv[i + 1]), K - Sc)
# fv[i] = np.exp(-r * dt) * (p * fv[i] + (1 - p) * fv[i + 1])
# if j == 3:
# print Sc, K, K - Sc
if j == 2:
fv2 = [float(fv[0]), float(fv[1]), float(fv[2])]
fs2 = [S0 * u ** 2, S0 * u * d, S0 * d ** 2]
if j == 1:
fv1 = [float(fv[0]), float(fv[1])]
fs1 = [S0 * u, S0 * d]
# print fv
Op.spot = S0
Op.maturity = T
if not Greek:
return fv[0][0]
else:
Delta = (fv1[0] - fv1[1]) / (fs1[0] - fs1[1])
Gamma = ((fv2[0] - fv2[1]) / (fs2[0] - fs2[1]) - (fv2[1] - fv2[2]) / (fs2[1] - fs2[2])) / ((fs2[0] - fs2[2]) / 2)
Theta = (fv2[1] - fv[0][0]) / (2 * dt)
return (fv[0][0], Delta, Gamma, Theta)
def Binomial_Black_Scholes_Richardson(Op, BMT, Greek = False):
'''
Calculate the risk neutral price of a certain option using Binomial Black Scholes model with Richardson's extrapolation.
:param Op: An instance from class Option
:param BMT: An binomial tree instance with step N, rate r, upfactor u and downfactor d
:return:
'''
N, r = BMT.tstep, BMT.riskfree
BMT_2 = BinomialTree(int(N / 2), r, Op)
if Greek:
full = Binomial_Black_Scholes(Op, BMT, Greek)
half = Binomial_Black_Scholes(Op, BMT_2, Greek)
pbbsr_tuple = tuple([full[i] * 2 - half[i] for i in range(4)])
return pbbsr_tuple
else:
return Binomial_Black_Scholes(Op, BMT) * 2 - Binomial_Black_Scholes(Op, BMT_2)
def implied_vol(opt, r, p_m, sigma_0, sigma_n1, tol=10 ** -4, N=2500):
'''
Compute the implied volatility with secant method on a binomial tree
:param opt: Option, whose implied vol need to be determined
:param sigma_0: initial_guess 0
:param sigma_n1: initial_guess -1
:param tol: tolerance of iteration
:param p_m: market price of the option
:param N: time step of the tree, default 2500
:return: the implied vol
'''
def f(sigma):
opt.vol = sigma
bmt = BinomialTree(N, r, opt)
p_sigma = Binomial_Tree_Pricing(opt, bmt)
return p_sigma - p_m
sigma_old, sigma_new = sigma_n1, sigma_0
ic = 0
while abs(sigma_new - sigma_old) > tol:
sigma_oldest = sigma_old
sigma_old = sigma_new
sigma_new = sigma_old - f(sigma_old) * (sigma_old - sigma_oldest) / (f(sigma_old) - f(sigma_oldest))
ic += 1
print ic, sigma_new
# print "ic=", ic
return sigma_new
if __name__ == "__main__":
eup = Option(S0 = 41, K = 40, T = 1, q = 0.01, sigma = 0.3, cp = 'P', ae = 'EU')
amp = Option(S0 = 41, K = 40, T = 1, q = 0.01, sigma = 0.3, cp = 'P', ae = 'AM')
bmt = BinomialTree(N = 5, r = 0.03, Op = amp)
# peup = Binomial_Tree_Pricing(eup, bmt)
# pavgeup = Avg_Binomial_Tree_Pricing(eup, bmt)
# pbbs = Binomial_Black_Scholes(eup, bmt)
# pbbsr = Binomial_Black_Scholes_Richardson(eup, bmt)
# print peup
# print pavgeup
# print pbbs
# print pbbsr
#(peup, deltae, gammae, thetae) = Binomial_Tree_Pricing(eup, bmt, True)
#(pamp, deltaa, gammaa, thetaa) = Binomial_Tree_Pricing(amp, bmt, True)
#print [peup, deltae, gammae, thetae]
#print [pamp, deltaa, gammaa, thetaa]
# pabt, Deltaabt, Gammaabt, Thetaabt = Avg_Binomial_Tree_Pricing(eup, bmt, True)
# print pabt, Deltaabt, Gammaabt, Thetaabt
pbbs, Deltabbs, Gammabbs, Thetabbs = Binomial_Black_Scholes(amp, bmt, True)
print pbbs, Deltabbs, Gammabbs, Thetabbs
# pbbsr = Binomial_Black_Scholes_Richardson(eup, bmt, False)
# print pbbsr
``` |
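For orientation, a small sanity check of the CRR parameters that `BinomialTree.__init__` computes for the `__main__` example above (sigma = 0.3, T = 1, N = 5, r = 0.03, q = 0.01); the numbers in the comments are approximate:
```python
import numpy as np
dt = 1.0 / 5                                    # T / N
u = np.exp(0.3 * np.sqrt(dt))                   # up factor, ~1.1436
d = 1.0 / u                                     # down factor, ~0.8744
p = (np.exp((0.03 - 0.01) * dt) - d) / (u - d)  # risk-neutral up probability, ~0.481
assert 0.0 < p < 1.0                            # valid probability, as the pricing recursion requires
```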
{
"source": "jmailloux/pyEX",
"score": 2
} |
#### File: pyEX/common/timing.py
```python
from __future__ import print_function
import os
import os.path
import tempfile
import pytz
from temporalcache import expire, interval
_PYEX_CACHE_FOLDER = os.path.abspath(os.path.join(tempfile.gettempdir(), "pyEX"))
_UTC = pytz.UTC
_EST = pytz.timezone("EST")
def _expire(**temporal_args):
if not os.path.exists(_PYEX_CACHE_FOLDER):
os.makedirs(_PYEX_CACHE_FOLDER)
def _wrapper(foo):
temporal_args["persistent"] = os.path.join(_PYEX_CACHE_FOLDER, foo.__name__)
return expire(**temporal_args)(foo)
return _wrapper
def _interval(**temporal_args):
if not os.path.exists(_PYEX_CACHE_FOLDER):
os.makedirs(_PYEX_CACHE_FOLDER)
def _wrapper(foo):
temporal_args["persistent"] = os.path.join(_PYEX_CACHE_FOLDER, foo.__name__)
return interval(**temporal_args)(foo)
return _wrapper
```
#### File: premium/fraudfactors/__init__.py
```python
from functools import wraps
from ...common import _UTC, _expire
from ...stocks import timeSeries, timeSeriesDF
@_expire(hour=8, tz=_UTC)
def _base(id, symbol="", **kwargs):
"""internal"""
kwargs["id"] = id
kwargs["key"] = symbol or kwargs.pop("key", "")
return timeSeries(**kwargs)
@_expire(hour=8, tz=_UTC)
def _baseDF(id, symbol="", **kwargs):
"""internal"""
kwargs["id"] = id
kwargs["key"] = symbol or kwargs.pop("key", "")
return timeSeriesDF(**kwargs)
@wraps(timeSeries)
def similarityIndex(symbol="", **kwargs):
"""The Similarity Index quantifies the textual differences between a given company’s annual or quarterly filings on an “as disclosed” basis. For example, a similarity score is calculated by comparing a company’s 2017 10-K with the 2016 10-K; or a company’s 2017 Q3 10-Q compared to the 2016 Q3 10-Q a year ago.
Intuitively, firms breaking from routine phrasing and content in mandatory disclosures give clues about their future performance which eventually drive stock returns over time. This data set captures significant changes in disclosure texts in the form of low similarity scores.
Academic research has shown that a portfolio that shorts low similarity scores and longs high similarity scores earns non-trivial and uncorrelated returns over a period of 12-18 months.
Data available from 2001 with coverage of about 23,000 equities
https://iexcloud.io/docs/api/#similiarity-index
Args:
symbol (str): symbol to use
"""
return _base(id="PREMIUM_FRAUD_FACTORS_SIMILARITY_INDEX", symbol=symbol, **kwargs)
@wraps(timeSeries)
def similarityIndexDF(symbol="", **kwargs):
"""The Similarity Index quantifies the textual differences between a given company’s annual or quarterly filings on an “as disclosed” basis. For example, a similarity score is calculated by comparing a company’s 2017 10-K with the 2016 10-K; or a company’s 2017 Q3 10-Q compared to the 2016 Q3 10-Q a year ago.
Intuitively, firms breaking from routine phrasing and content in mandatory disclosures give clues about their future performance which eventually drive stock returns over time. This data set captures significant changes in disclosure texts in the form of low similarity scores.
Academic research has shown that a portfolio that shorts low similarity scores and longs high similarity scores earns non-trivial and uncorrelated returns over a period of 12-18 months.
Data available from 2001 with coverage of about 23,000 equities
https://iexcloud.io/docs/api/#similiarity-index
Args:
symbol (str): symbol to use
"""
return _baseDF(id="PREMIUM_FRAUD_FACTORS_SIMILARITY_INDEX", symbol=symbol, **kwargs)
@wraps(timeSeries)
def nonTimelyFilings(symbol="", **kwargs):
"""The data set records the date in which a firm files a Non-Timely notification with the SEC.
Companies regulated by the SEC are required to file a Non-Timely notification when they are unable to file their annual or quarterly disclosures on time. In most cases, the inability to file annual/quarterly disclosures on time is a red-flag and thus a valuable signal for algorithmic strategies and fundamental investing alike.
Data available from 1994 with coverage of about 18,000 equities
https://iexcloud.io/docs/api/#non-timely-filings
Args:
symbol (str): symbol to use
"""
return _base(id="PREMIUM_FRAUD_FACTORS_NON_TIMELY_FILINGS", symbol=symbol, **kwargs)
@wraps(timeSeries)
def nonTimelyFilingsDF(symbol="", **kwargs):
"""The data set records the date in which a firm files a Non-Timely notification with the SEC.
Companies regulated by the SEC are required to file a Non-Timely notification when they are unable to file their annual or quarterly disclosures on time. In most cases, the inability to file annual/quarterly disclosures on time is a red-flag and thus a valuable signal for algorithmic strategies and fundamental investing alike.
Data available from 1994 with coverage of about 18,000 equities
https://iexcloud.io/docs/api/#non-timely-filings
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_FRAUD_FACTORS_NON_TIMELY_FILINGS", symbol=symbol, **kwargs
)
```
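A hedged usage sketch for the wrappers above: the import path is inferred from the file layout, the token is a placeholder, and keyword arguments are simply forwarded to `timeSeriesDF`:
```python
from pyEX.premium.fraudfactors import similarityIndexDF
# Requires an IEX Cloud token with access to the premium Fraud Factors data sets.
df = similarityIndexDF(symbol="AAPL", token="<your-premium-token>")
print(df.head())
```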
#### File: premium/valuengine/__init__.py
```python
from ...common import _EST, _expire
from ...files import files
@_expire(hour=10, tz=_EST)
def valuEngineStockResearchReport(symbol="", date=None, token="", version="stable"):
"""ValuEngine provides research on over 5,000 stocks with stock valuations, Buy/Hold/Sell recommendations, and forecasted target prices, so that you the individual investor can make informed decisions. Every ValuEngine Valuation and Forecast model for the U.S. equities markets has been extensively back-tested. ValuEngine’s performance exceeds that of many well-known stock-picking styles. Reports available since March 19th, 2020.
https://iexcloud.io/docs/api/#valuengine-stock-research-report
Args:
symbol (str): symbol to use
date (str): date to access
"""
return files(
id="VALUENGINE_REPORT", symbol=symbol, date=date, token=token, version=version
)
```
#### File: pyEX/stocks/iex.py
```python
from functools import wraps
import pandas as pd
from ..common import (
_get,
_getAsync,
_raiseIfNotStr,
_reindex,
_strOrDate,
_strToList,
_toDatetime,
json_normalize,
)
def iexTops(symbols=None, token="", version="stable", format="json"):
"""TOPS provides IEX’s aggregated best quoted bid and offer position in near real time for all securities on IEX’s displayed limit order book.
TOPS is ideal for developers needing both quote and trade data.
https://iexcloud.io/docs/api/#tops
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
symbols = _strToList(symbols)
if symbols:
return _get(
"tops?symbols=" + ",".join(symbols) + "%2b",
token=token,
version=version,
format=format,
)
return _get("tops", token=token, version=version, format=format)
@wraps(iexTops)
async def iexTopsAsync(symbols=None, token="", version="stable", format="json"):
symbols = _strToList(symbols)
if symbols:
return await _getAsync(
"tops?symbols=" + ",".join(symbols) + "%2b",
token=token,
version=version,
format=format,
)
return await _getAsync("tops", token=token, version=version, format=format)
@wraps(iexTops)
def iexTopsDF(*args, **kwargs):
return _reindex(_toDatetime(json_normalize(iexTops(*args, **kwargs))), "symbol")
def iexLast(symbols=None, token="", version="stable", format="json"):
"""Last provides trade data for executions on IEX. It is a near real time, intraday API that provides IEX last sale price, size and time.
Last is ideal for developers that need a lightweight stock quote.
https://iexcloud.io/docs/api/#last
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
symbols = _strToList(symbols)
if symbols:
return _get(
"tops/last?symbols=" + ",".join(symbols) + "%2b",
token=token,
version=version,
format=format,
)
return _get("tops/last", token=token, version=version, format=format)
@wraps(iexLast)
async def iexLastAsync(symbols=None, token="", version="stable", format="json"):
symbols = _strToList(symbols)
if symbols:
return await _getAsync(
"tops/last?symbols=" + ",".join(symbols) + "%2b",
token=token,
version=version,
format=format,
)
return await _getAsync("tops/last", token=token, version=version, format=format)
@wraps(iexLast)
def iexLastDF(*args, **kwargs):
return _reindex(_toDatetime(json_normalize(iexLast(*args, **kwargs))), "symbol")
def iexDeep(symbol=None, token="", version="stable", format="json"):
"""DEEP is used to receive real-time depth of book quotations direct from IEX.
The depth of book quotations received via DEEP provide an aggregated size of resting displayed orders at a price and side,
and do not indicate the size or number of individual orders at any price level.
Non-displayed orders and non-displayed portions of reserve orders are not represented in DEEP.
DEEP also provides last trade price and size information. Trades resulting from either displayed or non-displayed orders matching on IEX will be reported. Routed executions will not be reported.
https://iexcloud.io/docs/api/#deep
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep?symbols=" + symbol, token=token, version=version, format=format
)
return _get("deep", token=token, version=version, format=format)
@wraps(iexDeep)
async def iexDeepAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep?symbols=" + symbol, token=token, version=version, format=format
)
return await _getAsync("deep", token=token, version=version, format=format)
@wraps(iexDeep)
def iexDeepDF(*args, **kwargs):
return _toDatetime(json_normalize(iexDeep(*args, **kwargs)))
def iexAuction(symbol=None, token="", version="stable", format="json"):
"""DEEP broadcasts an Auction Information Message every one second between the Lock-in Time and the auction match for Opening and Closing Auctions,
and during the Display Only Period for IPO, Halt, and Volatility Auctions. Only IEX listed securities are eligible for IEX Auctions.
https://iexcloud.io/docs/api/#deep-auction
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/auction?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return _get("deep/auction", token=token, version=version, format=format)
@wraps(iexAuction)
async def iexAuctionAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/auction?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return await _getAsync("deep/auction", token=token, version=version, format=format)
@wraps(iexAuction)
def iexAuctionDF(*args, **kwargs):
return _toDatetime(json_normalize(iexAuction(*args, **kwargs)))
def iexBook(symbol=None, token="", version="stable", format="json"):
"""Book shows IEX’s bids and asks for given symbols.
https://iexcloud.io/docs/api/#deep-book
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/book?symbols=" + symbol, token=token, version=version, format=format
)
return _get("deep/book", token=token, version=version, format=format)
@wraps(iexBook)
async def iexBookAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/book?symbols=" + symbol, token=token, version=version, format=format
)
return await _getAsync("deep/book", token=token, version=version, format=format)
@wraps(iexBook)
def iexBookDF(*args, **kwargs):
x = iexBook(*args, **kwargs)
data = []
for key in x:
d = x[key]
d["symbol"] = key
data.append(d)
return _toDatetime(json_normalize(data))
def iexOpHaltStatus(symbol=None, token="", version="stable", format="json"):
"""The Exchange may suspend trading of one or more securities on IEX for operational reasons and indicates such operational halt using the Operational halt status message.
IEX disseminates a full pre-market spin of Operational halt status messages indicating the operational halt status of all securities.
In the spin, IEX will send out an Operational Halt Message with “N” (Not operationally halted on IEX) for all securities that are eligible for trading at the start of the Pre-Market Session.
If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System at the start of the Pre-Market Session.
After the pre-market spin, IEX will use the Operational halt status message to relay changes in operational halt status for an individual security.
https://iexcloud.io/docs/api/#deep-operational-halt-status
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/op-halt-status?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return _get("deep/op-halt-status", token=token, version=version, format=format)
@wraps(iexOpHaltStatus)
async def iexOpHaltStatusAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/op-halt-status?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return await _getAsync(
"deep/op-halt-status", token=token, version=version, format=format
)
@wraps(iexOpHaltStatus)
def iexOpHaltStatusDF(*args, **kwargs):
x = iexOpHaltStatus(*args, **kwargs)
data = []
for key in x:
d = x[key]
d["symbol"] = key
data.append(d)
return _toDatetime(pd.DataFrame(data))
def iexOfficialPrice(symbol=None, token="", version="stable", format="json"):
"""The Official Price message is used to disseminate the IEX Official Opening and Closing Prices.
These messages will be provided only for IEX Listed Securities.
https://iexcloud.io/docs/api/#deep-official-price
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/official-price?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return _get("deep/official-price", token=token, version=version, format=format)
@wraps(iexOfficialPrice)
async def iexOfficialPriceAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/official-price?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return await _getAsync(
"deep/official-price", token=token, version=version, format=format
)
@wraps(iexOfficialPrice)
def iexOfficialPriceDF(*args, **kwargs):
return _toDatetime(json_normalize(iexOfficialPrice(*args, **kwargs)))
def iexSecurityEvent(symbol=None, token="", version="stable", format="json"):
"""The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs
https://iexcloud.io/docs/api/#deep-security-event
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/security-event?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return _get("deep/security-event", token=token, version=version, format=format)
@wraps(iexSecurityEvent)
async def iexSecurityEventAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/security-event?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return await _getAsync(
"deep/security-event", token=token, version=version, format=format
)
@wraps(iexSecurityEvent)
def iexSecurityEventDF(*args, **kwargs):
x = iexSecurityEvent(*args, **kwargs)
data = []
for key in x:
d = x[key]
d["symbol"] = key
data.append(d)
return _toDatetime(pd.DataFrame(data))
def iexSsrStatus(symbol=None, token="", version="stable", format="json"):
"""In association with Rule 201 of Regulation SHO, the Short Sale Price Test Message is used to indicate when a short sale price test restriction is in effect for a security.
IEX disseminates a full pre-market spin of Short sale price test status messages indicating the Rule 201 status of all securities.
After the pre-market spin, IEX will use the Short sale price test status message in the event of an intraday status change.
The IEX Trading System will process orders based on the latest short sale price test restriction status.
https://iexcloud.io/docs/api/#deep-short-sale-price-test-status
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/ssr-status?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return _get("deep/ssr-status", token=token, version=version, format=format)
@wraps(iexSsrStatus)
async def iexSsrStatusAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/ssr-status?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return await _getAsync(
"deep/ssr-status", token=token, version=version, format=format
)
@wraps(iexSsrStatus)
def iexSsrStatusDF(*args, **kwargs):
x = iexSsrStatus(*args, **kwargs)
data = []
for key in x:
d = x[key]
d["symbol"] = key
data.append(d)
return _toDatetime(pd.DataFrame(data))
def iexSystemEvent(token="", version="stable", format="json"):
"""The System event message is used to indicate events that apply to the market or the data feed.
There will be a single message disseminated per channel for each System Event type within a given trading session.
https://iexcloud.io/docs/api/#deep-system-event
Args:
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
return _get("deep/system-event", token=token, version=version, format=format)
@wraps(iexSystemEvent)
async def iexSystemEventAsync(token="", version="stable", format="json"):
return await _getAsync(
"deep/system-event", token=token, version=version, format=format
)
@wraps(iexSystemEvent)
def iexSystemEventDF(*args, **kwargs):
return _toDatetime(json_normalize(iexSystemEvent(*args, **kwargs)))
def iexTrades(symbol=None, token="", version="stable", format="json"):
"""Trade report messages are sent when an order on the IEX Order Book is executed in whole or in part. DEEP sends a Trade report message for every individual fill.
https://iexcloud.io/docs/api/#deep-trades
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/trades?symbols=" + symbol, token=token, version=version, format=format
)
return _get("deep/trades", token=token, version=version, format=format)
@wraps(iexTrades)
async def iexTradesAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/trades?symbols=" + symbol, token=token, version=version, format=format
)
return await _getAsync("deep/trades", token=token, version=version, format=format)
@wraps(iexTrades)
def iexTradesDF(*args, **kwargs):
x = iexTrades(*args, **kwargs)
data = []
for key in x:
dat = x[key]
for d in dat:
d["symbol"] = key
data.append(d)
return _toDatetime(pd.DataFrame(data))
def iexTradeBreak(symbol=None, token="", version="stable", format="json"):
"""Trade break messages are sent when an execution on IEX is broken on that same trading day. Trade breaks are rare and only affect applications that rely upon IEX execution based data.
https://iexcloud.io/docs/api/#deep-trade-break
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/trade-breaks?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return _get("deep/trade-breaks", token=token, version=version, format=format)
@wraps(iexTradeBreak)
async def iexTradeBreakAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/trade-breaks?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return await _getAsync(
"deep/trade-breaks", token=token, version=version, format=format
)
@wraps(iexTradeBreak)
def iexTradeBreakDF(*args, **kwargs):
return _toDatetime(json_normalize(iexTradeBreak(*args, **kwargs)))
def iexTradingStatus(symbol=None, token="", version="stable", format="json"):
"""The Trading status message is used to indicate the current trading status of a security.
For IEX-listed securities, IEX acts as the primary market and has the authority to institute a trading halt or trading pause in a security due to news dissemination or regulatory reasons.
For non-IEX-listed securities, IEX abides by any regulatory trading halts and trading pauses instituted by the primary or listing market, as applicable.
IEX disseminates a full pre-market spin of Trading status messages indicating the trading status of all securities.
In the spin, IEX will send out a Trading status message with “T” (Trading) for all securities that are eligible for trading at the start of the Pre-Market Session.
If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System.
After the pre-market spin, IEX will use the Trading status message to relay changes in trading status for an individual security. Messages will be sent when a security is:
Halted
Paused*
Released into an Order Acceptance Period*
Released for trading
*The paused and released into an Order Acceptance Period status will be disseminated for IEX-listed securities only. Trading pauses on non-IEX-listed securities will be treated simply as a halt.
https://iexcloud.io/docs/api/#deep-trading-status
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/trading-status?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return _get("deep/trading-status", token=token, version=version, format=format)
@wraps(iexTradingStatus)
async def iexTradingStatusAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/trading-status?symbols=" + symbol,
token=token,
version=version,
format=format,
)
    return await _getAsync(
        "deep/trading-status", token=token, version=version, format=format
    )
@wraps(iexTradingStatus)
def iexTradingStatusDF(*args, **kwargs):
x = iexTradingStatus(*args, **kwargs)
data = []
for key in x:
d = x[key]
d["symbol"] = key
data.append(d)
return _toDatetime(pd.DataFrame(data))
def iexHist(date=None, token="", version="stable", format="json"):
"""
Args:
date (datetime): Effective date
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
if date is None:
return _get("hist", token=token, version=version, format=format)
else:
date = _strOrDate(date)
return _get("hist?date=" + date, token=token, version=version, format=format)
@wraps(iexHist)
async def iexHistAsync(date=None, token="", version="stable", format="json"):
if date is None:
        return await _getAsync("hist", token=token, version=version, format=format)
else:
date = _strOrDate(date)
return await _getAsync(
"hist?date=" + date, token=token, version=version, format=format
)
@wraps(iexHist)
def iexHistDF(*args, **kwargs):
x = iexHist(*args, **kwargs)
data = []
for key in x:
dat = x[key]
for item in dat:
item["date"] = key
data.append(item)
return _reindex(_toDatetime(pd.DataFrame(data)), "date")
``` |
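A brief usage sketch of the TOPS/Last helpers defined in this file; the import path mirrors the file layout above, the token is a placeholder, and the async note assumes a running event loop:
```python
from pyEX.stocks.iex import iexTops, iexTopsDF, iexLastAsync
quotes = iexTops(symbols=["AAPL", "SPY"], token="<your-token>")  # list of quote dicts
df = iexTopsDF(symbols="AAPL", token="<your-token>")             # DataFrame indexed by symbol
# The *Async variants mirror the synchronous calls and must be awaited, for example:
#     last = await iexLastAsync(symbols=["AAPL"], token="<your-token>")
```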
{
"source": "Jmainguy/openshift-ansible",
"score": 2
} |
#### File: src/ooinstall/oo_config.py
```python
import os
import sys
import yaml
from pkg_resources import resource_filename
CONFIG_PERSIST_SETTINGS = [
'ansible_ssh_user',
'ansible_callback_facts_yaml',
'ansible_config',
'ansible_inventory_path',
'ansible_log_path',
'deployment',
'version',
'variant',
'variant_version',
]
DEPLOYMENT_VARIABLES_BLACKLIST = [
'hosts',
'roles',
]
DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname']
class OOConfigFileError(Exception):
"""The provided config file path can't be read/written
"""
pass
class OOConfigInvalidHostError(Exception):
""" Host in config is missing both ip and hostname. """
pass
class Host(object):
""" A system we will or have installed OpenShift on. """
def __init__(self, **kwargs):
self.ip = kwargs.get('ip', None)
self.hostname = kwargs.get('hostname', None)
self.public_ip = kwargs.get('public_ip', None)
self.public_hostname = kwargs.get('public_hostname', None)
self.connect_to = kwargs.get('connect_to', None)
self.preconfigured = kwargs.get('preconfigured', None)
self.schedulable = kwargs.get('schedulable', None)
self.new_host = kwargs.get('new_host', None)
self.containerized = kwargs.get('containerized', False)
self.node_labels = kwargs.get('node_labels', '')
# allowable roles: master, node, etcd, storage, master_lb, new
self.roles = kwargs.get('roles', [])
self.other_variables = kwargs.get('other_variables', {})
if self.connect_to is None:
raise OOConfigInvalidHostError(
"You must specify either an ip or hostname as 'connect_to'")
def __str__(self):
return self.connect_to
def __repr__(self):
return self.connect_to
def to_dict(self):
""" Used when exporting to yaml. """
d = {}
for prop in ['ip', 'hostname', 'public_ip', 'public_hostname', 'connect_to',
'preconfigured', 'containerized', 'schedulable', 'roles', 'node_labels',
'other_variables']:
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
return d
def is_master(self):
return 'master' in self.roles
def is_node(self):
return 'node' in self.roles
def is_master_lb(self):
return 'master_lb' in self.roles
def is_storage(self):
return 'storage' in self.roles
def is_etcd_member(self, all_hosts):
""" Will this host be a member of a standalone etcd cluster. """
if not self.is_master():
return False
masters = [host for host in all_hosts if host.is_master()]
if len(masters) > 1:
return True
return False
def is_dedicated_node(self):
""" Will this host be a dedicated node. (not a master) """
return self.is_node() and not self.is_master()
def is_schedulable_node(self, all_hosts):
""" Will this host be a node marked as schedulable. """
if not self.is_node():
return False
if not self.is_master():
return True
masters = [host for host in all_hosts if host.is_master()]
nodes = [host for host in all_hosts if host.is_node()]
if len(masters) == len(nodes):
return True
return False
class Role(object):
""" A role that will be applied to a host. """
def __init__(self, name, variables):
self.name = name
self.variables = variables
def __str__(self):
return self.name
def __repr__(self):
return self.name
def to_dict(self):
""" Used when exporting to yaml. """
d = {}
for prop in ['name', 'variables']:
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
return d
class Deployment(object):
def __init__(self, **kwargs):
self.hosts = kwargs.get('hosts', [])
self.roles = kwargs.get('roles', {})
self.variables = kwargs.get('variables', {})
class OOConfig(object):
default_dir = os.path.normpath(
os.environ.get('XDG_CONFIG_HOME',
os.environ['HOME'] + '/.config/') + '/openshift/')
default_file = '/installer.cfg.yml'
def __init__(self, config_path):
if config_path:
self.config_path = os.path.normpath(config_path)
else:
self.config_path = os.path.normpath(self.default_dir +
self.default_file)
self.deployment = Deployment(hosts=[], roles={}, variables={})
self.settings = {}
self._read_config()
self._set_defaults()
def _read_config(self):
try:
if os.path.exists(self.config_path):
with open(self.config_path, 'r') as cfgfile:
loaded_config = yaml.safe_load(cfgfile.read())
# Use the presence of a Description as an indicator this is
# a legacy config file:
if 'Description' in self.settings:
self._upgrade_legacy_config()
try:
host_list = loaded_config['deployment']['hosts']
role_list = loaded_config['deployment']['roles']
except KeyError as e:
print "Error loading config, no such key: {}".format(e)
sys.exit(0)
for setting in CONFIG_PERSIST_SETTINGS:
try:
self.settings[setting] = str(loaded_config[setting])
except KeyError:
continue
for setting in loaded_config['deployment']:
try:
if setting not in DEPLOYMENT_VARIABLES_BLACKLIST:
self.deployment.variables[setting] = \
str(loaded_config['deployment'][setting])
except KeyError:
continue
# Parse the hosts into DTO objects:
for host in host_list:
self.deployment.hosts.append(Host(**host))
# Parse the roles into Objects
for name, variables in role_list.iteritems():
self.deployment.roles.update({name: Role(name, variables)})
except IOError, ferr:
raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename,
ferr.strerror))
except yaml.scanner.ScannerError:
raise OOConfigFileError(
'Config file "{}" is not a valid YAML document'.format(self.config_path))
def _upgrade_legacy_config(self):
new_hosts = []
remove_settings = ['validated_facts', 'Description', 'Name',
'Subscription', 'Vendor', 'Version', 'masters', 'nodes']
if 'validated_facts' in self.settings:
for key, value in self.settings['validated_facts'].iteritems():
value['connect_to'] = key
if 'masters' in self.settings and key in self.settings['masters']:
value['master'] = True
if 'nodes' in self.settings and key in self.settings['nodes']:
value['node'] = True
new_hosts.append(value)
self.settings['hosts'] = new_hosts
for s in remove_settings:
if s in self.settings:
del self.settings[s]
# A legacy config implies openshift-enterprise 3.0:
self.settings['variant'] = 'openshift-enterprise'
self.settings['variant_version'] = '3.0'
def _upgrade_v1_config(self):
#TODO write code to upgrade old config
return
def _set_defaults(self):
if 'ansible_inventory_directory' not in self.settings:
self.settings['ansible_inventory_directory'] = self._default_ansible_inv_dir()
if not os.path.exists(self.settings['ansible_inventory_directory']):
os.makedirs(self.settings['ansible_inventory_directory'])
if 'ansible_plugins_directory' not in self.settings:
self.settings['ansible_plugins_directory'] = \
resource_filename(__name__, 'ansible_plugins')
if 'version' not in self.settings:
self.settings['version'] = 'v2'
if 'ansible_callback_facts_yaml' not in self.settings:
self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \
self.settings['ansible_inventory_directory']
if 'ansible_ssh_user' not in self.settings:
self.settings['ansible_ssh_user'] = ''
self.settings['ansible_inventory_path'] = \
'{}/hosts'.format(os.path.dirname(self.config_path))
# clean up any empty sets
for setting in self.settings.keys():
if not self.settings[setting]:
self.settings.pop(setting)
def _default_ansible_inv_dir(self):
return os.path.normpath(
os.path.dirname(self.config_path) + "/.ansible")
def calc_missing_facts(self):
"""
Determine which host facts are not defined in the config.
Returns a hash of host to a list of the missing facts.
"""
result = {}
for host in self.deployment.hosts:
missing_facts = []
if host.preconfigured:
required_facts = PRECONFIGURED_REQUIRED_FACTS
else:
required_facts = DEFAULT_REQUIRED_FACTS
for required_fact in required_facts:
if not getattr(host, required_fact):
missing_facts.append(required_fact)
if len(missing_facts) > 0:
result[host.connect_to] = missing_facts
return result
def save_to_disk(self):
out_file = open(self.config_path, 'w')
out_file.write(self.yaml())
out_file.close()
def persist_settings(self):
p_settings = {}
for setting in CONFIG_PERSIST_SETTINGS:
if setting in self.settings and self.settings[setting]:
p_settings[setting] = self.settings[setting]
p_settings['deployment'] = {}
p_settings['deployment']['hosts'] = []
p_settings['deployment']['roles'] = {}
for host in self.deployment.hosts:
p_settings['deployment']['hosts'].append(host.to_dict())
for name, role in self.deployment.roles.iteritems():
p_settings['deployment']['roles'][name] = role.variables
for setting in self.deployment.variables:
if setting not in DEPLOYMENT_VARIABLES_BLACKLIST:
p_settings['deployment'][setting] = self.deployment.variables[setting]
try:
p_settings['variant'] = self.settings['variant']
p_settings['variant_version'] = self.settings['variant_version']
if self.settings['ansible_inventory_directory'] != self._default_ansible_inv_dir():
p_settings['ansible_inventory_directory'] = self.settings['ansible_inventory_directory']
except KeyError as e:
print "Error persisting settings: {}".format(e)
sys.exit(0)
return p_settings
def yaml(self):
return yaml.safe_dump(self.persist_settings(), default_flow_style=False)
def __str__(self):
return self.yaml()
def get_host(self, name):
for host in self.deployment.hosts:
if host.connect_to == name:
return host
return None
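if __name__ == '__main__':
    # Illustrative sketch: the host names and addresses below are made up,
    # purely to show how the role helpers behave for a two-host deployment.
    master = Host(connect_to='master1.example.com', ip='10.0.0.1',
                  hostname='master1.example.com', public_ip='192.0.2.1',
                  public_hostname='master1.example.com', roles=['master', 'node'])
    node = Host(connect_to='node1.example.com', ip='10.0.0.2',
                hostname='node1.example.com', public_ip='192.0.2.2',
                public_hostname='node1.example.com', roles=['node'])
    all_hosts = [master, node]
    print(master.is_master())                     # True
    print(node.is_dedicated_node())               # True
    print(master.is_schedulable_node(all_hosts))  # False (a dedicated node exists)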
``` |
{
"source": "Jmainguy/pyHackpySlash",
"score": 3
} |
#### File: Jmainguy/pyHackpySlash/dice.py
```python
import random
def attackroll():
attackdice = 1
attackdicesides = 20
RANDOM = random.randint(1, attackdicesides)
return RANDOM
def damageroll():
damagedice = 1
damagesides = 6
RANDOM = random.randint(1, damagesides)
return RANDOM
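if __name__ == '__main__':
    # Illustrative sketch: one attack roll (d20) and one damage roll (d6).
    print('attack roll: %d' % attackroll())
    print('damage roll: %d' % damageroll())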
```
#### File: Jmainguy/pyHackpySlash/equip.py
```python
from ConfigParser import RawConfigParser
config = RawConfigParser()
def weapon(file):
config.read(file)
weapon = config.get('character', 'weapon')
return weapon
def name(file):
config.read(file)
name = config.get('character', 'name')
return name
def health(file):
config.read(file)
health = config.get('character', 'health')
return health
def dex(file):
config.read(file)
dex = config.get('character', 'dexterity')
return dex
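if __name__ == '__main__':
    # Illustrative sketch: the [character] section layout below is an assumption
    # inferred from the getters above; the values themselves are made up.
    import tempfile
    sample = ("[character]\n"
              "name = Grog\n"
              "weapon = sword\n"
              "health = 12\n"
              "dexterity = 14\n")
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.ini', delete=False)
    tmp.write(sample)
    tmp.close()
    print(name(tmp.name) + ' carries a ' + weapon(tmp.name) +
          ' with ' + health(tmp.name) + ' health')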
``` |
{
"source": "jma/invenio-oauthclient",
"score": 2
} |
#### File: invenio_oauthclient/views/client.py
```python
from __future__ import absolute_import
from flask import Blueprint, abort, current_app, flash, redirect, request, \
url_for
from flask_oauthlib.client import OAuthException
from invenio_db import db
from itsdangerous import BadData
from werkzeug.local import LocalProxy
from .._compat import _create_identifier
from ..errors import OAuthRemoteNotFound
from ..handlers import set_session_next_url
from ..handlers.rest import response_handler
from ..proxies import current_oauthclient
from ..utils import get_safe_redirect_target, serializer
blueprint = Blueprint(
'invenio_oauthclient',
__name__,
url_prefix='/oauth',
static_folder='../static',
template_folder='../templates',
)
rest_blueprint = Blueprint(
'invenio_oauthclient',
__name__,
url_prefix='/oauth',
static_folder='../static',
template_folder='../templates',
)
@blueprint.record_once
def post_ext_init(state):
"""Setup blueprint."""
app = state.app
app.config.setdefault(
'OAUTHCLIENT_SITENAME',
app.config.get('THEME_SITENAME', 'Invenio'))
app.config.setdefault(
'OAUTHCLIENT_BASE_TEMPLATE',
app.config.get('BASE_TEMPLATE',
'invenio_oauthclient/base.html'))
app.config.setdefault(
'OAUTHCLIENT_COVER_TEMPLATE',
app.config.get('COVER_TEMPLATE',
'invenio_oauthclient/base_cover.html'))
app.config.setdefault(
'OAUTHCLIENT_SETTINGS_TEMPLATE',
app.config.get('SETTINGS_TEMPLATE',
'invenio_oauthclient/settings/base.html'))
def _login(remote_app, authorized_view_name):
"""Send user to remote application for authentication."""
oauth = current_oauthclient.oauth
if remote_app not in oauth.remote_apps:
raise OAuthRemoteNotFound()
# Get redirect target in safe manner.
next_param = get_safe_redirect_target(arg='next')
# Redirect URI - must be registered in the remote service.
callback_url = url_for(
authorized_view_name,
remote_app=remote_app,
_external=True,
_scheme="https"
)
# Create a JSON Web Token that expires after OAUTHCLIENT_STATE_EXPIRES
# seconds.
state_token = serializer.dumps({
'app': remote_app,
'next': next_param,
'sid': _create_identifier(),
})
return oauth.remote_apps[remote_app].authorize(
callback=callback_url,
state=state_token,
)
@blueprint.route('/login/<remote_app>/')
def login(remote_app):
"""Send user to remote application for authentication."""
try:
return _login(remote_app, '.authorized')
except OAuthRemoteNotFound:
return abort(404)
@rest_blueprint.route('/login/<remote_app>/')
def rest_login(remote_app):
"""Send user to remote application for authentication."""
try:
return _login(remote_app, '.rest_authorized')
except OAuthRemoteNotFound:
abort(404)
def _authorized(remote_app=None):
"""Authorized handler callback."""
if remote_app not in current_oauthclient.handlers:
return abort(404)
state_token = request.args.get('state')
# Verify state parameter
assert state_token
# Checks authenticity and integrity of state and decodes the value.
state = serializer.loads(state_token)
# Verify that state is for this session, app and that next parameter
# have not been modified.
assert state['sid'] == _create_identifier()
assert state['app'] == remote_app
# Store next URL
set_session_next_url(remote_app, state['next'])
handler = current_oauthclient.handlers[remote_app]()
return handler
@blueprint.route('/authorized/<remote_app>/')
def authorized(remote_app=None):
"""Authorized handler callback."""
try:
return _authorized(remote_app)
except OAuthRemoteNotFound:
return abort(404)
except (AssertionError, BadData):
if current_app.config.get('OAUTHCLIENT_STATE_ENABLED', True) or (
not(current_app.debug or current_app.testing)):
abort(403)
except OAuthException as e:
if e.type == 'invalid_response':
abort(500)
else:
raise
@rest_blueprint.route('/authorized/<remote_app>/')
def rest_authorized(remote_app=None):
"""Authorized handler callback."""
try:
return _authorized(remote_app)
except OAuthRemoteNotFound:
abort(404)
except (AssertionError, BadData):
if current_app.config.get('OAUTHCLIENT_STATE_ENABLED', True) or (
not(current_app.debug or current_app.testing)):
return response_handler(
None,
current_app.config[
'OAUTHCLIENT_REST_DEFAULT_ERROR_REDIRECT_URL'],
payload=dict(
message="Invalid state.",
code=403
)
)
except OAuthException as e:
if e.type == 'invalid_response':
return response_handler(
None,
current_app.config[
'OAUTHCLIENT_REST_DEFAULT_ERROR_REDIRECT_URL'],
payload=dict(
message="Invalid response.",
code=500
)
)
else:
raise
def _signup(remote_app):
"""Extra signup step."""
if remote_app not in current_oauthclient.signup_handlers:
raise OAuthRemoteNotFound()
return current_oauthclient.signup_handlers[remote_app]['view']()
@blueprint.route('/signup/<remote_app>/', methods=['GET', 'POST'])
def signup(remote_app):
"""Extra signup step."""
try:
res = _signup(remote_app)
return abort(404) if res is None else res
except OAuthRemoteNotFound:
return abort(404)
@rest_blueprint.route('/signup/<remote_app>/', methods=['GET', 'POST'])
def rest_signup(remote_app):
"""Extra signup step."""
try:
res = _signup(remote_app)
return abort(404) if res is None else res
except OAuthRemoteNotFound:
abort(404)
def _disconnect(remote_app):
"""Extra signup step."""
if remote_app not in current_oauthclient.signup_handlers:
raise OAuthRemoteNotFound()
ret = current_oauthclient.disconnect_handlers[remote_app]()
db.session.commit()
return ret
@blueprint.route('/disconnect/<remote_app>/')
def disconnect(remote_app):
"""Disconnect user from remote application.
Removes application as well as associated information.
"""
try:
return _disconnect(remote_app)
except OAuthRemoteNotFound:
abort(404)
@rest_blueprint.route('/disconnect/<remote_app>/')
def rest_disconnect(remote_app):
"""Disconnect user from remote application.
Removes application as well as associated information.
"""
try:
return _disconnect(remote_app)
except OAuthRemoteNotFound:
abort(404)
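# Illustrative sketch (assumption, not part of the module): mounting the UI
# blueprint on a bare Flask app exposes the routes defined above
# (/oauth/login/<remote_app>/, /oauth/authorized/<remote_app>/, ...). In a full
# Invenio application this wiring is handled by the invenio-oauthclient extension.
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(blueprint)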
``` |
{
"source": "jmairal/arsenic",
"score": 2
} |
#### File: arsenic/cyanure/estimators.py
```python
from abc import abstractmethod, ABC
import math
import inspect
import warnings
import platform
from collections import defaultdict
import numpy as np
import scipy.sparse
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.extmath import safe_sparse_dot, softmax
from sklearn.exceptions import ConvergenceWarning
import cyanure_lib
from cyanure.data_processing import check_input_fit, check_input_inference
from cyanure.logger import setup_custom_logger
logger = setup_custom_logger("INFO")
class ERM(BaseEstimator, ABC):
"""
The generic class for empirical risk minimization problems.
For univariates problems, minimizes
min_{w,b} (1/n) sum_{i=1}^n L( y_i, <w, x_i> + b) + psi(w)
"""
def _more_tags(self):
return {"requires_y": True}
def _warm_start(self, X, initial_weight, nclasses):
if self.warm_start and hasattr(self, "coef_"):
if self.verbose:
logger.info("Restart")
if self.fit_intercept:
initial_weight[-1, ] = self.intercept_
initial_weight[0:-1, ] = np.squeeze(self.coef_)
else:
initial_weight = np.squeeze(self.coef_)
if self.warm_start and self.solver in ('auto', 'miso', 'catalyst-miso', 'qning-miso'):
n = X.shape[0]
            # TODO: write a test for the dual variable, mostly defensive checks
reset_dual = np.any(self.dual is None)
if not reset_dual and self._binary_problem:
reset_dual = self.dual.shape[0] != n
if not reset_dual and not self._binary_problem:
reset_dual = np.any(self.dual.shape != [n, nclasses])
if reset_dual and self._binary_problem:
self.dual = np.zeros(
n, dtype=X.dtype, order='F')
if reset_dual and not self._binary_problem:
self.dual = np.zeros(
[n, nclasses], dtype=X.dtype, order='F')
return initial_weight
def _initialize_weight(self, X, labels):
nclasses = 0
p = X.shape[1] + 1 if self.fit_intercept else X.shape[1]
if self._binary_problem:
initial_weight = np.zeros((p), dtype=X.dtype)
yf = np.squeeze(labels.astype(X.dtype))
else:
if labels.squeeze().ndim > 1:
nclasses = labels.squeeze().shape[1]
yf = np.asfortranarray(labels.T)
else:
nclasses = int(np.max(labels) + 1)
if platform.system() == "Windows":
yf = np.squeeze(np.intc(np.float64(labels)))
else:
yf = np.squeeze(np.int32(labels))
initial_weight = np.zeros(
[p, nclasses], dtype=X.dtype, order='F')
initial_weight = self._warm_start(X, initial_weight, nclasses)
return initial_weight, yf, nclasses
def __init__(self, loss='square', penalty='l2', fit_intercept=False, dual=None, tol=1e-3,
solver="auto", random_state=0, max_iter=2000, fista_restart=60,
verbose=True, warm_start=False, limited_memory_qning=50, multi_class="auto",
lambda_1=0, lambda_2=0, lambda_3=0, duality_gap_interval=5, n_threads=-1):
r"""
Instantiate the ERM class.
Parameters
----------
loss: string, default='square'
Loss function to be used. Possible choices are
- 'square'
:math:`L(y,z) = \\frac{1}{2} ( y-z)^2`
- 'logistic'
:math:`L(y,z) = \\log(1 + e^{-y z} )`
- 'sqhinge' or 'squared_hinge'
:math:`L(y,z) = \\frac{1}{2} \\max( 0, 1- y z)^2`
- 'safe-logistic'
:math:`L(y,z) = e^{ yz - 1 } - y z ~\\text{if}~ yz
\\leq 1~~\\text{and}~~0` otherwise
- 'multiclass-logistic'
which is also called multinomial or softmax logistic:
                :math:`L(y, W^\\top x + b) = \\log\\left(\\sum_{j=1}^k
                e^{w_j^\\top x + b_j - w_y^\\top x - b_y} \\right)`
penalty (string): default='none'
Regularization function psi. Possible choices are
For binary_problem problems:
- 'none'
:math:`psi(w) = 0`
- 'l2'
:math:`psi(w) = \\frac{\\lambda_1}{2} ||w||_2^2`
- 'l1'
:math:`psi(w) = \\lambda_1 ||w||_1`
- 'elasticnet'
:math:`psi(w) = \\lambda_1 ||w||_1 + \\frac{\\lambda_2}{2}||w||_2^2`
- 'fused-lasso'
:math:`psi(w) = \\lambda_3 \\sum_{i=2}^p |w[i]-w[i-1]| +
\\lambda_1||w||_1 + \\frac{\\lambda_2}{2}||w||_2^2`
- 'l1-ball'
encodes the constraint :math:`||w||_1 <= \\lambda`
- 'l2-ball'
encodes the constraint :math:`||w||_2 <= \\lambda`
For multivariate problems, the previous penalties operate on each
individual (e.g., class) predictor.
.. math::
\\psi(W) = \\sum_{j=1}^k \\psi(w_j).
In addition, multitask-group Lasso penalties are provided for
multivariate problems (w is then a matrix)
- 'l1l2', which is the multi-task group Lasso regularization
.. math::
\\psi(W) = \\lambda \\sum_{j=1}^p \\|W^j\\|_2~~~~
\\text{where}~W^j~\\text{is the j-th row of}~W.
- 'l1linf'
.. math::
\\psi(W) = \\lambda \\sum_{j=1}^p \\|W^j\\|_\\infty.
- 'l1l2+l1', which is the multi-task group Lasso regularization + l1
.. math::
\\psi(W) = \\sum_{j=1}^p \\lambda
\\|W^j\\|_2 + \\lambda_2 \\|W^j\\|_1 ~~~~
\\text{where}~W^j~\\text{is the j-th row of}~W.
fit_intercept (boolean): default='False'
Learns an unregularized intercept b (or several intercepts for
multivariate problems)
lambda_1 (float): default=0
First regularization parameter
lambda_2 (float): default=0
Second regularization parameter, if needed
lambda_3 (float): default=0
Third regularization parameter, if needed
solver (string): default='auto'
Optimization solver. Possible choices are
- 'ista'
- 'fista'
- 'catalyst-ista'
- 'qning-ista' (proximal quasi-Newton method)
- 'svrg'
- 'catalyst-svrg' (accelerated SVRG with Catalyst)
- 'qning-svrg' (quasi-Newton SVRG)
- 'acc-svrg' (SVRG with direct acceleration)
- 'miso'
- 'catalyst-miso' (accelerated MISO with Catalyst)
- 'qning-miso' (quasi-Newton MISO)
- 'auto'
see the Latex documentation for more details.
If you are unsure, use 'auto'
tol (float): default='1e-3'
Tolerance parameter. For almost all combinations of loss and
penalty functions, this parameter is based on a duality gap.
Assuming the (non-negative) objective function is "f" and its
optimal value is "f^*", the algorithm stops with the guarantee
:math:`f(x_t) - f^* <= tol f(x_t)`
max_iter (int): default=500
Maximum number of iteration of the algorithm in terms of passes
over the data
duality_gap_interval (int): default=10
Frequency of duality-gap computation
verbose (boolean): default=True
Display information or not
n_threads (int): default=-1
Maximum number of cores the method may use (-1 = all cores).
Note that more cores is not always better.
random_state (int): default=0
Random seed
warm_start (boolean): default=False
Use a restart strategy
binary_problem (boolean): default=True
univariate or multivariate problems
limited_memory_qning (int): default=20
Memory parameter for the qning method
fista_restart (int): default=50
Restart strategy for fista (useful for computing regularization path)
"""
self.loss = loss
if loss == 'squared_hinge':
self.loss = 'sqhinge'
self.penalty = penalty
self.fit_intercept = fit_intercept
self.dual = dual
self.solver = solver
self.tol = tol
self.random_state = random_state
self.max_iter = max_iter
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.lambda_3 = lambda_3
self.limited_memory_qning = limited_memory_qning
self.fista_restart = fista_restart
self.verbose = verbose
self.warm_start = warm_start
self.multi_class = multi_class
self.duality_gap_interval = duality_gap_interval
self.n_threads = n_threads
def fit(self, X, y, le_parameter=None):
"""
Fit the parameters.
Parameters
----------
X (numpy array or scipy sparse CSR matrix):
input n X p numpy matrix; the samples are on the rows
y (numpy array):
- vector of size n with real values for regression
- vector of size n with {-1,+1} for binary classification,
which will be automatically converted if {0,1} are
provided
- matrix of size n X k for multivariate regression
- vector of size n with entries in {0,1,k-1} for classification
with k classes
Returns
-------
self (ERM):
Returns the instance
"""
loss = None
X, labels, le = check_input_fit(X, y, self)
if le_parameter is not None:
self.le_ = le_parameter
else:
self.le_ = le
if (self.multi_class == "multinomial" or
(self.multi_class == "auto" and not self._binary_problem)) and self.loss == "logistic":
if self.multi_class == "multinomial":
if len(np.unique(labels)) != 2:
self._binary_problem = False
loss = "multiclass-logistic"
logger.info(
"Loss has been set to multiclass-logistic because "
"the multiclass parameter is set to multinomial!")
if loss is None:
loss = self.loss
labels = np.squeeze(labels)
initial_weight, yf, nclasses = self._initialize_weight(X, labels)
training_data_fortran = X.T if scipy.sparse.issparse(
X) else np.asfortranarray(X.T)
w = np.copy(initial_weight)
self.optimization_info_ = cyanure_lib.erm_(
training_data_fortran, yf, initial_weight, w, dual_variable=self.dual, loss=loss,
penalty=self.penalty, solver=self.solver, lambda_1=float(self.lambda_1),
lambda_2=float(self.lambda_2), lambda_3=float(self.lambda_3),
intercept=bool(self.fit_intercept),
tol=float(self.tol), duality_gap_interval=int(self.duality_gap_interval),
max_iter=int(self.max_iter), limited_memory_qning=int(self.limited_memory_qning),
fista_restart=int(self.fista_restart), verbose=bool(self.verbose),
univariate=bool(self._binary_problem),
n_threads=int(self.n_threads), seed=int(self.random_state)
)
if ((self.multi_class == "multinomial" or
(self.multi_class == "auto" and not self._binary_problem)) and
self.loss == "logistic") and self.optimization_info_.shape[0] == 1:
self.optimization_info_ = np.repeat(
self.optimization_info_, nclasses, axis=0)
self.n_iter_ = np.array([self.optimization_info_[class_index][0][-1]
for class_index in range(self.optimization_info_.shape[0])])
for index in range(self.n_iter_.shape[0]):
if self.n_iter_[index] == self.max_iter:
warnings.warn(
"The max_iter was reached which means the coef_ did not converge",
ConvergenceWarning)
if self.fit_intercept:
self.intercept_ = w[-1, ]
self.coef_ = w[0:-1, ]
else:
self.coef_ = w
self.n_features_in_ = self.coef_.shape[0]
return self
@abstractmethod
def predict(self, X):
"""Predict the labels given an input matrix X (same format as fit)."""
def get_weights(self):
"""
Get the model parameters (either w or the tuple (w,b)).
Returns
-------
w or (w,b) (numpy.array or tuple of numpy.array):
Model parameters
"""
return (self.coef_, self.intercept_) if self.fit_intercept else self.coef_
def get_params(self, deep=True):
"""
Get parameters for the estimator.
Parameters
----------
deep (bool, optional):
If True returns also subobjects that are estimators. Defaults to True.
Returns
-------
params (dict):
Parameters names and values
"""
out = {}
for key in self._get_param_names():
try:
value = getattr(self, key)
except AttributeError:
value = None
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
@classmethod
    def _get_param_names(cls):
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = inspect.signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError()
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def set_params(self, **params):
"""
Allow to change the value of parameters.
Parameters
----------
params (dict):
Estimator parameters to set
Raises
------
ValueError:
The parameter does not exist
Returns
-------
self (ERM):
Estimator instance
"""
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
# Grouped by prefix
nested_params = defaultdict(dict)
for key, value in params.items():
key, delim, sub_key = key.partition('__')
if key not in valid_params:
raise ValueError(f'Invalid parameter {key} for estimator {self}. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
if delim:
nested_params[key][sub_key] = value
else:
setattr(self, key, value)
valid_params[key] = value
for key, sub_params in nested_params.items():
valid_params[key].set_params(**sub_params)
return self
def densify(self):
"""
Convert coefficient matrix to dense array format.
Converts the ``coef_`` member (back) to a numpy.ndarray. This is the
default format of ``coef_`` and is required for fitting, so calling
this method is only required on models that have previously been
sparsified; otherwise, it is a no-op.
Returns
-------
self (ERM):
Fitted estimator converted to dense estimator
"""
msg = "Estimator, %(name)s, must be fitted before densifying."
check_is_fitted(self, msg=msg)
if scipy.sparse.issparse(self.coef_):
self.coef_ = self.coef_.toarray()
return self
def sparsify(self):
"""
Convert coefficient matrix to sparse format.
Converts the ``coef_`` member to a scipy.sparse matrix, which for
L1-regularized models can be much more memory- and storage-efficient
than the usual numpy.ndarray representation.
The ``intercept_`` member is not converted.
Returns
-------
self (ERM):
            Fitted estimator converted to sparse estimator.
Notes
-----
For non-sparse models, i.e. when there are not many zeros in ``coef_``,
this may actually *increase* memory usage, so use this method with
care. A rule of thumb is that the number of zero elements, which can
be computed with ``(coef_ == 0).sum()``, must be more than 50% for this
to provide significant benefits.
After calling this method, further fitting with the partial_fit
method (if any) will not work until you call densify.
"""
msg = "Estimator, %(name)s, must be fitted before sparsifying."
check_is_fitted(self, msg=msg)
self.coef_ = scipy.sparse.csr_matrix(self.coef_)
if self.coef_.shape[0] == 1:
self.coef_ = self.coef_.T
return self
class ClassifierAbstraction(ERM):
"""A class to define abstract methods for classifiers."""
@abstractmethod
def predict_proba(self, X):
"""
Estimate the probability for each class.
Parameters
----------
X (numpy array or scipy sparse CSR matrix):
Data matrix for which we want probabilities
Returns
-------
proba (numpy.array):
Return the probability of the samples for each class.
"""
pass
class Regression(ERM):
r"""
The regression class which derives from ERM.
The goal is to minimize the following objective:
.. math::
\min_{w,b} \frac{1}{n} \sum_{i=1}^n
L\left( y_i, w^\top x_i + b\right) + \psi(w),
where :math:`L` is a regression loss, :math:`\\psi` is a
regularization function (or constraint), :math:`w` is a p-dimensional
vector representing model parameters, and b is an optional
unregularized intercept., and the targets will be real values.
Parameters
----------
loss (string): default='square'
Loss function to be used. Possible choices are:
Only the square loss is implemented at this point. Given two
k-dimensional vectors y,z:
* 'square' => :math:`L(y,z) = \frac{1}{2}( y-z)^2`
penalty (string): default='none'
Regularization function psi. Possible choices are
For binary_problem problems:
- 'none'
:math:`psi(w) = 0`
- 'l2'
:math:`psi(w) = \frac{\lambda_1}{2} ||w||_2^2`
        - 'l1'
:math:`psi(w) = \lambda_1 ||w||_1`
- 'elasticnet'
:math:`psi(w) = \lambda_1 ||w||_1 + \frac{\lambda_2}{2}||w||_2^2`
- 'fused-lasso'
:math:`psi(w) = \lambda_3 \sum_{i=2}^p |w[i]-w[i-1]|
+ \lambda_1||w||_1 + \frac{\lambda_2}{2}||w||_2^2`
- 'l1-ball'
encodes the constraint :math:`||w||_1 <= \lambda`
- 'l2-ball'
encodes the constraint :math:`||w||_2 <= \lambda`
For multivariate problems, the previous penalties operate on each
individual (e.g., class) predictor.
.. math::
\psi(W) = \sum_{j=1}^k \psi(w_j).
In addition, multitask-group Lasso penalties are provided for
multivariate problems (w is then a matrix)
- 'l1l2', which is the multi-task group Lasso regularization
.. math::
\psi(W) = \lambda \sum_{j=1}^p \|W^j\|_2~~~~
\text{where}~W^j~\text{is the j-th row of}~W.
- 'l1linf'
.. math::
\psi(W) = \lambda \sum_{j=1}^p \|W^j\|_\infty.
- 'l1l2+l1', which is the multi-task group Lasso regularization + l1
.. math::
\psi(W) = \sum_{j=1}^p \lambda \|W^j\|_2 + \lambda_2 \|W^j\|_1 ~~~~
\text{where}~W^j~\text{is the j-th row of}~W.
fit_intercept (boolean): default='False'
Learns an unregularized intercept b (or several intercepts for
multivariate problems)
lambda_1 (float): default=0
First regularization parameter
lambda_2 (float): default=0
Second regularization parameter, if needed
lambda_3 (float): default=0
Third regularization parameter, if needed
solver (string): default='auto'
Optimization solver. Possible choices are
- 'ista'
- 'fista'
- 'catalyst-ista'
- 'qning-ista' (proximal quasi-Newton method)
- 'svrg'
- 'catalyst-svrg' (accelerated SVRG with Catalyst)
- 'qning-svrg' (quasi-Newton SVRG)
- 'acc-svrg' (SVRG with direct acceleration)
- 'miso'
- 'catalyst-miso' (accelerated MISO with Catalyst)
- 'qning-miso' (quasi-Newton MISO)
- 'auto'
see the Latex documentation for more details.
If you are unsure, use 'auto'
tol (float): default='1e-3'
Tolerance parameter. For almost all combinations of loss and
penalty functions, this parameter is based on a duality gap.
Assuming the (non-negative) objective function is "f" and its
optimal value is "f^*", the algorithm stops with the guarantee
:math:`f(x_t) - f^* <= tol f(x_t)`
max_iter (int): default=500
Maximum number of iteration of the algorithm in terms of passes
over the data
duality_gap_interval (int): default=10
Frequency of duality-gap computation
verbose (boolean): default=True
Display information or not
n_threads (int): default=-1
Maximum number of cores the method may use (-1 = all cores).
Note that more cores is not always better.
random_state (int): default=0
Random seed
warm_start (boolean): default=False
Use a restart strategy
binary_problem (boolean): default=True
univariate or multivariate problems
limited_memory_qning (int): default=20
Memory parameter for the qning method
fista_restart (int): default=50
Restart strategy for fista (useful for computing regularization path)
"""
_estimator_type = "regressor"
def _more_tags(self):
return {"multioutput": True, "requires_y": True}
def __init__(self, loss='square', penalty='l2', fit_intercept=True, random_state=0,
lambda_1=0, lambda_2=0, lambda_3=0, solver='auto', tol=1e-3,
duality_gap_interval=10, max_iter=500,
limited_memory_qning=20, fista_restart=50, verbose=True,
warm_start=False, n_threads=-1, dual=None):
if loss != 'square':
raise ValueError("square loss should be used")
super().__init__(loss=loss, penalty=penalty,
fit_intercept=fit_intercept, random_state=random_state, lambda_1=lambda_1,
lambda_2=lambda_2, lambda_3=lambda_3, solver=solver, tol=tol,
duality_gap_interval=duality_gap_interval, max_iter=max_iter,
limited_memory_qning=limited_memory_qning,
fista_restart=fista_restart, verbose=verbose,
warm_start=warm_start, n_threads=n_threads, dual=dual)
def fit(self, X, y, le_parameter=None):
"""
Fit the parameters.
Parameters
----------
X (numpy array or scipy sparse CSR matrix):
input n X p numpy matrix; the samples are on the rows
y (numpy array):
- vector of size n with real values for regression
- matrix of size n X k for multivariate regression
Returns
-------
self (ERM):
Returns the instance of the class
"""
X, labels, _ = check_input_fit(X, y, self)
if labels.squeeze().ndim <= 1:
self._binary_problem = True
else:
self._binary_problem = False
return super().fit(X, labels, le_parameter)
def predict(self, X):
"""
Predict the labels given an input matrix X (same format as fit).
Parameters
----------
X (numpy array or scipy sparse CSR matrix):
Input matrix for the prediction
Returns
-------
pred (numpy.array):
Prediction for the X matrix
"""
check_is_fitted(self)
X = check_input_inference(X, self)
X = self._validate_data(X, accept_sparse="csr", reset=False)
pred = safe_sparse_dot(
X, self.coef_, dense_output=False) + self.intercept_
return pred.squeeze()
def score(self, X, y, sample_weight=None):
r"""
Return the coefficient of determination of the prediction.
The coefficient of determination :math:`R^2` is defined as
:math:`(1 - \\frac{u}{v})`, where :math:`u` is the residual
sum of squares ``((y_true - y_pred)** 2).sum()`` and :math:`v`
is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``.
The best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always predicts
the expected value of `y`, disregarding the input features, would get
a :math:`R^2` score of 0.0.
Parameters
----------
X (numpy array or scipy sparse CSR matrix):
Test samples.
y (numpy.array):
True labels for X.
sample_weight (numpy.array, optional):
Sample weights. Defaults to None.
Returns
-------
score (float):
:math:`R^2` of ``self.predict(X)`` wrt. `y`.
"""
from sklearn.metrics import r2_score
y_pred = self.predict(X)
return r2_score(y, y_pred, sample_weight=sample_weight)
class Classifier(ClassifierAbstraction):
r"""
The classification class.
The goal is to minimize the following objective:
.. math::
\min_{W,b} \frac{1}{n} \sum_{i=1}^n
L\left( y_i, W^\top x_i + b\right) + \psi(W)
where :math:`L` is a classification loss, :math:`\psi` is a regularization
function (or constraint), :math:`W=[w_1,\ldots,w_k]` is a (p x k) matrix
that carries the k predictors, where k is the number of classes, and
:math:`y_i` is a label in :math:`\{1,\ldots,k\}`.
b is a k-dimensional vector representing an unregularized intercept
(which is optional).
Parameters
----------
loss: string, default='square'
Loss function to be used. Possible choices are
- 'square'
:math:`L(y,z) = \frac{1}{2} ( y-z)^2`
- 'logistic'
:math:`L(y,z) = \log(1 + e^{-y z} )`
- 'sqhinge' or 'squared_hinge'
:math:`L(y,z) = \frac{1}{2} \max( 0, 1- y z)^2`
- 'safe-logistic'
:math:`L(y,z) = e^{ yz - 1 } - y z
~\text{if}~ yz \leq 1~~\text{and}~~0` otherwise
- 'multiclass-logistic'
which is also called multinomial or softmax logistic:
            :math:`L(y, W^\top x + b) = \log\left(\sum_{j=1}^k
            e^{w_j^\top x + b_j - w_y^\top x - b_y} \right)`
penalty (string): default='none'
Regularization function psi. Possible choices are
For binary_problem problems:
- 'none'
:math:`psi(w) = 0`
- 'l2'
:math:`psi(w) = \frac{\lambda_1}{2} ||w||_2^2`
- 'l1'
:math:`psi(w) = \lambda_1 ||w||_1`
- 'elasticnet'
:math:`psi(w) = \lambda_1 ||w||_1 + \frac{\lambda_2}{2}||w||_2^2`
- 'fused-lasso'
:math:`psi(w) = \lambda_3 \sum_{i=2}^p |w[i]-w[i-1]| +
\lambda_1||w||_1 + \frac{\lambda_2}{2}||w||_2^2`
- 'l1-ball'
encodes the constraint :math:`||w||_1 <= \lambda`
- 'l2-ball'
encodes the constraint :math:`||w||_2 <= \lambda`
For multivariate problems, the previous penalties operate on each
individual (e.g., class) predictor.
.. math::
\psi(W) = \sum_{j=1}^k \psi(w_j).
In addition, multitask-group Lasso penalties are provided for
multivariate problems (w is then a matrix)
- 'l1l2', which is the multi-task group Lasso regularization
.. math::
\psi(W) = \lambda \sum_{j=1}^p \|W^j\|_2~~~~
\text{where}~W^j~\text{is the j-th row of}~W.
- 'l1linf'
.. math::
\psi(W) = \lambda \sum_{j=1}^p \|W^j\|_\infty.
- 'l1l2+l1', which is the multi-task group Lasso regularization + l1
.. math::
\psi(W) = \sum_{j=1}^p \lambda \|W^j\|_2 + \lambda_2 \|W^j\|_1 ~~~~
\text{where}~W^j~\text{is the j-th row of}~W.
fit_intercept (boolean): default='False'
Learns an unregularized intercept b (or several intercepts for
multivariate problems)
lambda_1 (float): default=0
First regularization parameter
lambda_2 (float): default=0
Second regularization parameter, if needed
lambda_3 (float): default=0
Third regularization parameter, if needed
solver (string): default='auto'
Optimization solver. Possible choices are
- 'ista'
- 'fista'
- 'catalyst-ista'
- 'qning-ista' (proximal quasi-Newton method)
- 'svrg'
- 'catalyst-svrg' (accelerated SVRG with Catalyst)
- 'qning-svrg' (quasi-Newton SVRG)
- 'acc-svrg' (SVRG with direct acceleration)
- 'miso'
- 'catalyst-miso' (accelerated MISO with Catalyst)
- 'qning-miso' (quasi-Newton MISO)
- 'auto'
see the Latex documentation for more details.
If you are unsure, use 'auto'
tol (float): default='1e-3'
Tolerance parameter. For almost all combinations of loss and
penalty functions, this parameter is based on a duality gap.
Assuming the (non-negative) objective function is "f" and its
optimal value is "f^*", the algorithm stops with the guarantee
:math:`f(x_t) - f^* <= tol f(x_t)`
max_iter (int): default=500
Maximum number of iteration of the algorithm in terms of passes
over the data
duality_gap_interval (int): default=10
Frequency of duality-gap computation
verbose (boolean): default=True
Display information or not
n_threads (int): default=-1
Maximum number of cores the method may use (-1 = all cores).
Note that more cores is not always better.
random_state (int): default=0
Random seed
warm_start (boolean): default=False
Use a restart strategy
binary_problem (boolean): default=True
univariate or multivariate problems
limited_memory_qning (int): default=20
Memory parameter for the qning method
fista_restart (int): default=50
Restart strategy for fista (useful for computing regularization path)
"""
_estimator_type = "classifier"
def __init__(self, loss='square', penalty='l2', fit_intercept=True, tol=1e-3, solver="auto",
random_state=0, max_iter=500, fista_restart=50, verbose=True,
warm_start=False, multi_class="auto",
limited_memory_qning=20, lambda_1=0, lambda_2=0, lambda_3=0,
duality_gap_interval=5, n_threads=-1, dual=None):
super().__init__(loss=loss, penalty=penalty, fit_intercept=fit_intercept,
tol=tol, solver=solver,
random_state=random_state, max_iter=max_iter, fista_restart=fista_restart,
verbose=verbose, warm_start=warm_start,
limited_memory_qning=limited_memory_qning,
lambda_1=lambda_1, lambda_2=lambda_2, lambda_3=lambda_3,
duality_gap_interval=duality_gap_interval,
n_threads=n_threads, multi_class=multi_class, dual=dual)
def fit(self, X, y, le_parameter=None):
"""
Fit the parameters.
Parameters
----------
X (numpy array, or scipy sparse CSR matrix):
input n x p numpy matrix; the samples are on the rows
y (numpy.array):
Input labels.
- vector of size n with {-1, +1} labels for binary classification,
which will be automatically converted if labels in {0,1} are
provided and {0,1,..., n} for multiclass classification.
"""
X, labels, le = check_input_fit(X, y, self)
if le_parameter is not None:
self.le_ = le_parameter
else:
self.le_ = le
labels = np.squeeze(labels)
unique = np.unique(labels)
nb_classes = len(unique)
if self.le_ is not None:
self.classes_ = self.le_.classes_
else:
self.classes_ = unique
if nb_classes != 2 and (nb_classes != unique.shape[0] or
not all(np.unique(labels) == np.arange(nb_classes))):
logger.info("Class labels should be of the form")
logger.info(np.arange(nb_classes))
logger.info("but they are")
logger.info(unique)
logger.info(
"The labels have been converted to respect the expected format.")
if nb_classes == 2:
self._binary_problem = True
if self.le_ is not None:
neg = labels == self.le_.transform(self.classes_)[0]
else:
neg = labels == self.classes_[0]
labels = labels.astype(int)
labels[neg] = -1
labels[np.logical_not(neg)] = 1
else:
min_value = min(labels)
if min_value != 0:
labels = labels - min_value
self._binary_problem = False
super().fit(
X, labels, le_parameter=self.le_)
self.coef_ = self.coef_.reshape(self.coef_.shape[0], -1)
if self.fit_intercept:
self.intercept_ = self.intercept_.reshape(1, -1)
return self
def predict(self, X):
"""
Predict the labels given an input matrix X (same format as fit).
Parameters
----------
X (numpy array or scipy sparse CSR matrix):
Input matrix for the prediction
Returns
-------
pred (numpy.array):
Prediction for the X matrix
"""
check_is_fitted(self)
X = check_input_inference(X, self)
pred = self.decision_function(X)
output = None
if len(self.classes_) == 2:
if self.le_ is None:
output = np.sign(pred)
output[output == -1.0] = self.classes_[0]
output = output.astype(np.int32)
else:
output = np.sign(pred)
output[output == -1.0] = 0
output = output.astype(np.int32)
output = self.le_.inverse_transform(output)
else:
if self.le_ is None:
output = np.argmax(pred, axis=1)
else:
output = self.le_.inverse_transform(np.argmax(pred, axis=1))
return output
def score(self, X, y):
"""
Give an accuracy score on test data.
Parameters
----------
X (numpy array or scipy sparse CSR matrix):
Test samples.
y (numpy.array):
True labels for X.
sample_weight (numpy.array, optional):
Sample weights. Defaults to None.
Returns
-------
score : float
Mean accuracy of ``self.predict(X)`` wrt. `y`.
"""
check_is_fitted(self)
X = check_input_inference(X, self)
pred = np.squeeze(self.predict(X))
return np.sum(np.squeeze(y) == pred) / pred.shape[0]
def decision_function(self, X):
"""
Predict confidence scores for samples.
Parameters
----------
X (numpy array or scipy sparse CSR matrix):
The data for which we want scores
Returns
-------
scores (numpy.array):
Confidence scores per (n_samples, n_classes) combination.
            In the binary case, confidence score for self.classes_[1] where >0 means
            this class would be predicted.
"""
check_is_fitted(self)
X = check_input_inference(X, self)
if self.fit_intercept:
scores = safe_sparse_dot(
X, self.coef_, dense_output=False) + self.intercept_
else:
scores = safe_sparse_dot(X, self.coef_, dense_output=False)
output = None
if len(self.classes_) == 2:
output = scores.ravel()
else:
output = scores.ravel() if scores.shape[1] == 1 else scores
return output
def predict_proba(self, X):
"""
Estimate the probability for each class.
Parameters
----------
X (numpy array or scipy sparse CSR matrix):
Data matrix for which we want probabilities
Returns
-------
proba (numpy.array):
Return the probability of the samples for each class.
"""
check_is_fitted(self)
X = check_input_inference(X, self)
decision = self.decision_function(X)
if decision.ndim == 1:
# Workaround for binary outcomes
# which requires softmax prediction with only a 1D decision.
decision = np.c_[-decision, decision]
return softmax(decision, copy=False)
class LinearSVC(Classifier):
"""A pre-configured class for square hinge loss."""
def __init__(self, loss='sqhinge', penalty='l2', fit_intercept=True,
verbose=False, lambda_1=0.1, lambda_2=0, lambda_3=0,
solver='auto', tol=1e-3, duality_gap_interval=10,
max_iter=500, limited_memory_qning=20,
fista_restart=50, warm_start=False, n_threads=-1, random_state=0, dual=None):
if loss not in ['squared_hinge', 'sqhinge']:
logger.error("LinearSVC is only compatible with squared hinge loss at "
"the moment")
super().__init__(
loss=loss, penalty=penalty, fit_intercept=fit_intercept,
solver=solver, tol=tol, random_state=random_state, verbose=verbose,
lambda_1=lambda_1, lambda_2=lambda_2, lambda_3=lambda_3,
duality_gap_interval=duality_gap_interval, max_iter=max_iter,
limited_memory_qning=limited_memory_qning,
fista_restart=fista_restart, warm_start=warm_start, n_threads=n_threads, dual=dual)
class LogisticRegression(Classifier):
"""A pre-configured class for logistic regression loss."""
_estimator_type = "classifier"
def __init__(self, penalty='l2', loss='logistic', fit_intercept=True,
verbose=False, lambda_1=0, lambda_2=0, lambda_3=0,
solver='auto', tol=1e-3, duality_gap_interval=10,
max_iter=500, limited_memory_qning=20,
fista_restart=50, warm_start=False, n_threads=-1,
random_state=0, multi_class="auto", dual=None):
super().__init__(loss=loss, penalty=penalty, fit_intercept=fit_intercept,
solver=solver, tol=tol, random_state=random_state, verbose=verbose,
lambda_1=lambda_1, lambda_2=lambda_2, lambda_3=lambda_3,
duality_gap_interval=duality_gap_interval, max_iter=max_iter,
limited_memory_qning=limited_memory_qning, multi_class=multi_class,
fista_restart=fista_restart, warm_start=warm_start,
n_threads=n_threads, dual=dual)
def compute_r(estimator_name, aux, X, labels, active_set):
"""
Compute R coefficient corresponding to the estimator.
Parameters
----------
estimator_name (string):
Name of the estimator class
aux (ERM):
Auxiliary estimator
X (numpy array or scipy sparse CSR matrix):
Features matrix
labels (numpy.array):
Labels matrix
active_set (numpy.array):
Active set
Returns
-------
    R (numpy.array):
        Residual (for Lasso) or logistic-loss gradient (for L1Logistic)
        vector used to rank features outside the current active set.
"""
R = None
pred = aux.predict(X[:, active_set])
if estimator_name == "Lasso":
if len(active_set) == 0:
R = labels
else:
R = labels.ravel() - pred.ravel()
elif estimator_name == "L1Logistic":
if len(active_set) == 0:
R = -0.5 * labels.ravel()
else:
R = -labels.ravel() / (1.0 + np.exp(labels.ravel() * pred.ravel()))
return R
def fit_large_feature_number(estimator, aux, X, labels):
"""
    Fitting function used when the number of features is greater than 1000.
Args
----
estimator (ERM):
Fitted estimator
aux (ERM):
Auxiliary estimator
X (numpy array or scipy sparse CSR matrix):
Features matrix
labels (numpy.array):
Labels matrix
"""
n, p = X.shape
scaling = 4.0
init = min(100, p)
estimator.restart = True
num_as = math.ceil(math.log10(p / init) / math.log10(scaling))
active_set = []
n_active = 0
estimator.coef_ = np.zeros(p, dtype=X.dtype)
if estimator.fit_intercept:
estimator.intercept_ = 0
for ii in range(num_as):
        R = compute_r(estimator.__class__.__name__, aux, X, labels, active_set)
corr = np.abs(X.transpose().dot(R).ravel()) / n
if n_active > 0:
corr[active_set] = -10e10
n_new_as = max(
min(init * math.ceil(scaling ** ii), p) - n_active, 0)
new_as = corr.argsort()[-n_new_as:]
if len(new_as) == 0 or max(corr[new_as]) <= estimator.lambda_1 * (1 + estimator.tol):
break
if len(active_set) > 0:
neww = np.zeros(n_active + n_new_as,
dtype=X.dtype)
neww[0:n_active] = aux.coef_
aux.coef_ = neww
active_set = np.concatenate((active_set, new_as))
else:
active_set = new_as
aux.coef_ = np.zeros(
len(active_set), dtype=X.dtype)
n_active = len(active_set)
if estimator.verbose:
logger.info("Size of the active set: {%d}", n_active)
aux.fit(X[:, active_set], labels)
estimator.coef_[active_set] = aux.coef_
if estimator.fit_intercept:
estimator.intercept_ = aux.intercept_
class Lasso(Regression):
"""
A pre-configured class for Lasso regression.
    Using an active set strategy when the number of features is greater than 1000.
"""
def __init__(self, lambda_1=0, solver='auto', tol=1e-3,
duality_gap_interval=10, max_iter=500, limited_memory_qning=20,
fista_restart=50, verbose=True,
warm_start=False, n_threads=-1, random_state=0, fit_intercept=True, dual=None):
super().__init__(loss='square', penalty='l1', lambda_1=lambda_1, solver=solver, tol=tol,
duality_gap_interval=duality_gap_interval, max_iter=max_iter,
limited_memory_qning=limited_memory_qning, fista_restart=fista_restart,
verbose=verbose, warm_start=warm_start, n_threads=n_threads,
random_state=random_state, fit_intercept=fit_intercept, dual=dual)
def fit(self, X, y):
"""
Fit the parameters.
Parameters
----------
X (numpy array or scipy sparse CSR matrix):
input n X p numpy matrix; the samples are on the rows
y (numpy array):
- vector of size n with real values for regression
- matrix of size n X k for multivariate regression
Returns
-------
self (ERM):
Returns the instance of the class
"""
X, labels, _ = check_input_fit(X, y, self)
_, p = X.shape
if p <= 1000:
# no active set
super().fit(X, labels)
else:
aux = Regression(loss='square', penalty='l1',
fit_intercept=self.fit_intercept, random_state=self.random_state)
fit_large_feature_number(self, aux, X, labels)
return self
class L1Logistic(Classifier):
"""
A pre-configured class for L1 logistic classification.
    Using an active set strategy when the number of features is greater than 1000
"""
_estimator_type = "classifier"
def _more_tags(self):
return {"requires_y": True, "_xfail_checks": {
"check_non_transformer_estimators_n_iter": (
"We have a different implementation of _n_iter in the multinomial case."
),
}}
def __init__(self, lambda_1=0, solver='auto', tol=1e-3,
duality_gap_interval=10, max_iter=500, limited_memory_qning=20,
fista_restart=50, verbose=True, warm_start=False, n_threads=-1,
random_state=0, fit_intercept=True, multi_class="auto", dual=None):
super().__init__(loss='logistic', penalty='l1', lambda_1=lambda_1, solver=solver, tol=tol,
duality_gap_interval=duality_gap_interval, max_iter=max_iter,
limited_memory_qning=limited_memory_qning,
fista_restart=fista_restart, verbose=verbose,
warm_start=warm_start, n_threads=n_threads, random_state=random_state,
fit_intercept=fit_intercept, multi_class=multi_class, dual=dual)
if multi_class == "multinomial":
self.loss = "multiclass-logistic"
def fit(self, X, y):
"""
Fit the parameters.
Parameters
----------
X (numpy array, or scipy sparse CSR matrix):
input n x p numpy matrix; the samples are on the rows
y (numpy.array):
Input labels.
- vector of size n with {-1, +1} labels for binary classification,
which will be automatically converted if labels in {0,1} are
provided and {0,1,..., n} for multiclass classification.
"""
X, labels, le = check_input_fit(X, y, self)
self.le_ = le
_, p = X.shape
if p <= 1000:
# no active set
super().fit(X, labels, le_parameter=self.le_)
else:
aux = Classifier(
loss='logistic', penalty='l1', fit_intercept=self.fit_intercept)
fit_large_feature_number(self, aux, X, labels)
return self
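if __name__ == "__main__":
    # Illustrative usage sketch: fit an l2-regularized logistic regression on
    # synthetic data. The data and hyper-parameter values below are arbitrary.
    rng = np.random.RandomState(0)
    X = rng.randn(200, 10)
    y = (X[:, 0] + 0.1 * rng.randn(200) > 0).astype(int)
    clf = LogisticRegression(penalty="l2", lambda_1=0.1, max_iter=100, verbose=False)
    clf.fit(X, y)
    print("train accuracy:", clf.score(X, y))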
```
#### File: arsenic/test/test_estimator.py
```python
import pytest
from sklearn.utils.estimator_checks import check_estimator
from cyanure.estimators import LogisticRegression, Regression, Classifier, LinearSVC, L1Logistic, Lasso
@pytest.mark.parametrize(
"estimator",
[LogisticRegression(), Regression(), Classifier(), LinearSVC(), Lasso(), L1Logistic()]
)
def test_all_estimators(estimator):
return check_estimator(estimator)
``` |
{
"source": "jmaister/ml-functions",
"score": 3
} |
#### File: jmaister/ml-functions/softmax.py
```python
import numpy as np
def softmax(x):
"""
Compute softmax values for each sets of scores in x.
Rows are scores for each class.
Columns are predictions (samples).
"""
scoreMatExp = np.exp(np.asarray(x))
return scoreMatExp / scoreMatExp.sum(0)
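if __name__ == "__main__":
    # Illustrative sketch: three classes (rows) scored for two samples
    # (columns); each column of the output sums to 1.
    scores = np.array([[1.0, 2.0],
                       [2.0, 4.0],
                       [3.0, 6.0]])
    probs = softmax(scores)
    print(probs)
    print(probs.sum(axis=0))  # -> [1. 1.]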
``` |
{
"source": "jmak123/docs_share",
"score": 3
} |
#### File: docs_share/docs_share/dbt_parser.py
```python
import yaml
class dbt_parser:
"""
base object of a source/schema yaml in dbt repo
"""
def __init__(self, file_path):
self.file_path: str = file_path
self.content: dict = {}
self.desc: list = []
self.parse()
self.get_desc()
def parse(self):
"""
open file and load dbt yaml as json
"""
with open(self.file_path, "r") as f:
self.content = yaml.safe_load(f)
return self.content
def get_desc(self):
"""
parse all json and get all description field info in views-dimensions
"""
if (
"models" in self.content
): ## only consider schema yamls as we ignore source yamls here
for m in self.content["models"]:
if "columns" in m: ## skip models with no columns
for c in m["columns"]:
if "description" in c:
self.desc.append(
{
"relation": m["name"],
"field": c["name"],
"description": c["description"],
}
)
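if __name__ == "__main__":
    # Illustrative sketch: a minimal schema yaml matching the structure that
    # parse()/get_desc() expect (models -> columns -> description). The model
    # and column names below are made up.
    import tempfile
    sample = (
        "models:\n"
        "  - name: orders\n"
        "    columns:\n"
        "      - name: order_id\n"
        "        description: Primary key of the orders table\n"
    )
    with tempfile.NamedTemporaryFile("w", suffix=".yml", delete=False) as f:
        f.write(sample)
    parser = dbt_parser(f.name)
    print(parser.desc)
    # -> [{'relation': 'orders', 'field': 'order_id',
    #      'description': 'Primary key of the orders table'}]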
``` |
{
"source": "jmakeig/mlvm",
"score": 2
} |
#### File: mlvm/commands/prepare.py
```python
import os
import logging
logger = logging.getLogger('mlvm')
from mlvm.settings import HOME, SYSTEM, USER
import mlvm.filesystem as fs
from mlvm.exceptions import RootUserRequired, UnsupportedPlatform
# PATHS = {
# 'Darwin': {
# 'Lib': USER + '/Library',
# 'Sup': USER + '/Library/Application Support',
# 'Pref': USER + '/Library/PreferencePanes',
# 'Start': USER + '/Library/StartupItems'
# }
# }
def is_prepared():
if 'Darwin' == SYSTEM:
return True # TODO
else:
raise UnsupportedPlatform('%s is not a supported platform', SYSTEM)
def ensure_sudo():
try:
real_user = os.environ['SUDO_USER']
except Exception, err:
raise RootUserRequired()
import pwd
return (
pwd.getpwnam(real_user).pw_uid,
pwd.getpwnam(real_user).pw_gid
)
def prepare():
if 'Darwin' == SYSTEM:
real_user_id, real_group_id = ensure_sudo()
# TODO: Extract me into a proper data structure
LIBRARY = USER + '/Library'
APPLICATION_SUPPORT = LIBRARY + '/Application Support'
PREF_PANES = LIBRARY + '/PreferencePanes'
STARTUP = LIBRARY + '/StartupItems'
current_dir = fs.ensure_directory(HOME + '/versions/.current')
os.lchown(current_dir, real_user_id, real_group_id)
fs.clear_links(current_dir)
# MarkLogic Server
fs.symlink_force(current_dir + '/MarkLogic', LIBRARY + '/MarkLogic')
os.lchown(LIBRARY + '/MarkLogic', real_user_id, real_group_id)
# Application Support
fs.symlink_force(current_dir + '/Support', APPLICATION_SUPPORT + '/MarkLogic')
os.lchown(APPLICATION_SUPPORT + '/MarkLogic', real_user_id, real_group_id)
# Preference Pane
fs.symlink_force(current_dir + '/PreferencePanes/MarkLogic.prefPane', PREF_PANES + '/MarkLogic.prefPane')
os.lchown(PREF_PANES + '/MarkLogic.prefPane', real_user_id, real_group_id)
# Start Up
fs.symlink_force(current_dir + '/StartupItems/MarkLogic', STARTUP + '/MarkLogic')
else:
raise Exception('%s is not a supported platform', SYSTEM)
def _unlink(path):
if(os.path.islink(path)):
os.unlink(path)
def remove():
if 'Darwin' == SYSTEM:
real_user_id, real_group_id = ensure_sudo()
# TODO: Extract me into a proper data structure
LIBRARY = USER + '/Library'
APPLICATION_SUPPORT = LIBRARY + '/Application Support'
PREF_PANES = LIBRARY + '/PreferencePanes'
STARTUP = LIBRARY + '/StartupItems'
logger.debug('Unlinking {0}'.format(LIBRARY + '/MarkLogic'))
_unlink(LIBRARY + '/MarkLogic')
logger.debug('Unlinking {0}'.format(APPLICATION_SUPPORT + '/MarkLogic'))
_unlink(APPLICATION_SUPPORT + '/MarkLogic')
logger.debug('Unlinking {0}'.format(PREF_PANES + '/MarkLogic.prefPane'))
_unlink(PREF_PANES + '/MarkLogic.prefPane')
logger.debug('Unlinking {0}'.format(STARTUP + '/MarkLogic'))
_unlink(STARTUP + '/MarkLogic')
current_dir = HOME + '/versions/.current'
if os.path.isdir(current_dir):
logger.debug('Clearing {0}'.format(current_dir))
fs.clear_links(current_dir)
else:
raise UnsupportedPlatform('%s is not a supported platform', SYSTEM)
```
#### File: mlvm/mlvm/main.py
```python
import sys
import logging
logger = logging.getLogger('mlvm')
logger.setLevel(logging.DEBUG)
#log = logging.FileHandler('mlvm.log')
log = logging.StreamHandler()
log.setLevel(logging.DEBUG)
log.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
logger.addHandler(log)
from settings import HOME, SYSTEM
# TODO: Add error handling to make sure `prepare` has been called for
# all of the commands that need it
def route_command(arguments):
if arguments.get('install'):
from mlvm.commands.install import install
install(arguments) # TODO: Pass decouple arguments
elif arguments.get('prepare'):
from mlvm.commands.prepare import prepare
prepare()
elif arguments.get('use'):
from mlvm.commands.use import use
use(arguments.get('<version>'))
elif arguments.get('list'):
from mlvm.commands.list import list
list(sys.stdout)
elif arguments.get('start'):
from mlvm.commands.run import start
start()
elif arguments.get('stop'):
from mlvm.commands.run import stop
stop()
elif arguments.get('ps'):
from mlvm.commands.run import ps
ps()
elif arguments.get('init'):
from mlvm.commands.init import init
init(arguments.get('--host'), arguments.get('--rename'))
elif arguments.get('remove'):
from mlvm.commands.prepare import remove
remove()
elif arguments.get('eval'):
from mlvm.commands.eval import remote_eval
remote_eval('Sequence.from(["asdf", xdmp.version(), 42, true]);')
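# Illustrative sketch (assumption): `arguments` is the docopt-style dictionary
# built by the CLI entry point, so for example
#     route_command({'use': True, '<version>': '8.0-5.5'})
# dispatches to mlvm.commands.use.use('8.0-5.5').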
```
#### File: mlvm/mlvm/versions.py
```python
import platform
import re
def get_release_artifact(major, minor, patch, system=platform.system()):
if 'Darwin' == system:
return 'MarkLogic-' + major + '.' + minor + '-' + patch + '-x86_64'
raise Exception('Not yet implemented')
def parse_artifact_from_file(file):
pattern = 'MarkLogic-(?:RHEL\d-)?\d{1,2}\.\d-(?:(?:\d{8})|(?:\d{1,2}\.\d{1,2}))[\.\-](?:x86_|amd)64'
match = re.findall(pattern, file)
if len(match) == 1:
return match[0]
    raise Exception(file + ' does not match a MarkLogic version')
def parse_version(version):
""" Parses a string, such as `'9.0-20160731'` or `'8.0-5.5'` into a
dictionary with keys, `major`, `minor`, and `patch`. """
version = str(version)
pattern = '(?:MarkLogic-(?:RHEL\d-)?)?(\d{1,2}\.\d(?:-(?:(?:\d{8})|(?:\d{1,2}(?:\.\d{1,2})?)))?)(?:[\.\-](?:x86_|amd)64)?'
match = re.findall(pattern, version)
if 1 != len(match):
raise Exception(version + ' doesn’t match a MarkLogic artifact')
mm_patch = match[0].split('-')
mm = mm_patch[0]
patch = None
if len(mm_patch) > 1:
patch = mm_patch[1]
mm_tokens = mm.split('.')
major = int(mm_tokens[0])
minor = None
if len(mm_tokens) > 1:
minor = int(mm_tokens[1])
    return {'major': str(major), 'minor': str(minor) if minor is not None else None, 'patch': patch}
def serialize_version(version):
major = version.get('major')
minor = version.get('minor')
patch = version.get('patch')
result = major
if minor is not None:
result = result + '.' + minor
if patch is not None:
result = result + '-' + patch
return result
``` |
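A quick usage sketch of the two helpers above; the import path is assumed from the file layout (`mlvm/mlvm/versions.py`):

```python
# Assumes the module is importable as mlvm.versions, per the path above.
from mlvm.versions import parse_version, serialize_version

print(parse_version('9.0-20160731'))
# {'major': '9', 'minor': '0', 'patch': '20160731'}
print(parse_version('MarkLogic-8.0-5.5-x86_64'))
# {'major': '8', 'minor': '0', 'patch': '5.5'}
print(serialize_version({'major': '9', 'minor': '0', 'patch': '20160731'}))
# 9.0-20160731
```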
{
"source": "jmakeig/modulesdb",
"score": 2
} |
#### File: modulesdb/src/local-watcher.py
```python
import os
import sys
import subprocess
import datetime
import time
import copy
import argparse
import getpass
import json
import fnmatch
import re
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from modulesclient import ModulesClient
def walk(top, exclusions=[]):
"Walks the file system recursively starting at top, putting each file to the globally defined REST service."
# TODO: Figure out how to separate out directory and file exclusions
# http://stackoverflow.com/a/5141829/563324
excludes = r'|'.join([fnmatch.translate(x) for x in exclusions]) or r'$.'
# print excludes
def _rel(path, start):
"Pass-through to os.path.relpath except that it returns an empty string instead of '.' for exact matches."
r = os.path.relpath(path, start)
if "." == r:
return ""
return r
for root, dirs, files in os.walk(top):
# print top, root, dirs, [os.path.join(os.path.relpath(root, top), d) for d in dirs]
# dirs[:] = [os.path.join(top, d) for d in dirs]
# print dirs
# print (root, top, os.path.relpath(root, top))
# for d in dirs:
# print (os.path.join(_rel(root, top), d), bool(re.match(excludes, os.path.join(_rel(root, top), d))))
dirs[:] = [d for d in dirs if not re.match(excludes, os.path.join(_rel(root, top), d))]
#print dirs
for name in files:
asb = os.path.join(root, name)
rel = _rel(asb, top)
#print (rel, os.path.join(os.path.relpath(root, top)))
if not re.match(excludes, rel):
print format_put_message(
modules_client.put_file(
uri=rel,
file_path=asb
)
)
# else:
# print " Ignoring " + rel
def format_put_message(msg):
"Format the return values for reporting to stdout"
    # msg is of the form (HTTP verb, status code, URI of the affected doc)
    # a PUT that returns 201 means the document was newly added; otherwise it was updated
# print msg
verb = "* Updated"
if msg[0] == "DELETE":
verb = "- Removed"
if msg[0] == "PUT" and msg[1] == 201:
verb = "+ Added"
return verb + " " + msg[2]
class ChangeHandler(FileSystemEventHandler):
"Handle changes to files and directories."
def __init__(self, directory, exclusions=[], after=[]):
"Initialize with the base directory."
self.directory = directory
self.exclusions = exclusions
self.exclusions_re = r'|'.join([fnmatch.translate(x) for x in exclusions]) or r'$.'
self.after = after # after is a list of strings, each of which is a path to a shell script. TODO: Is that the best way?
def _rel(self, path):
"Use the stored base directory to calculate the relative path with os.path.relpath."
r = os.path.relpath(path, self.directory)
if "." == r:
return ""
return r
def _is_excluded(self, path):
"Whether a particular path is excluded based on the list passed in at initialization."
return bool(re.match(self.exclusions_re, self._rel(path)))
def _run_after(self, event):
"Loops through the list of scripts and runs each in a separate subprocess"
        # TODO: Figure out if there are actual requirements for I/O in and out of the script.
for script in self.after:
# TODO: Need some try/catch logic here
subprocess.call([script])
# TODO: Exclusion lists (e.g. .git, .DS_Store, Thumbs.db), similar to the way .gitignore works.
# Call it .mlignore? Is there a Python class that already does this matching?
def on_created(self, event):
# print "created event"
if event.is_directory or self._is_excluded(event.src_path):
return
else:
print format_put_message(
modules_client.put_file(uri=self._rel(event.src_path), file_path=event.src_path)
)
self._run_after(event)
def on_modified(self, event):
# print "modified event " + self._rel(event.src_path)
if event.is_directory or self._is_excluded(event.src_path):
return
else:
print format_put_message(
modules_client.put_file(uri=self._rel(event.src_path), file_path=event.src_path)
)
self._run_after(event)
def on_deleted(self, event):
# print "deleted event"
if event.is_directory or self._is_excluded(event.src_path):
return
else:
print format_put_message(
modules_client.delete(uri=self._rel(event.src_path))
)
self._run_after(event)
def on_moved(self, event):
# TODO: Clean all of this logic and repeated code up
if self._is_excluded(event.src_path) and not self._is_excluded(event.dest_path):
print format_put_message(
modules_client.put_file(uri=self._rel(event.dest_path), file_path=event.dest_path)
)
return
if not self._is_excluded(event.src_path) and self._is_excluded(event.dest_path):
print format_put_message(
modules_client.delete(uri=self._rel(event.src_path))
)
return
# print "moved event"
print format_put_message(
# put_file_contents(event.dest_path)
modules_client.move_file(from_uri=self._rel(event.src_path), to_uri=self._rel(event.dest_path), file_path=event.dest_path)
)
self._run_after(event)
def on_any_event(self, event):
# print event
pass
def observe(directory, recursive=True, exclusions=[], after=[]):
    "Observe folder and file changes until interrupted."
event_handler = ChangeHandler(directory, exclusions, after)
observer = Observer()
observer.schedule(event_handler, directory, recursive=recursive)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
    # All of the examples call .join(). I don't see how this is ever executed, though, except right after the interrupt is handled. What does join do?
observer.join()
if __name__ == '__main__':
def get_configuration(working_dir, command_line):
"Get the configuration from a combination of the command line input and a dot file"
# Parse the command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--url", default=None, help="The REST API endpoint fronting the modules database, of the form protocol://host:port, where protocol is http or https")
        parser.add_argument('--auth', default=None, choices=["none", "digest", "basic", "cert"], help="The HTTP authentication method to use.") # None here means that it can be pruned such that the overlay on the dot file just works
parser.add_argument('--user', '-u', default=None, help="The username to use to authenticate against the REST service.")
        parser.add_argument('--password', '-p', default=None, help="The password to use to authenticate against the REST service. If the authentication method requires a password and you don't supply one at the command line or in the preferences file you will be prompted for it.")
parser.add_argument('--database', '-d', default=None, help="The name of the database to write to")
#parser.add_argument('--cert', "-E", help="(SSL) Tells curl to use the specified client certificate file when getting a file with HTTPS, FTPS or another SSL-based protocol. The certificate must be in PEM format. If the optional password isn't specified, it will be queried for on the terminal. Note that this option assumes a \"certificate\" file that is the private key and the private certificate concatenated! See --cert and --key to specify them independently.")
#parser.add_argument('--key', help="(SSL/SSH) Private key file name. Allows you to provide your private key in this separate file.")
#parser.add_argument('--insecure', "-k")
        parser.add_argument('--permission', '-perm', dest="permissions", nargs="?", action="append", default=None, help="Zero or more permissions that should be set on all files. Each entry should be an individual role:capability, where capability is in {read, update, execute}, for example -perm app-user:execute.")
parser.add_argument('--ignore', nargs="?", action="append", default=None, help="A gitignore-style path to ignore for observation and walking.")
# Command-line only
parser.add_argument('--config', '-K', default=".modulesdb", help="The location of the JSON configuration file. Command-line options take precedence. Defaults to .modulesdb at the root of the directory being observed.")
        parser.add_argument('--walk', action="store_true", default=False, help="Whether to recursively push all of the files to the modules database before beginning observation.")
parser.add_argument("--debug", action="store_true", default=False, help="Print out some extra debugging information.")
#parser.add_argument('--quiet', '-q', action="store_true", default=False, help="")
parser.add_argument('--after', '-a', action="append", default=None, help="A script to be invoked after a file is pushed to the remote modules database. This is _not_ invoked after a directory walk (--walk). This also doesn't currently support piping input in or handling output or errors.")
parser.add_argument("dir", nargs='?', help="The directory to watch. Defaults to the current working directory.", default=working_dir)
        # UGLY: Turn the Namespace into a dictionary. Is this the only way to do this?
args = copy.deepcopy(
vars(
# http://docs.python.org/2/library/sys.html#sys.argv
parser.parse_args(command_line[1:])
)
)
# Prune Nones from command-line args
# http://stackoverflow.com/questions/2544710/how-i-can-get-rid-of-none-values-in-dictionary
args = dict((k,v) for k,v in args.iteritems() if v is not None)
# Get the preferences out of a dot file in the target directory
# Command-line options take precedence
prefs = {}
pref_path = args['dir'] + "/" + args['config']
if os.path.isfile(pref_path):
print "Reading preferences from " + pref_path
pref_file = open(pref_path, "r")
prefs = json.load(pref_file)
# print prefs
# Overlay command-line arguments on top of the preferences read from the dot file
prefs.update(args)
def parse_perms(perms, prefix):
"Turns something like ['a:A', 'b:B', 'a:C'] into {'a': ['A', 'C'], 'b': ['B']}"
d = {}
for p in perms:
kv = p.split(':')
k = prefix + kv[0]
if k not in d:
d[k] = [kv[1]]
else:
d[k].append(kv[1])
return d
# Ask for a password if it's not provided
if prefs['auth'] in ['digest', 'basic'] and "password" not in prefs:
            prefs['password'] = getpass.getpass("Password for " + prefs['url'] + ": ")
if not isinstance(prefs['permissions'], dict):
prefs['permissions'] = parse_perms(prefs['permissions'], 'perm:')
else:
prefs['permissions'] = dict(('perm:' + k,v) for k,v in prefs['permissions'].iteritems())
if "after" not in prefs:
# UGLY: Reset proper default of after if it hasn't been set in the dot file or the command line
prefs['after'] = []
if prefs['debug']:
print prefs
return prefs
config = get_configuration(os.getcwd(), sys.argv)
modules_client = ModulesClient(config)
# Start the script in a directory you want to observe
BASEDIR = os.path.abspath(config['dir'])
if config['walk']:
print "Walking " + BASEDIR
walk(BASEDIR, config['ignore'])
print "Observing " + BASEDIR + "…"
observe(BASEDIR, recursive=True, exclusions=config['ignore'], after=config.get('after'))
``` |
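The exclusion handling in `walk` and `ChangeHandler` above compiles the gitignore-style globs into a single regular expression via `fnmatch.translate`; a minimal, self-contained sketch of that matching (patterns and paths are made up for illustration):

```python
# Minimal sketch of the exclusion matching used above; patterns and paths are examples only.
import fnmatch
import re

exclusions = ['.git', '*.log', 'build/*']
excludes = r'|'.join([fnmatch.translate(x) for x in exclusions]) or r'$.'

for path in ['.git', 'src/app.xqy', 'error.log', 'build/out.txt']:
    print(path + ' excluded: ' + str(bool(re.match(excludes, path))))
# .git excluded: True
# src/app.xqy excluded: False
# error.log excluded: True
# build/out.txt excluded: True
```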
{
"source": "j-mak/exercism.io",
"score": 4
} |
#### File: exercism.io/atbash-cipher/atbash_cipher.py
```python
from string import ascii_lowercase
cipher = ascii_lowercase[::-1]
def encode(raw):
"""Encode plain text with atbash cipher method."""
response = ''
raw = raw.replace(' ', '')
tab_index = 0
for char in raw.lower():
if char in ascii_lowercase:
index = ascii_lowercase.index(char)
response += cipher[index]
tab_index += 1
elif char.isdigit():
response += char
tab_index += 1
if tab_index == 5:
response += ' '
tab_index = 0
return response.strip()
def decode(encoded):
    """Decode text that was encoded with the atbash cipher."""
response = ''
encoded = encoded.replace(' ', '')
for char in encoded:
if char in cipher:
index = cipher.index(char)
response += ascii_lowercase[index]
else:
response += char
return response
```
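A few spot checks of the functions above; expected values follow the standard atbash mapping and five-character grouping:

```python
from atbash_cipher import encode, decode  # assumes the module above is importable

print(encode("yes"))             # bvh
print(encode("Testing, 1 2 3"))  # gvhgr mt123
print(decode("gvhgr mt123"))     # testing123
```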
#### File: exercism.io/grains/grains_test.py
```python
import unittest
from grains import (
on_square,
total_after,
)
class GrainsTest(unittest.TestCase):
def test_square_1(self):
self.assertEqual(on_square(1), 1)
self.assertEqual(total_after(1), 1)
def test_square_2(self):
self.assertEqual(on_square(2), 2)
self.assertEqual(total_after(2), 3)
def test_square_3(self):
self.assertEqual(on_square(3), 4)
self.assertEqual(total_after(3), 7)
def test_square_4(self):
self.assertEqual(on_square(4), 8)
self.assertEqual(total_after(4), 15)
def test_square_16(self):
self.assertEqual(on_square(16), 32768)
self.assertEqual(total_after(16), 65535)
def test_square_32(self):
self.assertEqual(on_square(32), 2147483648)
self.assertEqual(total_after(32), 4294967295)
def test_square_64(self):
self.assertEqual(on_square(64), 9223372036854775808)
self.assertEqual(total_after(64), 18446744073709551615)
def test_square_0_raises_exception(self):
with self.assertRaises(ValueError):
on_square(0)
with self.assertRaises(ValueError):
total_after(0)
def test_square_negative_raises_exception(self):
with self.assertRaises(ValueError):
on_square(-1)
with self.assertRaises(ValueError):
total_after(-1)
def test_square_gt_64_raises_exception(self):
with self.assertRaises(ValueError):
on_square(65)
with self.assertRaises(ValueError):
total_after(65)
if __name__ == '__main__':
unittest.main()
```
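The tests above import `on_square` and `total_after` from a `grains` module that is not included here; a minimal sketch consistent with these test values (square n holds 2**(n-1) grains, the running total after square n is 2**n - 1):

```python
# Minimal sketch of the grains module assumed by the tests above.
def on_square(square):
    if square < 1 or square > 64:
        raise ValueError("square must be between 1 and 64")
    return 2 ** (square - 1)


def total_after(square):
    if square < 1 or square > 64:
        raise ValueError("square must be between 1 and 64")
    return 2 ** square - 1
```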
#### File: exercism.io/hamming/hamming.py
```python
def distance(dna_strand1, dna_strand2):
"""Calculate hamming distance between two DNA strands."""
if len(dna_strand1) != len(dna_strand2):
        raise ValueError("DNA strands must be of equal length")
counter = 0
for i in range(len(dna_strand1)):
if dna_strand1[i] != dna_strand2[i]:
counter += 1
return counter
```
#### File: exercism.io/hello-world/hello_world.py
```python
def hello():
"""Simple method to print Hello, World!"""
return "Hello, World!"
```
#### File: exercism.io/isogram/isogram.py
```python
def is_isogram(word):
    """Check whether the given word is an isogram."""
chars = set()
if len(word) == 0:
return True
for letter in word:
letter = letter.lower()
if letter.isalpha() and letter in chars:
return False
chars.add(letter)
return True
```
#### File: exercism.io/kindergarten-garden/kindergarten_garden.py
```python
class Garden(object):
seed = {
"R": "Radishes",
"C": "Clover",
"G": "Grass",
"V": "Violets"
}
def __init__(self, seeds, students=("Alice", "Bob", "Charlie", "David",
"Eve", "Fred", "Ginny", "Harriet",
"Ileana", "Joseph", "Kincaid",
"Larry")):
self.students = sorted(students)
self.seeds = seeds.split('\n')
    def plants(self, name):
        """Return the plants belonging to the given student."""
result = []
if name in self.students:
index = self.students.index(name)
for group in self.seeds:
for i in range(index * 2, index * 2 + 2):
result.append(self.seed.get(group[i]))
return result
```
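A hypothetical two-row plot shows how cups map to students: Alice, first in the sorted default roster, owns the first two cups of each row.

```python
from kindergarten_garden import Garden  # assumes the module above is importable

garden = Garden("RC\nGG")
print(garden.plants("Alice"))  # ['Radishes', 'Clover', 'Grass', 'Grass']
```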
#### File: exercism.io/largest-series-product/largest_series_product.py
```python
def largest_product(number, series):
    """Calculate the largest product of any contiguous
    substring of the given length."""
    if len(number) < series or series < 0:
        raise ValueError("series length must be between 0 and the length of the input")
    if not series:
        return 1
    maximum = 0
    for i in range(len(number) + 1 - series):
        product = int(number[i])
        for j in number[i + 1:i + series]:
            product *= int(j)
        maximum = max(maximum, product)
return maximum
```
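Spot checks for `largest_product`:

```python
from largest_series_product import largest_product  # assumes the module above is importable

print(largest_product("63915", 3))  # 162, from the window 6 * 3 * 9
print(largest_product("12345", 0))  # 1, an empty series multiplies to one
```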
#### File: exercism.io/leap/leap.py
```python
def is_leap_year(year):
    """Return True if the given year is a leap year."""
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
```
#### File: exercism.io/pangram/pangram_test.py
```python
import unittest
from pangram import is_pangram
# test cases adapted from `x-common//canonical-data.json` @ version: 1.0.0
class PangramTests(unittest.TestCase):
def test_sentence_empty(self):
self.assertFalse(is_pangram(''))
def test_pangram_with_only_lower_case(self):
self.assertTrue(
is_pangram('the quick brown fox jumps over the lazy dog'))
def test_missing_character_x(self):
self.assertFalse(
is_pangram('a quick movement of the enemy will '
'jeopardize five gunboats'))
def test_another_missing_character_x(self):
self.assertFalse(
is_pangram('the quick brown fish jumps over the lazy dog'))
def test_pangram_with_underscores(self):
self.assertTrue(
is_pangram('the_quick_brown_fox_jumps_over_the_lazy_dog'))
def test_pangram_with_numbers(self):
self.assertTrue(
is_pangram('the 1 quick brown fox jumps over the 2 lazy dogs'))
def test_missing_letters_replaced_by_numbers(self):
self.assertFalse(
is_pangram('7h3 qu1ck brown fox jumps ov3r 7h3 lazy dog'))
def test_pangram_with_mixedcase_and_punctuation(self):
self.assertTrue(is_pangram('"Five quacking Zephyrs jolt my wax bed."'))
def test_upper_and_lower_case_versions_of_the_same_character(self):
self.assertFalse(
is_pangram('the quick brown fox jumped over the lazy FOX'))
if __name__ == '__main__':
unittest.main()
```
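The tests above import `is_pangram` from a `pangram` module that is not included here; a minimal sketch consistent with these cases:

```python
# Minimal sketch of the pangram module assumed by the tests above.
from string import ascii_lowercase


def is_pangram(sentence):
    """A sentence is a pangram when every letter a-z appears at least once."""
    return set(ascii_lowercase) <= set(sentence.lower())
```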
#### File: exercism.io/pig-latin/pig_latin.py
```python
VOWELS = ('a', 'e', 'i', 'o', 'u', 'yt', 'xr')
CONSONANTS = ('b', 'c', 'd', 'f', 'g',
'h', 'j', 'k', 'l', 'm',
'n', 'p', 'q', 'r', 's',
't', 'v', 'w', 'x', 'y',
'z', 'sh', 'sch', 'zz', 'gh',
'ch', 'th', 'qu', 'thr',
'squ')
def translate(sentence):
result = []
for word in sentence.split(' '):
if word[:3] in CONSONANTS:
word = word[3:] + word[:3]
elif word[:2] in CONSONANTS:
word = word[2:] + word[:2]
elif word[0] in CONSONANTS and word[:2] not in VOWELS:
word = word[1:] + word[0]
result.append(word + 'ay')
return " ".join(result)
```
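Spot checks for `translate`:

```python
from pig_latin import translate  # assumes the module above is importable

print(translate("pig"))             # igpay
print(translate("apple"))           # appleay
print(translate("quick fast run"))  # ickquay astfay unray
```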
#### File: exercism.io/prime-factors/prime_factors.py
```python
def prime_factors(number):
"""Returns prime factors for given number."""
result = []
divisor = 2
while divisor <= number:
if number % divisor == 0:
            number //= divisor
result.append(divisor)
else:
divisor += 1
return result
```
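Spot checks for `prime_factors`:

```python
from prime_factors import prime_factors  # assumes the module above is importable

print(prime_factors(12))      # [2, 2, 3]
print(prime_factors(901255))  # [5, 17, 23, 461]
```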
#### File: exercism.io/pythagorean-triplet/pythagorean_triplet.py
```python
import math
def primitive_triplets(b):
trips = set()
for x, y, z in trip(b):
if math.gcd(x, y) == math.gcd(y, z) == 1:
trips.add((x, y, z))
return trips
def triplets_in_range(min_value, max_value):
triple = set()
for a in range(min_value, max_value + 1):
for b in range(a, max_value + 1):
for c in range(b, max_value + 1):
if a ** 2 + b ** 2 == c ** 2:
triple.update([(a, b, c)])
return triple
def is_triplet(triplet):
a, b, c = sorted(triplet)
return a * a + b * b == c * c
def trip(b):
if b % 4:
raise ValueError('Invalid Input')
max_num = int(math.sqrt((b - 1) / 2))
for n in range(1, max_num + 1):
        m, remainder = divmod(b // 2, n)
        if remainder == 0:
yield (tuple(sorted((m * m - n * n, b, m * m + n * n))))
```
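Spot checks for the helpers above (the printed ordering of set elements may vary):

```python
from pythagorean_triplet import (is_triplet, primitive_triplets,
                                 triplets_in_range)  # assumes the module above is importable

print(is_triplet((5, 3, 4)))      # True, the tuple is sorted internally
print(triplets_in_range(1, 10))   # {(3, 4, 5), (6, 8, 10)}
print(primitive_triplets(12))     # {(5, 12, 13), (12, 35, 37)}
```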
#### File: exercism.io/series/series.py
```python
def slices(string, slice_size):
"""Return list of lists with size of given slice size."""
result = []
if slice_size <= 0 or slice_size > len(string):
        raise ValueError("slice size must be between 1 and the length of the string")
for i in range(len(string) + 1 - slice_size):
string_slice = string[i:i + slice_size]
slice_array = []
for character in string_slice:
slice_array.append(int(character))
result.append(slice_array)
return result
``` |
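Spot checks for `slices`:

```python
from series import slices  # assumes the module above is importable

print(slices("01234", 3))  # [[0, 1, 2], [1, 2, 3], [2, 3, 4]]
print(slices("97867564", 2))
# [[9, 7], [7, 8], [8, 6], [6, 7], [7, 5], [5, 6], [6, 4]]
```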
{
"source": "jmakovecki/sentinel3",
"score": 3
} |
#### File: stactools/sentinel3/commands.py
```python
import logging
import os
import click
from stactools.sentinel3.stac import create_item
logger = logging.getLogger(__name__)
def create_sentinel3_command(cli):
"""Creates the stactools-sentinel3 command line utility."""
@cli.group(
"sentinel3",
short_help="Commands for working with stactools-sentinel3",
)
def sentinel3():
pass
@sentinel3.command(
"create-item",
short_help="Convert a Sentinel3 scene into a STAC item",
)
@click.argument("src")
@click.argument("dst")
@click.option("--skip_nc",
default=False,
help="Insert <True> to skip reading nc files")
    def create_item_command(src, dst, skip_nc):
        """Creates a STAC Item
Args:
src (str): path to the scene
dst (str): path to the STAC Item JSON file that will be created
skip_nc (bool): Skip parsing NetCDF data files. Since these are large, this saves
bandwidth when working over network, at the cost of metadata we can obtain
from them. Defaults to False.
"""
item = create_item(src, skip_nc)
item_path = os.path.join(dst, "{}.json".format(item.id))
item.set_self_href(item_path)
item.save_object()
return sentinel3
```
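For reference, the same `create_item` helper can be driven without the CLI; the sketch below reuses a granule name from the tests that follow, while the directory prefixes are placeholders:

```python
# Hypothetical programmatic use of create_item; directory paths are placeholders.
import os

from stactools.sentinel3.stac import create_item

src = ("/data/S3A_OL_1_EFR____20211021T073827_20211021T074112_20211021T091357_"
       "0164_077_334_4320_LN1_O_NR_002.SEN3")
item = create_item(src, True)  # second argument skips parsing the large NetCDF files
item.set_self_href(os.path.join("/tmp", "{}.json".format(item.id)))
item.save_object()
```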
#### File: sentinel3/tests/test_commands.py
```python
import os
from tempfile import TemporaryDirectory
import pystac
from pystac.extensions.eo import EOExtension
from pystac.utils import is_absolute_href
from stactools.testing import CliTestCase
from stactools.sentinel3.commands import create_sentinel3_command
from stactools.sentinel3.constants import (SENTINEL_OLCI_BANDS,
SENTINEL_SLSTR_BANDS,
SENTINEL_SRAL_BANDS,
SENTINEL_SYNERGY_BANDS)
from tests import test_data
class CreateItemTest(CliTestCase):
def create_subcommand_functions(self):
return [create_sentinel3_command]
def test_create_olci_1_efr_item(self):
item_id = str("S3A_OL_1_EFR____"
"20211021T073827_20211021T074112_20211021T091357_"
"0164_077_334_4320_LN1_O_NR_002")
granule_href = test_data.get_path(
"data-files/"
"S3A_OL_1_EFR____"
"20211021T073827_20211021T074112_20211021T091357_"
"0164_077_334_4320_LN1_O_NR_002.SEN3")
with self.subTest(granule_href):
with TemporaryDirectory() as tmp_dir:
cmd = ["sentinel3", "create-item", granule_href, tmp_dir]
self.run_command(cmd)
jsons = [p for p in os.listdir(tmp_dir) if p.endswith(".json")]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.Item.from_file(os.path.join(tmp_dir, fname))
item.validate()
self.assertEqual(item.id, item_id)
band_list = [
value.name for value in SENTINEL_OLCI_BANDS.values()
]
bands_seen = set()
for _, asset in item.assets.items():
self.assertTrue("/./" not in asset.href)
self.assertTrue(is_absolute_href(asset.href))
asset_eo = EOExtension.ext(asset)
bands = asset_eo.bands
if bands is not None:
bands_seen |= set(b.name for b in bands)
[self.assertTrue(band in band_list) for band in bands_seen]
os.remove(f"{tmp_dir}/{item_id}.json")
def test_create_olci_1_err_item(self):
item_id = str("S3B_OL_1_ERR____"
"20210831T200148_20210831T204600_20210902T011514_"
"2652_056_242______LN1_O_NT_002")
granule_href = test_data.get_path(
"data-files/"
"S3B_OL_1_ERR____"
"20210831T200148_20210831T204600_20210902T011514_"
"2652_056_242______LN1_O_NT_002.SEN3")
with self.subTest(granule_href):
with TemporaryDirectory() as tmp_dir:
cmd = ["sentinel3", "create-item", granule_href, tmp_dir]
self.run_command(cmd)
jsons = [p for p in os.listdir(tmp_dir) if p.endswith(".json")]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.Item.from_file(os.path.join(tmp_dir, fname))
item.validate()
self.assertEqual(item.id, item_id)
band_list = [
value.name for value in SENTINEL_OLCI_BANDS.values()
]
bands_seen = set()
for _, asset in item.assets.items():
self.assertTrue("/./" not in asset.href)
self.assertTrue(is_absolute_href(asset.href))
asset_eo = EOExtension.ext(asset)
bands = asset_eo.bands
if bands is not None:
bands_seen |= set(b.name for b in bands)
[self.assertTrue(band in band_list) for band in bands_seen]
os.remove(f"{tmp_dir}/{item_id}.json")
def test_create_olci_2_lfr_item(self):
item_id = str("S3A_OL_2_LFR____"
"20210523T003029_20210523T003329_20210524T050403_"
"0179_072_102_1980_LN1_O_NT_002")
granule_href = test_data.get_path(
"data-files/"
"S3A_OL_2_LFR____"
"20210523T003029_20210523T003329_20210524T050403_"
"0179_072_102_1980_LN1_O_NT_002.SEN3")
with self.subTest(granule_href):
with TemporaryDirectory() as tmp_dir:
cmd = ["sentinel3", "create-item", granule_href, tmp_dir]
self.run_command(cmd)
jsons = [p for p in os.listdir(tmp_dir) if p.endswith(".json")]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.Item.from_file(os.path.join(tmp_dir, fname))
item.validate()
self.assertEqual(item.id, item_id)
band_list = [
value.name for value in SENTINEL_OLCI_BANDS.values()
]
bands_seen = set()
for _, asset in item.assets.items():
self.assertTrue("/./" not in asset.href)
self.assertTrue(is_absolute_href(asset.href))
asset_eo = EOExtension.ext(asset)
bands = asset_eo.bands
if bands is not None:
bands_seen |= set(b.name for b in bands)
[self.assertTrue(band in band_list) for band in bands_seen]
os.remove(f"{tmp_dir}/{item_id}.json")
def test_create_olci_2_lrr_item(self):
item_id = str("S3B_OL_2_LRR____"
"20210731T214325_20210731T222741_20210802T020007_"
"2656_055_186______LN1_O_NT_002")
granule_href = test_data.get_path(
"data-files/"
"S3B_OL_2_LRR____"
"20210731T214325_20210731T222741_20210802T020007_"
"2656_055_186______LN1_O_NT_002.SEN3")
with self.subTest(granule_href):
with TemporaryDirectory() as tmp_dir:
cmd = ["sentinel3", "create-item", granule_href, tmp_dir]
self.run_command(cmd)
jsons = [p for p in os.listdir(tmp_dir) if p.endswith(".json")]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.Item.from_file(os.path.join(tmp_dir, fname))
item.validate()
self.assertEqual(item.id, item_id)
band_list = [
value.name for value in SENTINEL_OLCI_BANDS.values()
]
bands_seen = set()
for _, asset in item.assets.items():
self.assertTrue("/./" not in asset.href)
self.assertTrue(is_absolute_href(asset.href))
asset_eo = EOExtension.ext(asset)
bands = asset_eo.bands
if bands is not None:
bands_seen |= set(b.name for b in bands)
[self.assertTrue(band in band_list) for band in bands_seen]
os.remove(f"{tmp_dir}/{item_id}.json")
def test_create_olci_2_wfr_item(self):
item_id = str("S3A_OL_2_WFR____"
"20210604T001016_20210604T001316_20210604T021918_"
"0179_072_273_1440_MAR_O_NR_003")
granule_href = test_data.get_path(
"data-files/"
"S3A_OL_2_WFR____"
"20210604T001016_20210604T001316_20210604T021918_"
"0179_072_273_1440_MAR_O_NR_003.SEN3")
with self.subTest(granule_href):
with TemporaryDirectory() as tmp_dir:
cmd = ["sentinel3", "create-item", granule_href, tmp_dir]
self.run_command(cmd)
jsons = [p for p in os.listdir(tmp_dir) if p.endswith(".json")]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.Item.from_file(os.path.join(tmp_dir, fname))
item.validate()
self.assertEqual(item.id, item_id)
band_list = [
value.name for value in SENTINEL_OLCI_BANDS.values()
]
bands_seen = set()
for _, asset in item.assets.items():
self.assertTrue("/./" not in asset.href)
self.assertTrue(is_absolute_href(asset.href))
asset_eo = EOExtension.ext(asset)
bands = asset_eo.bands
if bands is not None:
bands_seen |= set(b.name for b in bands)
[self.assertTrue(band in band_list) for band in bands_seen]
os.remove(f"{tmp_dir}/{item_id}.json")
def test_create_slstr_1_rbt_item(self):
item_id = str("S3A_SL_1_RBT____"
"20210930T220914_20210930T221214_20211002T102150_"
"0180_077_043_5400_LN2_O_NT_004")
granule_href = test_data.get_path(
"data-files/"
"S3A_SL_1_RBT____"
"20210930T220914_20210930T221214_20211002T102150_"
"0180_077_043_5400_LN2_O_NT_004.SEN3")
with self.subTest(granule_href):
with TemporaryDirectory() as tmp_dir:
cmd = ["sentinel3", "create-item", granule_href, tmp_dir]
self.run_command(cmd)
jsons = [p for p in os.listdir(tmp_dir) if p.endswith(".json")]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.Item.from_file(os.path.join(tmp_dir, fname))
item.validate()
self.assertEqual(item.id, item_id)
band_list = [
value.name for value in SENTINEL_SLSTR_BANDS.values()
]
bands_seen = set()
for _, asset in item.assets.items():
self.assertTrue("/./" not in asset.href)
self.assertTrue(is_absolute_href(asset.href))
asset_eo = EOExtension.ext(asset)
bands = asset_eo.bands
if bands is not None:
bands_seen |= set(b.name for b in bands)
[self.assertTrue(band in band_list) for band in bands_seen]
os.remove(f"{tmp_dir}/{item_id}.json")
def test_create_slstr_2_frp_item(self):
item_id = str("S3A_SL_2_FRP____"
"20210802T000420_20210802T000720_20210803T123912_"
"0179_074_344_2880_LN2_O_NT_004")
granule_href = test_data.get_path(
"data-files/"
"S3A_SL_2_FRP____"
"20210802T000420_20210802T000720_20210803T123912_"
"0179_074_344_2880_LN2_O_NT_004.SEN3")
with self.subTest(granule_href):
with TemporaryDirectory() as tmp_dir:
cmd = ["sentinel3", "create-item", granule_href, tmp_dir]
self.run_command(cmd)
jsons = [p for p in os.listdir(tmp_dir) if p.endswith(".json")]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.Item.from_file(os.path.join(tmp_dir, fname))
item.validate()
self.assertEqual(item.id, item_id)
band_list = [
value.name for value in SENTINEL_SLSTR_BANDS.values()
]
bands_seen = set()
for _, asset in item.assets.items():
self.assertTrue("/./" not in asset.href)
self.assertTrue(is_absolute_href(asset.href))
asset_eo = EOExtension.ext(asset)
bands = asset_eo.bands
if bands is not None:
bands_seen |= set(b.name for b in bands)
[self.assertTrue(band in band_list) for band in bands_seen]
os.remove(f"{tmp_dir}/{item_id}.json")
def test_create_slstr_2_lst_item(self):
item_id = str("S3A_SL_2_LST____"
"20210510T002955_20210510T003255_20210511T101010_"
"0179_071_301_5760_LN2_O_NT_004")
granule_href = test_data.get_path(
"data-files/"
"S3A_SL_2_LST____"
"20210510T002955_20210510T003255_20210511T101010_"
"0179_071_301_5760_LN2_O_NT_004.SEN3")
with self.subTest(granule_href):
with TemporaryDirectory() as tmp_dir:
cmd = ["sentinel3", "create-item", granule_href, tmp_dir]
self.run_command(cmd)
jsons = [p for p in os.listdir(tmp_dir) if p.endswith(".json")]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.Item.from_file(os.path.join(tmp_dir, fname))
item.validate()
self.assertEqual(item.id, item_id)
band_list = [
value.name for value in SENTINEL_SLSTR_BANDS.values()
]
bands_seen = set()
for _, asset in item.assets.items():
self.assertTrue("/./" not in asset.href)
self.assertTrue(is_absolute_href(asset.href))
asset_eo = EOExtension.ext(asset)
bands = asset_eo.bands
if bands is not None:
bands_seen |= set(b.name for b in bands)
[self.assertTrue(band in band_list) for band in bands_seen]
os.remove(f"{tmp_dir}/{item_id}.json")
def test_create_slstr_2_wst_item(self):
item_id = str("S3B_SL_2_WST____"
"20210419T051754_20210419T065853_20210420T160434_"
"6059_051_247______MAR_O_NT_003")
granule_href = test_data.get_path(
"data-files/"
"S3B_SL_2_WST____"
"20210419T051754_20210419T065853_20210420T160434_"
"6059_051_247______MAR_O_NT_003.SEN3")
with self.subTest(granule_href):
with TemporaryDirectory() as tmp_dir:
cmd = ["sentinel3", "create-item", granule_href, tmp_dir]
self.run_command(cmd)
jsons = [p for p in os.listdir(tmp_dir) if p.endswith(".json")]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.Item.from_file(os.path.join(tmp_dir, fname))
item.validate()
self.assertEqual(item.id, item_id)
band_list = [
value.name for value in SENTINEL_SLSTR_BANDS.values()
]
bands_seen = set()
for _, asset in item.assets.items():
self.assertTrue("/./" not in asset.href)
self.assertTrue(is_absolute_href(asset.href))
asset_eo = EOExtension.ext(asset)
bands = asset_eo.bands
if bands is not None:
bands_seen |= set(b.name for b in bands)
[self.assertTrue(band in band_list) for band in bands_seen]
os.remove(f"{tmp_dir}/{item_id}.json")
def test_create_sral_2_lan_item(self):
item_id = str("S3A_SR_2_LAN____"
"20210611T011438_20210611T012436_20210611T024819_"
"0598_072_373______LN3_O_NR_004")
granule_href = test_data.get_path(
"data-files/"
"S3A_SR_2_LAN____"
"20210611T011438_20210611T012436_20210611T024819_"
"0598_072_373______LN3_O_NR_004.SEN3")
with self.subTest(granule_href):
with TemporaryDirectory() as tmp_dir:
cmd = ["sentinel3", "create-item", granule_href, tmp_dir]
self.run_command(cmd)
jsons = [p for p in os.listdir(tmp_dir) if p.endswith(".json")]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.Item.from_file(os.path.join(tmp_dir, fname))
item.validate()
self.assertEqual(item.id, item_id)
band_list = [
value.name for value in SENTINEL_SRAL_BANDS.values()
]
bands_seen = set()
for _, asset in item.assets.items():
self.assertTrue("/./" not in asset.href)
self.assertTrue(is_absolute_href(asset.href))
asset_eo = EOExtension.ext(asset)
bands = asset_eo.bands
if bands is not None:
bands_seen |= set(b.name for b in bands)
[self.assertTrue(band in band_list) for band in bands_seen]
os.remove(f"{tmp_dir}/{item_id}.json")
def test_create_sral_2_wat_item(self):
item_id = str("S3A_SR_2_WAT____"
"20210704T012815_20210704T021455_20210729T173140_"
"2800_073_316______MAR_O_NT_004")
granule_href = test_data.get_path(
"data-files/"
"S3A_SR_2_WAT____"
"20210704T012815_20210704T021455_20210729T173140_"
"2800_073_316______MAR_O_NT_004.SEN3")
with self.subTest(granule_href):
with TemporaryDirectory() as tmp_dir:
cmd = ["sentinel3", "create-item", granule_href, tmp_dir]
self.run_command(cmd)
jsons = [p for p in os.listdir(tmp_dir) if p.endswith(".json")]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.Item.from_file(os.path.join(tmp_dir, fname))
item.validate()
self.assertEqual(item.id, item_id)
band_list = [
value.name for value in SENTINEL_SRAL_BANDS.values()
]
bands_seen = set()
for _, asset in item.assets.items():
self.assertTrue("/./" not in asset.href)
self.assertTrue(is_absolute_href(asset.href))
asset_eo = EOExtension.ext(asset)
bands = asset_eo.bands
if bands is not None:
bands_seen |= set(b.name for b in bands)
[self.assertTrue(band in band_list) for band in bands_seen]
os.remove(f"{tmp_dir}/{item_id}.json")
def test_create_synergy_2_aod_item(self):
item_id = str("S3B_SY_2_AOD____"
"20210512T143315_20210512T151738_20210514T064157_"
"2663_052_196______LN2_O_NT_002")
granule_href = test_data.get_path(
"data-files/"
"S3B_SY_2_AOD____"
"20210512T143315_20210512T151738_20210514T064157_"
"2663_052_196______LN2_O_NT_002.SEN3")
with self.subTest(granule_href):
with TemporaryDirectory() as tmp_dir:
cmd = ["sentinel3", "create-item", granule_href, tmp_dir]
self.run_command(cmd)
jsons = [p for p in os.listdir(tmp_dir) if p.endswith(".json")]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.Item.from_file(os.path.join(tmp_dir, fname))
item.validate()
self.assertEqual(item.id, item_id)
band_list = [
value.name for value in SENTINEL_SYNERGY_BANDS.values()
][26:32]
bands_seen = set()
for _, asset in item.assets.items():
self.assertTrue("/./" not in asset.href)
self.assertTrue(is_absolute_href(asset.href))
asset_eo = EOExtension.ext(asset)
bands = asset_eo.bands
if bands is not None:
bands_seen |= set(b.name for b in bands)
[self.assertTrue(band in band_list) for band in bands_seen]
os.remove(f"{tmp_dir}/{item_id}.json")
def test_create_synergy_2_syn_item(self):
item_id = str("S3A_SY_2_SYN____"
"20210325T005418_20210325T005718_20210325T142858_"
"0180_070_031_1620_LN2_O_ST_002")
granule_href = test_data.get_path(
"data-files/"
"S3A_SY_2_SYN____"
"20210325T005418_20210325T005718_20210325T142858_"
"0180_070_031_1620_LN2_O_ST_002.SEN3")
with self.subTest(granule_href):
with TemporaryDirectory() as tmp_dir:
cmd = ["sentinel3", "create-item", granule_href, tmp_dir]
self.run_command(cmd)
jsons = [p for p in os.listdir(tmp_dir) if p.endswith(".json")]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.Item.from_file(os.path.join(tmp_dir, fname))
item.validate()
self.assertEqual(item.id, item_id)
combined_bands = {
**SENTINEL_OLCI_BANDS,
**SENTINEL_SLSTR_BANDS,
**SENTINEL_SYNERGY_BANDS
}
band_list = [value.name for value in combined_bands.values()]
bands_seen = set()
for _, asset in item.assets.items():
self.assertTrue("/./" not in asset.href)
self.assertTrue(is_absolute_href(asset.href))
asset_eo = EOExtension.ext(asset)
bands = asset_eo.bands
if bands is not None:
bands_seen |= set(b.name for b in bands)
[self.assertTrue(band in band_list) for band in bands_seen]
os.remove(f"{tmp_dir}/{item_id}.json")
def test_create_synergy_2_v10_item(self):
item_id = str("S3A_SY_2_V10____"
"20210911T000000_20210920T235959_20210928T121452_"
"EUROPE____________LN2_O_NT_002")
granule_href = test_data.get_path(
"data-files/"
"S3A_SY_2_V10____"
"20210911T000000_20210920T235959_20210928T121452_"
"EUROPE____________LN2_O_NT_002.SEN3")
with self.subTest(granule_href):
with TemporaryDirectory() as tmp_dir:
cmd = ["sentinel3", "create-item", granule_href, tmp_dir]
self.run_command(cmd)
jsons = [p for p in os.listdir(tmp_dir) if p.endswith(".json")]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.Item.from_file(os.path.join(tmp_dir, fname))
item.validate()
self.assertEqual(item.id, item_id)
band_list = [
value.name for value in SENTINEL_SYNERGY_BANDS.values()
][-4:]
bands_seen = set()
for _, asset in item.assets.items():
self.assertTrue("/./" not in asset.href)
self.assertTrue(is_absolute_href(asset.href))
asset_eo = EOExtension.ext(asset)
bands = asset_eo.bands
if bands is not None:
bands_seen |= set(b.name for b in bands)
[self.assertTrue(band in band_list) for band in bands_seen]
os.remove(f"{tmp_dir}/{item_id}.json")
def test_create_synergy_2_vg1_item(self):
item_id = str("S3A_SY_2_VG1____"
"20211013T000000_20211013T235959_20211014T203456_"
"EUROPE____________LN2_O_ST_002")
granule_href = test_data.get_path(
"data-files/"
"S3A_SY_2_VG1____"
"20211013T000000_20211013T235959_20211014T203456_"
"EUROPE____________LN2_O_ST_002.SEN3")
with self.subTest(granule_href):
with TemporaryDirectory() as tmp_dir:
cmd = ["sentinel3", "create-item", granule_href, tmp_dir]
self.run_command(cmd)
jsons = [p for p in os.listdir(tmp_dir) if p.endswith(".json")]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.Item.from_file(os.path.join(tmp_dir, fname))
item.validate()
self.assertEqual(item.id, item_id)
band_list = [
value.name for value in SENTINEL_SYNERGY_BANDS.values()
][-4:]
bands_seen = set()
for _, asset in item.assets.items():
self.assertTrue("/./" not in asset.href)
self.assertTrue(is_absolute_href(asset.href))
asset_eo = EOExtension.ext(asset)
bands = asset_eo.bands
if bands is not None:
bands_seen |= set(b.name for b in bands)
[self.assertTrue(band in band_list) for band in bands_seen]
os.remove(f"{tmp_dir}/{item_id}.json")
def test_create_synergy_2_vgp_item(self):
item_id = str("S3A_SY_2_VGP____"
"20210703T142237_20210703T150700_20210703T211742_"
"2663_073_310______LN2_O_ST_002")
granule_href = test_data.get_path(
"data-files/"
"S3A_SY_2_VGP____"
"20210703T142237_20210703T150700_20210703T211742_"
"2663_073_310______LN2_O_ST_002.SEN3")
with self.subTest(granule_href):
with TemporaryDirectory() as tmp_dir:
cmd = ["sentinel3", "create-item", granule_href, tmp_dir]
self.run_command(cmd)
jsons = [p for p in os.listdir(tmp_dir) if p.endswith(".json")]
self.assertEqual(len(jsons), 1)
fname = jsons[0]
item = pystac.Item.from_file(os.path.join(tmp_dir, fname))
item.validate()
self.assertEqual(item.id, item_id)
band_list = [
value.name for value in SENTINEL_SYNERGY_BANDS.values()
][-4:]
bands_seen = set()
for _, asset in item.assets.items():
self.assertTrue("/./" not in asset.href)
self.assertTrue(is_absolute_href(asset.href))
asset_eo = EOExtension.ext(asset)
bands = asset_eo.bands
if bands is not None:
bands_seen |= set(b.name for b in bands)
[self.assertTrue(band in band_list) for band in bands_seen]
os.remove(f"{tmp_dir}/{item_id}.json")
```
#### File: sentinel3/tests/test_metadata.py
```python
import unittest
import pystac
from pystac.extensions.eo import EOExtension
from pystac.extensions.sat import SatExtension
from stactools.sentinel3.metadata_links import MetadataLinks
from stactools.sentinel3.product_metadata import ProductMetadata
from stactools.sentinel3.properties import (fill_eo_properties,
fill_sat_properties)
from tests import test_data
class Sentinel3OLCIMetadataTest(unittest.TestCase):
def test_parses_olci_1_efr_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_OL_1_EFR____20211021T073827_20211021T074112_20211021T091357_"
"0164_077_334_4320_LN1_O_NR_002.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"instruments":
item.properties["instruments"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"brightPixels_percentage":
item.properties["s3:brightPixels_percentage"],
"invalidPixels_percentage":
item.properties["s3:invalidPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"dubiousSamples_percentage":
item.properties["s3:dubiousSamples_percentage"],
"shape":
item.properties["s3:shape"]
}
expected = {
"bbox": [-44.0441, -83.51, 13.0151, -68.2251],
"datetime": "2021-10-21T07:39:49.724590Z",
"orbit_state": "descending",
"absolute_orbit": 29567,
"relative_orbit": 334,
"instruments": ["OLCI"],
"mode": "EO",
"productType": "OL_1_EFR___",
"salineWaterPixels_percentage": 44.0,
"coastalPixels_percentage": 0.0,
"freshInlandWaterPixels_percentage": 0.0,
"tidalRegionPixels_percentage": 0.0,
"brightPixels_percentage": 99.0,
"invalidPixels_percentage": 1.0,
"cosmeticPixels_percentage": 0.0,
"duplicatedPixels_percentage": 25.0,
"saturatedPixels_percentage": 0.0,
"dubiousSamples_percentage": 0.0,
"shape": [4865, 3749]
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_olci_1_err_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3B_OL_1_ERR____20210831T200148_20210831T204600_20210902T011514_"
"2652_056_242______LN1_O_NT_002.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"instruments":
item.properties["instruments"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"brightPixels_percentage":
item.properties["s3:brightPixels_percentage"],
"invalidPixels_percentage":
item.properties["s3:invalidPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"dubiousSamples_percentage":
item.properties["s3:dubiousSamples_percentage"],
"shape":
item.properties["s3:shape"]
}
expected = {
"bbox": [-179.151, -64.2325, 179.92, 89.5069],
"datetime": "2021-08-31T20:23:54.000366Z",
"orbit_state": "ascending",
"absolute_orbit": 17454,
"relative_orbit": 242,
"instruments": ["OLCI"],
"mode": "EO",
"productType": "OL_1_ERR___",
"salineWaterPixels_percentage": 90.0,
"coastalPixels_percentage": 0.0,
"freshInlandWaterPixels_percentage": 0.0,
"tidalRegionPixels_percentage": 0.0,
"brightPixels_percentage": 47.0,
"invalidPixels_percentage": 3.0,
"cosmeticPixels_percentage": 0.0,
"duplicatedPixels_percentage": 0.0,
"saturatedPixels_percentage": 8e-06,
"dubiousSamples_percentage": 0.0,
"shape": [1217, 15070]
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_olci_2_lfr_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_OL_2_LFR____20210523T003029_20210523T003329_20210524T050403_"
"0179_072_102_1980_LN1_O_NT_002.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"instruments":
item.properties["instruments"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"],
"invalidPixels_percentage":
item.properties["s3:invalidPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"dubiousSamples_percentage":
item.properties["s3:dubiousSamples_percentage"],
"shape":
item.properties["s3:shape"]
}
expected = {
"bbox": [138.497, 49.8938, 164.009, 62.918],
"datetime": "2021-05-23T00:31:59.485583Z",
"orbit_state": "descending",
"absolute_orbit": 27410,
"relative_orbit": 102,
"cloud_cover": 83.0,
"instruments": ["OLCI"],
"mode": "EO",
"productType": "OL_2_LFR___",
"salineWaterPixels_percentage": 4.0,
"coastalPixels_percentage": 0.0082,
"freshInlandWaterPixels_percentage": 0.0,
"tidalRegionPixels_percentage": 1.0,
"landPixels_percentage": 4.0,
"invalidPixels_percentage": 4.0,
"cosmeticPixels_percentage": 0.0,
"duplicatedPixels_percentage": 1.545942,
"saturatedPixels_percentage": 0.0,
"dubiousSamples_percentage": 0.0,
"shape": [4865, 4090]
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_olci_2_lrr_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3B_OL_2_LRR____20210731T214325_20210731T222741_20210802T020007_"
"2656_055_186______LN1_O_NT_002.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"instruments":
item.properties["instruments"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"],
"invalidPixels_percentage":
item.properties["s3:invalidPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"dubiousSamples_percentage":
item.properties["s3:dubiousSamples_percentage"],
"shape":
item.properties["s3:shape"]
}
expected = {
"bbox": [-179.968, -53.7609, 179.943, 89.6231],
"datetime": "2021-07-31T22:05:32.974566Z",
"orbit_state": "ascending",
"absolute_orbit": 17013,
"relative_orbit": 186,
"cloud_cover": 51.0,
"instruments": ["OLCI"],
"mode": "EO",
"productType": "OL_2_LRR___",
"salineWaterPixels_percentage": 35.0,
"coastalPixels_percentage": 0.332161,
"freshInlandWaterPixels_percentage": 0.0,
"tidalRegionPixels_percentage": 0.0,
"landPixels_percentage": 1.0,
"invalidPixels_percentage": 4.0,
"cosmeticPixels_percentage": 0.0,
"duplicatedPixels_percentage": 0.0,
"saturatedPixels_percentage": 0.0,
"dubiousSamples_percentage": 0.0,
"shape": [1217, 15092]
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_olci_2_wfr_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_OL_2_WFR____20210604T001016_20210604T001316_20210604T021918_"
"0179_072_273_1440_MAR_O_NR_003.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"instruments":
item.properties["instruments"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"],
"invalidPixels_percentage":
item.properties["s3:invalidPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"dubiousSamples_percentage":
item.properties["s3:dubiousSamples_percentage"],
"shape":
item.properties["s3:shape"]
}
expected = {
"bbox": [-176.303, 76.7724, 179.972, 88.9826],
"datetime": "2021-06-04T00:11:45.867265Z",
"orbit_state": "ascending",
"absolute_orbit": 27581,
"relative_orbit": 273,
"cloud_cover": 67.0,
"instruments": ["OLCI"],
"mode": "EO",
"productType": "OL_2_WFR___",
"salineWaterPixels_percentage": 0.0,
"coastalPixels_percentage": 0.013921,
"freshInlandWaterPixels_percentage": 0.0,
"tidalRegionPixels_percentage": 0.0,
"landPixels_percentage": 0.0,
"invalidPixels_percentage": 3.0,
"cosmeticPixels_percentage": 0.0,
"duplicatedPixels_percentage": 11.701367,
"saturatedPixels_percentage": 0.0,
"dubiousSamples_percentage": 0.0,
"shape": [4865, 4091]
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_slstr_1_rbt_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_SL_1_RBT____20210930T220914_20210930T221214_20211002T102150_"
"0180_077_043_5400_LN2_O_NT_004.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"instruments":
item.properties["instruments"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"outOfRangePixels_percentage":
item.properties["s3:outOfRangePixels_percentage"],
"shape":
item.properties["s3:shape"]
}
expected = {
"bbox": [-3.34105, -39.7421, 15.4906, -25.8488],
"datetime": "2021-09-30T22:10:43.843538Z",
"orbit_state": "ascending",
"absolute_orbit": 29276,
"relative_orbit": 43,
"cloud_cover": 80.216007,
"instruments": ["SLSTR"],
"mode": "EO",
"productType": "SL_1_RBT___",
"salineWaterPixels_percentage": 100.0,
"landPixels_percentage": 0.0,
"coastalPixels_percentage": 0.0,
"freshInlandWaterPixels_percentage": 0.0,
"tidalRegionPixels_percentage": 0.0,
"cosmeticPixels_percentage": 28.085521,
"duplicatedPixels_percentage": 5.105382,
"saturatedPixels_percentage": 0.0,
"outOfRangePixels_percentage": 0.0,
"shape": [1500, 1200]
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_slstr_2_frp_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_SL_2_FRP____20210802T000420_20210802T000720_20210803T123912_"
"0179_074_344_2880_LN2_O_NT_004.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"instruments":
item.properties["instruments"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"outOfRangePixels_percentage":
item.properties["s3:outOfRangePixels_percentage"],
"shape":
item.properties["s3:shape"]
}
expected = {
"bbox": [139.182, -3.03934, 154.722, 10.4264],
"datetime": "2021-08-02T00:05:49.503088Z",
"orbit_state": "descending",
"absolute_orbit": 28422,
"relative_orbit": 344,
"cloud_cover": 63.904667,
"instruments": ["SLSTR"],
"mode": "EO",
"productType": "SL_2_FRP___",
"salineWaterPixels_percentage": 99.891,
"landPixels_percentage": 0.109,
"coastalPixels_percentage": 0.017944,
"freshInlandWaterPixels_percentage": 0.000167,
"tidalRegionPixels_percentage": 0.0,
"cosmeticPixels_percentage": 21.585889,
"duplicatedPixels_percentage": 5.461111,
"saturatedPixels_percentage": 0.0,
"outOfRangePixels_percentage": 0.184722,
"shape": [1500, 1200]
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_slstr_2_lst_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_SL_2_LST____20210510T002955_20210510T003255_20210511T101010_"
"0179_071_301_5760_LN2_O_NT_004.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"instruments":
item.properties["instruments"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"outOfRangePixels_percentage":
item.properties["s3:outOfRangePixels_percentage"],
"shape":
item.properties["s3:shape"]
}
expected = {
"bbox": [-41.5076, -18.6129, -25.5773, -5.01269],
"datetime": "2021-05-10T00:31:24.660731Z",
"orbit_state": "ascending",
"absolute_orbit": 27224,
"relative_orbit": 301,
"cloud_cover": 57.378222,
"instruments": ["SLSTR"],
"mode": "EO",
"productType": "SL_2_LST___",
"salineWaterPixels_percentage": 78.747222,
"landPixels_percentage": 21.252778,
"coastalPixels_percentage": 0.050167,
"freshInlandWaterPixels_percentage": 0.169778,
"tidalRegionPixels_percentage": 0.899167,
"cosmeticPixels_percentage": 21.881167,
"duplicatedPixels_percentage": 5.449222,
"saturatedPixels_percentage": 0.0,
"outOfRangePixels_percentage": 0.0,
"shape": [1500, 1200]
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_slstr_2_wst_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3B_SL_2_WST____20210419T051754_20210419T065853_20210420T160434_"
"6059_051_247______MAR_O_NT_003.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"instruments":
item.properties["instruments"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"cosmeticPixels_percentage":
item.properties["s3:cosmeticPixels_percentage"],
"duplicatedPixels_percentage":
item.properties["s3:duplicatedPixels_percentage"],
"saturatedPixels_percentage":
item.properties["s3:saturatedPixels_percentage"],
"outOfRangePixels_percentage":
item.properties["s3:outOfRangePixels_percentage"],
"shape":
item.properties["s3:shape"]
}
expected = {
"bbox": [-175.687, -85.8995, 175.031, 89.0613],
"datetime": "2021-04-19T06:08:23.709828Z",
"orbit_state": "descending",
"absolute_orbit": 15534,
"relative_orbit": 247,
"cloud_cover": 67.421502,
"instruments": ["SLSTR"],
"mode": "EO",
"productType": "SL_2_WST___",
"salineWaterPixels_percentage": 69.464947,
"landPixels_percentage": 30.535053,
"coastalPixels_percentage": 0.0,
"freshInlandWaterPixels_percentage": 0.0,
"tidalRegionPixels_percentage": 0.0,
"cosmeticPixels_percentage": 42.198716,
"duplicatedPixels_percentage": 0.0,
"saturatedPixels_percentage": 0.0,
"outOfRangePixels_percentage": 26.93685,
"shape": [1500, 40394]
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_sral_2_lan_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_SR_2_LAN____20210611T011438_20210611T012436_20210611T024819_"
"0598_072_373______LN3_O_NR_004.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"instruments":
item.properties["instruments"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"lrmModePercentage":
item.properties["s3:lrmModePercentage"],
"sarModePercentage":
item.properties["s3:sarModePercentage"],
"landPercentage":
item.properties["s3:landPercentage"],
"closedSeaPercentage":
item.properties["s3:closedSeaPercentage"],
"continentalIcePercentage":
item.properties["s3:continentalIcePercentage"],
"openOceanPercentage":
item.properties["s3:openOceanPercentage"],
}
expected = {
"bbox": [-19.9677, -81.3739, 110.573, -67.0245],
"datetime": "2021-06-11T01:19:37.201974Z",
"orbit_state": "descending",
"absolute_orbit": 27681,
"relative_orbit": 373,
"instruments": ["SRAL"],
"mode": "EO",
"productType": "SR_2_LAN___",
"lrmModePercentage": 0.0,
"sarModePercentage": 100.0,
"landPercentage": 0.0,
"closedSeaPercentage": 0.0,
"continentalIcePercentage": 97.0,
"openOceanPercentage": 3.0,
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_sral_2_wat_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_SR_2_WAT____20210704T012815_20210704T021455_20210729T173140_"
"2800_073_316______MAR_O_NT_004.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"instruments":
item.properties["instruments"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"lrmModePercentage":
item.properties["s3:lrmModePercentage"],
"sarModePercentage":
item.properties["s3:sarModePercentage"],
"landPercentage":
item.properties["s3:landPercentage"],
"closedSeaPercentage":
item.properties["s3:closedSeaPercentage"],
"continentalIcePercentage":
item.properties["s3:continentalIcePercentage"],
"openOceanPercentage":
item.properties["s3:openOceanPercentage"],
}
expected = {
"bbox": [-153.507, -74.0588, -20.0953, 81.4226],
"datetime": "2021-07-04T01:51:35.180925Z",
"orbit_state": "descending",
"absolute_orbit": 28009,
"relative_orbit": 316,
"instruments": ["SRAL"],
"mode": "EO",
"productType": "SR_2_WAT___",
"lrmModePercentage": 0.0,
"sarModePercentage": 100.0,
"landPercentage": 8.0,
"closedSeaPercentage": 0.0,
"continentalIcePercentage": 0.0,
"openOceanPercentage": 92.0,
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_synergy_2_aod_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3B_SY_2_AOD____20210512T143315_20210512T151738_20210514T064157_"
"2663_052_196______LN2_O_NT_002.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"instruments":
item.properties["instruments"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"],
"shape":
item.properties["s3:shape"]
}
expected = {
"bbox": [-104.241, -54.5223, 112.209, 89.7337],
"datetime": "2021-05-12T14:55:26.593379Z",
"orbit_state": "ascending",
"absolute_orbit": 15868,
"relative_orbit": 196,
"cloud_cover": 82.147057,
"instruments": ["SYNERGY"],
"mode": "EO",
"productType": "SY_2_AOD___",
"salineWaterPixels_percentage": 72.660328,
"landPixels_percentage": 27.276878,
"shape": [324, 4035]
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_synergy_2_syn_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_SY_2_SYN____20210325T005418_20210325T005718_20210325T142858_"
"0180_070_031_1620_LN2_O_ST_002.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"instruments":
item.properties["instruments"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"coastalPixels_percentage":
item.properties["s3:coastalPixels_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"]
}
expected = {
"bbox": [-179.619, 69.3884, 179.853, 83.7777],
"datetime": "2021-03-25T00:55:48.019583Z",
"orbit_state": "descending",
"absolute_orbit": 26569,
"relative_orbit": 31,
"cloud_cover": 8.166911,
"instruments": ["SYNERGY"],
"mode": "EO",
"productType": "SY_2_SYN___",
"salineWaterPixels_percentage": 94.483109,
"coastalPixels_percentage": 0.093193,
"freshInlandWaterPixels_percentage": 0.076276,
"tidalRegionPixels_percentage": 0.0,
"landPixels_percentage": 2.368632
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_synergy_2_v10_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_SY_2_V10____20210911T000000_20210920T235959_20210928T121452_"
"EUROPE____________LN2_O_NT_002.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"instruments":
item.properties["instruments"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"snowOrIcePixels_percentage":
item.properties["s3:snowOrIcePixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"]
}
expected = {
"bbox": [-10.9911, 25.0, 62.0, 75.0],
"datetime": "2021-09-15T23:59:59.500000Z",
"orbit_state": "descending",
"absolute_orbit": 28848,
"relative_orbit": 145,
"cloud_cover": 3.041905,
"instruments": ["SYNERGY"],
"mode": "EO",
"productType": "SY_2_V10___",
"snowOrIcePixels_percentage": 0.154442,
"landPixels_percentage": 65.278832
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_synergy_2_vg1_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_SY_2_VG1____20211013T000000_20211013T235959_20211014T203456_"
"EUROPE____________LN2_O_ST_002.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"instruments":
item.properties["instruments"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"snowOrIcePixels_percentage":
item.properties["s3:snowOrIcePixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"]
}
expected = {
"bbox": [-10.9911, 25.0, 62.0, 75.0],
"datetime": "2021-10-13T11:59:59.500000Z",
"orbit_state": "descending",
"absolute_orbit": 29233,
"relative_orbit": 216,
"cloud_cover": 23.811417,
"instruments": ["SYNERGY"],
"mode": "EO",
"productType": "SY_2_VG1___",
"snowOrIcePixels_percentage": 0.102883,
"landPixels_percentage": 46.680979
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
def test_parses_synergy_2_vgp_metadata_properties(self):
# Get the path of the test xml
manifest_path = test_data.get_path(
"data-files/"
"S3A_SY_2_VGP____20210703T142237_20210703T150700_20210703T211742_"
"2663_073_310______LN2_O_ST_002.SEN3")
metalinks = MetadataLinks(manifest_path)
product_metadata = ProductMetadata(manifest_path, metalinks.manifest)
item = pystac.Item(
id=product_metadata.scene_id,
geometry=product_metadata.geometry,
bbox=product_metadata.bbox,
datetime=product_metadata.get_datetime,
properties={},
stac_extensions=[],
)
# ---- Add Extensions ----
# sat
sat = SatExtension.ext(item, add_if_missing=True)
fill_sat_properties(sat, metalinks.manifest)
# eo
eo = EOExtension.ext(item, add_if_missing=True)
fill_eo_properties(eo, metalinks.manifest)
# s3 properties
item.properties.update({**product_metadata.metadata_dict})
# Make a dictionary of the properties
s3_props = {
"bbox":
item.bbox,
"datetime":
item.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"orbit_state":
item.properties["sat:orbit_state"],
"absolute_orbit":
item.properties["sat:absolute_orbit"],
"relative_orbit":
item.properties["sat:relative_orbit"],
"cloud_cover":
item.properties["eo:cloud_cover"],
"instruments":
item.properties["instruments"],
"mode":
item.properties["s3:mode"],
"productType":
item.properties["s3:productType"],
"snowOrIcePixels_percentage":
item.properties["s3:snowOrIcePixels_percentage"],
"salineWaterPixels_percentage":
item.properties["s3:salineWaterPixels_percentage"],
"coastalPixelss_percentage":
item.properties["s3:coastalPixelss_percentage"],
"freshInlandWaterPixels_percentage":
item.properties["s3:freshInlandWaterPixels_percentage"],
"tidalRegionPixels_percentage":
item.properties["s3:tidalRegionPixels_percentage"],
"landPixels_percentage":
item.properties["s3:landPixels_percentage"]
}
expected = {
"bbox": [-98.2945, -49.2134, 115.456, 89.5354],
"datetime": "2021-07-03T14:44:48.463954Z",
"orbit_state": "ascending",
"absolute_orbit": 28003,
"relative_orbit": 310,
"cloud_cover": 1.692044,
"instruments": ["SYNERGY"],
"mode": "EO",
"productType": "SY_2_VGP___",
"snowOrIcePixels_percentage": 0.436467,
"salineWaterPixels_percentage": 67.744293,
"coastalPixelss_percentage": 0.169447,
"freshInlandWaterPixels_percentage": 0.878855,
"tidalRegionPixels_percentage": 0.470567,
"landPixels_percentage": 32.227482
}
for k, v in expected.items():
self.assertIn(k, s3_props)
self.assertEqual(s3_props[k], v)
``` |
{
"source": "jmakov/market_tia",
"score": 3
} |
#### File: jmakov/market_tia/_ordbBenchmark.py
```python
from tia.trad.market.orderbook import RecorderOrderbook, Order
import time
NTIME = 10**5
def timer(func):
def wrapper(*args):
stime = time.time()
func(*args)
etime = time.time()
print func.__name__, etime - stime
return wrapper
@timer
def sdPlaceOrderDiffPrice(orderbook):
for i in xrange(NTIME):
orderbook.asks[i] = Order(i, 10**8, 10**8)
#orderbook.bids[i] = Order(i, 10**8, 10**8)
@timer
def sdPlaceorderSamePrice(orderbook):
for i in xrange(NTIME):
orderbook.asks[1] = Order(1, 10**8, i)
@timer
def sdRemoveOrderSamePrice(orderbook):
for i in xrange(NTIME):
orderbook.asks[1] = Order(1, -10**8, i)
@timer
def sdRemoveOrderDiffPrice(orderbook):
for i in xrange(NTIME):
orderbook.asks[i] = Order(i, -10**8, i)
orderbook = RecorderOrderbook()
sdPlaceOrderDiffPrice(orderbook)
sdPlaceorderSamePrice(orderbook)
sdRemoveOrderSamePrice(orderbook)
sdRemoveOrderDiffPrice(orderbook)
```
#### File: trad/market/features.py
```python
from tia.trad.tools.sortedDict import SortedDict
import logging
import tia.trad.tools.ipc.processLogger as pl
LOGGER_NAME = pl.PROCESS_NAME + __file__.split("/")[-1]; logger = logging.getLogger(LOGGER_NAME)
class DataWindow(object):
def __repr__(self): return "%s(%s)" % (self.__class__.__name__, self.__dict__)
def __init__(self):
self.lastSentCheckpoint = 0
self.deltas = {}
self.prevOrderbook = {}
self.lastTid = 0
self.newTrades = SortedDict()
"""
def pMinAsk(_market):
try: return _market.orderbook.asks._orderbook.smallest_key()
except Exception: raise
def pMaxBid(_market):
try: return _market.orderbook.bids._orderbook.largest_key()
except Exception: raise
def pSpread(_Market):
try:
res_ = pMinAsk(_Market) - pMaxBid(_Market)
assert res_ >= 0, [res_, pMaxBid(_Market), pMinAsk(_Market)]
return res_
except Exception: raise
"""
```
#### File: trad/market/orderManager.py
```python
import tia.trad.tools.ipc.naming_conventions as names
import tia.trad.tools.arithm.floatArithm as fl
import tia.trad.market.orders as orders; reload(orders)
from tia.trad.tools.dicDiff import DictDiff
import tia.trad.market.events as event; reload(event)
import logging
import tia.trad.tools.ipc.processLogger as pl
LOGGER_NAME = pl.PROCESS_NAME + __file__.split("/")[-1]; logger = logging.getLogger(LOGGER_NAME)
def cancelOrder(_Order, _ChangedMarket):
try:
logger.debug("cancelOrder: hi")
_ChangedMarket.sendEvent(event.onCancelOrder(names.orderCancel, _ChangedMarket.name, _Order.oid, _Order.type, _Order))
except Exception: raise
def placeOrder(_Order, _foundTargets, _ChangedMarket):
try:
logger.debug("placeOrder: hi")
price = _Order.price.quantize(_ChangedMarket.pip)
amount = _Order.amount.quantize(_ChangedMarket.pipAmount)
dummyOid = 123
_ChangedMarket.sendEvent(event.onPlaceOrder(names.orderPlace, _ChangedMarket.name, price, amount, _Order.type, _Order.datePlaced, _foundTargets, dummyOid))
except Exception: raise
def replaceOrder(_Order, _eventDate, _priorityExecutionPrice, _newAmount, _foundTargets, _ChangedMarket):
try:
logger.debug("replaceOrder: hi")
if _Order.price != _priorityExecutionPrice or _Order.amount != _newAmount:
cancelOrder(_Order, _ChangedMarket)
newOrder = orders.Order(_ChangedMarket.name, _priorityExecutionPrice, _newAmount, _Order.type, _eventDate)
placeOrder(newOrder, _foundTargets, _ChangedMarket)
except Exception: raise
def replaceOnL2Change(_changedSide, _ChangedMarket, _UniverseD):
# cancel current AO and replace tighter to l2
try:
logger.debug("replaceOnL2Change: hi")
foundTargets = _findTargets(_changedSide, _ChangedMarket, _UniverseD)
# sim our action instantly so that we can calculate priority price correctly
        # e.g. delete our top order
if _changedSide == names.asks:
AO = _ChangedMarket.activeOrders.asks
l1ask = _ChangedMarket.orderbook.asks.smallest_key()
del _ChangedMarket.orderbook.asks[l1ask]
elif _changedSide == names.bids:
AO = _ChangedMarket.activeOrders.bids
l1bid = _ChangedMarket.orderbook.bids.largest_key()
del _ChangedMarket.orderbook.bids[l1bid]
else: raise Exception("unknown side: %s" % _changedSide)
[newAmount, newPriorityExecutionPrice] = getNewAmount(_changedSide, foundTargets, _ChangedMarket)
if newAmount:
replaceOrder(AO, AO.datePlaced, newPriorityExecutionPrice, newAmount, foundTargets, _ChangedMarket)
except Exception: raise
def getPriorityExecutionPrice(_changedSide, _topOrderPrice, _largestBid, _smallestAsk, _ChangedMarket):
    # don't overbid our own orders; if the top price is the same as ours, replaceOrder will not replace it unless the amount changes
try:
logger.debug("getPriorityExecutionPrice: hi")
pip = _ChangedMarket.pip
if _changedSide == names.bids:
priorityExecutionPrice = _topOrderPrice + pip
AO = _ChangedMarket.activeOrders.bids
if AO:
if AO.price == _largestBid:
priorityExecutionPrice = AO.price
elif _changedSide == names.asks:
priorityExecutionPrice = _topOrderPrice - pip
AO = _ChangedMarket.activeOrders.asks
if AO:
if AO.price == _smallestAsk:
priorityExecutionPrice = AO.price
else: raise Exception("Unknown side: %s" % _changedSide)
return priorityExecutionPrice
except Exception: raise
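# Illustrative behaviour (assuming pip == 0.01): on a bid-side change with a top bid of 100.00
# the function returns 100.01, i.e. one pip better; if our own active bid already sits at
# _largestBid, its price is returned unchanged so the order is only replaced when the amount changes.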
def _findTargets(_changedSide, _ChangedMarket, _UniverseD):
try:
logger.debug("_findTargets: hi")
# recognize opportunity
foundTargets = {}
for strategy in _ChangedMarket.activeStrategies.emitsLimitOrder.values():
retDct = strategy.findExits(_changedSide, _ChangedMarket, _UniverseD)
foundTargets.update(retDct)
return foundTargets
except Exception: raise
def getNewAmount(_changedSide, _foundTargets, _ChangedMarket):
try:
logger.debug("getNewAmount: hi")
# get amount from TM
targetManagerQuote = _ChangedMarket.targetManager.targetsAmountSum(_changedSide, _ChangedMarket)
topOrderPrice = targetManagerQuote[0]
# get a price for priority execution on curr market
onePip = _ChangedMarket.pip; smallestAsk = _ChangedMarket.orderbook.asks.smallest_key(); largestBid = _ChangedMarket.orderbook.bids.largest_key()
        # if our order is at the top, the priority price equals the top price, so it will only get replaced if the amount changes
if smallestAsk - largestBid > onePip:
priorityExecutionPrice = getPriorityExecutionPrice(_changedSide, topOrderPrice, largestBid, smallestAsk, _ChangedMarket)
else:
priorityExecutionPrice = largestBid if _changedSide == names.bids else smallestAsk
# get amount for curr market from TM
availItems = _ChangedMarket.account.getResources(_changedSide, priorityExecutionPrice, _ChangedMarket) # all amounts are in items!
maxSaturationAtSomePrice = 2
TMamountSum = targetManagerQuote[1]
# if in bids, change returned funds into items
if _changedSide == names.bids: TMamountSum = (TMamountSum / priorityExecutionPrice).quantize(_ChangedMarket.pip)
# keep always some funds to realize TM
if availItems <= _ChangedMarket.minAllowableBet: newAmount = fl.D("0")
# keep some funds to realize TM
# TM saturation: wait for TM to realize this amount first
#elif amountSum > maxSaturationAtSomePrice: newAmount = min([amountSum, availItems])
# place a bet in addition to TM realization amount
else:
minBet = fl.D("1") if availItems > 10 else fl.D("0.1")
foundExitsBet = min([TMamountSum + minBet, availItems])
newAmount = foundExitsBet if _foundTargets else min([TMamountSum, availItems])
logger.debug("newAm: %s, priorityPrice: %s" % (newAmount, priorityExecutionPrice))
if newAmount <= _ChangedMarket.minAllowableBet: newAmount = 0
return [newAmount, priorityExecutionPrice]
except Exception: raise
def getAmountForMarketOrder(_changedSide, _ChangedMarket):
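    # Roughly: look at the best order on the changed side (skipping our own active order),
    # compare its price, converted via exchangeRates and with a 10-pip tolerance, against the
    # nearest targetManager target; if it crosses, return the price/amount for an immediate
    # order on the opposite side together with the active orders that may need cancelling.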
try:
logger.debug("getAmountForMarketOrder: hi")
res_ = {"newAmount": None, "changedSideAO": None}
exchangeRate = _ChangedMarket.exchangeRates[_ChangedMarket.currency]
targetSide = names.bids if _changedSide == names.asks else names.asks
if _changedSide == names.bids:
AO = _ChangedMarket.activeOrders.bids
opposedAO = _ChangedMarket.activeOrders.asks
# try to realize items
try:
target = _ChangedMarket.targetManager.targetItems.smallest_item()
targetPrice = target[0]
targetAmount = target[1]
except KeyError: target = None
if target:
topOrderIter = _ChangedMarket.orderbook.bids.iteritems(reverse=True)
topOrder = topOrderIter.next(); topOrderPrice = topOrder[0]; topOrderAmount = topOrder[1]
# check if top order ours
try:
if AO.price == topOrderPrice:
topOrder = topOrderIter.next(); topOrderPrice = topOrder[0]; topOrderAmount = topOrder[1]
res_["changedSideAO"] = AO
# AO might be behind top order so check the top order
except AttributeError: pass #AO might not exist
finally:
# check if it crosses
if topOrderPrice * exchangeRate + 10 * _ChangedMarket.pip > targetPrice:
logger.debug("topOrderUSD:%s, target:%s" % (topOrderPrice * exchangeRate, targetPrice))
availItems = _ChangedMarket.account.getResources(targetSide, topOrderPrice, _ChangedMarket)
amount = min([targetAmount, topOrderAmount, availItems])
res_["newAmount"] = amount
res_["priorityExecutionPrice"] = topOrderPrice
res_["side"] = targetSide
res_["oppositeAO"] = opposedAO
elif _changedSide == names.asks:
AO = _ChangedMarket.activeOrders.asks
opposedAO = _ChangedMarket.activeOrders.bids
# try to realize funds
try:
target = _ChangedMarket.targetManager.targetFunds.largest_item()
targetPrice = target[0]
targetAmount = target[1]
except KeyError: target = None
if target:
topOrderIter = _ChangedMarket.orderbook.asks.iteritems()
topOrder = topOrderIter.next(); topOrderPrice = topOrder[0]; topOrderAmount = topOrder[1]
# check if top order ours
try:
if AO.price == topOrderPrice:
topOrder = topOrderIter.next(); topOrderPrice = topOrder[0]; topOrderAmount = topOrder[1]
res_["changedSideAO"] = AO
except AttributeError: pass
finally:
# check if it crosses
if topOrderPrice * exchangeRate - 10 * _ChangedMarket.pip < targetPrice:
logger.debug("topOrderUSD:%s, target:%s" % (topOrderPrice * exchangeRate, targetPrice))
availItems = _ChangedMarket.account.getResources(targetSide, topOrderPrice, _ChangedMarket)
amount = min([targetAmount, topOrderAmount, availItems])
res_["newAmount"] = amount
res_["priorityExecutionPrice"] = topOrderPrice
res_["side"] = targetSide
res_["oppositeAO"] = opposedAO
else: raise Exception("unknown side: %s" % _changedSide)
logger.debug("getAmountForMarketOrder: %s" % res_)
return res_
except Exception: raise
def _manageCurrentMarket(_eventDate, _changedSide, _foundTargets, _ChangedMarket):
try:
logger.debug("_manageCurrentMarket: hi")
[newAmount, priorityExecutionPrice] = getNewAmount(_changedSide, _foundTargets, _ChangedMarket)
# check if we have to alter order already in the market
activeOrder = _ChangedMarket.activeOrders.bids if _changedSide == names.bids else _ChangedMarket.activeOrders.asks
logger.debug("activeOrder: %s" % activeOrder)
if activeOrder:
if newAmount:
replaceOrder(activeOrder, _eventDate, priorityExecutionPrice, newAmount, _foundTargets, _ChangedMarket)
# newAmount can be 0
else: cancelOrder(activeOrder, _ChangedMarket)
else:
if newAmount:
newOrder = orders.Order(_ChangedMarket.name, priorityExecutionPrice, newAmount, _changedSide, _eventDate)
placeOrder(newOrder, _foundTargets, _ChangedMarket)
except Exception: raise
def _manageFoundTargets(_eventDate, _changedSide, _foundTargets, _ChangedMarket, _UniverseD):
"""
manage exposure: expose or change activeOrder
"""
try:
logger.debug("_manageFoundTargets: hi")
targetSide = names.asks if _changedSide == names.bids else names.bids
for targetMarketname in _foundTargets:
TargetMarket = _UniverseD[targetMarketname]
if TargetMarket != _ChangedMarket: # since for our market we already manage it
_manageCurrentMarket(_eventDate, targetSide, _foundTargets, TargetMarket)
except Exception: raise
def _manageLostTargets(_eventDate, _changedSide, _foundTargets, _ChangedMarket, _UniverseD):
try:
logger.debug("_manageLostTargets: hi")
#HANDLE MARKETS WITH NO SIGNAL
        # check markets with no signal: if an order still sees a target after the signal loss from our market, leave it
# else get amount from targetManager
lostSignal = DictDiff(_foundTargets, _UniverseD).removed()
for marketName in lostSignal:
LostMarket = _UniverseD[marketName]
            if LostMarket != _ChangedMarket: # since the changed market is already handled
if _changedSide == names.bids:
activeOrder = LostMarket.activeOrders.asks
targetSide = names.asks
elif _changedSide == names.asks:
activeOrder = LostMarket.activeOrders.bids
targetSide = names.bids
else: raise Exception("unknown side: %s" % _changedSide)
if activeOrder:
# remove our market from targets since no more signal
try: del activeOrder.targets[_ChangedMarket.name]
except KeyError: pass
if activeOrder.targets: pass # has other targets, leave it alone
else: # has no more targets -> no bets, just empty TM: get amount from targetManager
targetManagerQuote = LostMarket.targetManager.targetsAmountSum(targetSide, LostMarket)
amountSum = targetManagerQuote[1]
newAmount = min([amountSum, LostMarket.minAllowableBet, LostMarket.account.getResources(targetSide, activeOrder.price, LostMarket)])
# check if the order has to be replaced
if newAmount: replaceOrder(activeOrder, _eventDate, activeOrder.price, newAmount, {}, LostMarket)
else: cancelOrder(activeOrder, LostMarket)
except Exception: raise
def manageUniverse(_eventDate, _ChangedMarket, _UniverseD):
try:
logger.debug("\nmanageUniverse: hi")
"""
#check if TM can try to empty itself with a market order
marketQuote = _MarketOfChange.targetManager.emitMarketQuote(side, _MarketOfChange)
if marketQuote:
pass
#marketOrderProcedure -
else:
#TODO handle marketOrder Strategies
# recognize opportunity for a market order
#for strategy in _MarketOfChange.activeStrategies.emitsMarketOrder:
# marketQuote = strategy.returnMarketQuote(side, _MarketOfChange, _MarketsOtherD)
# get marketQuote
if marketQuote:
pass
#marketOrderProcedure -
# recognize opportunity for limit order in this Market
else:
"""
l1askChanged = _ChangedMarket.filters.minAskChanged; l1bidChanged = _ChangedMarket.filters.maxBidChanged
AOA = _ChangedMarket.activeOrders.asks; AOB = _ChangedMarket.activeOrders.bids
l2askChanged = AOA and _ChangedMarket.filters.secondaryAskChanged
l2bidChanged = AOB and _ChangedMarket.filters.secondaryBidChanged
l1b = _ChangedMarket.orderbook.bids.largest_item()
l1a = _ChangedMarket.orderbook.asks.smallest_item()
L1 = "L1changed"; L2 = "L2changed"; ActiveOrder = "ActiveOrder"; topOrder = "topOrder"
d = {names.bids: {L1: l1bidChanged, L2: l2bidChanged, ActiveOrder: AOB, topOrder: l1b},
names.asks: {L1: l1askChanged, L2: l2askChanged, ActiveOrder: AOA, topOrder: l1a}
}
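        # per-side snapshot: L1/L2 change flags, our own active order and the current top-of-book (price, amount) item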
# check for market orders
foundMO = 0
"""
for changedSide in d:
marketOrderD = getAmountForMarketOrder(changedSide, _ChangedMarket)
amount = marketOrderD["newAmount"]
if amount > _ChangedMarket.minAllowableBet:
foundMO = 1
changedSideAO = marketOrderD["changedSideAO"]
if changedSideAO: cancelOrder(changedSideAO, _ChangedMarket)
newOrder = orders.Order(_ChangedMarket.name, marketOrderD["priorityExecutionPrice"], marketOrderD["newAmount"], marketOrderD["side"], _eventDate)
AO = marketOrderD["oppositeAO"]
if AO: cancelOrder(AO, _ChangedMarket)
placeOrder(newOrder, {}, _ChangedMarket)
# limit orders and new bets
"""
if foundMO: pass
else:
for changedSide in d:
container = d[changedSide]
# if not tight above any more, request reconfiguration
if container[L2]:
AO = container[ActiveOrder]
L1OrderAmount = container[topOrder][1]
                    if L1OrderAmount <= AO.amount: # since if it is greater, somebody else has placed an order at that price
replaceOnL2Change(changedSide, _ChangedMarket, _UniverseD)
else:
foundTargets = _findTargets(changedSide, _ChangedMarket, _UniverseD)
_manageCurrentMarket(_eventDate, changedSide, foundTargets, _ChangedMarket)
_manageFoundTargets(_eventDate, changedSide, foundTargets, _ChangedMarket, _UniverseD)
_manageLostTargets(_eventDate, changedSide, foundTargets, _ChangedMarket, _UniverseD)
except Exception: raise
```
#### File: tia/trad/monitor_mainTr.py
```python
import sys
import time
from tia.trad.tools.io.follow import followMonitor
import tia.configuration as conf
from tia.trad.tools.errf import eReport
import ujson as json
import matplotlib.pyplot as plt
import math
import collections
import logging
from tia.trad.tools.ipc.processLogger import PROCESS_NAME
LOGGER_NAME = PROCESS_NAME + __file__.split("/")[-1]; logger = logging.getLogger(LOGGER_NAME)
reportFile = None
def pointDistance(initF, initI, point):
try:
t = initI[0]-initF[0], initI[1]-initF[1] # Vector ab
dd = math.sqrt(t[0]**2+t[1]**2) # Length of ab
t = t[0]/dd, t[1]/dd # unit vector of ab
n = -t[1], t[0] # normal unit vector to ab
ac = point[0]-initF[0], point[1]-initF[1] # vector ac
return math.fabs(ac[0]*n[0]+ac[1]*n[1]) # Projection of ac to n (the minimum distance)
except Exception: raise
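# Example: pointDistance([0, 0], [2, 0], [1, 1]) == 1.0, the distance of the point (1, 1)
# from the line through (0, 0) and (2, 0).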
def getAvg(_list):
try:
return float(max(_list) + min(_list)) / float(2)
except Exception: raise
def shutdown():
try:
logger.debug("shutting down")
global reportFile
reportFile.close()
except Exception: raise
def run(**kwargs):
try:
global logger
global reportFile
logger = kwargs["processLogger"]
logger.debug("monitor_mainTr:hi")
_initFunds = kwargs["initFunds"]
_initItems = kwargs["initItems"]
plt.ion() # turn interactive on
fig = plt.figure()
fig.show()
# raw
ax = fig.add_subplot(221)
#hline = ax.axhline(y=_initFunds)
#vline = ax.axvline(x=_initItems)
#ax.set_xscale("log")
#ax.set_yscale("log")
data, = ax.plot([], [], 'b+')
data11, = ax.plot([], [], 'ro')
# value
ax2 = fig.add_subplot(222)
data2, = ax2.plot([], [], 'ro-')
# inside TM
ax3 = fig.add_subplot(223)
data3, = ax3.plot([], [], 'ro')
data4, = ax3.plot([],[], 'bo')
minBids, = ax3.plot([], [], "r>")
maxAsks, = ax3.plot([], [], "b>")
# top b/a
ax5 = fig.add_subplot(224)
dataI, = ax5.plot([], [], "o-")
dataF, = ax5.plot([], [], "ro-")
windowLength = 50
fundsHistory = collections.deque(maxlen=windowLength); itemsHistory = collections.deque(maxlen=windowLength)
valueHistory = collections.deque(maxlen=windowLength)
tmFundsHistory = collections.deque(maxlen=windowLength); tmItemsHistory = collections.deque(maxlen=windowLength)
tmIAHSum = collections.deque(maxlen=windowLength); tmFAHSum = collections.deque(maxlen=windowLength)
topAsksHistory = collections.deque(maxlen=10)
topBidsHistory = collections.deque(maxlen=10)
# touch report.json
#reportFile = open(conf.FN_REPORT, "w"); reportFile.close()
reportFile = open(conf.FN_REPORT, "r")
newline = followMonitor(reportFile, fig)
while 1:
try:
#for line in reportFile:
line = newline.next()
jsonObj = json.loads(line)
universeSize = float(jsonObj["universeSize"])
topAsks = jsonObj["topAsks"]; topBids = jsonObj["topBids"]
initInvF = float(_initFunds) * universeSize
initInvI = float(_initItems) * universeSize
cumulFunds = float(jsonObj["cumulFunds"])
cumulItems = float(jsonObj["cumulItems"])
#fundsHistory.append(funds); itemsHistory.append(items)
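                # distance of the current (cumulFunds, cumulItems) point from the line through
                # (0, initInvF) and (initInvI, 0), i.e. the initial-allocation line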
dist = pointDistance([0, initInvF], [initInvI, 0], [cumulFunds, cumulItems])
fundsHistory.append(dist)
#data.set_ydata(fundsHistory); data.set_xdata(itemsHistory)
data.set_ydata(fundsHistory); data.set_xdata(xrange(len(fundsHistory)))
#data11.set_ydata(funds); data11.set_xdata(items)
#data11.set_ydata(dist); data11.set_xdata(xrange(len(fundsHistory)))
ax.relim()
ax.autoscale_view(True,True,True)
tmFunds = jsonObj["tmFunds"]; tmItems = jsonObj["tmItems"]
tmFA = 0; tmIA = 0
tmFPH = collections.deque(); tmFAH = collections.deque()
tmIPH = collections.deque(); tmIAH = collections.deque()
for price in tmFunds:
amount = tmFunds[price]
tmFPH.append(price)
tmFAH.append(amount)
tmFA += amount
tmFAHSum.append(tmFA)
for price in tmItems:
amount = tmItems[price]
tmIPH.append(price)
tmIAH.append(amount)
tmIA += amount
tmIAHSum.append(tmIA)
dataI.set_ydata(tmIAHSum); dataI.set_xdata(xrange(len(tmIAHSum)))
dataF.set_ydata(tmFAHSum); dataF.set_xdata(xrange(len(tmFAHSum)))
ax5.relim()
ax5.autoscale_view(True,True,True)
value = float(jsonObj["value"]) / initInvF if initInvF else float(jsonObj["value"])
valueHistory.append(value)
data2.set_xdata(range(len(valueHistory)))
data2.set_ydata(valueHistory)
ax2.relim()
ax2.autoscale_view(True,True,True)
"""
TM stuff
"""
# make universe states pretty
tmpList = list(tmFAH) + list(tmIAH)
xDrawStart = min(tmpList)
drawedInterval = max(tmpList) - xDrawStart
                spacing = float(drawedInterval) / float(len(topBids))
offset = float(spacing) / float(2)
xcords = collections.deque()
for index, bid in enumerate(topBids):
xcords.append(offset + xDrawStart + index * spacing)
minBids.set_ydata(topBids); minBids.set_xdata(xcords)
maxAsks.set_ydata(topAsks); maxAsks.set_xdata(xcords)
data3.set_xdata(tmFAH)
data3.set_ydata(tmFPH)
data4.set_xdata(tmIAH)
data4.set_ydata(tmIPH)
ax3.relim()
ax3.autoscale_view(True,True,True)
fig.canvas.draw()
#plt.savefig(conf.FN_PLOT_IMAGE)
except ValueError: continue
except Exception as ex:
eReport(__file__)
reportFile.close()
sys.exit()
```
#### File: trad/streams/stream_mtgoxAPI.py
```python
import time
import tia.trad.market.m as markets; reload(markets)
import tia.trad.tools.classOps as classes; reload(classes)
from tia.trad.tools.errf import eReport
import tia.trad.tools.ipc.naming_conventions as names
import tia.trad.tools.net.httpApi as httpApi; reload(httpApi)
import tia.trad.tools.ipc.zmqHelper as zmqHelper
import sys
import zmq
import logging
import tia.trad.tools.ipc.processLogger as pl
LOGGER_NAME = pl.PROCESS_NAME + __file__.split("/")[-1]; logger = logging.getLogger(LOGGER_NAME)
API_LIMIT = 10 * 10 ** 6 # in usec since comparing to .lastUpdate
ZMQPRODUCER = None
ZMQCONTEXT = zmq.Context()
def pushToStream(_mode, _container, _Market, _QMainStream):
try:
logger.debug("pushToStream: hi: %s" % _mode)
marketName = _Market.name
date = _Market.lastUpdate
if _mode == names.onOrderbook:
eventMsg = {"name": names.onOrderbook, "market": marketName, "date": date, "orderbook": _container}
elif _mode == names.onBatchTrades:
eventMsg = {"name": names.onBatchTrades, "market": marketName, "date": date, names.container: _container}
elif _mode == names.onTicker:
eventMsg = _container
elif _mode == names.onLag:
eventMsg = {"name": names.onLag, "market": marketName, "date": date, "lag": _container}
else: raise Exception("unknown mode")
_QMainStream.send_json(eventMsg)
except Exception: raise
def getNewTrades(_Market):
try:
logger.debug("getNewTrades: hi")
newTrades_ = []
tradesD = httpApi.getTrades(_Market)
if tradesD:
lTid = _Market.lastTid
if lTid:
# just search for new ones
for tid in tradesD:
if tid > lTid:
price = tradesD[tid]["price"]
amount = tradesD[tid]["amount"]
date = tradesD[tid]["date"]
newTrades_.append({"tid": tid, "price": price, "amount": amount, "date": date})
lTid = tid
_Market.lastTid = lTid
else:
# just append the last one
latestTid = tradesD.largest_key()
price = tradesD[latestTid]["price"]
amount = tradesD[latestTid]["amount"]
date = tradesD[latestTid]["date"]
newTrades_.append({"tid": latestTid, "price": price, "amount": amount, "date": date})
_Market.lastTid = latestTid
return newTrades_
except Exception: raise
def shutdown():
try:
ZMQPRODUCER.close()
ZMQCONTEXT.term()
except Exception: raise
def config():
try:
# get subclasses
marketObjD = classes.getSubclasses(markets)
# init subclasses
for marketName in marketObjD:
marketObjD[marketName] = marketObjD[marketName]()
marketObjD[marketName].changed = 1 # so that we get the orderbook first
marketObjD[marketName].gotFirstOrdb = 0
marketObjD[marketName].prevOrderbook = None
return marketObjD
except Exception: raise
def run(**kwargs):
try:
global logger
global ZMQPRODUCER
logger = kwargs["processLogger"]
httpApi.logger = logger
logger.debug("run: hi")
ZMQPRODUCER = zmqHelper.getProducer(ZMQCONTEXT)
marketObjD = config()
Market = marketObjD["mtgoxUSD"]
now = 10**16
while 1:
# periodically send the whole orderbook
orderbook = httpApi.getOrderbook(Market)
if orderbook:
pushToStream(names.onOrderbook, orderbook, Market, ZMQPRODUCER)
time.sleep(3)
newTrades = getNewTrades(Market)
if newTrades:
pushToStream(names.onBatchTrades, newTrades, Market, ZMQPRODUCER)
time.sleep(3)
newTicker = httpApi.getTicker(Market)
if newTicker:
pushToStream(names.onTicker, newTicker, Market, ZMQPRODUCER)
time.sleep(3)
# get order lag
orderLag = httpApi.getLag(Market)
if orderLag:
pushToStream(names.onLag, orderLag, Market, ZMQPRODUCER)
time.sleep(3)
except Exception as ex:
print "%s:ex: %s" % (__file__, ex)
eReport(__file__)
sys.exit()
```
#### File: trad/tools/classOps.py
```python
import inspect
def getSubclasses(module):
"""
    returns NON-instantiated subclasses (classes, not instances)
"""
try:
# get all classes
classes = []
for name, member in inspect.getmembers(module):
if inspect.isclass(member):
classes.append(member)
# filter subclasses
subclassesObjD_ = {}
for classItem in classes:
            # since exchanges are a subclass of market, we distinguish them by whether the first letter is lower or upper case
name = classItem.__name__
if name[0].islower(): subclassesObjD_[name] = classItem
return subclassesObjD_
except Exception: raise
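# Example (illustrative): for a module defining `class Market(object)` and `class mtgoxUSD(Market)`,
# getSubclasses(module) returns {"mtgoxUSD": <class mtgoxUSD>}; only names whose first letter is
# lower case are kept, upper-case names are treated as base classes.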
```
#### File: trad/tools/dicDiff.py
```python
import tia.trad.tools.ipc.naming_conventions as names
class DictDiff(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(current_dict.keys()), set(past_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(o for o in self.intersect if self.past_dict[o] != self.current_dict[o])
def unchanged(self):
return set(o for o in self.intersect if self.past_dict[o] == self.current_dict[o])
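# Example (illustrative):
#   diff = DictDiff({"a": 1, "b": 2}, {"b": 3, "c": 4})
#   diff.added() == {"a"}; diff.removed() == {"c"}; diff.changed() == {"b"}; diff.unchanged() == set()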
def orderbookComparison(_currOrdb, _prevOrdb):
try:
diff_ = {names.asks: {names.added: None, names.removed: None, names.changed: None},
names.bids: {names.added: None, names.removed: None, names.changed: None}}
# get deltas
tmpAC = {}; tmpAP = {}; tmpBC = {}; tmpBP = {}
for tpl in _currOrdb[names.asks]: tmpAC[tpl[0]] = tpl[1]
for tpl in _prevOrdb[names.asks]: tmpAP[tpl[0]] = tpl[1]
for tpl in _currOrdb[names.bids]: tmpBC[tpl[0]] = tpl[1]
for tpl in _prevOrdb[names.bids]: tmpBP[tpl[0]] = tpl[1]
diffAsks = DictDiff(tmpAC, tmpAP)
diffBids = DictDiff(tmpBC, tmpBP)
decisionD = {names.asks: diffAsks, names.bids: diffBids}
for side in decisionD:
diff_[side][names.added] = decisionD[side].added()
diff_[side][names.removed] = decisionD[side].removed()
diff_[side][names.changed] = decisionD[side].changed()
diff_[names.asks]["curr"] = tmpAC; diff_[names.asks]["prev"] = tmpAP
diff_[names.bids]["curr"] = tmpBC; diff_[names.bids]["prev"] = tmpBP
return diff_
except Exception: raise
```
#### File: trad/tools/errf.py
```python
import traceback
import tia.configuration as conf
import tia.trad.tools.io.file_handlers as fh
def eReport(_processName):
try:
filename = conf.FN_CRASH_REPORT
with fh.FileLock(filename) as lock:
with open(filename, "a") as f:
trace = traceback.format_exc()
f.write(_processName + ":\n" + trace + "\n")
print "%s: crashed :(" % _processName
except Exception: raise
```
#### File: tools/io/db.py
```python
import logging
import sys
import tables
import collections
import os
from tia.trad.tools.io.file_handlers import decoratorLockFile, createDir
import tia.trad.market.m as Mm
import tia.configuration as Mfn
from tia.trad.tools.ipc.naming_conventions import IPC
LOGGER_NAME = "rl." + __file__.split("/")[-1]; logger = logging.getLogger(LOGGER_NAME) # don't change!
class Depth(tables.IsDescription):
    # market is implicit in the db structure (one group per market)
    # type (ask/bid) is implicit in the table the row is stored in
date = tables.Int64Col()
price = tables.Int64Col()
amount = tables.Int64Col()
class Trades(tables.IsDescription):
    # market is implicit in the db structure (one group per market)
date = tables.Int64Col()
price = tables.Int64Col()
amount = tables.Int64Col()
tid = tables.Int64Col()
type = tables.Int16Col()
properties = tables.Int16Col()
class Events(tables.IsDescription):
marketName = tables.StringCol(15)
date = tables.Int64Col()
price = tables.Int64Col()
amount = tables.Int64Col()
type = tables.Int16Col()
tid = tables.Int64Col()
properties = tables.Int16Col()
def fillOrderbook(_orderbookD_, _bidaskS, _priceI, _amountI, _marketS, _date=None, _bidask=None):
"""
    adds _amountI to the amount stored at _priceI in the appropriate container,
    inserting the price level if it is missing; if the resulting amount is 0, the price level is deleted
    :param _orderbookD_: {IPC.asks: {}, IPC.bids: {}}
    :param _bidaskS: IPC.asks/IPC.bids
    :param _priceI: int
    :param _amountI: int
:return: _orderbookD_
"""
try:
assert _orderbookD_.has_key(IPC.asks), _orderbookD_
assert _bidaskS in [IPC.asks, IPC.bids], _bidaskS
validate = type(_priceI); assert validate == int, validate
validate = type(_amountI); assert validate == int, validate
try:
_orderbookD_[_bidaskS][_priceI] += _amountI
        except KeyError: _orderbookD_[_bidaskS][_priceI] = _amountI
except Exception: raise
if _orderbookD_[_bidaskS][_priceI] == 0: del _orderbookD_[_bidaskS][_priceI]
elif _orderbookD_[_bidaskS][_priceI] < 0: logger.critical("Negative amount in orderbook: %s, %s, %s" % (_orderbookD_, _priceI, _marketS))
except Exception: raise
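# Example (illustrative; prices and amounts are ints, see the asserts above):
#   ob = {IPC.asks: {}, IPC.bids: {}}
#   fillOrderbook(ob, IPC.asks, 100, 5, "mtgoxUSD")   # ob[IPC.asks] == {100: 5}
#   fillOrderbook(ob, IPC.asks, 100, -5, "mtgoxUSD")  # sums to 0, so the 100 price level is deleted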
@decoratorLockFile(Mfn.DB_FILENAME)
def getMarketsToCheck(_h5file=None):
validate = _h5file; assert validate != None, validate
marketsToCheck_ = {}
for marketS in _h5file.root._v_groups:
marketsToCheck_[marketS] = None
return marketsToCheck_
def decoratorCheckDeltasIntegrity(func):
"""
#@attention: it's assumed that cpA/B will not be empty on ANY event in Q for marketX
intended to decorate DB.writeData
"""
def funcWrapper(self):
try:
logger.debug("checkDeltasIntegrity: hi")
# get market names to work on
marketsToCheck = self.marketsSD
# get last rows and assign lastDates
lastRowsD = self.getRow(marketsToCheck, "last", "cpA")
for marketS in marketsToCheck:
if lastRowsD[marketS] != None:
marketsToCheck[marketS] = lastRowsD[marketS]["date"]
# write new data
#@attention: it's assumed that cpA/B will not be empty on ANY event in Q for marketX
func(self)
# get dates from which to compare deltas with checkpoints
firstRowsD = {}
            # if any market had an empty checkpoint table on the previous call, query again for the first rows
            if any(lastDate is None for lastDate in marketsToCheck.values()):
firstRowsD = self.getRow(marketsToCheck, "first", "cpA") # check all that has been written from the queue
for marketS in marketsToCheck: # for those with None val, insert lastDate from firstRowsD, others should have lastDate from lastRowsD
if marketsToCheck[marketS] == None:
if firstRowsD[marketS] != None:
marketsToCheck[marketS] = firstRowsD[marketS]["date"]
# compare orderbooks
for marketS in marketsToCheck:
fromDate = marketsToCheck[marketS]
while 1:
try:
if fromDate:
d1 = self.fillOrderbookFromDeltas(fromDate, marketS); o1 = d1["orderbook"]; nextCheckpointDate = d1["nextDate"]
if nextCheckpointDate:
self.logger.debug("checkDeltasIntegrity: checking diff")
d2 = self.loadOrderbookFromCheckpoint(nextCheckpointDate, marketS); o2 = d2["orderbook"]
diffD = dicComparison(o1, o2)
# CHECK: should be no difference between loaded from cp & loaded from deltas
report = [1 for bidsasks in [IPC.asks, IPC.bids] if any([len(x) for x in diffD[bidsasks]])]
if report: print("diffD:%s: %s" % (marketS, diffD))
fromDate = nextCheckpointDate
else: break
else: break
except Exception: raise
self.logger.debug("checkDeltasIntegrity: bye")
except Exception: raise
return funcWrapper
class DB:
logger = logging.getLogger(LOGGER_NAME + ".DB")
def __init__(self):
try:
self.fileName = Mfn.DB_FILENAME
self.filters = tables.Filters(complevel=9, complib='blosc', shuffle=1)
self.buffer = collections.deque()
# get all markets we're working on
self.marketsSD = Mm.getSubclasses(Mm)
# change default values in dict to None
for marketName in self.marketsSD: self.marketsSD[marketName] = None
# create DB
if not os.path.exists(Mfn.DB_FILENAME):
createDir("/db")
for marketName in self.marketsSD: self.createTables(marketName)
except Exception: raise
@decoratorLockFile(Mfn.DB_FILENAME)
def updateEventsTable(self):
# or just create new one
pass
@decoratorLockFile(Mfn.DB_FILENAME)
def createEventsTable(self, _h5file=None):
try:
self.logger.debug("createEventsTable: hi")
""" create new events.h5 """
h5tmp = tables.openFile("/run/shm/events.h5", mode="a", filters=self.filters)
h5events = tables.openFile("db/events.h5", mode="a", filters=self.filters)
# create checkpoints tables
for marketS in self.marketsSD:
if marketS not in Mfn.DEAD_MARKETS:
h5events.createGroup("/", marketS, marketS, filters=self.filters)
group = getattr(h5events.root, marketS)
h5events.createTable(group, "cpA", Depth, "checkpoints", filters=self.filters)
h5events.createTable(group, "cpB", Depth, "checkpoints", filters=self.filters)
# create events table
h5tmp.createTable(h5tmp.root, "events", Events, "CSIsorted_events", filters=self.filters)
eventsT = h5tmp.root.events; eventsR = eventsT.row
# copy data
for marketS in self.marketsSD:
if marketS not in Mfn.DEAD_MARKETS:
sys.stderr.write("copying deltas for %s\n" % marketS)
group = getattr(_h5file.root, marketS)
asksT = getattr(group, "asks")
bidsT = getattr(group, "bids")
tradesT = getattr(group, "trades")
decisionD = {asksT: IPC.normAsk, bidsT: IPC.normBid, tradesT: None}
# copy all deltas to /events
for keyTable in decisionD:
for index, row in enumerate(keyTable):
try:
eventsR["marketName"] = marketS
eventsR["date"] = row["date"]
eventsR["price"] = row["price"]
eventsR["amount"] = row["amount"]
# also copy trades
if keyTable == tradesT:
type = row["type"]
tid = row["tid"]
props = row["properties"]
else:
type = decisionD[keyTable]
tid = 0
props = 0
eventsR["type"] = type
eventsR["tid"] = tid
eventsR["properties"] = props
eventsR.append()
if index % 10000 == 0: eventsT.flush()
except Exception: raise
eventsT.flush()
# create index
sys.stderr.write("\ncreating indices")
col = tables.Column(eventsT, "date", eventsT.description)
col.createCSIndex(filters=self.filters, tmp_dir="/run/shm")
h5events.close(); h5tmp.close()
# copy data from h5tmp and sort id by "date" column
sys.stderr.write("\nwriting sorted events")
os.system('ptrepack --sortby="date" --chunkshape="auto" /run/shm/events.h5:/events db/events.h5:/events')
os.system('rm /run/shm/events.h5')
# index sorted events table
h5events = tables.openFile("db/events.h5", mode="a", filters=self.filters)
eventsT = h5events.root.events
col = tables.Column(eventsT, "date", eventsT.description)
col.createCSIndex(filters=self.filters, tmp_dir="/run/shm")
h5events.close()
self.logger.debug("createEventsTable: bye")
except Exception: raise
def validateData(self):
try:
firstRowsD = self.getRow(self.marketsSD, "first", "cpA")
lastRowsD = self.getRow(self.marketsSD, "last", "cpA")
for marketS in firstRowsD:
if firstRowsD[marketS]:
fromDate = firstRowsD[marketS]["date"]
lastDate = lastRowsD[marketS]["date"]
# check each etape
while 1:
try:
d1 = self.fillOrderbookFromDeltas(fromDate, marketS); o1 = d1["orderbook"]; nextCheckpointDate = d1["nextDate"]
if nextCheckpointDate:
d2 = self.loadOrderbookFromCheckpoint(nextCheckpointDate, marketS); o2 = d2["orderbook"]
diffD = dicComparison(o1, o2)
# CHECK: should be no difference between loaded from cp & loaded from deltas
for bidsasks in ["asks", "bids"]:
if any([len(x) for x in diffD[bidsasks]]):
print("diff: %s:from: %s:diff = %s" % (marketS, fromDate, diffD))
break
fromDate = nextCheckpointDate
else: break
except Exception: raise
# check from first firstCp->fillFrDeltas->lastCp
firstWithAllDeltasOrd = self.fillOrderbookFromDeltas(fromDate, marketS, _lastCpDate=lastDate)["orderbook"]
lastOrd = self.loadOrderbookFromCheckpoint(lastDate, marketS)["orderbook"]
diffD = dicComparison(firstWithAllDeltasOrd, lastOrd)
for bidsasks in ["asks", "bids"]:
if any([len(x) for x in diffD[bidsasks]]):
print("wholeDeltas: %s:from: %s:diff = %s" % (marketS, fromDate, diffD))
break
sys.stderr.write("%s validated: ok\n" % marketS)
else: sys.stderr.write("\n\t%s missing" % marketS)
print
except Exception: raise
@decoratorLockFile(Mfn.DB_FILENAME)
def getRow(self, _marketsD, _firstLastS, _nodeS, _h5file=None):
"""
returns first or last rows of all groups for node _nodeS
        :param _marketsD: dict; {str(marketName): ...}
        :param _firstLastS: str; must be one of valid_firstLastS
:param _nodeS: str; must be one of valid_nodeS
:param _h5file: h5object; db file
:return: {marketS: lastRow}
"""
try:
self.logger.debug("getRow: hi")
# check if input valid
valid_firstLastS = ["first", "last"]
            if _firstLastS not in valid_firstLastS: raise Exception("getRow: wrong input! Expected one of %s, but got: %s" % (valid_firstLastS, _firstLastS))
            valid_nodeS = ["cpA", "cpB", IPC.asks, IPC.bids, "trades"]
            if _nodeS not in valid_nodeS: raise Exception("getRow: wrong input! Expected one of %s, but got: %s" % (valid_nodeS, _nodeS))
validate = _h5file; assert validate != None, validate
rowsD_ = {}
groups = _h5file.root._v_groups # returns {'str(groupName)': getattr(_h5file.root, groupName)}
for marketS in _marketsD:
group = groups[marketS]
table = getattr(group, _nodeS)
if table.nrows:
if _firstLastS == "first": rowsD_[marketS] = [row for row in table.iterrows(0, 1)][0]
elif _firstLastS == "last": rowsD_[marketS] = [row for row in table.iterrows(table.nrows - 1, table.nrows)][0]
# check that we get what we want
returnedType = type(rowsD_[marketS])
                    if returnedType != tables.tableExtension.Row: raise AssertionError(
                        "getRow: expected type='tables.tableExtension.Row', but got type: %s" % returnedType)
else: rowsD_[marketS] = None
self.logger.debug("getRow: bye")
return rowsD_
except Exception: raise
@decoratorLockFile(Mfn.DB_FILENAME)
def loadOrderbookFromCheckpoint(self, _fromDate, _marketS, _h5file=None):
"""
return orderbook filled from h5file.root.market.cpA&cpB and date of next checkpoint
:param _fromDate: int; from which date on load checkpoints
:param _marketS: str; group name
:param _h5file: h5object; db file
:return: {"orderbook":{IPC.asks: {data}, IPC.bids: {data}|None, "nextDate":data|None}
"""
try:
self.logger.debug("loadOrderbookFromCheckpoint: hi")
validate = _h5file; assert validate != None, validate
orderbook_ = {IPC.asks: {}, IPC.bids: {}}; nextDate_ = None
group = getattr(_h5file.root, _marketS)
decisionD = {IPC.asks: getattr(group, "cpA"), IPC.bids: getattr(group, "cpB")}
# fill orderbook_
highestRowNumber = 0; where = None
for bidsasks in decisionD:
table = decisionD[bidsasks]
for row in table.where('date == _fromDate'):
if row["amount"] > 0:
orderbook_[bidsasks][row["price"]] = row["amount"]
if row.nrow > highestRowNumber:
highestRowNumber = row.nrow; where = table
# scenario:nodata|anomalousdata
if all([not orderbook_[IPC.asks], not orderbook_[IPC.bids]]): # if both don't have data
orderbook_ = None
elif not(orderbook_[IPC.asks] and orderbook_[IPC.bids]): # if one has data but other doesn't
raise Exception("loadOrderbookFromCheckpoint: asks or bids have no data. Got: ", orderbook_)
# get next date
nxDate = [row["date"] for row in where.iterrows(highestRowNumber + 1, highestRowNumber + 2)] # returns empty list if at the end
# scenario:nodata
nextDate_ = nxDate[0] if nxDate else None
self.logger.debug("loadOrderbookFromCheckpoint: bye")
return {"orderbook": orderbook_, "nextDate": nextDate_}
except Exception: raise
@decoratorLockFile(Mfn.DB_FILENAME)
def suppFillOrderbookFromDeltas(self, _startDate, _marketS, d, _lastCpDate, _h5file=None):
try:
validate = _h5file; assert validate != None, validate
orderbook_ = d["orderbook"]; nextCheckpointDate_ = d["nextDate"]
if (orderbook_ != None): # and (nextCheckpointDate_ != None):
group = getattr(_h5file.root, _marketS)
# choose deltas table
decisionD = {IPC.asks: getattr(group, IPC.asks), IPC.bids: getattr(group, IPC.bids)}
# choose condition for selecting rows in table
tablesConditionS = None
if _lastCpDate: tablesConditionS = '(_startDate < date) & (date <= _lastCpDate)'
# the else is needed for continuing from old DB so that we can start from last cp and include deltas
# that come after that cp
else: tablesConditionS = '(_startDate < date) & (date <= nextCheckpointDate_)' if nextCheckpointDate_ else '(_startDate < date)'
if tablesConditionS == None: raise Exception("TablesConditions == None")
for bidsasks in decisionD:
table = decisionD[bidsasks]
for row in table.where(tablesConditionS):
fillOrderbook(orderbook_, bidsasks, row["price"], row["amount"], _marketS, row["date"], bidsasks)
return {"orderbook": orderbook_, "nextDate": nextCheckpointDate_}
except Exception: raise
def fillOrderbookFromDeltas(self, _startDate, _marketS, _lastCpDate=None):
"""
returns orderbook from checkpoints updated with deltas till next checkpoint
:param _startDate: int; loading orderbook from that checkpoint
:param _marketS: str; group
        :param _lastCpDate: int|None; if given, apply deltas only up to this checkpoint date
:return: {"orderbook": {IPC.asks: {data}, IPC.bids: {data}}|None, "nextDate": _nextCheckpointDate|None}
"""
try:
self.logger.debug("fillOrderbookFromDeltas: hi")
d = self.loadOrderbookFromCheckpoint(_startDate, _marketS)
resultD = self.suppFillOrderbookFromDeltas(_startDate, _marketS, d, _lastCpDate)
self.logger.debug("fillOrderbookFromDeltas: bye")
return {"orderbook": resultD["orderbook"], "nextDate": resultD["nextDate"]}
except Exception: raise
@decoratorLockFile(Mfn.DB_FILENAME)
def createTables(self, _groupName, _h5file=None):
try:
self.logger.debug("createTables: hi. creating %s", _groupName)
try:
validate = _h5file; assert validate != None, validate
getattr(_h5file.root, _groupName) # throws AttributeError if attribute doesn't exist
except AttributeError:
_h5file.createGroup("/", _groupName, _groupName)
try:
newGroup = getattr(_h5file.root, _groupName)
getattr(newGroup, IPC.asks) # throws AttributeError if attribute doesn't exist
except AttributeError:
_h5file.createTable(newGroup, IPC.asks, Depth, "asks_deltas", filters=self.filters, expectedrows=1000000000)
try:
getattr(newGroup, "cpA") # throws AttributeError if attribute doesn't exist
except AttributeError:
_h5file.createTable(newGroup, "cpA", Depth, "checkpoints", filters=self.filters, expectedrows=1000000000)
try:
getattr(newGroup, IPC.bids) # throws AttributeError if attribute doesn't exist
except AttributeError:
_h5file.createTable(newGroup, IPC.bids, Depth, "bids_deltas", filters=self.filters, expectedrows=1000000000)
try:
getattr(newGroup, "cpB") # throws AttributeError if attribute doesn't exist
except AttributeError:
_h5file.createTable(newGroup, "cpB", Depth, "checkpoints", filters=self.filters, expectedrows=1000000000)
try:
getattr(newGroup, "trades")
except AttributeError:
_h5file.createTable(newGroup, "trades", Trades, "trades", filters=self.filters, expectedrows=10000000)
self.logger.debug("createTables: bye")
except Exception: raise
def writeTrades(self, _item, _h5file):
try:
validate = _item.__class__.__name__; assert validate == IPC.IPCTrade, validate
# select table
group = getattr(_h5file.root, _item.market); table = getattr(group, "trades")
# write data
table.row["date"] = _item.date
table.row["price"] = _item.price
table.row["amount"] = _item.amount
table.row["tid"] = _item.tid
table.row["type"] = _item.type
table.row["properties"] = _item.properties
table.row.append()
table.flush()
except Exception: raise
def writeCheckpoints(self, _item, _h5file):
try:
validate = _item.__class__.__name__; assert validate == IPC.IPCOrderbook, validate
# select table
group = getattr(_h5file.root, _item.market)
data = [[_item.orderbook[IPC.asks], getattr(group, "cpA")], [_item.orderbook[IPC.bids], getattr(group, "cpB")]]
# write data
for tpl in data:
bidsasksD = tpl[0]; table = tpl[1]
for price in bidsasksD:
try:
table.row["date"] = _item.date
table.row["price"] = price
table.row["amount"] = bidsasksD[price]
table.row.append()
except Exception: raise
table.flush()
except Exception: raise
def writeDeltas(self, _item, _h5file):
try:
validate = _item.__class__.__name__; assert validate == IPC.IPCDelta, validate
# select table
group = getattr(_h5file.root, _item.market)
bidsAsksS = _item.typeNorm2Str(_item.type)
table = getattr(group, bidsAsksS)
# append data
table.row["date"] = _item.date
table.row["price"] = _item.price
table.row["amount"] = _item.delta
table.row.append()
# flush to disk
table.flush()
except Exception: raise
@decoratorCheckDeltasIntegrity
@decoratorLockFile(Mfn.DB_FILENAME)
def flushBuffer(self, _h5file=None):
"""
        writes data to the DB file from the self.buffer deque (FIFO)
        buffer items are IPC objects and are dispatched by class name:
            IPC.IPCDelta     -> writeDeltas()
            IPC.IPCOrderbook -> writeCheckpoints()
            IPC.IPCTrade     -> writeTrades()
"""
try:
self.logger.debug("flushBuffer: hi")
validate = _h5file; assert validate != None, validate
# flush buffer
while 1:
try:
item = self.buffer.popleft() # FIFO queue
mode = item.__class__.__name__
if mode == IPC.IPCDelta: self.writeDeltas(item, _h5file)
elif mode == IPC.IPCOrderbook: self.writeCheckpoints(item, _h5file)
elif mode == IPC.IPCTrade: self.writeTrades(item, _h5file)
else: raise Exception("writeData: unknown mode: %s", mode)
except IndexError:
break
except Exception: raise
self.logger.debug("flushBuffer: bye")
except Exception: raise
```
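A minimal sketch of how the buffered write path above appears to be driven; the IPC event objects (IPCDelta, IPCOrderbook, IPCTrade) are built elsewhere and only assumed here.
```python
# Hedged sketch only: event objects come from IPC modules not shown in this file.
def persist_events(db, events):
    """Queue IPC.IPCDelta / IPC.IPCOrderbook / IPC.IPCTrade objects and flush them."""
    for event in events:
        db.buffer.append(event)   # flushBuffer() pops these FIFO-style
    db.flushBuffer()              # writes to the HDF5 file under the lock decorator
```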
#### File: tools/io/file_handlers.py
```python
import os
import time
import errno
import logging
logger = logging.getLogger("rl." + __file__.split("/")[-1]) # don't change!
def createDir(_dir):
try:
if not os.path.exists(_dir):
os.makedirs(_dir)
except os.error as ex: logger.exception("createDir:ex: %s" % ex)
except Exception: raise
class FileLock(object):
"""
@attention: leaves the lock if program crashes
A file locking mechanism that has context-manager support so
you can use it in a with statement. This should be relatively cross
compatible as it doesn't rely on msvcrt or fcntl for the locking.
@author: kindly under BSD licence from: http://www.evanfosmark.com/2009/01/cross-platform-file-locking-support-in-python/
"""
logger = logging.getLogger('rl.file_handlers.FileLock')
def __init__(self, file_name, timeout=10, delay=1):
""" Prepare the file locker. Specify the file to lock and optionally
the maximum timeout and the delay between each attempt to lock.
"""
self.is_locked = False
self.lockfile = os.path.join(os.getcwd(), "%s.lock" % file_name)
self.file_name = file_name
self.timeout = timeout
self.delay = delay
def acquire(self):
""" Acquire the lock, if possible. If the lock is in use, it check again
every `wait` seconds. It does this until it either gets the lock or
exceeds `timeout` number of seconds, in which case it throws
an exception.
"""
start_time = time.time()
while True:
try:
self.fd = os.open(self.lockfile, os.O_CREAT|os.O_EXCL|os.O_RDWR)
break;
except OSError as e:
if e.errno != errno.EEXIST:
raise
if (time.time() - start_time) >= self.timeout:
                    self.logger.critical('Timeout occurred: %s' % self.file_name)
time.sleep(self.delay)
self.is_locked = True
def release(self):
""" Get rid of the lock by deleting the lockfile.
When working in a `with` statement, this gets automatically
called at the end.
"""
if self.is_locked:
os.close(self.fd)
os.unlink(self.lockfile)
self.is_locked = False
def __enter__(self):
""" Activated when used in the with statement.
Should automatically acquire a lock to be used in the with block.
"""
if not self.is_locked:
self.acquire()
return self
def __exit__(self, type, value, traceback):
""" Activated at the end of the with statement.
It automatically releases the lock if it isn't locked.
"""
if self.is_locked:
self.release()
def __del__(self):
""" Make sure that the FileLock instance doesn't leave a lockfile
lying around.
"""
self.release()
def decoratorLockFile(_fileName): #decorator maker
try:
def decorator(func):
def funcWrapper(*args, **kwargs):
try:
with FileLock(_fileName) as lock: # keep the lock until all file IO finished
with open(_fileName, "a") as f:
                            return func(*args, _file=f, **kwargs)
except Exception: raise
return funcWrapper
return decorator
except Exception: raise
```
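A short usage sketch of the FileLock context manager above; the flat import path is an assumption, since the package layout is not shown here.
```python
# Assumes file_handlers is importable under this name.
from file_handlers import FileLock

def append_line(path, line):
    with FileLock(path, timeout=10, delay=1):   # blocks until "<path>.lock" can be created
        with open(path, "a") as f:
            f.write(line + "\n")
```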
#### File: tools/ipc/process.py
```python
import sys
import tia.trad.tools.ipc.processLogger as pl
import tia.configuration as conf
from tia.trad.tools.errf import eReport
class Process:
def __init__(self, _modulePath, **kwargs):
try:
validate = type(_modulePath); assert validate == str, validate
moduleName = _modulePath.split(".")[-1]
# set process name
conf.PROCESS_NAME = moduleName
#TODO: set Proces logger name before import...
# set logging file for that process
pl.PROCESS_NAME = moduleName + "."
self.processLogger = pl.loggerInit(moduleName)
# import a module whose name is only known at runtime
myMod = __import__(_modulePath, globals(), locals(), [moduleName], -1)
# pass args and start the process
kwargs["processLogger"] = self.processLogger
myMod.run(**kwargs)
except KeyboardInterrupt:
print "%s received KeyboardInterrupt, shutting down" % moduleName
myMod.shutdown()
sys.exit()
except Exception: eReport(moduleName)
```
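A hedged sketch of how Process is meant to be used: the target module path and keyword argument below are placeholders, and any real module is expected to expose run(**kwargs) and shutdown().
```python
from tia.trad.tools.ipc.process import Process

def start_worker():
    # "tia.trad.workers.example_worker" is a placeholder module path.
    Process("tia.trad.workers.example_worker", pollInterval=5)
```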
#### File: tools/net/httpApi.py
```python
import xml.dom.minidom as minidom
import urllib
import urllib2
import json
import tia.trad.tools.sortedDict as sd
import tia.trad.tools.ipc.naming_conventions as names
from tia.trad.tools.timing import getTime
import logging
import tia.trad.tools.arithm.floatArithm as fl
import tia.trad.tools.ipc.processLogger as pl
LOGGER_NAME = pl.PROCESS_NAME + __file__.split("/")[-1]; logger = logging.getLogger(LOGGER_NAME)
MAX_RECONNECTIONS = 2
def getHtml(_url):
try:
logger.debug("getHtml: hi")
html_ = ""
website = None
headers = {
'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.A.B.C Safari/525.13',
'Referer': 'http://python.org'}
request = urllib2.Request(_url, headers=headers)
for attempt in range(MAX_RECONNECTIONS):
try:
# proxy = urllib2.ProxyHandler({'http': '127.0.0.1:8118'})
# opener = urllib2.build_opener(proxy)
# urllib2.install_opener(opener)
logger.info("getHtml:urlopen: %s: attempt: %s" % (_url, attempt))
website = urllib2.urlopen(request, timeout=10)
break
except Exception as ex:
logger.exception("getHtml:urlopen:exception: %s" % ex)
continue
if website != None: html_ = website.read() # downloads content
logger.debug("html: %s" % html_)
return html_
except Exception as ex:
logger.exception("getHtml:ex:%s, %s" % (_url, ex))
return None
def ratesFromECB():
try:
logger.debug("ratesFromECB: hi")
u1 = urllib2.urlopen('http://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml')
dom = minidom.parse(u1)
relativeToEUR = {}
for elem in dom.getElementsByTagName('Cube'):
if elem.hasAttribute('currency'):
relativeToEUR[elem.attributes['currency'].value] = elem.attributes['rate'].value
# add EUR
relativeToEUR["EUR"] = "1"
relative2USD = {}
for currencyS in relativeToEUR:
rate = relativeToEUR[currencyS]
rel2USD = fl.D(relativeToEUR["USD"]) / fl.D(relativeToEUR[currencyS])
relative2USD[currencyS] = str(rel2USD)
return relative2USD
except Exception as ex: logger.exception("ratesFromECB:ex: %s" % ex)
def getAndCacheExchangeRates():
"""
    fetches ECB rates and caches them in the module-level CACHED_EXCHANGE_RATES
    output: CACHED_EXCHANGE_RATES = {"USD": "1", ...} (rates relative to USD)
"""
try:
logger.debug("cacheExchangeRates: hi")
global CACHED_EXCHANGE_RATES
CACHED_EXCHANGE_RATES = ratesFromECB()
except Exception as ex: logger.exception("getAndCacheExchangeRates:ex: %s" % ex)
"""
public API
"""
def getOrderbook(_Market):
"""
return _orderbook = {"asks": [["price", "amount"], ...]}
"""
try:
logger.debug("getOrderbook: hi")
marketName = _Market.name
_Market.lastUpdate = getTime()
orderbook_ = {names.asks: [], names.bids: []}
# get orderbook
html = getHtml(_Market.depthAddr)
if html:
parsedJson = json.loads(html, parse_float=str, parse_int=str)
if "mtgox" in marketName:
data = parsedJson["data"]
for side in [names.asks, names.bids]:
for dct in data[side]:
orderbook_[side].append([dct["price"], dct["amount"]])
else:
# check that orderbook appropriate shape
dummyAccess = parsedJson[names.asks][0][0] # throws exception if not that shape
orderbook_ = parsedJson
else: orderbook_ = None
return orderbook_
except ValueError: return None # if srv down or some maintainance msg
except Exception as ex:
logger.exception("getOrderbook:ex:%s: %s" % (marketName, ex))
return None
def getTrades(_Market, _since=""):
"""
    :return: tradesD_ = SortedDict{tid: {"date": 123, "price": 123, "amount": 123}}
"""
try:
logger.debug("getTrades: hi")
marketName = _Market.name
_Market.lastUpdate = getTime()
tradesD_ = sd.SortedDict()
# get data
if "btc24" in _Market.name: html = getHtml(_Market.tradesAddr + str(_Market.lastTid))
else: html = getHtml(_Market.tradesAddr + _since)
if html:
parsedJson = json.loads(html, parse_float=str, parse_int=str)
if "mtgox" in marketName:
data = parsedJson["data"]
for dct in data:
if dct["primary"] == "Y":
tradesD_[int(dct["tid"])] = {"price": dct["price"], "amount": dct["amount"], "date": dct["date"]}
else:
for dct in parsedJson:
tradesD_[int(dct["tid"])] = {"price": dct["price"], "amount": dct["amount"], "date": dct["date"]}
else: tradesD_ = None
return tradesD_
except ValueError: return None # if srv down or some maintainance msg
except Exception: raise
def getTicker(_Market):
try:
logger.debug("getLag: hi")
marketName = _Market.name
_Market.lastUpdate = getTime()
eventMsg_ = None
html = getHtml(_Market.tickerAddr)
if html:
parsedJson = json.loads(html, parse_float=str, parse_int=str)
data = parsedJson["data"]
if data["avg"]["currency"] == "USD":
minAsk = data["sell"]["value"]
maxBid = data["buy"]["value"]
eventMsg_ = {"name": names.onTicker, "market": "mtgoxUSD", "minAsk": minAsk, "maxBid": maxBid, "date": data["now"]}
return eventMsg_
except ValueError: return None # if srv down or some maintainance msg
except Exception as ex:
logger.exception("getTicker:ex:%s: %s" % (marketName, ex))
return None
def getLag(_Market):
try:
logger.debug("getLag: hi")
marketName = _Market.name
_Market.lastUpdate = getTime()
lag_ = None
html = getHtml(_Market.lagAddr)
if html:
parsedJson = json.loads(html, parse_float=str, parse_int=str)
data = parsedJson["return"]
lag_ = data["lag"]
return lag_
except ValueError: return None # if srv down or some maintainance msg
except Exception as ex:
logger.exception("getLag:ex:%s: %s" % (marketName, ex))
return None
"""
private API
"""
def getAccountBalance(_Market):
try:
logger.debug("getAccountBalance: hi")
res_ = {}
if "btc24EUR" in _Market.name:
values = {"user": _Market.account.user,
"key": _Market.account.apiKey,
"api": "get_balance"}
data = urllib.urlencode(values)
while 1:
try:
req = urllib2.Request(_Market.apiBalanceAddr, data)
response = urllib2.urlopen(req)
jsonStr = response.read()
logger.debug("%s: %s" % (_Market.name, jsonStr))
break
except urllib2.HTTPError as ex:
logger.exception("getAccountBalance:ex: %s" % ex)
continue
except Exception: raise
logger.debug("%s: %s" % (_Market.name, jsonStr))
jsonD = json.loads(jsonStr, parse_float=str, parse_int=str)
res_[names.fundsAvailable] = jsonD["eur"]
res_[names.itemsAvailable] = jsonD["btc_available"]
res_["market"] = _Market.name
res_["name"] = names.onAccountBallance
return res_
except ValueError as ex:
logger.exception("ValueError:ex: %s" % ex)
return None
except Exception: raise
def getOpenOrders(_Market):
try:
logger.debug("getOpenOrders: hi")
res_ = {}
if "btc24" in _Market.name:
values = {"user": _Market.account.user,
"key": _Market.account.apiKey,
"api": "open_orders"}
data = urllib.urlencode(values)
while 1:
try:
req = urllib2.Request(_Market.apiBalanceAddr, data)
response = urllib2.urlopen(req)
jsonStr = response.read()
logger.debug("%s: %s" % (_Market.name, jsonStr))
break
except urllib2.HTTPError as ex:
logger.exception("getOpenOrders:ex: %s" % ex)
continue
except Exception: raise
logger.debug("%s: %s" % (_Market.name, jsonStr))
jsonD = json.loads(jsonStr, parse_float=str, parse_int=str)
parsedList = []
for dct in jsonD:
parsedData = {}
parsedData["oid"] = int(dct["id"])
parsedData["amount"] = dct["amount"]
side = names.bids if int(dct["type"]) == 2 else names.asks # opposite than in place order!
parsedData["orderType"] = side
parsedData["price"] = dct["price"]
parsedData["eventDate"] = int(dct["date"]) * 10**6
parsedList.append(parsedData)
res_ = {"orders": parsedList}
res_["market"] = _Market.name
res_["name"] = names.onOpenOrders
return res_
except urllib2.HTTPError as ex:
logger.error("HTTPError:ex: %s" % ex)
return None
except ValueError as ex:
logger.exception("ValueError:ex: %s" % ex)
return None
except Exception:raise
def getTransactions(_Market):
try:
logger.debug("getTransactions: hi")
res_ = {}
if "btc24" in _Market.name:
values = {"user": _Market.account.user,
"key": _Market.account.apiKey,
"api": "trades_json"}
data = urllib.urlencode(values)
req = urllib2.Request(_Market.apiTransactionsAddr, data)
response = urllib2.urlopen(req)
jsonStr = response.read()
logger.debug("%s: %s" % (_Market.name, jsonStr))
jsonL = json.loads(jsonStr, parse_int=str, parse_float=str)
lastTransaction = jsonL[-1]
lastTransactionTid = int(lastTransaction["tid"])
lastTid = _Market.lastTid
if lastTid == 0: _Market.lastTid = lastTransactionTid
elif lastTransactionTid > lastTid:
res_["transactions"] = {}
#get all the executed prices of our orders from last tid
for dct in reversed(jsonL):
if int(dct["tid"]) > lastTid:
res_["transactions"][dct["price"]] = dct["amount"]
_Market.lastTid = lastTransactionTid
res_["name"] = names.onTransactionsList
res_["market"] = _Market.name
return res_
except ValueError as ex:
logger.exception("ValueError:ex: %s" % ex)
return None
except Exception:raise
def cancelOrder(_Market, _oid, _side):
try:
logger.debug("cancelOrder: hi")
res_ = {}
if "btc24" in _Market.name:
values = {"user": _Market.account.user,
"key": _Market.account.apiKey,
"api": "cancel_order",
"id": _oid}
data = urllib.urlencode(values)
while 1:
try:
req = urllib2.Request(_Market.apiBalanceAddr, data)
response = urllib2.urlopen(req)
jsonStr = response.read()
logger.debug("%s: %s" % (_Market.name, jsonStr))
break
except urllib2.HTTPError as ex:
logger.exception("cancelOrder:ex: %s" % ex)
continue
except Exception: raise
jsonD = json.loads(jsonStr, parse_float=str, parse_int=str)
if int(jsonD["error"]) == 0:
res_ = {"name": names.orderCancel, "market": _Market.name, "side": _side, "oid": int(_oid)}
else: _Market.logger.critical("didn't cancel order: %s" % jsonD)
return res_
except KeyError:
if "not exist" in jsonD["message"]:
return names.noSuchOrder
else: raise
except Exception:raise
def placeOrder(_Market, _Order):
try:
logger.debug("placeOrder: hi")
res_ = {}
if "btc24EUR" in _Market.name:
apiAction = "buy_btc" if _Order.type == names.bids else "sell_btc"
values = {"user": _Market.account.user,
"key": _Market.account.apiKey,
"api": apiAction,
"amount": str(_Order.amount.quantize(_Market.pipAmount)),
"price": str(_Order.price.quantize(_Market.pip)),
"cur": "EUR"}
data = urllib.urlencode(values)
while 1:
try:
req = urllib2.Request(_Market.apiBalanceAddr, data)
response = urllib2.urlopen(req)
jsonStr = response.read()
logger.debug("%s: %s" % (_Market.name, jsonStr))
break
except urllib2.HTTPError as ex:
logger.exception("placeOrder:ex: %s" % ex)
continue
except Exception: raise
jsonD = json.loads(jsonStr, parse_float=str, parse_int=str)
#side = names.bids if int(jsonD["type"]) == 1 else names.asks
res_ = {"name": names.orderPlace, "market": _Market.name, "orderPrice": jsonD["price"], "orderAmount": jsonD["amount"],
"oid": int(jsonD["id"]), "orderType": _Order.type, "eventDate": int(jsonD["date"]) * 10**6, "foundTargets": _Order.targets}
return res_
except KeyError:
if jsonD:
if "enough" in jsonD["message"]: return names.accountNotEnoughFunds
else: raise
except Exception:raise
```
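The public API above only touches a few attributes of the market object; the stub below shows that minimal shape. The import path and the endpoint URL are assumptions.
```python
from tia.trad.tools.net.httpApi import getOrderbook

class MarketStub(object):
    name = "mtgoxUSD"
    depthAddr = "http://example.com/api/depth"   # placeholder endpoint
    lastUpdate = 0

def fetch_orderbook():
    return getOrderbook(MarketStub())   # {names.asks: [...], names.bids: [...]} or None
```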
#### File: trad/tools/timing.py
```python
import time
def getTime():
try:
now = time.time()
normTime = now * 10 ** 6
        return int(normTime) # return time in microseconds
except Exception: raise
``` |
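A tiny helper illustrating the unit convention above: getTime() returns integer microseconds since the epoch.
```python
from tia.trad.tools.timing import getTime

def seconds_now():
    return getTime() / 10 ** 6   # microseconds back to seconds
```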
{
"source": "j-mak/passwords",
"score": 3
} |
#### File: passwords/password/generator.py
```python
import random
from .rules import *
class Generator:
def __init__(self, length=8, rules=None, restrictions=None):
self.length = length
if not rules:
self.rules = [WITH_LOWERCASE]
else:
if not isinstance(rules, list):
self.rules = [rules]
else:
self.rules = rules
if not restrictions:
self.restrictions = []
else:
if not isinstance(restrictions, list):
self.restrictions = [restrictions]
else:
self.restrictions = restrictions
def __generate_password_proto(self):
pw = []
for i in range(0, self.length, len(self.rules)):
random.shuffle(self.rules)
pw.extend(self.rules)
return "".join([str(i) for i in pw])
def generate(self):
password = ""
for i in self.__generate_password_proto():
if i == '0':
table = LOWER_ALPHA
elif i == '1':
table = UPPER_ALPHA
elif i == '2':
table = NUMBERS
else:
table = SYMBOLS
# print("Before restrictions: {}".format(table))
if 11 in self.restrictions:
table = sorted(list(set(table) - set(MEDIUM_SIMILAR)))
if 12 in self.restrictions:
table = sorted(list(set(table) - set(VERY_SIMILAR)))
if 13 in self.restrictions:
table = sorted(list(set(table) - set(AMBIGUOUS)))
# print("After restrictions: {}".format(table))
rand_char = table[random.randint(0, len(table) - 1)]
password += rand_char
return password
``` |
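A hedged usage sketch: WITH_LOWERCASE is one of the rule constants star-imported from password.rules, and 11/12/13 are the restriction codes handled in generate(); the import paths assume the package is importable as shown.
```python
from password.generator import Generator
from password.rules import WITH_LOWERCASE

def make_password():
    gen = Generator(length=12, rules=[WITH_LOWERCASE], restrictions=[12, 13])
    return gen.generate()   # lowercase password avoiding very-similar/ambiguous glyphs
```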
{
"source": "jmakr0/libgroupsig",
"score": 3
} |
#### File: python/pygroupsig/blindsig.py
```python
from _groupsig import lib, ffi
from . import constants
import base64
def blindsig_export(sig):
"""
Exports the given blinded signature to a Base64 string.
Parameters:
sig: The blinded signature to export.
Returns:
The produced Base64 string. On error, an Exception is thrown.
"""
bsig = ffi.new("byte_t **")
bsig[0] = ffi.NULL
size = ffi.new("uint32_t *")
if lib.groupsig_blindsig_export(bsig, size, sig) == constants.IERROR:
raise Exception('Error exporting blindsig.')
b64sig = base64.b64encode(ffi.buffer(bsig[0],size[0]))
b64sig = b64sig.decode('utf-8').replace('\n', '')
# lib.free(bsig[0])
return b64sig
def blindsig_import(code, b64sig):
"""
Imports the given blinded signature from a Base64 string.
Parameters:
sig: The blinded signature to import.
Returns:
The imported blinded signature. On error, an Exception is thrown.
"""
b = base64.b64decode(b64sig)
sig = lib.groupsig_blindsig_import(code, b, len(b))
if sig == ffi.NULL:
raise Exception('Error importing blindsig.')
return sig
def blindsig_to_string(sig):
"""
Returns a human readable string corresponding to the given blinded signature.
Parameters:
sig: The blinded signature to print.
Returns:
The produced string. On error, an Exception is thrown.
"""
_str = ffi.new("char *")
_str = lib.groupsig_blindsig_to_string(sig)
if _str == ffi.NULL:
raise Exception('Error converting blindsig to string.')
return ffi.string(_str).decode('utf8')
```
#### File: python/pygroupsig/exim_build.py
```python
from common_build import ffibuilder
# Define data types
ffibuilder.cdef("""
typedef enum {
EXIM_FORMAT_FILE_NULL,
EXIM_FORMAT_FILE_NULL_B64,
EXIM_FORMAT_STRING_NULL_B64,
EXIM_FORMAT_MESSAGE_NULL,
EXIM_FORMAT_MESSAGE_NULL_B64,
EXIM_FORMAT_BYTEARRAY_NULL,
} exim_format_t;
""")
ffibuilder.cdef("""
typedef struct exim_handle_t exim_handle_t;
""")
ffibuilder.cdef("""
typedef struct exim_t exim_t;
""")
ffibuilder.cdef("""
typedef int (*exim_get_size_bytearray_null_f)(exim_t* obj);
""")
ffibuilder.cdef("""
typedef int (*exim_export_bytearray_fd_f)(exim_t* obj, FILE* fd);
""")
ffibuilder.cdef("""
typedef int (*exim_import_bytearray_fd_f)(FILE *fd, exim_t* obj);
""")
ffibuilder.cdef("""
struct exim_handle_t {
exim_get_size_bytearray_null_f get_size_bytearray_null;
exim_export_bytearray_fd_f export_bytearray_fd;
exim_import_bytearray_fd_f import_bytearray_fd;
};
""")
ffibuilder.cdef("""
struct exim_t {
void *eximable;
exim_handle_t *funcs;
};
""")
ffibuilder.cdef("""
int exim_get_size_in_format(exim_t *obj, exim_format_t format);
""")
ffibuilder.cdef("""
int exim_export(exim_t* obj, exim_format_t format, void *dst);
""")
ffibuilder.cdef("""
int exim_import(exim_format_t format, void *source, exim_t* obj);
""")
```
#### File: python/pygroupsig/gml.py
```python
from _groupsig import lib, ffi
from . import constants
import base64
def gml_init(code):
"""
Initializes a Group Membership List (GML) for schemes of the given type.
Parameters:
code: The code of the scheme.
Returns:
A native object representing the GML. Throws an Exception on error.
"""
gml = lib.gml_init(code)
if gml == ffi.NULL:
raise Exception('Error initializing GML.')
return gml
def gml_free(gml):
"""
Frees the native memory used to represent the given GML.
Parameters:
gml: The GML structure to free.
Returns:
IOK (1) or IERROR (0)
"""
return lib.gml_free(gml)
def gml_export(gml):
"""
Exports a GML to a Base64 string.
Parameters:
gml: The GML to export.
Returns:
A Base64 string. On error, an Exception is thrown.
"""
bgml = ffi.new("byte_t **")
bgml[0] = ffi.NULL
size = ffi.new("uint32_t *")
if lib.gml_export(bgml, size, gml) == constants.IERROR:
raise Exception('Error exporting GML.')
b64gml = base64.b64encode(ffi.buffer(bgml[0],size[0]))
b64gml = b64gml.decode('utf-8').replace('\n', '')
# lib.free(bgml[0])
return b64gml
def gml_import(code, b64gml):
"""
Imports a GML from a Base64 string.
Parameters:
code: The code of the scheme related to this GML.
b64gml: The Base64 string.
Returns:
The imported GML native data structure. Throws an Exception on error.
"""
b = base64.b64decode(b64gml)
gml = lib.gml_import(code, b, len(b))
if gml == ffi.NULL:
raise Exception('Error importing GML.')
return gml
```
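A hedged sketch of the helpers above: export/import move a GML through Base64, and gml_free releases the native object. Both scheme_code and gml_obj are assumed inputs; the concrete scheme constants are not listed in this file.
```python
from pygroupsig import gml

def gml_roundtrip(scheme_code, gml_obj):
    b64 = gml.gml_export(gml_obj)             # native GML -> Base64 string
    copy = gml.gml_import(scheme_code, b64)   # Base64 string -> native GML
    gml.gml_free(gml_obj)                     # release the original when done
    return copy
```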
#### File: python/pygroupsig/grpkey.py
```python
from _groupsig import lib, ffi
from . import constants
import base64
def grpkey_export(grpkey):
"""
Exports the given group key to a Base64 string.
Parameters:
grpkey: The native group key data structure.
Returns:
A Base64 string. On error, an Exception is thrown.
"""
bkey = ffi.new("byte_t **")
bkey[0] = ffi.NULL
size = ffi.new("uint32_t *")
if lib.groupsig_grp_key_export(bkey, size, grpkey) == constants.IERROR:
raise Exception('Error exporting group key.')
b64 = base64.b64encode(ffi.buffer(bkey[0],size[0]))
b64 = b64.decode('utf-8').replace('\n', '')
# lib.free(bkey[0])
return b64
def grpkey_import(code, b64key):
"""
Imports a group key from a Base64 string.
Parameters:
code: The code corresponding to the group signature scheme.
b64key: The Base64 string.
Returns:
A group key. On error, an Exception is thrown.
"""
b = base64.b64decode(b64key)
grpkey = lib.groupsig_grp_key_import(code, b, len(b))
if grpkey == ffi.NULL:
raise Exception('Error importing group key.')
return grpkey
#def grpkey_to_string(key):
#
# _str = ffi.new("char *")
# _str = lib.groupsig_grp_key_to_string(key)
# if _str == ffi.NULL:
# raise Exception('Error converting grpkey to string.')
# return ffi.string(_str).decode('utf8')
```
#### File: python/pygroupsig/key_build.py
```python
from pygroupsig.common_build import ffibuilder
# Define data types
ffibuilder.cdef("""
typedef enum {
GROUPSIG_KEY_GRPKEY,
GROUPSIG_KEY_MGRKEY,
GROUPSIG_KEY_MEMKEY,
GROUPSIG_KEY_BLDKEY,
} groupsig_key_types;
""")
ffibuilder.cdef("""
typedef struct {
uint8_t scheme;
void *key;
} groupsig_key_t;
""")
ffibuilder.cdef("""
typedef groupsig_key_t* (*groupsig_key_init_f)(void);
""")
ffibuilder.cdef("""
typedef int (*groupsig_key_free_f)(groupsig_key_t *key);
""")
ffibuilder.cdef("""
typedef int (*groupsig_key_copy_f)(
groupsig_key_t *dst,
groupsig_key_t *src);
""")
ffibuilder.cdef("""
typedef int (*groupsig_key_get_size_f)(
groupsig_key_t *key);
""")
ffibuilder.cdef("""
typedef groupsig_key_t* (*groupsig_key_prv_get_f)(groupsig_key_t *key);
""")
ffibuilder.cdef("""
typedef groupsig_key_t* (*groupsig_key_pub_get_f)(groupsig_key_t *key);
""")
ffibuilder.cdef("""
typedef int (*groupsig_key_prv_set_f)(
void *dst,
void *src);
""")
ffibuilder.cdef("""
typedef int (*groupsig_key_pub_set_f)(
void *dst,
void *src);
""")
ffibuilder.cdef("""
typedef int (*groupsig_key_export_f)(
unsigned char **bytes,
uint32_t *size,
groupsig_key_t *key);
""")
ffibuilder.cdef("""
typedef int (*groupsig_key_pub_export_f)(
unsigned char **bytes,
uint32_t *size,
groupsig_key_t *key);
""")
ffibuilder.cdef("""
typedef int (*groupsig_key_prv_export_f)(
unsigned char **bytes,
uint32_t *size,
groupsig_key_t *key);
""")
ffibuilder.cdef("""
typedef groupsig_key_t* (*groupsig_key_import_f)(
unsigned char *source,
uint32_t size);
""")
ffibuilder.cdef("""
typedef groupsig_key_t* (*groupsig_key_prv_import_f)(
unsigned char *source,
uint32_t size);
""")
ffibuilder.cdef("""
typedef groupsig_key_t* (*groupsig_key_pub_import_f)(
unsigned char *source,
uint32_t size);
""")
ffibuilder.cdef("""
typedef char* (*groupsig_key_to_string_f)(groupsig_key_t *key);
""")
ffibuilder.cdef("""
typedef char* (*groupsig_key_prv_to_string_f)(groupsig_key_t *key);
""")
ffibuilder.cdef("""
typedef char* (*groupsig_key_pub_to_string_f)(groupsig_key_t *key);
""")
```
#### File: python/pygroupsig/message.py
```python
from _groupsig import lib, ffi
from . import constants
import base64
def message_from_string(mstr):
"""
Imports a message from a UTF-8 string.
Parameters:
mstr: The string.
Returns:
A message data structure. On error, an Exception is thrown.
"""
msg = lib.message_from_string(mstr.encode('utf8'))
if msg == ffi.NULL:
raise Exception('Error parsing message the given string.')
return msg
def message_to_string(msg):
"""
Exports the given message object to a UTF-8 string. Use only for messages
that are ensured to be strings.
Parameters:
msg: The message to export to a string.
Returns:
A UTF-8 string. On error, an Exception is thrown.
"""
_str = ffi.new("char *")
_str = lib.message_to_string(msg)
if _str == ffi.NULL:
raise Exception('Error converting message to string.')
return ffi.string(_str).decode('utf8')
def message_from_base64(b64):
"""
Imports a message from a Base64 string.
Parameters:
b64: The Base64 string.
Returns:
A message data structure. On error, an Exception is thrown.
"""
b = base64.b64decode(b64)
msg = lib.message_from_bytes(b, len(b))
if msg == ffi.NULL:
raise Exception('Error parsing message the given Base64 string.')
return msg
def message_to_base64(msg):
"""
Exports the given message object to a Base64 string.
Parameters:
msg: The message to export to a string.
Returns:
A Base64 string. On error, an Exception is thrown.
"""
_str = ffi.new("char *")
_str = lib.message_to_base64(msg)
if _str == ffi.NULL:
raise Exception('Error converting message to a Base64 string.')
return ffi.string(_str).decode('utf8')
```
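A small sketch tying the message helpers above together; it only requires the compiled _groupsig extension to be importable.
```python
from pygroupsig import message

def roundtrip_message(text):
    msg = message.message_from_string(text)      # UTF-8 string -> native message
    b64 = message.message_to_base64(msg)         # native message -> Base64 string
    return message.message_from_base64(b64)      # Base64 string -> native message
```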
#### File: python/pygroupsig/mgr_key_build.py
```python
from pygroupsig.common_build import ffibuilder
ffibuilder.cdef("""
typedef groupsig_key_init_f mgr_key_init_f;
""")
ffibuilder.cdef("""
typedef groupsig_key_free_f mgr_key_free_f;
""")
ffibuilder.cdef("""
typedef groupsig_key_copy_f mgr_key_copy_f;
""")
ffibuilder.cdef("""
typedef groupsig_key_get_size_f mgr_key_get_size_f;
""")
ffibuilder.cdef("""
typedef groupsig_key_prv_get_f mgr_key_prv_get_f;
""")
ffibuilder.cdef("""
typedef groupsig_key_pub_get_f mgr_key_pub_get;
""")
ffibuilder.cdef("""
typedef groupsig_key_prv_set_f mgr_key_prv_set_f;
""")
ffibuilder.cdef("""
typedef groupsig_key_pub_set_f mgr_key_pub_set_f;
""")
ffibuilder.cdef("""
typedef groupsig_key_export_f mgr_key_export_f;
""")
ffibuilder.cdef("""
typedef groupsig_key_pub_export_f mgr_key_pub_export_f;
""")
ffibuilder.cdef("""
typedef groupsig_key_prv_export_f mgr_key_prv_export_f;
""")
ffibuilder.cdef("""
typedef groupsig_key_import_f mgr_key_import_f;
""")
ffibuilder.cdef("""
typedef groupsig_key_prv_import_f mgr_key_prv_import_f;
""")
ffibuilder.cdef("""
typedef groupsig_key_pub_import_f mgr_key_pub_import_f;
""")
ffibuilder.cdef("""
typedef groupsig_key_to_string_f mgr_key_to_string_f;
""")
ffibuilder.cdef("""
typedef groupsig_key_prv_to_string_f mgr_key_prv_to_string_f;
""")
ffibuilder.cdef("""
typedef groupsig_key_pub_to_string_f mgr_key_pub_to_string_f;
""")
ffibuilder.cdef("""
typedef struct {
uint8_t code;
mgr_key_init_f init;
mgr_key_free_f free;
mgr_key_copy_f copy;
mgr_key_export_f gexport;
mgr_key_import_f gimport;
mgr_key_to_string_f to_string;
mgr_key_get_size_f get_size;
} mgr_key_handle_t;
""")
ffibuilder.cdef("""
const mgr_key_handle_t* groupsig_mgr_key_handle_from_code(uint8_t code);
""")
ffibuilder.cdef("""
groupsig_key_t* groupsig_mgr_key_init(uint8_t code);
""")
ffibuilder.cdef("""
int groupsig_mgr_key_free(groupsig_key_t *key);
""")
ffibuilder.cdef("""
int groupsig_mgr_key_copy(groupsig_key_t *dst, groupsig_key_t *src);
""")
ffibuilder.cdef("""
int groupsig_mgr_key_get_size(groupsig_key_t *key);
""")
ffibuilder.cdef("""
int groupsig_mgr_key_export(
unsigned char **bytes,
uint32_t *size,
groupsig_key_t *key);
""")
ffibuilder.cdef("""
groupsig_key_t* groupsig_mgr_key_import(
uint8_t code,
unsigned char *source,
uint32_t size);
""")
ffibuilder.cdef("""
char* groupsig_mgr_key_to_string(groupsig_key_t *key);
""")
```
#### File: python/pygroupsig/signature_build.py
```python
from pygroupsig.common_build import ffibuilder
ffibuilder.cdef("""
typedef struct {
uint8_t scheme;
void *sig;
} groupsig_signature_t;
""")
ffibuilder.cdef("""
typedef groupsig_signature_t* (*groupsig_signature_init_f)(void);
""")
ffibuilder.cdef("""
typedef int (*groupsig_signature_free_f)(groupsig_signature_t *signature);
""")
ffibuilder.cdef("""
typedef int (*groupsig_signature_copy_f)(
groupsig_signature_t *dst,
groupsig_signature_t *src);
""")
ffibuilder.cdef("""
typedef int (*groupsig_signature_get_size_f)(
groupsig_signature_t *sig);
""")
ffibuilder.cdef("""
typedef int (*groupsig_signature_export_f)(
unsigned char **bytes,
uint32_t *size,
groupsig_signature_t *signature);
""")
ffibuilder.cdef("""
typedef groupsig_signature_t* (*groupsig_signature_import_f)(
unsigned char *source,
uint32_t size);
""")
ffibuilder.cdef("""
typedef char* (*groupsig_signature_to_string_f)(
groupsig_signature_t *signature);
""")
ffibuilder.cdef("""
typedef struct {
uint8_t scheme;
groupsig_signature_init_f init;
groupsig_signature_free_f free;
groupsig_signature_copy_f copy;
groupsig_signature_get_size_f get_size;
groupsig_signature_export_f gexport;
groupsig_signature_import_f gimport;
groupsig_signature_to_string_f to_string;
} groupsig_signature_handle_t;
""")
ffibuilder.cdef("""
const groupsig_signature_handle_t* groupsig_signature_handle_from_code(uint8_t code);
""")
ffibuilder.cdef("""
groupsig_signature_t* groupsig_signature_init(uint8_t code)
;""")
ffibuilder.cdef("""
int groupsig_signature_free(groupsig_signature_t *sig);
""")
ffibuilder.cdef("""
int groupsig_signature_copy(
groupsig_signature_t *dst,
groupsig_signature_t *src);
""")
ffibuilder.cdef("""
int groupsig_signature_get_size(
groupsig_signature_t *sig);
""")
ffibuilder.cdef("""
int groupsig_signature_export(
unsigned char **bytes,
uint32_t *size,
groupsig_signature_t *sig);
""")
ffibuilder.cdef("""
groupsig_signature_t* groupsig_signature_import(
uint8_t code,
unsigned char *source,
uint32_t size);
""")
ffibuilder.cdef("""
char* groupsig_signature_to_string(groupsig_signature_t *sig);
""")
```
#### File: python/pygroupsig/types_build.py
```python
from pygroupsig.common_build import ffibuilder
# Define data types
#extern log_t logger;
ffibuilder.cdef("""
#define IOK 0
""")
ffibuilder.cdef("""
#define IERROR 1
""")
ffibuilder.cdef("""
#define IFAIL 2
""")
ffibuilder.cdef("""
#define IEXISTS 3
""")
ffibuilder.cdef("""
typedef unsigned char byte_t;
""")
``` |
{
"source": "j-mak/smsfarm",
"score": 3
} |
#### File: smsfarm/tests/test_client.py
```python
import unittest.mock
import smsfarm
import smsfarm.core
import tests.helpers
class TestClient(unittest.TestCase):
def setUp(self):
self.client = smsfarm.Client("some-code", "some-id")
def test_recipients_exception(self):
with self.assertRaises(ValueError):
self.client.recipients = 123
def test_one_recipient_with_list(self):
self.client.recipients = ["900123456"]
self.assertEqual(self.client.recipients, "900123456")
def test_multiple_recipients_with_list(self):
self.client.recipients = ["900123456", '900654321']
recipients = self.client.recipients
self.assertEqual(recipients, "900123456,900654321")
def test_one_recipient_with_string(self):
self.client.recipients = "900123456"
self.assertEqual(self.client.recipients, "900123456")
def test_verify_if_set_properly(self):
self.assertEqual(self.client.sender, tests.helpers.get_hostname())
client = smsfarm.Client("some-code", "some-id", sender='smsfarm-sender')
self.assertEqual(client.sender, "smsfarm-sender")
# just for coverage completion
@unittest.mock.patch('smsfarm.Client._Client__get_credit')
def test_get_credit(self, mocked):
mocked.return_value = smsfarm.ApiResponse()
mocked.return_value.data = 9.23
response = self.client.get_credit()
self.assertEqual(response.data, 9.23)
@unittest.mock.patch('smsfarm.Client._Client__send_message')
def test_send_message(self, mocked):
mocked.return_value = smsfarm.ApiResponse()
mocked.return_value.data = 2410290
self.client.recipients = "900123456"
response = self.client.send_message("hello world!")
self.assertEqual(response.data, '2410290')
@unittest.mock.patch('smsfarm.Client._Client__get_all_message_statuses')
def test_all_message_statuses(self, mocked):
mocked.return_value = smsfarm.ApiResponse()
mocked.return_value.data = ['421900123456:MESSAGE-EXPIRED',
'421900654321:DELIVERED']
resp = self.client.get_all_message_statuses('12312312')
self.assertEqual(resp.data, {'421900654321': 'DELIVERED',
'421900123456': 'MESSAGE-EXPIRED'})
self.assertEqual(resp.success, True)
self.assertEqual(resp.failed, False)
def test_try_send_empty_message(self):
with self.assertRaises(ValueError):
self.client.send_message("")
def test_get_message_status_with_multiple_recipients(self):
self.client.recipients = ["421900123456", "421900654321"]
with self.assertRaises(ValueError):
self.client.get_message_status("12345678")
@unittest.mock.patch('smsfarm.Client._Client__get_message_status')
def test_get_message_status_without_recipient(self, mocked_response):
mocked_response.return_value = smsfarm.ApiResponse()
mocked_response.return_value.data = "DELIVERED"
self.client.recipients = ["421900123456"]
response = self.client.get_message_status("123456")
self.assertEqual(response.data, "DELIVERED")
@unittest.mock.patch('smsfarm.Client._Client__get_message_status')
def test_get_message_status_with_recipient(self, mocked_response):
mocked_response.return_value = smsfarm.ApiResponse()
mocked_response.return_value.data = "DELIVERED"
response = self.client.get_message_status("123456", "421900123456")
self.assertEqual(response.data, "DELIVERED")
@unittest.mock.patch('smsfarm.Client._Client__send_scheduled_message')
def test_send_scheduled_message(self, mocked_response):
mocked_response.return_value = smsfarm.ApiResponse()
mocked_response.return_value.data = 2410290
self.client.recipients = "900123456"
send_date = '2018-01-01 00:00'
response = self.client.send_scheduled_message("Hello World!", send_date)
self.assertEqual(response.data, "2410290")
def test_send_scheduled_message_with_invalid_time(self):
self.client.recipients = "900123456"
with self.assertRaises(ValueError):
msg = "Hello World!"
self.client.send_scheduled_message(msg, "2018-12-0 25:00")
@unittest.mock.patch('smsfarm.Client._Client__send_message')
def test_failed_send_message(self, mocked_response):
mocked_response.return_value = smsfarm.ApiResponse()
mocked_response.return_value.error = 'SomeSOAPError'
self.client.recipients = "900123456"
response = self.client.send_message("Hello World!")
self.assertEqual(response.error, "SomeSOAPError")
self.assertFalse(response.success)
self.assertTrue(response.failed)
``` |
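A hedged sketch of the client surface these tests exercise; the integration code, ID and sender are placeholders for real smsfarm credentials.
```python
import smsfarm

def send_greeting():
    client = smsfarm.Client("integration-code", "integration-id", sender="my-host")
    client.recipients = ["900123456", "900654321"]
    response = client.send_message("hello world!")
    return response.data if response.success else response.error
```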
{
"source": "jmaksymc/DeepRec-1",
"score": 2
} |
#### File: SOK/DLRM/train_stand.py
```python
import os
import argparse
import tensorflow as tf
import sys, os
import numpy as np
sys.path.append(
os.path.abspath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../../../sparse_operation_kit/"
)
)
)
import sparse_operation_kit as sok
import model.utils as utils
from model.models import DLRM
import model.strategy_wrapper as strategy_wrapper
from model.dataset import BinaryDataset, BinaryDataset2
import time
import sys
def main(args):
comm_options = None
if args.distributed_tool == "onedevice":
import horovod.tensorflow as hvd
hvd.init()
avaiable_cuda_devices = ",".join(
[str(gpu_id) for gpu_id in range(args.gpu_num)]
)
os.environ["CUDA_VISIBLE_DEVICES"] = avaiable_cuda_devices
strategy = strategy_wrapper.OneDeviceStrategy()
args.task_id = 0
elif args.distributed_tool == "horovod":
import horovod.tensorflow as hvd
hvd.init()
strategy = strategy_wrapper.HorovodStrategy()
args.task_id = hvd.local_rank()
args.gpu_num = hvd.size()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.task_id)
else:
raise ValueError(
f"{args.distributed_tool} is not supported."
f"Can only be one of {'onedevice', 'horovod'}"
)
with strategy.scope():
if args.embedding_layer == "SOK":
sok_init_op = sok.Init(global_batch_size=args.global_batch_size)
model = DLRM(
vocab_size=args.vocab_size_list,
num_dense_features=args.num_dense_features,
embedding_layer=args.embedding_layer,
embedding_vec_size=args.embedding_vec_size,
bottom_stack_units=args.bottom_stack,
top_stack_units=args.top_stack,
num_gpus = hvd.size(),
comm_options=comm_options,
)
lr_callable = utils.get_lr_callable(
global_batch_size=args.global_batch_size,
decay_exp=args.decay_exp,
learning_rate=args.learning_rate,
warmup_steps=args.warmup_steps,
decay_steps=args.decay_steps,
decay_start_steps=args.decay_start_steps,
)
embedding_optimizer = utils.get_optimizer(args.embedding_optimizer)
embedding_optimizer.learning_rate = lr_callable
dense_optimizer = utils.get_optimizer("SGD")
dense_optimizer.learning_rate = lr_callable
batch_size = (
args.global_batch_size
if args.distributed_tool == "onedevice"
else args.global_batch_size // args.gpu_num
)
train_dataset = BinaryDataset2(
os.path.join(args.train_file_pattern, "label.bin"),
os.path.join(args.train_file_pattern, "dense.bin"),
os.path.join(args.train_file_pattern, "category.bin"),
batch_size= batch_size,
drop_last=True,
prefetch=10,
global_rank=hvd.rank(),
global_size=hvd.size(),
)
val_dataset = BinaryDataset(
os.path.join(args.test_file_pattern, "label.bin"),
os.path.join(args.test_file_pattern, "dense.bin"),
os.path.join(args.test_file_pattern, "category.bin"),
batch_size=batch_size,
drop_last=True,
prefetch=10,
global_rank=hvd.rank(),
global_size=hvd.size(),
)
loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction="none")
def _replica_loss(labels, logits):
loss = loss_fn(labels, logits)
return tf.nn.compute_average_loss(
loss, global_batch_size=args.global_batch_size
)
def _train_step(dense, category, labels, first_batch=False):
def _step_fn(dense, category, labels):
logits = model(dense, category, training=True)
loss = _replica_loss(labels, logits)
emb_vars, other_vars = utils.split_embedding_variables_from_others(model)
grads = tf.gradients(loss, emb_vars + other_vars, colocate_gradients_with_ops=True, unconnected_gradients=tf.UnconnectedGradients.NONE)
emb_grads, other_grads = grads[: len(emb_vars)], grads[len(emb_vars) :]
with tf.control_dependencies([*emb_grads]):
emb_train_op = utils.apply_gradients(embedding_optimizer,emb_vars, emb_grads, args.embedding_layer == "SOK")
if args.embedding_layer != "SOK":
emb_grads = strategy.reduce("sum", emb_grads)
with tf.control_dependencies([*other_grads]):
other_grads = strategy.reduce("sum", other_grads)
other_train_op = utils.apply_gradients(dense_optimizer, other_vars, other_grads, False)
if first_batch:
strategy.broadcast_variables(other_vars)
strategy.broadcast_variables(dense_optimizer.variables())
if args.embedding_layer == "TF":
strategy.broadcast_variables(emb_vars)
strategy.broadcast_variables(embedding_optimizer.variables())
with tf.control_dependencies([emb_train_op, other_train_op]):
loss = strategy.reduce("sum", loss)
loss = tf.identity(loss)
return loss
return strategy.run(_step_fn, dense, category, labels)
dense = tf.placeholder(tf.float32, shape = [batch_size, 13])
category = tf.placeholder(tf.float32, shape = [batch_size, 26])
labels = tf.placeholder(tf.int32, shape = [batch_size, 1])
total_loss_first = _train_step(dense, category, labels, True)
total_loss = _train_step(dense, category, labels, False)
probs = model(dense, category, training=False)
auc,update_op = tf.metrics.auc(labels = labels, predictions = probs, num_thresholds=8000, curve='ROC')
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
config = tf.ConfigProto()
config.log_device_placement = False
with tf.Session(config=config) as sess:
if args.embedding_layer == "SOK":
sess.run(sok_init_op)
sess.run([init_op])
t = time.time()
run_time = 0
iteration_time, dataload_time = [], []
dataload_start = time.time()
for step, (dense_, category_, labels_) in enumerate(train_dataset):
iteration_start = time.time()
dataload_time.append(time.time() - dataload_start)
if step == 0:
loss_v = sess.run([total_loss_first], feed_dict = {dense:dense_, category:category_, labels:labels_})
else:
loss_v = sess.run([total_loss], feed_dict = {dense:dense_, category:category_, labels:labels_})
iteration_time.append(time.time() - iteration_start)
if step > 0 and step % 100 == 0:
print('Iteration:%d\tloss:%.6f\ttime:%.2fs\tAvg:%.2fms/iter\tdataload:%.2fms/iter'%(step, loss_v[0], time.time() - t,
1000*sum(iteration_time)/len(iteration_time),
1000*sum(dataload_time)/len(dataload_time)))
run_time += time.time() - t
t = time.time()
iteration_time = []
if (step > 0 and step % 10000 == 0) or step == (len(train_dataset) - 1):
eval_t = time.time()
for step, (dense_, category_, labels_) in enumerate(val_dataset):
auc_value, _ = sess.run([auc,update_op], feed_dict = {dense:dense_, category:category_, labels:labels_})
print('Evaluate in %dth iteration, time:%.2fs, AUC: %.6f'%(step, time.time() - eval_t, auc_value))
t += (time.time() - eval_t)
dataload_start = time.time()
print('Training time: %.2fs'%(run_time + time.time() - t))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--global_batch_size", type=int, required=True)
parser.add_argument("--train_file_pattern", type=str, required=True)
parser.add_argument("--test_file_pattern", type=str, required=True)
parser.add_argument("--embedding_layer", type=str, choices=["TF", "SOK"], required=True)
parser.add_argument("--embedding_vec_size", type=int, required=True)
parser.add_argument("--embedding_optimizer", type=str, required=False, default="SGD")
parser.add_argument("--bottom_stack", type=int, nargs="+", required=True)
parser.add_argument("--top_stack", type=int, nargs="+", required=True)
parser.add_argument("--distributed_tool",type=str,choices=["onedevice", "horovod"],required=True,)
parser.add_argument("--gpu_num", type=int, required=False, default=1)
parser.add_argument("--decay_exp", type=int, required=False, default=2)
parser.add_argument("--learning_rate", type=float, required=False, default=1.25)
parser.add_argument("--warmup_steps", type=int, required=False, default=-1)
parser.add_argument("--decay_steps", type=int, required=False, default=30000)
parser.add_argument("--decay_start_steps", type=int, required=False, default=70000)
args = parser.parse_args()
args.vocab_size_list = [
39884406, 39043, 17289, 7420, 20263, 3,
7120, 1543, 63, 38532951, 2953546, 403346,
10, 2208, 11938, 155, 4, 976,
14, 39979771, 25641295, 39664984, 585935, 12972,
108, 36
]
if args.distributed_tool == "onedevice":
args.vocab_size_list = [int(num/8)+1 for num in args.vocab_size_list]
args.num_dense_features = 13
main(args)
``` |
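One possible way to launch the script above on 8 GPUs with Horovod; only the flag names come from the argparse definition, while the paths and layer sizes are placeholders and horovodrun must be available on PATH.
```python
import subprocess

def launch_training():
    subprocess.run([
        "horovodrun", "-np", "8",
        "python", "train_stand.py",
        "--distributed_tool", "horovod",
        "--embedding_layer", "SOK",
        "--embedding_vec_size", "128",
        "--bottom_stack", "512", "256", "128",
        "--top_stack", "1024", "1024", "512", "256", "1",
        "--global_batch_size", "65536",
        "--train_file_pattern", "/data/train",
        "--test_file_pattern", "/data/test",
    ], check=True)
```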
{
"source": "jmalacho/ansible-examples",
"score": 2
} |
#### File: ansible-examples/filter_plugins/users.py
```python
def daysSinceEpoc( _unused=0 ):
import datetime
return (datetime.datetime.utcnow() - datetime.datetime(1970,1,1)).days
# Boilerplate code to add filter to Jinja2
class FilterModule(object):
def filters(self):
return { 'daysSinceEpoc': daysSinceEpoc,
}
``` |
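A quick way to exercise the custom filter outside of Ansible; in a playbook it would appear as a Jinja2 filter such as {{ 0 | daysSinceEpoc }}. The flat import assumes the plugin file is on the Python path.
```python
from users import FilterModule

def days_since_epoch():
    filters = FilterModule().filters()
    return filters['daysSinceEpoc']()   # integer days since 1970-01-01 (UTC)
```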
{
"source": "jmaldon1/Crypto_wallet",
"score": 2
} |
#### File: Crypto_wallet/sim/runtest.py
```python
import os
import sys
import getopt
import shutil
def printProgramBanner():
print("--------------------------------")
print("Starting Python runtest script")
print("--------------------------------")
def printUsage():
print("test.py -h -g -v -G <gatesim_Type> -t <testname>")
def createSimDir(directory):
    # create a clean simulation directory for the testcase (removes any previous run)
testpath = os.path.join(os.curdir, directory)
if (os.path.isdir(testpath)):
print("Directory exists, deleteing old " + directory)
shutil.rmtree(directory)
print("Making new directory " + directory)
os.mkdir(directory)
def convertBDF(testdir):
RTL_FOLDER_PATH = os.path.join(os.curdir, "..", "firmware")
RTL_SIM_FOLDER_PATH = os.path.join(
os.curdir, testdir, "autogenerated_from_bdf_for_sim_only")
QUARTUS_EXE_PATH = os.path.abspath(os.path.join(
os.sep, "intelFPGA_lite", "18.1", "quartus", "bin64", "quartus_map.exe"))
print("Copying BDF RTL files from RTL directory")
for basename in os.listdir(RTL_FOLDER_PATH):
if basename.endswith(".bdf"):
pathname = os.path.join(RTL_FOLDER_PATH, basename)
if os.path.isfile(pathname):
shutil.copy2(pathname, RTL_SIM_FOLDER_PATH)
os.chdir(RTL_SIM_FOLDER_PATH)
for bdf_file in os.listdir(os.curdir):
print("\n--------------------------------")
print("Compiling " + bdf_file + " from bdf to VHDL...")
os.system(QUARTUS_EXE_PATH +
" --read_settings_files=on --write_settings_files=off dco_synth -c dco_synth --convert_bdf_to_vhdl=" + bdf_file)
print("--------------------------------\n")
os.chdir(os.path.join("..", ".."))
def cleanTestDir(testcase):
if os.path.isfile(testcase + ".FAIL"):
shutil.rmtree(testcase + ".FAIL")
if os.path.isfile(testcase + ".pass"):
shutil.rmtree(testcase + ".pass")
if os.path.isfile("vsim.wlf"):
shutil.rmtree("vsim.wlf")
if os.path.isfile("vsim.log"):
shutil.rmtree("vsim.log")
if os.path.isdir("work"):
shutil.rmtree("work")
def compileToVHDL(src, cover):
# VHDL compiler command
# vcom -f $src -2008 $cover -l compile.log -lint -nologo
print("vcom -f " + src + " -2008 " + cover + "-l compile.log -lint -nologo")
os.system("vcom -f " + src + " -2008 " +
cover + "-l compile.log -lint -nologo")
def compileToVerilog(src, cover):
# Verilog compiler command
# vlog -f $src $cover -l compile.log -lint -nologo -sv $define
print("vlog -f " + src + " " + cover + " -l compile.log -lint -nologo")
os.system("vlog -f " + src + " " + cover + " -l compile.log -lint -nologo")
def runSim(testcase, gui, coverstore, sdf, log, vcd, wave, quit, wavefile):
# print("vsim tb_" + testcase + " " + gui + " " + coverstore + " " + sdf + "-testname " + testcase +
# " -do \"" + log + "; " + vcd + "; " + wave + "; run -a; " + quit + "\" -l vsim.log " + wavefile)
# os.system("vsim tb_" + testcase + " " + gui + " " + coverstore + " " + sdf + "-testname " + testcase +
# " -do \"" + log + "; " + vcd + "; " + wave + "; run -a; " + quit + "\" -l vsim.log " + wavefile)
print("vsim tb_" + testcase + " " + gui + " " + coverstore + " " + sdf + "-testname " + testcase +
" -do \"" + log + "; " + vcd + "; " + wave + "; run -a; " + quit + "\" -l vsim.log ")
os.system("vsim tb_" + testcase + " " + gui + " " + coverstore + " " + sdf + "-testname " + testcase +
" -do \"" + log + "; " + vcd + "; " + wave + "; run -a; " + quit + "\" -l vsim.log ")
def main(argv):
printProgramBanner()
print("Initializing Options...")
# initialize options
define = "+define+SIM=1"
gui = "-c"
quit = "quit"
runTheSim = 1
cover = ""
coverstore = ""
vcd = ""
wave = ""
wavefile = "-wlf vsim.wlf"
log = "log -r /*"
sdf = ""
src = os.path.join("..", "testbench.f")
testcase = ""
compileBDF = False
# setup getopt
try:
opts, args = getopt.getopt(
argv, "hCcgvGb:t:", ["help=", "compile=", "coverage=", "gui=", "vcd=", "Gatesim=", "bdf=", "test="])
except getopt.GetoptError:
printUsage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
printUsage()
sys.exit()
elif opt in ("-b", "--bdf"):
compileBDF = True
elif opt in ("-C", "--compile"):
runTheSim = 0
elif opt in ("-c", "--coverage"):
print("-c is not supported")
printUsage()
sys.exit()
elif opt in ("-g", "--gui"):
gui = "-gui"
quit = ""
elif opt in ("-v", "--vcd"):
vcd = "vcd file vsim.vcd"
elif opt in ("-G", "--Gatesim"):
pass
elif opt in ("-t", "--test"):
testcase = arg
src = os.path.join("..", testcase + ".f")
wavefile = os.path.join("..", testcase + "_wave.do")
else:
print("ERROR: unknown option" + opt)
printUsage()
sys.exit(2)
createSimDir(testcase)
createSimDir(os.path.join(testcase, "autogenerated_from_bdf_for_sim_only"))
if (compileBDF == True):
convertBDF(testcase)
os.chdir(testcase)
cleanTestDir(testcase)
os.system("vlib work")
# compileToVHDL(src, cover)
compileToVerilog(src, cover)
# sim command
if (runTheSim == 1):
runSim(testcase, gui, coverstore, sdf, log, vcd, wave, quit, wavefile)
os.chdir("..")
if __name__ == "__main__":
main(sys.argv[1:])
``` |
{
"source": "jmaldon1/Musher",
"score": 2
} |
#### File: jmaldon1/Musher/conanfile.py
```python
from conans import ConanFile, CMake
class MusherConan(ConanFile):
name = "musher"
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
_cmake = None
def requirements(self):
self.requires("gtest/[>=1.10.0]")
# self.requires("functionalplus/v0.2.10-p0@dobiasd/stable")
def set_version(self):
self.version = "0.0.1"
def _configure_cmake(self):
if self._cmake:
return self._cmake
cmake = CMake(self)
cmake.definitions["CMAKE_BUILD_TYPE"] = self.settings.build_type
cmake.definitions["ENABLE_CONAN_INSTALL"] = False
cmake.definitions["ENABLE_PACKAGE_BUILD"] = True
cmake.configure()
self._cmake = cmake
return cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
self.cpp_info.libs = []
def imports(self):
self.copy("*.dll", dst="bin", src="bin")
```
#### File: jmaldon1/Musher/setup.py
```python
import os
import platform
import subprocess
import shutil
import glob
import signal
from typing import Callable
from setuptools import setup, find_packages, Extension, Command
from setuptools.command.test import test
import pybind11
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
def get_build_dir() -> str:
"""Get the build directory that will store all C/C++ extension by-products.
Returns:
str: build directory path.
"""
return os.path.join(ROOT_DIR, "build")
# pylint: disable=no-self-use
# pylint: disable=unnecessary-pass
# pylint: disable=attribute-defined-outside-init
class CleanProject(Command):
"""Clean the project directory of temporary files involved with
building the C extension and python module.
This will also uninstall the development version of musher.
"""
user_options = []
def initialize_options(self):
"""Initialize user options
"""
pass
def finalize_options(self):
"""Finalize user options
"""
pass
def run(self):
"""Run the clean
"""
cleanup_list = [
# Cmake
os.path.join(ROOT_DIR, "build"),
# Python
os.path.join(ROOT_DIR, "dist"),
os.path.join(ROOT_DIR, "musher.egg-info"),
os.path.join(ROOT_DIR, ".eggs"),
os.path.join(ROOT_DIR, ".pytest_cache"),
os.path.join(ROOT_DIR, ".tox"),
*glob.glob(os.path.join(ROOT_DIR, "musher", "*.so")),
# Doxygen
os.path.join(ROOT_DIR, "docs", "html"),
os.path.join(ROOT_DIR, "docs", "latex"),
os.path.join(ROOT_DIR, "docs", "xml"),
# Sphinx
os.path.join(ROOT_DIR, "docs", "build")
]
for item in cleanup_list:
try: # If item is a dir then remove it
shutil.rmtree(item)
print("deleted {}".format(item))
except OSError:
try: # If item is a file then remove it
os.remove(item)
print("deleted {}".format(item))
except OSError:
pass
print(u'\u2713', "cleaning done")
class PublishDocs(Command):
"""Publish the generated documentation to Github pages
You must install the python module and generate the docs BEFORE running this command.
1. pip install -e .
2. python setup.py cmake --docs
Example usage:
python setup.py publish_docs -m "Added docs for new function"
Helpful reads:
https://discourse.gohugo.io/t/simple-deployment-to-gh-pages/5003
"""
user_options = [
('message=', 'm', "Commit message when publishing docs."),
]
def initialize_options(self):
"""Initialize user options
"""
self.message = "Updated docs"
def finalize_options(self):
"""Finalize user options
"""
pass
def copytree(self, src: str, dst: str, ignore: Callable = None):
"""copytree for older python versions that don't have the latest
version of shutil.copytree(dirs_exist_ok=True)
"""
if os.path.isdir(src):
if not os.path.isdir(dst):
os.makedirs(dst)
files = os.listdir(src)
if ignore is not None:
ignored = ignore(src, files)
else:
ignored = set()
for _file in files:
if _file not in ignored:
self.copytree(os.path.join(src, _file),
os.path.join(dst, _file),
ignore)
else:
shutil.copyfile(src, dst)
def run(self):
"""Publish to github pages
"""
sphinx_build_dir = os.path.join(ROOT_DIR, 'build', 'docs', 'sphinx')
temp_gh_pages_dir = os.path.join(ROOT_DIR, 'temp_gh_pages')
git_worktree = os.path.join(
ROOT_DIR, '.git', 'worktrees', "temp_gh_pages")
nojekyll = os.path.join(temp_gh_pages_dir, ".nojekyll")
# Cleanup to ensure we are starting fresh.
try:
shutil.rmtree(temp_gh_pages_dir)
except OSError:
pass
subprocess.run(['git', 'worktree', 'prune'], cwd=ROOT_DIR, check=True)
try:
shutil.rmtree(git_worktree)
except OSError:
pass
print("\nAdding gh-pages branch as a worktree...")
subprocess.run(['git', 'worktree', 'add', '-B', 'gh-pages',
temp_gh_pages_dir, 'origin/gh-pages'], cwd=ROOT_DIR, check=True)
# Pull to ensure we are up to date.
subprocess.run(['git', 'pull'], cwd=temp_gh_pages_dir, check=True)
# Copy newly generated docs
print("\nCopying newly generated docs to git branch...")
try:
shutil.copytree(sphinx_build_dir, temp_gh_pages_dir,
dirs_exist_ok=True)
except TypeError:
# Not using python3.8+
self.copytree(sphinx_build_dir, temp_gh_pages_dir)
# Add a .nojekyll files to ensure gh-pages uses index.html as the root of the site.
open(nojekyll, 'w').close()
subprocess.run(['git', 'add', '--all'],
cwd=temp_gh_pages_dir, check=True)
try:
print("\nCommitting changes...")
subprocess.run(['git', 'commit', '-m', self.message],
cwd=temp_gh_pages_dir, check=True)
print("\nPushing to github...")
subprocess.run(['git', 'push', 'origin', 'gh-pages'],
cwd=temp_gh_pages_dir, check=True)
except subprocess.CalledProcessError as err:
if err.returncode != 1:
# returncode == 1 means the branch is up to date.
raise err
shutil.rmtree(temp_gh_pages_dir)
subprocess.run(['git', 'worktree', 'prune'], cwd=ROOT_DIR, check=True)
class CMakeBuild(Command):
"""
Debug (with tests): python setup.py cmake --debug
Release (no tests): python setup.py cmake
Generate docs: python setup.py cmake --docs
"""
description = 'Build the C++ code with various options.'
user_options = [
('debug', 'd', "Compile in debug mode."),
('docs', 'r', "Generate documenation for C++ and Python."),
]
def initialize_options(self):
"""Initialize user options
"""
self.debug = False
self.docs = False
def finalize_options(self):
"""Finalize user options
"""
pass
def run(self):
"""Build the C++ code.
Raises:
RuntimeError: Error if Cmake is not installed.
"""
try:
subprocess.check_output(["cmake", "--version"])
except OSError as err:
raise RuntimeError(
"CMake must be installed to build musher") from err
build_dir = get_build_dir()
cmake_args = []
if self.debug:
cmake_args += ["-DCMAKE_BUILD_TYPE=Debug", "-DENABLE_TESTS=On"]
else:
cmake_args += ["-DCMAKE_BUILD_TYPE=Release"]
if self.docs:
cmake_args += ["-DGENERATE_DOCS=On"]
else:
cmake_args += ["-DGENERATE_DOCS=Off"]
if not os.path.exists(build_dir):
os.makedirs(build_dir)
subprocess.run(['cmake', ROOT_DIR] + cmake_args,
cwd=build_dir,
check=True)
subprocess.run(['cmake', '--build', '.'],
cwd=build_dir,
check=True)
class CTest(test):
"""Run tests if compiled (only through DEBUG)
python setup.py cmake --debug
python setup.py ctest
"""
def run(self):
build_dir = get_build_dir()
if platform.system().lower() == "windows":
build_dir_win_debug = os.path.join(build_dir, "Debug")
if os.path.isdir(build_dir_win_debug):
build_dir = build_dir_win_debug
try:
result = subprocess.run(['ctest', "--output-on-failure"],
cwd=build_dir,
check=True,
stderr=subprocess.PIPE)
except FileNotFoundError:
# cwd was not found.
print("HINT: Did you compile the code first? "
"(python setup.py cmake --debug)")
raise
if result.stderr:
print(result.stderr.decode("utf-8"))
print("HINT: Did you compile the code in debug mode first? "
"(python setup.py cmake --debug)")
if result.returncode == -signal.SIGSEGV:
print("C++ Seg fault.")
class GTest(test):
r"""Run tests if compiled (only through DEBUG)
python setup.py cmake --debug
python setup.py gtest
example using filters:
python setup.py gtest --g=--gtest_filter=HPCP.\*
Attributes:
gtest_options (str): Any gtest options that should be used when running the tests.
"""
description = 'Run google tests for c++ library'
user_options = [
('gtest-options=', 'g',
'Any google test args that need to be passed through when building tests.'),
]
def initialize_options(self):
self.gtest_options = ""
def finalize_options(self):
pass
def run(self):
bin_dir = os.path.join(get_build_dir(), "bin")
if platform.system().lower() == "windows":
bin_dir_win_debug = os.path.join(bin_dir, "Debug")
if os.path.isdir(bin_dir_win_debug):
bin_dir = bin_dir_win_debug
try:
result = subprocess.run(['./musher-core-test', self.gtest_options],
cwd=bin_dir,
check=True)
except FileNotFoundError:
# Test executable not found / cwd not found.
print("HINT: Did you compile the code in debug mode first? "
"(python setup.py cmake --debug)")
raise
        if result.returncode == -signal.SIGSEGV:
print("C++ Seg fault.")
def extra_compile_args() -> list:
"""Platform dependent extras
Returns:
list: Extra compile arguments
"""
args = []
if platform.system() == 'Darwin':
# Something with OS X Mojave causes libstd not to be found
args += ['-mmacosx-version-min=10.12']
if os.name != 'nt':
if platform.machine() == 'i686':
# This makes GCC generate modern SSE2 instructions
# that give the exact IEEE 754 floating-point semantics.
# Basically ensures that calculations using produce the
# same results on all 32bit linux architecture.
args += ['-msse2']
args += ['-std=c++14']
return args
def extra_link_args() -> list:
"""Platform dependent extras
Returns:
list: Extra link arguments
"""
args = []
if platform.system() == 'Darwin':
# Something with OS X Mojave causes libstd not to be found
args += ['-stdlib=libc++', '-mmacosx-version-min=10.12']
return args
setup(
ext_modules=[
Extension(
'musher.musher_python', # Destination of .so
include_dirs=[
# https://caligari.dartmouth.edu/doc/ibmcxx/en_US/doc/complink/tasks/tuinclud.htm
# Allows for root level imports within C++
ROOT_DIR,
# 3rd party libraries
"src/third-party",
pybind11.get_include()
],
sources=[
'src/python/module.cpp',
'src/python/wrapper.cpp',
'src/python/utils.cpp',
'src/core/audio_decoders.cpp',
'src/core/utils.cpp',
'src/core/key.cpp',
'src/core/hpcp.cpp',
'src/core/framecutter.cpp',
'src/core/windowing.cpp',
'src/core/peak_detect.cpp',
'src/core/spectral_peaks.cpp',
'src/core/spectrum.cpp',
'src/core/mono_mixer.cpp'
],
depends=[
'src/python/module.h',
'src/python/wrapper.h',
'src/python/utils.h',
'src/core/audio_decoders.h',
'src/core/utils.h',
                'src/core/key.h',
'src/core/hpcp.h',
'src/core/framecutter.h',
'src/core/windowing.h',
'src/core/peak_detect.h',
'src/core/spectral_peaks.h',
'src/core/spectrum.h',
'src/core/mono_mixer.h'
],
extra_compile_args=extra_compile_args(),
extra_link_args=extra_link_args(),
)
],
cmdclass={
"cmake": CMakeBuild,
"ctest": CTest,
"gtest": GTest,
"clean": CleanProject,
"publish_docs": PublishDocs
},
setup_requires=['wheel', 'cython', 'setuptools_scm>=4.1.2', 'pybind11>=2.6.0', 'numpy>=1.18.5'],
use_scm_version=True
)
```
#### File: Musher/tests/conftest.py
```python
import os
import pytest
@pytest.fixture()
def test_data_dir() -> str:
"""Test data directory that stores some files for testing purposes.
Returns:
str: Test data directory.
"""
tests_dir_path = os.path.abspath(os.path.dirname(__file__))
return os.path.join(tests_dir_path, "..", "data")
```
#### File: Musher/tests/test_hpcp.py
```python
import numpy as np
import musher
def test_hpcp():
tone = 100.
frequencies = [tone, tone * 2, tone * 3, tone * 4]
magnitudes = [1., 1., 1., 1.]
harmonics = 3
band_preset = False
min_frequency = 50.0
max_frequency = 500.0
actual_hpcp = musher.hpcp(frequencies,
magnitudes,
harmonics=harmonics,
band_preset=band_preset,
min_frequency=min_frequency,
max_frequency=max_frequency)
expected_hpcp = [0.,
0.,
0.,
0.13404962,
0.,
0.24760914,
0.,
0.,
0.,
0.,
1.,
0.]
assert np.allclose(actual_hpcp, expected_hpcp, rtol=1e-8)
def test_hpcp_from_peaks():
buffer = [0.] * (400 + 1)
buffer[100] = 1.
buffer[200] = 1.
buffer[300] = 1.
buffer[400] = 1.
harmonics = 3
band_preset = False
min_frequency = 50.0
max_frequency = 500.0
spectral_peaks = musher.spectral_peaks(buffer, sample_rate=0)
actual_hpcp = musher.hpcp_from_peaks(spectral_peaks,
harmonics=harmonics,
band_preset=band_preset,
min_frequency=min_frequency,
max_frequency=max_frequency)
expected_hpcp = [0.,
0.,
0.,
0.13404962,
0.,
0.24760914,
0.,
0.,
0.,
0.,
1.,
0.]
assert np.allclose(actual_hpcp, expected_hpcp, rtol=1e-8)
``` |
{
"source": "jmaldon1/plugin-host-api",
"score": 3
} |
#### File: plugin-host-api/src/utils.py
```python
import os
import re
from urllib.parse import urlencode, urlunparse, urlparse
import toolz
import requests
from flask import request
def slurp(path: str) -> str:
"""
Reads a file given a relative or absolute path
"""
abs_path = os.path.abspath(path)
with open(abs_path, encoding="utf-8-sig") as file:
return file.read()
def create_headers(resp: requests.Response, request_params: dict, status_code: int) -> dict:
"""Create various custom headers that need to be added to a PostgREST response.
Args:
resp (requests.Response): PostgREST response.
request_params (dict): Request params.
status_code (int): Status code of the PostgREST response.
Returns:
dict: Headers.
"""
# Remove excluded headers
excluded_headers = ['content-encoding', 'transfer-encoding']
headers = {name: val for name, val in resp.raw.headers.items(
) if name.lower() not in excluded_headers}
if status_code >= 300:
return headers
assert "Content-Range" in headers, "Content-Range is missing from header?"
content_range_header = headers['Content-Range']
link_header = create_link_header(resp,
request_params,
content_range_header)
return {
**headers,
**link_header,
}
def create_link_header(resp: requests.Response,
request_params: dict,
content_range_header: str) -> dict:
"""Create the link header that will provide pagination for the client.
Args:
resp (requests.Response): PostgREST response.
request_params (dict): Request params.
content_range_header (dict): Content-Range header from postgREST.
Returns:
dict: Link header.
"""
link_header = []
limit_q = request_params.get("limit", None)
try:
limit = int(limit_q)
except (TypeError, ValueError):
# No limit found, just return
return {}
try:
# if _ is assigned, it will be the total length of the response
response_len, _ = content_range_header.split("/")
response_range = re.findall(r'\d+', response_len)
response_range_int = [int(i) for i in response_range]
total_range = (response_range_int[1] - response_range_int[0]) + 1
except IndexError:
# This will happen if we can't find a number value in the Content-Range header
return {}
results = resp.json()
if results and total_range == limit:
# When do we create a next link header?
# 1. If results has data
# 2. If the Content-Range is equal to the limit in the query
# ex. Content-Range=0-9/* and limit=10
last_item = results[-1]
last_id = last_item.get("int_id", None)
if last_id:
last_id = results[-1]["int_id"]
next_link = create_next_link_header(last_id, request_params)
link_header.append(next_link)
    if link_header:
        return {"Link": " ".join(link_header)}
return {}
def create_next_link_header(last_id: int, request_params: dict) -> str:
"""Create the next link header.
Args:
last_id (int): `int_id` of the last item returned in the current request.
request_params (dict): Request params.
Returns:
str: Next link.
"""
next_page_params = {**request_params, "int_id": f"gt.{last_id}"}
next_request_url = toolz.pipe(request.url,
urlparse,
lambda req_url: req_url._replace(
query=urlencode(next_page_params)),
urlunparse)
return f'<{next_request_url}>; rel="next"'
``` |
{
"source": "jmaldon1/Python-Course",
"score": 4
} |
#### File: jmaldon1/Python-Course/books.py
```python
class Book:
def __init__(self, title, author):
self.title = title
self.author = author
def __str__(self):
return '{} by {}'.format(self.title, self.author)
class Bookcase:
def __init__(self, books = None):
self.books = books
    # classmethods don't take self as their first argument,
    # but instead take the class that they are being called on
@classmethod
def create_bookcase(cls, book_list):
books = []
for title, author in book_list:
books.append(Book(title, author))
return cls(books)
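# Usage sketch (titles and authors here are made up, not from the course):
# case = Bookcase.create_bookcase([("Book One", "Author A"), ("Book Two", "Author B")])
# print(case.books[0])  # -> Book One by Author A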
```
#### File: jmaldon1/Python-Course/circle.py
```python
class Circle:
def __init__(self, diameter):
self.diameter = diameter
#properties can be called like attributes
@property
def radius(self):
return self.diameter / 2
#Allows you to change the radius property
#Cannot change a property without a setter
@radius.setter
def radius(self, radius):
self.diameter = radius * 2
small = Circle(10)
print(small.diameter) #10
print(small.radius) #5.0
#this is able to be changed because of the @radius.setter decorator
small.radius = 20
print(small.radius) #20
print(small.diameter) #40
```
#### File: jmaldon1/Python-Course/protected.py
```python
class Protected:
__name = "Security"
def __method(self):
return self.__name
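# Usage sketch (illustrative, not in the original file): double-underscore names
# are mangled to _Protected__name / _Protected__method, so they are still
# reachable through the mangled form:
# p = Protected()
# p._Protected__method()  # -> "Security"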
```
#### File: Python-Course/rpg(MultipleSuperClasses)/characters.py
```python
class Character:
def __init__(self, name, **kwargs):
self.name = name
for key, value in kwargs.items():
setattr(self, key, value)
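# Usage sketch (attribute names are illustrative, not from the course material):
# conan = Character("Conan", strength=10, weapon="sword")
# conan.strength  # -> 10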
```
#### File: jmaldon1/Python-Course/twosums.py
```python
class Solution:
    def twoSum(self, nums, target):
        # Use enumerate so duplicate values keep their own indices
        # (nums.index() always returns the first occurrence).
        for i, n in enumerate(nums):
            for j, p in enumerate(nums[i + 1:], start=i + 1):
                if n + p == target:
                    return [i, j]
        return None
print(Solution().twoSum([3, 3], 6))
```
#### File: jmaldon1/Python-Course/VariousLinkedListQuestions.py
```python
class Node:
def __init__(self, data=None):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def pushToFront(self, data):
new_node = Node(data)
new_node.next = self.head
self.head = new_node
def length(self):
cur = self.head
total = 0
while cur != None:
cur = cur.next
total += 1
return total
def pushAfterIndex(self, index, data):
new_node = Node(data)
cur = self.head
temp = cur
idx = 0
if index > self.length():
print("Index out of range")
return
elif self.length() == 0:
self.pushToFront(data)
return
while index != idx:
idx += 1
temp = cur.next
cur = cur.next
temp = temp.next
cur.next = new_node
cur.next.next = temp
def pushToEnd(self, data):
new_node = Node(data)
cur = self.head
while cur.next != None:
cur = cur.next
cur.next = new_node
cur.next.next = None
def deleteAtPos(self, index):
temp = self.head
cur = self.head
idx = 0
if index == 0:
self.head = temp.next
temp = None
return
elif index > self.length():
print("Index is out of range")
return
while idx != (index-1):
cur = cur.next
idx += 1
cur.next = cur.next.next
def reverse(self):
prev = None
cur = self.head
while cur != None:
next = cur.next
cur.next = prev
prev = cur
cur = next
self.head = prev
def rearrange(self):
slow = self.head
fast = slow.next
while fast != None and fast.next != None:
slow = slow.next
fast = fast.next.next
if self.length() == 1:
return self.head
secondhalf = slow.next
slow.next = None
firsthalf = self.head
self.head = secondhalf
self.reverse()
secondhalf = self.head
cur1 = firsthalf
next1 = cur1.next
cur2 = secondhalf
next2 = secondhalf.next
self.head = cur1
while next1 != None and next2 != None:
cur2.next = None
cur1.next = cur2
cur1 = cur1.next
cur1.next = next1
next1 = next1.next
cur1 = cur1.next
cur2 = next2
next2 = next2.next
if (self.length()) %2 == 0:
cur2.next = next1
cur1.next=cur2
def zigzag(self):
temp = Node(0)
temp.next = self.head
prev = temp
ptr1 = prev.next
flag = True #Flag = True means we are looking for A < B
while prev.next != None and prev.next.next != None:
if flag: #change A < B
if ptr1.data > ptr1.next.data:
ptr1 = prev.next
ptr2 = prev.next.next
prev.next = ptr2
ptr1.next = ptr2.next
ptr2.next = ptr1
prev = prev.next
flag = False
else:
prev = prev.next
ptr1 = ptr1.next
flag = False
else: #change A > B
if ptr1.data < ptr1.next.data:
ptr1 = prev.next
ptr2 = prev.next.next
prev.next = ptr2
ptr1.next = ptr2.next
ptr2.next = ptr1
prev = prev.next
flag = True
else:
prev = prev.next
ptr1 = ptr1.next
flag = True
self.head = temp.next
def moveAllOccurancesToEnd(self, k):
last = self.head
cur = self.head
prev = Node(0)
prev.next = self.head
while last.next != None:
last = last.next
temp = last
while cur != temp:
if cur.data == k:
prev.next = cur.next
cur.next = None
last.next = cur
last = last.next
if cur == self.head:
self.head = prev.next
cur = prev
prev = cur
cur = cur.next
def display(self):
cur = self.head
while cur != None:
print(cur.data)
cur = cur.next
print("length: {}".format(self.length()))
ll = LinkedList()
ll.pushToFront(10)
ll.pushToFront(3)
ll.pushToFront(6)
ll.pushToFront(7)
ll.pushToFront(6)
ll.pushToFront(6)
ll.display()
#ll.pushAfterIndex(1,7)
#ll.display()
#ll.pushToEnd(9)
#ll.display()
#ll.deleteAtPos(2)
#ll.display()
#ll.rearrange()
ll.moveAllOccurancesToEnd(6)
ll.display()
``` |
{
"source": "JMAlego/EMPR-Individual-Project",
"score": 3
} |
#### File: EMPR-Individual-Project/GroupCode/monitor_interface.py
```python
from threading import Semaphore
import sys, os
import serial
class MonitorInterface(object):
inst_index = 0
def __init__(self, tty_location="/dev/ttyACM0"):
try:
self.tty_handle = serial.Serial(tty_location, baudrate=230400, parity=serial.PARITY_NONE)
#self.tty_handle = os.open(tty_location, os.O_RDWR)
except IOError:
raise Exception("Unable to open TTY")
self.buffer = ""
self.packets = []
self.running = False
self.buffer_semaphore = Semaphore(0)
        self.inst_ind = MonitorInterface.inst_index
        MonitorInterface.inst_index += 1
def run(self):
self.running = True
while self.running:
#self.buffer += os.read(self.tty_handle, 16).decode("ascii").replace("\n","").replace("\r","").replace("\0","")
self.buffer += self.tty_handle.readline().decode("ascii", "ignore").replace("\n","").replace("\r","").replace("\0","")
exclamation_index = self.buffer.index("!") if "!" in self.buffer else -1
if(exclamation_index > 0):
self.buffer = self.buffer[exclamation_index:]
exclamation_count = self.buffer.count("!")
while(exclamation_count > 1):
second_exclamation_index = self.buffer[1:].index("!") + 1
packet_data = self.buffer[1:second_exclamation_index][:1024]
self.buffer = self.buffer[second_exclamation_index:]
last_value = 0
packet_len = len(packet_data)
packet = []
read_count = 0
success = True
while packet_len > 0:
read_count += 1
if read_count > 64:
success = False
break
if packet_data[0] == "X":
try:
run_length = int(packet_data[1:3], 16)
except Exception:
exclamation_count = self.buffer.count("!")
continue
packet_data = packet_data[3:]
packet += [last_value]*run_length
packet_len -= 3
else:
try:
slot_value = int(packet_data[:2], 16)
except Exception:
exclamation_count = self.buffer.count("!")
continue
packet.append(slot_value)
last_value = slot_value
packet_data = packet_data[2:]
packet_len -= 2
if len(packet) != 512:
exclamation_count = self.buffer.count("!")
continue
if success:
self.packets.append(packet)
self.buffer_semaphore.release()
exclamation_count = self.buffer.count("!")
self.buffer = ""
def stop(self):
self.running = False
self.buffer_semaphore.release()
def clean(self):
self.buffer = ""
self.packets = []
if __name__ == "__main__":
print("This module will not function properly if run, please run the core display file instead.")
monitor_test = MonitorInterface()
monitor_test.run()
``` |
{
"source": "JMAlego/Mayfair",
"score": 4
} |
#### File: JMAlego/Mayfair/mayfair.py
```python
from itertools import product
from string import ascii_uppercase as alphabet
from abc import ABC, abstractmethod
from typing import Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
VariableReference = int
class Variable:
"""A variable."""
_INDEX = 0
def __init__(self, pretty_name: Optional[str] = None) -> None:
"""Initialise variable."""
self._values: Set[int] = set()
if pretty_name is None:
self._pretty_name = "Var{}".format(Variable._INDEX)
Variable._INDEX += 1
else:
self._pretty_name = pretty_name
@staticmethod
def from_range(domain_end, domain_start=1, pretty_name: Optional[str] = None) -> "Variable":
"""Create a variable with a range based domain."""
new_domain = Variable(pretty_name=pretty_name)
new_domain._values = set(range(domain_start, domain_end + 1))
return new_domain
def copy(self) -> "Variable":
"""Create a copy of the variable."""
new_domain = Variable(pretty_name=self._pretty_name)
new_domain._values = set(self._values)
return new_domain
def prune(self, values: Set[int]):
"""Prune specified values."""
self._values.difference_update(values)
def assign(self, value: int):
"""Prune specified values."""
self._values = set((value, ))
@property
def values(self) -> Set[int]:
"""Get the values which are in the variable's domain."""
return self._values
def __contains__(self, other) -> bool:
"""Implement for "in" operator."""
return other in self._values
def pretty(self) -> str:
"""Pretty print the variable."""
return self._pretty_name
class VariableDomain:
"""Variable domain."""
def __init__(self) -> None:
"""Initialise domain."""
self.variables: List[Variable] = []
@property
def variable_references(self) -> Set[VariableReference]:
"""Get all variable references in this domain."""
return set(range(len(self.variables)))
def from_name(self, name: str) -> VariableReference:
"""Get a variable by name in the domain."""
index = 0
for item in self.variables:
if item.pretty() == name:
return index
index += 1
raise IndexError("No variable by that name")
@staticmethod
def from_range(domain_count, domain_end, domain_start=1, alpha_names=False) -> "VariableDomain":
"""Create a domain from a range and variable count."""
new_domains_container = VariableDomain()
for i in range(domain_count):
pretty_name = None
if alpha_names:
pretty_name = alphabet[i % len(alphabet)] * (1 + i // len(alphabet))
new_domains_container.variables.append(
Variable.from_range(domain_end, domain_start, pretty_name=pretty_name))
return new_domains_container
def copy(self) -> "VariableDomain":
"""Create a copy of the domain."""
new_domains_container = VariableDomain()
new_domains_container.variables = [variable.copy() for variable in self.variables]
return new_domains_container
def get_variable(self, v: VariableReference) -> Variable:
"""Get a variable from the domain."""
return self.variables[v]
def __getitem__(self, key) -> Variable:
"""Implement indexing to get variable from the domain."""
return self.get_variable(key)
class UnaryConstraint(ABC):
"""A constraint on one variable."""
def __hash__(self) -> int:
"""Hash for a constraint."""
return hash(self.__class__.__module__ + self.__class__.__name__ + "|" + str(self.x) + "|" +
str(self.v))
def __init__(self, x: VariableReference, v: int) -> None:
"""Initialise constraints."""
self.x = x
self.v = v
def __contains__(self, other) -> bool:
"""Check whether this constraint is affected by the provided variable(s)."""
if isinstance(other, set):
return self.x in other
return other == self.x
@abstractmethod
def pretty(self, vd: Optional[VariableDomain] = None) -> str:
"""Pretty print the constraint."""
@abstractmethod
def revise(self, vd: VariableDomain) -> bool:
"""Return the revised domain if a revision was made."""
class BinaryConstraint(ABC):
"""A constraint on two variables."""
def __hash__(self) -> int:
"""Hash for a constraint."""
return hash(self.__class__.__module__ + self.__class__.__name__ + "|" + str(self.x) + "|" +
str(self.y))
def __init__(self, x: VariableReference, y: VariableReference) -> None:
"""Initialise constraints."""
self.x = x
self.y = y
def _revise_condition(self, vd, condition: Callable[[VariableReference, VariableReference],
bool]):
x, y = self.pair
x_values = vd[x].values
y_values = vd[y].values
unsupported_values: Set[int] = set()
for x_value in x_values:
supported = False
for y_value in y_values:
if condition(x_value, y_value):
supported = True
break
if not supported:
unsupported_values.add(x_value)
vd[x].prune(unsupported_values)
return bool(vd[x].values)
@abstractmethod
def revise(self, vd: VariableDomain) -> bool:
"""Return the revised domain if a revision was made."""
def __contains__(self, other) -> bool:
"""Check whether this constraint is affected by the provided variable(s)."""
if isinstance(other, set):
return self.x in other or self.y in other
return other in (self.x, self.y)
@property
def pair(self) -> Tuple[VariableReference, VariableReference]:
"""Get x and y as a tuple."""
return (self.x, self.y)
@abstractmethod
def pretty(self, vd: Optional[VariableDomain] = None) -> str:
"""Pretty print the constraint."""
class AdjacencyConstraint(BinaryConstraint):
"""Constraint that two variables are not adjacent."""
def revise(self, vd: VariableDomain) -> bool:
"""Revise, returning false if x is empty."""
if len(vd[self.y].values) > 4:
return True
return self._revise_condition(vd, lambda x, y: abs(x - y) > 1)
def pretty(self, vd: Optional[VariableDomain] = None) -> str:
"""Pretty print the constraint."""
if vd is not None:
return "|{} - {}| > 1".format(vd[self.x].pretty(), vd[self.y].pretty())
return "|x - y| > 1"
class NotEqualConstraint(BinaryConstraint):
"""Constraint that two variables are not equal."""
def revise(self, vd: VariableDomain) -> bool:
"""Return the revised domain if a revision was made."""
# We could use the generic constraint:
# return self._revise_condition(vd, lambda x, y: x != y)
# but we can do better...
x, y = self.pair
if not vd[x].values or not vd[y].values:
return False
if not len(vd[y].values) > 1:
vd[x].values.difference_update(vd[y].values)
return bool(vd[x].values)
def pretty(self, vd: Optional[VariableDomain] = None) -> str:
"""Pretty print the constraint."""
if vd is not None:
return "{} != {}".format(vd[self.x].pretty(), vd[self.y].pretty())
return "x != y"
class GenericUnaryConstraint(UnaryConstraint):
"""A constraint on one variable."""
def __init__(self,
x: VariableReference,
v: int,
constraint_condition: Callable[[VariableReference, int], bool],
operator_format: str = "{} ⊙ {}") -> None:
"""Initialise constraints."""
super().__init__(x, v)
self._condition = constraint_condition
self._operator_format = operator_format
def revise(self, vd: VariableDomain) -> bool:
"""Return the revised domain if a revision was made."""
values_to_prune: Set[int] = set()
for value in vd[self.x].values:
if not self._condition(value, self.v):
values_to_prune.add(value)
vd[self.x].prune(values_to_prune)
return bool(vd[self.x].values)
def pretty(self, vd: Optional[VariableDomain] = None) -> str:
"""Pretty print the constraint."""
if vd is not None:
return self._operator_format.format(vd[self.x].pretty(), self.v)
return self._operator_format.format("x", self.v)
class GenericBinaryConstraint(BinaryConstraint):
"""Constraint that two variables are not equal."""
def __init__(self,
x: VariableReference,
y: VariableReference,
constraint_condition: Callable[[VariableReference, VariableReference], bool],
operator_format: str = "{} ⊙ {}") -> None:
"""Initialise constraints."""
super().__init__(x, y)
self._condition = constraint_condition
self._operator_format = operator_format
def revise(self, vd: VariableDomain) -> bool:
"""Return the revised domain if a revision was made."""
return self._revise_condition(vd, self._condition)
def pretty(self, vd: Optional[VariableDomain] = None) -> str:
"""Pretty print the constraint."""
if vd is not None:
return self._operator_format.format(vd[self.x].pretty(), vd[self.y].pretty())
return self._operator_format.format("x", "y")
def bidirectional(constraint: BinaryConstraint) -> Tuple[BinaryConstraint, BinaryConstraint]:
"""Take a single constraint and return the constraint and it's inverse."""
return constraint, constraint.__class__(constraint.y, constraint.x)
Constraint = Union[BinaryConstraint, UnaryConstraint]
class Constraints:
"""Constraints container."""
def __init__(self) -> None:
"""Initialise constraints container."""
self.constraints: Set[Constraint] = set()
self._dict_constraints: Dict[Tuple[VariableReference, ...], Set[BinaryConstraint]] = dict()
def add_constraint(self, constraint: Constraint) -> None:
"""Add a constraint."""
if isinstance(constraint, BinaryConstraint):
if constraint.pair not in self._dict_constraints:
self._dict_constraints[constraint.pair] = set()
self._dict_constraints[constraint.pair].add(constraint)
self.constraints.add(constraint)
def add_constraints(self, constraints: Iterable[Constraint]) -> None:
"""Add a constraint."""
for constraint in constraints:
self.add_constraint(constraint)
def relevant_constraints(self, variables: Set[int]) -> Set[Constraint]:
"""Get constraints which mention specified variables."""
result: Set[Constraint] = set()
for constraint in self.constraints:
if variables in constraint:
result.add(constraint)
return result
def unary_constraints(self) -> Set[UnaryConstraint]:
"""Get all unary constraints."""
constraints: Set[UnaryConstraint] = set()
for constraint in self.constraints:
if isinstance(constraint, UnaryConstraint):
constraints.add(constraint)
return constraints
def arc_constraints(self, x, y) -> Set[BinaryConstraint]:
"""Get all constraints on an arc."""
if (x, y) in self._dict_constraints:
return self._dict_constraints[(x, y)]
return set()
def AllDifferent(*variables) -> Set[BinaryConstraint]:
"""Enforce that all the specified variables are different."""
constraints: Set[BinaryConstraint] = set()
for x, y in product(variables, variables):
if x == y:
continue
constraints.add(NotEqualConstraint(x, y))
return constraints
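# Small usage sketch (illustrative problem, not part of the original module):
# three 1..3 variables that must all differ, with the first two also non-adjacent.
#   domain = VariableDomain.from_range(domain_count=3, domain_end=3, alpha_names=True)
#   constraints = Constraints()
#   constraints.add_constraints(AllDifferent(*domain.variable_references))
#   constraints.add_constraints(bidirectional(AdjacencyConstraint(0, 1)))
#   solution = ForwardChecker(constraints).forward_check(domain)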
class ForwardChecker:
"""Forward checker."""
def __init__(self, constraints: Constraints, debug=False) -> None:
"""Initialise forward checker."""
self.constraints = constraints
self.debug = debug
def forward_check(self,
vd: VariableDomain,
variable: VariableReference = 0) -> Optional[VariableDomain]:
"""Run forward checker on a domain starting at a variable."""
if self.debug:
print(" " * variable, "-> Forward checking at depth {}".format(variable))
for unary_constraint in self.constraints.unary_constraints():
unary_constraint.revise(vd)
max_depth = len(vd.variables)
for value in vd[variable].values:
if self.debug:
print(" " * variable, " = Trying variable assignment", vd[variable].pretty(),
"=", value)
working_vd = vd.copy()
working_vd[variable].assign(value)
consistent = True
for other_variable in range(variable + 1, max_depth):
if not consistent:
break
for constraint in self.constraints.arc_constraints(other_variable, variable):
consistent = consistent and constraint.revise(working_vd)
if self.debug:
print(" " * variable, " Constraint", constraint.pretty(working_vd),
"application with", working_vd[other_variable].pretty(), "=",
working_vd[other_variable].values, "and",
working_vd[variable].pretty(), "=", working_vd[variable].values,
"gives consistent =", consistent)
if not consistent:
break
if consistent:
if variable + 1 == max_depth:
if self.debug:
print(" " * variable, "<- Solution found")
return working_vd
else:
result = self.forward_check(working_vd, variable + 1)
if result:
return result
if self.debug:
print(" " * variable, "<- No assignments found")
return None
def _test():
"""Run an example using the Crystal Maze problem."""
constraints = Constraints()
domain = VariableDomain.from_range(domain_count=8,
domain_start=1,
domain_end=8,
alpha_names=True)
connected = {
(domain.from_name("A"), domain.from_name("B")),
(domain.from_name("A"), domain.from_name("C")),
(domain.from_name("A"), domain.from_name("D")),
(domain.from_name("B"), domain.from_name("C")),
(domain.from_name("D"), domain.from_name("C")),
(domain.from_name("H"), domain.from_name("E")),
(domain.from_name("H"), domain.from_name("F")),
(domain.from_name("H"), domain.from_name("G")),
(domain.from_name("E"), domain.from_name("F")),
(domain.from_name("G"), domain.from_name("F")),
(domain.from_name("B"), domain.from_name("E")),
(domain.from_name("C"), domain.from_name("F")),
(domain.from_name("D"), domain.from_name("G")),
(domain.from_name("B"), domain.from_name("F")),
(domain.from_name("E"), domain.from_name("C")),
(domain.from_name("C"), domain.from_name("G")),
(domain.from_name("D"), domain.from_name("F")),
}
for connected_a, connected_b in connected:
constraints.add_constraints(bidirectional(AdjacencyConstraint(connected_a, connected_b)))
constraints.add_constraints(AllDifferent(*domain.variable_references))
solution = ForwardChecker(constraints).forward_check(domain)
if solution:
for variable in solution.variables:
print("{} = {}".format(variable.pretty(), next(variable.values.__iter__())))
if __name__ == "__main__":
_test()
``` |
{
"source": "JMAlego/NAVTool",
"score": 3
} |
#### File: NAVTool/protocols/__init__.py
```python
from os import path, listdir, getcwd, chdir
from importlib import import_module
PROTOCOLS = set(
map(
lambda x: path.splitext(x)[0],
filter(lambda x: not x.startswith("_") and path.splitext(x)[-1] == ".py",
listdir(path.abspath(path.dirname(__file__))))))
_rest, PROTOCOLS_PACKAGE = path.split(path.abspath(path.dirname(__file__)))
_, APP_PACKAGE = path.split(_rest)
FULL_PACKAGE = "{}.{}".format(APP_PACKAGE, PROTOCOLS_PACKAGE)
def get_protocols():
"""Get all protocols."""
protocols = {}
for protocol in PROTOCOLS:
protocols[protocol] = import_module("." + protocol, FULL_PACKAGE)
return protocols
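# Usage sketch: get_protocols() maps each protocol module's file name (without
# the ".py" suffix) to its imported module object, e.g.
# protocols = get_protocols()
# print(sorted(protocols))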
``` |
{
"source": "JMAlego/pi_puck_ros_packages",
"score": 3
} |
#### File: src/pi_puck_interoperability/range_merger.py
```python
from math import radians
from functools import partial
# ROS imports
import rospy
from sensor_msgs.msg import Range, LaserScan
SENSOR_PREFIX = "navigation/proximity/"
RANGE_SENSORS = {
"short_range_ir/4": radians(-135),
"long_range_ir/4": radians(-135),
"short_range_ir/5": radians(-90),
"short_range_ir/6": radians(-45),
"long_range_ir/5": radians(-45),
"short_range_ir/7": radians(-10),
"long_range_ir/0": radians(0),
"short_range_ir/0": radians(10),
"long_range_ir/1": radians(45),
"short_range_ir/1": radians(45),
"short_range_ir/2": radians(90),
"long_range_ir/2": radians(135),
"short_range_ir/3": radians(135),
}
SCAN_START = radians(-135)
SCAN_END = radians(135)
SCAN_STEP = radians(2.5)
RELEVANCE_DISTANCE = radians(20)
EFFECT_SMOOTHING = 1.5
INF = float("inf")
MAX_RANGE = 1.35
MIN_RANGE = 0
REFERENCE_FRAME_ID = "scan_base"
ROBOT_RADIUS = 0.035
class PiPuckRangeMerger(object):
"""ROS Node to convert ranges to comprehensive laser scans."""
def __init__(self):
"""Initialise node."""
rospy.init_node("range_merger")
self._merged_topic = rospy.Publisher("range_merger/scan", LaserScan, queue_size=10)
robot_root = rospy.get_param("~robot_root", rospy.get_namespace())
tf_prefix_key = rospy.search_param("tf_prefix")
if tf_prefix_key:
tf_prefix = rospy.get_param(tf_prefix_key, None)
else:
tf_prefix = None
if tf_prefix is not None and not tf_prefix.endswith("/"):
tf_prefix += "/"
self._tf_reference_frame = str(rospy.get_param("~reference_frame", REFERENCE_FRAME_ID))
if tf_prefix:
self._tf_reference_frame = tf_prefix + self._tf_reference_frame
self._rate = rospy.Rate(rospy.get_param('~rate', 1))
self._sensor_last_values = {key: INF for key in RANGE_SENSORS}
self._latest_message = rospy.Time.now()
self._scan_start = float(rospy.get_param("~scan_start", SCAN_START))
self._scan_end = float(rospy.get_param("~scan_end", SCAN_END))
self._scan_step = float(rospy.get_param("~scan_step", SCAN_STEP))
self._scan_steps = int((self._scan_end - self._scan_start) / self._scan_step)
self._scan_max_influence = float(rospy.get_param("~scan_max_influence", RELEVANCE_DISTANCE))
self._scan_influence_smoothing = float(
rospy.get_param("~scan_influence_smoothing", EFFECT_SMOOTHING))
self._scan_max_range = float(rospy.get_param("~scan_max_range", MAX_RANGE))
self._scan_min_range = float(rospy.get_param("~scan_min_range", MIN_RANGE))
self._scan_robot_radius = float(rospy.get_param("~scan_robot_radius", ROBOT_RADIUS))
for sensor in RANGE_SENSORS:
rospy.Subscriber(robot_root + SENSOR_PREFIX + sensor, Range,
partial(self.range_handler, sensor=sensor))
self._running = True
def range_handler(self, data, sensor):
"""Handle new range data."""
self._latest_message = data.header.stamp
self._sensor_last_values[sensor] = data.range
def calculate_reading(self, angle):
"""Calculate a combined reading for a point in the pseudo laser scan."""
relevant_sensors = (key for key, value in RANGE_SENSORS.items()
if abs(value - angle) < self._scan_max_influence)
effect_percents = 0
negative_infs = 0
positive_infs = 0
cumulative_value = 0
for sensor in relevant_sensors:
sensor_value = self._sensor_last_values[sensor]
if sensor_value == INF:
positive_infs += 1
elif sensor_value == -INF:
negative_infs += 1
else:
sensor_angle = RANGE_SENSORS[sensor]
effect_percent = 1 - (abs(sensor_angle - angle) / self._scan_max_influence)
effect_percent = effect_percent**self._scan_influence_smoothing
cumulative_value += sensor_value * effect_percent
effect_percents += effect_percent
if effect_percents == 0:
if negative_infs > positive_infs:
return -INF
return INF
final_value = cumulative_value / effect_percents
if final_value > self._scan_max_range:
return INF
if final_value < self._scan_min_range:
return -INF
return final_value + self._scan_robot_radius
def publish_laser_scan(self):
"""Publish range as laser scan."""
if not self._running:
return
laser_scan_message = LaserScan()
laser_scan_message.range_max = self._scan_max_range + self._scan_robot_radius
laser_scan_message.range_min = self._scan_min_range + self._scan_robot_radius
laser_scan_message.angle_min = self._scan_start
laser_scan_message.angle_max = self._scan_end
laser_scan_message.angle_increment = self._scan_step
# Laser scans are counter clockwise
laser_scan_message.ranges = [
self.calculate_reading(step * self._scan_step + self._scan_start)
for step in reversed(range(self._scan_steps + 1))
]
laser_scan_message.header.frame_id = self._tf_reference_frame
laser_scan_message.header.stamp = self._latest_message
self._merged_topic.publish(laser_scan_message)
def run(self):
"""ROS Node server."""
while not rospy.is_shutdown():
self.publish_laser_scan()
self._rate.sleep()
self._running = False
if __name__ == "__main__":
PiPuckRangeMerger().run()
```
#### File: src/pi_puck_examples/power_meter.py
```python
from functools import partial
# ROS imports
import rospy
from sensor_msgs.msg import Image as ImageMessage, BatteryState
# Standard imports
from PIL import Image, ImageDraw, ImageFont
OLED_WIDTH = 128
OLED_HEIGHT = 32
FONT_PADDING = 2
FONT_SIZE = 12
FONT = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono-Bold.ttf", 12)
def update_power_meter(data, publisher):
"""Update the power meter with battery reading."""
battery_percent = float(data.percentage)
image = Image.new("1", (OLED_WIDTH, OLED_HEIGHT))
draw = ImageDraw.Draw(image)
draw.rectangle([(0, OLED_HEIGHT // 2), (int(OLED_WIDTH * battery_percent), OLED_HEIGHT)],
fill=0xff)
percent_text = str(int(battery_percent * 100))
status_text = "Unknown"
if data.power_supply_status == BatteryState.POWER_SUPPLY_STATUS_FULL:
status_text = "Charged"
elif data.power_supply_status == BatteryState.POWER_SUPPLY_STATUS_CHARGING:
status_text = "Charging"
percent_text = "??"
elif data.power_supply_status == BatteryState.POWER_SUPPLY_STATUS_DISCHARGING:
status_text = "Discharging"
elif data.power_supply_status == BatteryState.POWER_SUPPLY_STATUS_NOT_CHARGING:
status_text = "Slow Charging"
percent_text = "??"
draw.text((FONT_PADDING, FONT_PADDING), "Battery: " + percent_text + "%", font=FONT, fill=0xff)
draw.text((FONT_PADDING, OLED_HEIGHT - 2*FONT_PADDING - FONT_SIZE),
status_text,
font=FONT,
fill=0x00)
image_message = ImageMessage()
image_message.data = image.convert("L").tobytes()
image_message.width = image.width
image_message.height = image.height
image_message.step = image.width
image_message.encoding = "mono8"
publisher.publish(image_message)
def main():
"""Entry point function."""
rospy.init_node("power_meter")
image_publisher = rospy.Publisher("display/oled/image", ImageMessage, queue_size=10)
rospy.Subscriber("power/battery", BatteryState,
partial(update_power_meter, publisher=image_publisher))
rospy.spin()
if __name__ == "__main__":
main()
``` |
{
"source": "JMAlego/pyfio",
"score": 3
} |
#### File: JMAlego/pyfio/ai.py
```python
import random, sys
from multiprocessing import Pool
def otherPlayer(player):
return 1 + player % 2
class RandomAI:
def pickMove(self, board, player):
moves = board.getMoves(player)
if moves == {}: return None
        return random.choice(list(moves.values()))
class WeightedAI:
weighting = [ [2.0, 1.5, 1.5, 1.5, 1.5, 2.0],
[1.5, 0.7, 0.7, 0.7, 0.7, 1.5],
[1.5, 0.7, 1.0, 1.0, 0.7, 1.5],
[1.5, 0.7, 1.0, 1.0, 0.7, 1.5],
[1.5, 0.7, 0.7, 0.7, 0.7, 1.5],
[2.0, 1.5, 1.5, 1.5, 1.5, 2.0] ]
def pickMove(self, board, player):
moves = board.getMoves(player)
if moves == {}: return None
best_move = (0, None)
for piece, move in moves.items():
x = piece[0]
y = piece[1]
score = self.weighting[x][y] * move.getScore()
if best_move[0] < score:
best_move = (score, move)
elif best_move[0] == score and bool(random.getrandbits(1)):
best_move = (score, move)
elif best_move[0] == 0:
best_move = (score, move)
return best_move[1]
class WeightedV2AI:
weighting = [ [ 4.0, -0.7, 3.3, 3.3, -0.7, 4.0],
[-0.7, -0.7, 0.2, 0.2, -0.7, -0.7],
[ 3.3, 0.2, 1.0, 1.0, 0.2, 3.3],
[ 3.3, 0.2, 1.0, 1.0, 0.2, 3.3],
[-0.7, -0.7, 0.2, 0.2, -0.7, -0.7],
[ 4.0, -0.7, 3.3, 3.3, -0.7, 4.0] ]
def pickMove(self, board, player):
moves = board.getMoves(player)
if moves == {}: return None
best_move = (0, None)
for piece, move in moves.items():
x = piece[0]
y = piece[1]
score = self.weighting[x][y] * move.getScore()
if best_move[0] < score:
best_move = (score, move)
elif best_move[0] == score and bool(random.getrandbits(1)):
best_move = (score, move)
elif best_move[0] == 0:
best_move = (score, move)
return best_move[1]
class RecursiveAI:
def __init__(self, number_of_moves = 2, max_depth = 6):
self.number_of_moves = number_of_moves
self.max_depth = max_depth
def recur(self, board, player, depth = 0, number_of_moves = 4, max_depth = 3):
if max_depth == depth:
return [board]
moves = board.getMoves(player)
if moves == {}: return None
scored_moves = {}
for key, move in moves.items():
score = move.getScore()
if not score in scored_moves.keys():
scored_moves[score] = []
scored_moves[score].append(move)
best_moves = []
for score in sorted(scored_moves.keys(), reverse = True):
for move in scored_moves[score]:
if len(best_moves) < number_of_moves:
best_moves.append(move)
else:
break
if len(best_moves) >= number_of_moves:
break
player = otherPlayer(player)
results = []
for move in best_moves:
new_board = board.getCopy()
new_board.applyMove(move)
if depth == 0:
result = self.recur(new_board, player, depth+1, number_of_moves, max_depth)
if result != None and result != []:
for item in result:
if item != None:
results.append((move, item))
else:
result = self.recur(new_board, player, depth+1, number_of_moves, max_depth)
if result != None and result != []:
for item in result:
if item != None:
results.append(item)
return results
def pickMove(self, board, player):
moves = board.getMoves(player)
if moves == {}: return None
elif len(moves) == 1:
            return list(moves.values())[0]
moves = self.recur(board.getCopy(), player, number_of_moves = self.number_of_moves, max_depth = self.max_depth)
if moves == [] or moves == {} or moves == None:
ai = WeightedV2AI()
return ai.pickMove(board, player)
best_move = (0, None)
for move, pos_board in moves:
score = pos_board.getScore(player)
if best_move[0] < score:
best_move = (score, move)
elif best_move[0] == score and bool(random.getrandbits(1)):
best_move = (score, move)
elif best_move[0] == 0:
best_move = (score, move)
return best_move[1]
class WeightedRecursiveAI:
weighting = [ [ 4.0, -0.7, 3.3, 3.3, -0.7, 4.0],
[-0.7, -0.7, 0.2, 0.2, -0.7, -0.7],
[ 3.3, 0.2, 1.0, 1.0, 0.2, 3.3],
[ 3.3, 0.2, 1.0, 1.0, 0.2, 3.3],
[-0.7, -0.7, 0.2, 0.2, -0.7, -0.7],
[ 4.0, -0.7, 3.3, 3.3, -0.7, 4.0] ]
def __init__(self, number_of_moves = 2, max_depth = 6):
self.number_of_moves = number_of_moves
self.max_depth = max_depth
def recur(self, board, player, depth = 0, number_of_moves = 4, max_depth = 3):
if max_depth == depth:
return [board]
moves = board.getMoves(player)
if moves == {}: return None
scored_moves = {}
for key, move in moves.items():
score = move.getWeightedScore(self.weighting)
if not score in scored_moves.keys():
scored_moves[score] = []
scored_moves[score].append(move)
best_moves = []
for score in sorted(scored_moves.keys(), reverse = True):
for move in scored_moves[score]:
if len(best_moves) < number_of_moves:
best_moves.append(move)
else:
break
if len(best_moves) >= number_of_moves:
break
player = otherPlayer(player)
results = []
for move in best_moves:
new_board = board.getCopy()
new_board.applyMove(move)
if depth == 0:
result = self.recur(new_board, player, depth+1, number_of_moves, max_depth)
if result != None and result != []:
for item in result:
if item != None:
results.append((move, item))
else:
result = self.recur(new_board, player, depth+1, number_of_moves, max_depth)
if result != None and result != []:
for item in result:
if item != None:
results.append(item)
return results
def pickMove(self, board, player):
moves = board.getMoves(player)
if moves == {}: return None
elif len(moves) == 1:
            return list(moves.values())[0]
moves = self.recur(board.getCopy(), player, number_of_moves = self.number_of_moves, max_depth = self.max_depth)
if moves == [] or moves == {} or moves == None:
ai = WeightedV2AI()
return ai.pickMove(board, player)
best_move = (0, None)
for move, pos_board in moves:
x, y = move.getPlacedPiece()
score = pos_board.getScore(player) * self.weighting[x][y]
if best_move[0] < score:
best_move = (score, move)
elif best_move[0] == score and bool(random.getrandbits(1)):
best_move = (score, move)
elif best_move[0] == 0:
best_move = (score, move)
return best_move[1]
```
#### File: JMAlego/pyfio/state.py
```python
import math, copy
PLAYER0 = BLANK = 0
PLAYER1 = PLAYERA = 1
PLAYER2 = PLAYERB = 2
INITIAL_STATES = [[[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,1,2,0,0],
[0,0,2,1,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0] ],
[[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,2,1,0,0],
[0,0,1,2,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0] ]]
def otherPlayer(player):
return 1 + player % 2
class Line:
start = None
end = None
middle = []
length = 0
full_line = False
def __init__(self):
self.start = None
self.end = None
self.middle = []
self.length = 0
self.full_line = False
def __str__(self):
string = "ln:["
first = True
for item in self.getFullLine():
string += ("" if first else ", ") + "(" + str(item[0]) + ", " + str(item[1]) + ")"
first = False
string += "]"
return string
__repr__ = __str__
def getFullLine(self):
if not self.isValid(): return []
full = []
full.append(self.start)
for item in self.middle: full.append(item)
full.append(self.end)
return full
def getWithoutStart(self):
if not self.isValid(): return []
full = []
for item in self.middle: full.append(item)
full.append(self.end)
return full
def setStartPoint(self, point):
self.start = point
self.length += 1
def setEndPoint(self, point):
self.end = point
self.length += 1
self.full_line = True
def addPoint(self, point):
self.middle.append(point)
self.length += 1
def isPointInLine(self, point):
return point in self.getFullLine()
def isPointStart(self, point):
return point == self.start
def isPointEnd(self, point):
return point == self.end
def isPointInMiddle(self, point):
return point in self.middle
def isValid(self):
return self.start != None and self.end != None and self.length > 2 and self.full_line == True and len(self.middle) > 0
class Move:
lines = []
player = BLANK
__placed_piece = None
def __init__(self):
self.lines = []
self.player = BLANK
self.__placed_piece = None
def __str__(self):
string = "mv:{"
first = True
for item in self.lines:
string += ("" if first else ", ") + str(item)
first = False
string += "}"
return string
__repr__ = __str__
def setPlayer(self, in_player):
self.player = in_player
def addLine(self, line):
self.lines.append(line)
def getLines(self):
return self.lines
def getPlacedPiece(self):
return self.lines[0].end
def getSquares(self):
squares = []
for line in self.lines:
for item in line.getFullLine():
if item not in squares:
squares.append(item)
return squares
def isValid(self):
end = self.lines[0].end
for line in self.lines:
if line.end != end:
return False
return True
def getScore(self):
score = 1
for line in self.lines:
score += (line.length - 2)
return score
def getWeightedScore(self, weighting_matrix):
squares_counted = []
score = 1
for line in self.lines:
for item in line.getWithoutStart():
if not item in squares_counted:
squares_counted.append(item)
score += weighting_matrix[item[0]][item[1]]
return score
class Board:
__state = []
__board_size = 0
__col_row_size = 0
__move_masks = [(0,1),(1,0),(0,-1),(-1,0),(1,1),(-1,-1),(1,-1),(-1,1)]
__move_cache = {}
__refresh_move_cache = True
__player_move_cache = BLANK
def __init__(self, board_size = 36, blank = False):
self.__state = []
self.__move_cache = {}
self.__refresh_move_cache = True
self.__player_move_cache = BLANK
self.__board_size = board_size
self.__col_row_size = int(math.sqrt(board_size));
for x in range(0, self.__col_row_size):
col = []
for y in range(0, self.__col_row_size):
col.append(0)
self.__state.append(col)
if not blank:
counter = 0
for offx in [-1,0]:
for offy in [-1,0]:
counter += 1
self.__state[int(self.__col_row_size/2)+offx][int(self.__col_row_size/2)+offy] = 4/(counter**2-5*counter+8)
#Use fancy maths to set the starting pieces
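                        # (counter runs 1..4, so the expression evaluates to 1, 2, 2, 1,
                        # i.e. the usual alternating diagonal starting layout for the two players)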
def getBoardSize(self):
return self.__board_size
def getColRowSize(self):
return self.__col_row_size
def getScore(self, player):
score = 0
for x in range(0, self.__col_row_size):
for y in range(0, self.__col_row_size):
score += player == self.__state[x][y]
return score
def getMoves(self, player, pattern = 0):
if self.__can_use_move_cache(player):
return self.move_cache
possible_moves = []
for x, y in self.patternForBoard():
if self.getSquare((x, y)) == player:
for mask in self.__move_masks:
line = Line()
line.setStartPoint((x, y))
valid_move = True
offset = 1
while True:
if self.getSquare((x+mask[0]*offset, y+mask[1]*offset)) == otherPlayer(player):
line.addPoint((x+mask[0]*offset, y+mask[1]*offset))
else:
break
offset += 1
if self.getSquare((x+mask[0]*offset, y+mask[1]*offset)) == BLANK:
line.setEndPoint((x+mask[0]*offset, y+mask[1]*offset))
if line.isValid():
possible_moves.append(line)
moves = {}
for line in possible_moves:
if not line.end in moves.keys():
moves[line.end] = Move()
moves[line.end].setPlayer(player)
moves[line.end].addLine(line)
self.__set_move_cache(moves, player)
return moves
def __set_move_cache(self, moves, player):
self.move_cache = moves
self.__refresh_move_cache = False
self.__player_move_cache = player
def __can_use_move_cache(self, player):
return not self.__refresh_move_cache and player == self.__player_move_cache
def getSquare(self, square):
x = square[0]
y = square[1]
if x >= 0 and x < len(self.__state) and y >= 0 and y < len(self.__state[x]):
return self.__state[x][y]
else:
return None
def setSquare(self, square, player):
x = square[0]
y = square[1]
if x >= 0 and x < len(self.__state) and y >= 0 and y < len(self.__state[x]) and player in [0,1,2]:
self.__state[x][y] = player
self.__refresh_move_cache = True
return True
else:
return False
def applyMove(self, move):
for line in move.lines:
if not self.applyLine(line, move.player):
return False
return True
def applyLine(self, line, player):
for square in line.getFullLine():
if not self.setSquare(square, player):
return False
return True
def isBoardFull(self):
for x, y in self.patternForBoard():
if self.getSquare((x, y)) == BLANK:
return False
return True
def canEitherPlayerMove(self, player):
player1 = self.getMoves(player)
player2 = self.getMoves(otherPlayer(player))
return player1 != {} or player2 != {}
def setBoard(self, board):
old_board = self.__state
self.__state = copy.deepcopy(board)
return old_board
def getBoard(self):
return copy.deepcopy(self.__state)
def getWinner(self):
p1_score = self.getScore(PLAYER1)
p2_score = self.getScore(PLAYER2)
if p1_score > p2_score:
return PLAYER1
elif p2_score > p1_score:
return PLAYER2
else:
return PLAYER0
def getCopy(self):
return copy.deepcopy(self)
def patternForBoard(self):
for y in range(0, self.__col_row_size):
for x in range(0, self.__col_row_size):
yield (x,y)
def __str__(self):
string = ""
first = True
for y in range(0, self.getColRowSize()):
if not first: string += "\r\n"
first = False
for x in range(0, self.getColRowSize()):
string += str(self.getSquare((x,y))) + " "
return string
__repr__ = __str__
``` |
{
"source": "jmaliksi/aesimplifier",
"score": 3
} |
#### File: aesimplifier/model/topic.py
```python
from collections import namedtuple
class Topic(object):
def __init__(self, title):
self.title = title
self.posts = []
self.post_ids = set()
def add_post(self, post):
if post['post_id'] in self.post_ids:
return
self.posts.append(Post(
poster=post['poster'],
content=post['content'],
post_id=post['post_id'],
))
self.post_ids.add(post['post_id'])
def get_sorted_posts(self):
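        # post ids are assumed to share a fixed 5-character prefix before the
        # numeric part (hence the post_id[5:] slice)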
return sorted(self.posts, key=lambda x: int(x.post_id[5:]))
Post = namedtuple(
'Post', [
'poster',
'content',
'post_id',
]
)
``` |
{
"source": "jmalinao19/Data-Engineer-NanoDegree",
"score": 3
} |
#### File: 2-AWS_DataWarehouse/P3_AWSredshift/create_tables.py
```python
import configparser
import psycopg2
from sql_queries import create_table_queries, drop_table_queries
def drop_tables(cur, conn):
"""
Drops all tables
@type cur -- object
@param cur -- cursor object
@type conn -- object
@param conn -- database connection object
"""
for query in drop_table_queries:
cur.execute(query)
conn.commit()
def create_tables(cur, conn):
"""
Creates all tables
@type cur -- object
@param cur -- cursor object
@type conn -- object
@param conn -- database connection object
"""
for query in create_table_queries:
cur.execute(query)
conn.commit()
def main():
"""
parses config file, connects to PG database, executes function that drops and creates tables on redshift
"""
config = configparser.ConfigParser()
config.read('dwh.cfg')
conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
cur = conn.cursor()
drop_tables(cur, conn)
create_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main()
```
#### File: P3_AWSredshift/setup/cluster_status.py
```python
from create_AWS_cluster import aws_client, aws_resource, parse_configFile
import configparser
import pandas as pd
import boto3
import json
import time
def persist_cluster_configInfo(Redshift, DWH_CLUSTER_IDENTIFIER):
"""
Writes the cluster endpoint and IAM ARN to the dwh.cfg config file
@type redshift -- client
@param redshift -- Redshift resource client
@type DWH_CLUSTER_IDENTIFIER -- string
@param DWH_CLUSTER_IDENTIFIER -- value from config file
@return -- None
"""
    print('Cluster address and IamRoleARN are being written to the config file.')
cluster_properties = Redshift.describe_clusters(ClusterIdentifier = DWH_CLUSTER_IDENTIFIER)['Clusters'][0]
config = configparser.ConfigParser()
config.read_file(open('dwh.cfg'))
config.set('CLUSTER','HOST',cluster_properties['Endpoint']['Address'])
config.set('IAM_ROLE','ARN',cluster_properties['IamRoles'][0]['IamRoleArn'])
with open('dwh.cfg','w+') as configFile:
config.write(configFile)
parse_configFile()
def get_cluster_status(redshift,DWH_CLUSTER_IDENTIFIER):
"""
    Checks the Redshift cluster status and returns it as a lowercase string
@type redshift -- redshift
@param redshift -- AWS Redshift resource client
@type DWH_CLUSTER_IDENTIFIER -- string
@param DWH_CLUSTER_IDENTIFIER -- value from config file
    @return -- cluster status string in lowercase (e.g. 'available')
"""
cluster_properties = redshift.describe_clusters(ClusterIdentifier = DWH_CLUSTER_IDENTIFIER)['Clusters'][0]
clusters_status = cluster_properties['ClusterStatus']
return clusters_status.lower()
def describe_clust(redshift,DWH_CLUSTER_IDENTIFIER):
return redshift.describe_clusters(ClusterIdentifier = DWH_CLUSTER_IDENTIFIER)['Clusters'][0]
def prettyRedshiftProps(props):
pd.set_option('display.max_colwidth', -1)
keysToShow = ["ClusterIdentifier", "NodeType", "ClusterStatus", "MasterUsername", "DBName", "Endpoint", "NumberOfNodes", 'VpcId']
x = [(k, v) for k,v in props.items() if k in keysToShow]
return pd.DataFrame(data=x, columns=["Key", "Value"])
def open_redshift_port (ec2,redshift, DWH_CLUSTER_IDENTIFIER, DB_PORT):
"""
Redshift post on VPC security group is opened
    @type ec2 -- client
@param ec2 -- AWS EC2 resource client
@type redshift -- client
@param redshift -- Redshift resource client
@return -- none
"""
myClusterProps = redshift.describe_clusters(ClusterIdentifier = DWH_CLUSTER_IDENTIFIER)['Clusters'][0]
try:
vpc = ec2.Vpc(id=myClusterProps['VpcId'])
secur_group = list(vpc.security_groups.all())
print(secur_group)
defaultSg = secur_group[1]
defaultSg.authorize_ingress(
GroupName= defaultSg.group_name,
CidrIp='0.0.0.0/0',
IpProtocol='TCP',
FromPort=int(DB_PORT),
ToPort=int(DB_PORT)
)
except Exception as e:
print(e)
def main():
"""
    Checks the Redshift cluster status and opens the port
"""
# parse config file
configs=parse_configFile()
#get redshift client
redshift = aws_client('redshift','us-east-2',configs[0],configs[1])
# check if cluster was created
# add port thing, and create ec2 resource object to do so
    if get_cluster_status(redshift,configs[10]) == 'available':
print('Cluster is available:')
print(prettyRedshiftProps(describe_clust(redshift,configs[10])))
ec2 = aws_resource('ec2','us-east-2',configs[0],configs[1])
persist_cluster_configInfo(redshift,configs[10])
open_redshift_port(ec2,redshift,configs[10],configs[5])
print('Cluster is up and running')
else:
print('Cluster is not available yet')
if __name__ =='__main__':
main()
```
#### File: P3_AWSredshift/setup/delete_cluster.py
```python
from create_AWS_cluster import parse_configFile, aws_client
from cluster_status import get_cluster_status
def delete_cluster(redshift, DWH_CLUSTER_IDENTIFIER):
"""
Request a deletion for Redshift cluster
    @type redshift -- client
@param redshift -- Redshift resource client
@type DWH_CLUSTER_IDENTIFIER -- string
@param DWH_CLUSTER_IDENTIFIER -- value from config file
@return -- None
"""
return redshift.delete_cluster(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER, SkipFinalClusterSnapshot = True)
def main():
configs = parse_configFile()
    redshift = aws_client('redshift','us-east-2',configs[0],configs[1])
if get_cluster_status(redshift,configs[10]) == 'available':
print('Cluster is available and will begin the deletion process')
delete_cluster(redshift,configs[10])
print(get_cluster_status(redshift,configs[10]))
else:
print('Cannot Delete because Cluster is not available ')
if __name__ == '__main__':
main()
``` |
{
"source": "jmalles/oggm",
"score": 2
} |
#### File: oggm/core/inversion.py
```python
import logging
import warnings
# External libs
import numpy as np
from scipy.interpolate import griddata
from scipy import optimize
# Locals
from oggm import utils, cfg
from oggm import entity_task
from oggm.core.gis import gaussian_blur
from oggm.exceptions import InvalidParamsError, InvalidWorkflowError
# Module logger
log = logging.getLogger(__name__)
# arbitrary constant
MIN_WIDTH_FOR_INV = 10
@entity_task(log, writes=['inversion_input'])
def prepare_for_inversion(gdir, add_debug_var=False,
invert_with_rectangular=True,
invert_all_rectangular=False,
invert_with_trapezoid=True,
invert_all_trapezoid=False):
"""Prepares the data needed for the inversion.
Mostly the mass flux and slope angle, the rest (width, height) was already
computed. It is then stored in a list of dicts in order to be faster.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
# variables
fls = gdir.read_pickle('inversion_flowlines')
towrite = []
for fl in fls:
# Distance between two points
dx = fl.dx * gdir.grid.dx
# Widths
widths = fl.widths * gdir.grid.dx
# Heights
hgt = fl.surface_h
angle = -np.gradient(hgt, dx) # beware the minus sign
# Flux needs to be in [m3 s-1] (*ice* velocity * surface)
# fl.flux is given in kg m-2 yr-1, rho in kg m-3, so this should be it:
rho = cfg.PARAMS['ice_density']
flux = fl.flux * (gdir.grid.dx**2) / cfg.SEC_IN_YEAR / rho
# Clip flux to 0
if np.any(flux < -0.1):
log.info('(%s) has negative flux somewhere', gdir.rgi_id)
utils.clip_min(flux, 0, out=flux)
if np.sum(flux <= 0) > 1 and len(fls) == 1:
log.warning("More than one grid point has zero or "
"negative flux: this should not happen.")
if fl.flows_to is None and gdir.inversion_calving_rate == 0:
if not np.allclose(flux[-1], 0., atol=0.1):
# TODO: this test doesn't seem meaningful here
                msg = ('({}) flux at terminus should be zero, but is: '
                       '{:.4f} m3 ice s-1'.format(gdir.rgi_id, flux[-1]))
raise RuntimeError(msg)
# This contradicts the statement above which has been around for
# quite some time, for the reason that it is a quality check: per
# construction, the flux at the last grid point should be zero
# HOWEVER, it is also meaningful to have a non-zero ice thickness
# at the last grid point. Therefore, we add some artificial
            # flux here (an alternative would be to compute the flux on a
            # staggered grid but I actually like the QC and it's easier)
# note that this value will be ignored if one uses the filter
# task afterwards
flux[-1] = flux[-2] / 3 # this is totally arbitrary
if fl.flows_to is not None and flux[-1] <= 0:
# Same for tributaries
flux[-1] = flux[-2] / 3 # this is totally arbitrary
# Shape
is_rectangular = fl.is_rectangular
if not invert_with_rectangular:
is_rectangular[:] = False
if invert_all_rectangular:
is_rectangular[:] = True
# Trapezoid is new - might not be available
is_trapezoid = getattr(fl, 'is_trapezoid', None)
if is_trapezoid is None:
is_trapezoid = fl.is_rectangular * False
if not invert_with_trapezoid:
is_rectangular[:] = False
if invert_all_trapezoid:
is_trapezoid[:] = True
# Optimisation: we need to compute this term of a0 only once
flux_a0 = np.where(is_rectangular, 1, 1.5)
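        # the factor 1.5 corrects for parabolic cross sections, whose area
        # is 2/3 * w * h (rectangular sections use 1)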
flux_a0 *= flux / widths
# Add to output
cl_dic = dict(dx=dx, flux_a0=flux_a0, width=widths,
slope_angle=angle, is_rectangular=is_rectangular,
is_trapezoid=is_trapezoid, flux=flux,
is_last=fl.flows_to is None, hgt=hgt,
invert_with_trapezoid=invert_with_trapezoid)
towrite.append(cl_dic)
# Write out
gdir.write_pickle(towrite, 'inversion_input')
def _inversion_poly(a3, a0):
"""Solve for degree 5 polynomial with coefficients a5=1, a3, a0."""
sols = np.roots([1., 0., a3, 0., 0., a0])
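    # the polynomial is h**5 + a3*h**3 + a0 = 0; keep the positive real root
    # (the physically meaningful thickness)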
test = (np.isreal(sols)*np.greater(sols, [0]*len(sols)))
return sols[test][0].real
def _inversion_simple(a3, a0):
"""Solve for degree 5 polynomial with coefficients a5=1, a3=0., a0."""
return (-a0)**(1./5.)
def _compute_thick(a0s, a3, flux_a0, shape_factor, _inv_function):
"""Content of the original inner loop of the mass-conservation inversion.
Put here to avoid code duplication.
Parameters
----------
a0s
a3
flux_a0
shape_factor
_inv_function
Returns
-------
the thickness
"""
a0s = a0s / (shape_factor ** 3)
a3s = a3 / (shape_factor ** 3)
if np.any(~np.isfinite(a0s)):
raise RuntimeError('non-finite coefficients in the polynomial.')
# Solve the polynomials
try:
out_thick = np.zeros(len(a0s))
for i, (a0, a3, Q) in enumerate(zip(a0s, a3s, flux_a0)):
out_thick[i] = _inv_function(a3, a0) if Q > 0 else 0
except TypeError:
# Scalar
out_thick = _inv_function(a3, a0s) if flux_a0 > 0 else 0
if np.any(~np.isfinite(out_thick)):
raise RuntimeError('non-finite coefficients in the polynomial.')
return out_thick
def sia_thickness_via_optim(slope, width, flux, shape='rectangular',
glen_a=None, fs=None, t_lambda=None):
"""Compute the thickness numerically instead of analytically.
It's the only way that works for trapezoid shapes.
Parameters
----------
slope : -np.gradient(hgt, dx)
width : section width in m
flux : mass flux in m3 s-1
shape : 'rectangular', 'trapezoid' or 'parabolic'
glen_a : Glen A, defaults to PARAMS
fs : sliding, defaults to PARAMS
t_lambda: the trapezoid lambda, defaults to PARAMS
Returns
-------
the ice thickness (in m)
"""
if len(np.atleast_1d(slope)) > 1:
shape = utils.tolist(shape, len(slope))
t_lambda = utils.tolist(t_lambda, len(slope))
out = []
for sl, w, f, s, t in zip(slope, width, flux, shape, t_lambda):
out.append(sia_thickness_via_optim(sl, w, f, shape=s,
glen_a=glen_a, fs=fs,
t_lambda=t))
return np.asarray(out)
# Sanity
if flux <= 0:
return 0
if width <= MIN_WIDTH_FOR_INV:
return 0
if glen_a is None:
glen_a = cfg.PARAMS['inversion_glen_a']
if fs is None:
fs = cfg.PARAMS['inversion_fs']
if t_lambda is None:
t_lambda = cfg.PARAMS['trapezoid_lambdas']
if shape not in ['parabolic', 'rectangular', 'trapezoid']:
raise InvalidParamsError('shape must be `parabolic`, `trapezoid` '
'or `rectangular`, not: {}'.format(shape))
# Ice flow params
n = cfg.PARAMS['glen_n']
fd = 2 / (n+2) * glen_a
rho = cfg.PARAMS['ice_density']
rhogh = (rho * cfg.G * slope) ** n
# To avoid geometrical inconsistencies
max_h = width / t_lambda if shape == 'trapezoid' else 1e4
def to_minimize(h):
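        # depth-averaged SIA velocity: deformation part fd*tau^n*h (~h^(n+1))
        # plus sliding part fs*tau^n/h (~h^(n-1)), with rhogh = (rho*g*slope)^n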
u = (h ** (n + 1)) * fd * rhogh + (h ** (n - 1)) * fs * rhogh
if shape == 'parabolic':
sect = 2./3. * width * h
elif shape == 'trapezoid':
w0m = width - t_lambda * h
sect = (width + w0m) / 2 * h
else:
sect = width * h
return sect * u - flux
out_h, r = optimize.brentq(to_minimize, 0, max_h, full_output=True)
return out_h
def sia_thickness_via_optim_tidewater(slope, width, flux, water_depth,
shape='rectangular',
glen_a=None, fs=None, t_lambda=None):
"""Compute the thickness numerically instead of analytically.
It's the only way that works for trapezoid shapes.
Parameters
----------
slope : -np.gradient(hgt, dx)
width : section width in m
    flux : mass flux in m3 s-1
    water_depth : water depth at this grid point in m
shape : 'rectangular', 'trapezoid' or 'parabolic'
glen_a : Glen A, defaults to PARAMS
fs : sliding, defaults to PARAMS
t_lambda: the trapezoid lambda, defaults to PARAMS
Returns
-------
the ice thickness (in m)
"""
if len(np.atleast_1d(slope)) > 1:
shape = utils.tolist(shape, len(slope))
t_lambda = utils.tolist(t_lambda, len(slope))
out = []
        for sl, w, f, d, s, t in zip(slope, width, flux, water_depth,
                                     shape, t_lambda):
            out.append(sia_thickness_via_optim_tidewater(sl, w, f, d, shape=s,
                                                         glen_a=glen_a, fs=fs,
                                                         t_lambda=t))
return np.asarray(out)
# Sanity
if flux <= 0:
return 0
if width <= 10:
return 0
if glen_a is None:
glen_a = cfg.PARAMS['inversion_glen_a']
if fs is None:
fs = cfg.PARAMS['inversion_fs']
if t_lambda is None:
t_lambda = cfg.PARAMS['trapezoid_lambdas']
if shape not in ['parabolic', 'rectangular', 'trapezoid']:
raise InvalidParamsError('shape must be `parabolic`, `trapezoid` '
'or `rectangular`, not: {}'.format(shape))
# Ice flow params
n = cfg.PARAMS['glen_n']
fd = 2 / (n+2) * glen_a
rho = cfg.PARAMS['ice_density']
rhogh = (rho * cfg.G * slope) ** n
# To avoid geometrical inconsistencies
max_h = width / t_lambda if shape == 'trapezoid' else 1e4
def to_minimize(h):
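        # as in sia_thickness_via_optim, but the sliding term is divided by the
        # height above buoyancy (h - rho_water/rho_ice * water_depth, with
        # 1028 kg m-3 ~ sea-water density), clipped to 10 m to avoid blow-up
        # near flotation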
u = ((h ** (n + 1)) * fd * rhogh + ((h ** n) / (utils.clip_min(10, h -
(1028/rho) * water_depth))) * fs * rhogh)
if shape == 'parabolic':
sect = 2./3. * width * h
elif shape == 'trapezoid':
w0m = width - t_lambda * h
sect = (width + w0m) / 2 * h
else:
sect = width * h
return sect * u - flux
out_h, r = optimize.brentq(to_minimize, 0, max_h, full_output=True)
return out_h
def sia_thickness(slope, width, flux, shape='rectangular',
glen_a=None, fs=None, shape_factor=None):
"""Computes the ice thickness from mass-conservation.
This is a utility function tested against the true OGGM inversion
function. Useful for teaching and inversion with calving.
Parameters
----------
slope : -np.gradient(hgt, dx) (we don't clip for min slope!)
width : section width in m
flux : mass flux in m3 s-1
shape : 'rectangular' or 'parabolic'
glen_a : Glen A, defaults to PARAMS
fs : sliding, defaults to PARAMS
shape_factor: for lateral drag
Returns
-------
the ice thickness (in m)
"""
if glen_a is None:
glen_a = cfg.PARAMS['inversion_glen_a']
if fs is None:
fs = cfg.PARAMS['inversion_fs']
if shape not in ['parabolic', 'rectangular']:
raise InvalidParamsError('shape must be `parabolic` or `rectangular`, '
'not: {}'.format(shape))
_inv_function = _inversion_simple if fs == 0 else _inversion_poly
# Ice flow params
fd = 2. / (cfg.PARAMS['glen_n']+2) * glen_a
rho = cfg.PARAMS['ice_density']
# Convert the flux to m2 s-1 (averaged to represent the sections center)
flux_a0 = 1 if shape == 'rectangular' else 1.5
flux_a0 *= flux / width
# With numerically small widths this creates very high thicknesses
try:
flux_a0[width < MIN_WIDTH_FOR_INV] = 0
except TypeError:
if width < MIN_WIDTH_FOR_INV:
flux_a0 = 0
# Polynomial factors (a5 = 1)
a0 = - flux_a0 / ((rho * cfg.G * slope) ** 3 * fd)
a3 = fs / fd
# Inversion with shape factors?
sf_func = None
if shape_factor == 'Adhikari' or shape_factor == 'Nye':
sf_func = utils.shape_factor_adhikari
elif shape_factor == 'Huss':
sf_func = utils.shape_factor_huss
sf = np.ones(slope.shape) # Default shape factor is 1
if sf_func is not None:
# Start iteration for shape factor with first guess of 1
i = 0
sf_diff = np.ones(slope.shape)
# Some hard-coded factors here
sf_tol = 1e-2
max_sf_iter = 20
while i < max_sf_iter and np.any(sf_diff > sf_tol):
out_thick = _compute_thick(a0, a3, flux_a0, sf, _inv_function)
is_rectangular = np.repeat(shape == 'rectangular', len(width))
sf_diff[:] = sf[:]
sf = sf_func(width, out_thick, is_rectangular)
sf_diff = sf_diff - sf
i += 1
log.info('Shape factor {:s} used, took {:d} iterations for '
'convergence.'.format(shape_factor, i))
return _compute_thick(a0, a3, flux_a0, sf, _inv_function)
def sia_thickness_calving(slope, width, flux, water_depth, f_b, shape='rectangular',
glen_a=None, fs=None, shape_factor=None):
"""Computes the ice thickness from mass-conservation.
This is a utility function tested against the true OGGM inversion
function. Useful for teaching and inversion with calving.
Parameters
----------
slope : -np.gradient(hgt, dx) (we don't clip for min slope!)
width : section width in m
    flux : mass flux in m3 s-1
    water_depth : water depth at the calving front in m
    f_b : free board (frontal ice surface height above water level) in m
shape : 'rectangular' or 'parabolic'
glen_a : Glen A, defaults to PARAMS
fs : sliding, defaults to PARAMS
shape_factor: for lateral drag
Returns
-------
the ice thickness (in m)
"""
if glen_a is None:
glen_a = cfg.PARAMS['inversion_glen_a']
if fs is None:
fs = cfg.PARAMS['inversion_fs']
if shape not in ['parabolic', 'rectangular']:
raise InvalidParamsError('shape must be `parabolic` or `rectangular`, '
'not: {}'.format(shape))
_inv_function = _inversion_simple if fs == 0 else _inversion_poly
# Ice flow params
fd = 2. / (cfg.PARAMS['glen_n']+2) * glen_a
rho = cfg.PARAMS['ice_density']
# Convert the flux to m2 s-1 (averaged to represent the sections center)
flux_a0 = 1 if shape == 'rectangular' else 1.5
flux_a0 *= flux / width
# With numerically small widths this creates very high thicknesses
try:
flux_a0[width < 10] = 0
except TypeError:
if width < 10:
flux_a0 = 0
# Polynomial factors (a5 = 1)
a0 = - flux_a0 / ((rho * cfg.G * slope) ** 3 * fd)
#a3 = fs / fd
#water_depth = utils.clip_max(0,-bed_h + water_level)
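    # enhance the sliding coefficient near flotation: thickness (water_depth + f_b)
    # divided by the height above buoyancy, clipped to 10 m
    # (1028 kg m-3 ~ sea-water density)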
a3 = (fs / fd) * ((water_depth+f_b) / utils.clip_min(10,(water_depth+f_b) -
(1028/rho)*water_depth))
#a3 = fs / (fd * utils.clip_min(10, ((water_depth+f_b)-(1028/rho) * water_depth)))
#print(water_depth,f_b,a3)
# Inversion with shape factors?
sf_func = None
if shape_factor == 'Adhikari' or shape_factor == 'Nye':
sf_func = utils.shape_factor_adhikari
elif shape_factor == 'Huss':
sf_func = utils.shape_factor_huss
sf = np.ones(slope.shape) # Default shape factor is 1
if sf_func is not None:
# Start iteration for shape factor with first guess of 1
i = 0
sf_diff = np.ones(slope.shape)
# Some hard-coded factors here
sf_tol = 1e-2
max_sf_iter = 20
while i < max_sf_iter and np.any(sf_diff > sf_tol):
out_thick = _compute_thick(a0, a3, flux_a0, sf, _inv_function)
is_rectangular = np.repeat(shape == 'rectangular', len(width))
sf_diff[:] = sf[:]
sf = sf_func(width, out_thick, is_rectangular)
sf_diff = sf_diff - sf
i += 1
log.info('Shape factor {:s} used, took {:d} iterations for '
'convergence.'.format(shape_factor, i))
return _compute_thick(a0, a3, flux_a0, sf, _inv_function)
def find_sia_flux_from_thickness(slope, width, thick, glen_a=None, fs=None,
shape='rectangular'):
"""Find the ice flux produced by a given thickness and slope.
This can be done analytically but I'm lazy and use optimisation instead.
"""
def to_minimize(x):
h = sia_thickness(slope, width, x[0], glen_a=glen_a, fs=fs,
shape=shape)
return (thick - h)**2
out = optimize.minimize(to_minimize, [1], bounds=((0, 1e12),))
flux = out['x'][0]
# Sanity check
minimum = to_minimize([flux])
if minimum > 1:
warnings.warn('We did not find a proper flux for this thickness',
RuntimeWarning)
return flux
def _vol_below_water(surface_h, bed_h, bed_shape, thick, widths,
is_rectangular, is_trapezoid, fac, t_lambda,
dx, water_level):
bsl = (bed_h < water_level) & (thick > 0)
n_thick = np.copy(thick)
n_thick[~bsl] = 0
n_thick[bsl] = utils.clip_max(surface_h[bsl], water_level) - bed_h[bsl]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
n_w = np.sqrt(4 * n_thick / bed_shape)
n_w[is_rectangular] = widths[is_rectangular]
out = fac * n_thick * n_w * dx
# Trap
it = is_trapezoid
out[it] = (n_w[it] + n_w[it] - t_lambda*n_thick[it]) / 2*n_thick[it]*dx
return out
@entity_task(log, writes=['inversion_output'])
def mass_conservation_inversion(gdir, glen_a=None, fs=None, write=True,
filesuffix='', water_level=None,
t_lambda=None):
""" Compute the glacier thickness along the flowlines
More or less following Farinotti et al., (2009).
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
glen_a : float
glen's creep parameter A. Defaults to cfg.PARAMS.
fs : float
sliding parameter. Defaults to cfg.PARAMS.
write: bool
default behavior is to compute the thickness and write the
results in the pickle. Set to False in order to spare time
during calibration.
filesuffix : str
add a suffix to the output file
water_level : float
to compute volume below water level - adds an entry to the output dict
t_lambda : float
defining the angle of the trapezoid walls (see documentation). Defaults
to cfg.PARAMS.
"""
# Defaults
if glen_a is None:
glen_a = cfg.PARAMS['inversion_glen_a']
if fs is None:
fs = cfg.PARAMS['inversion_fs']
if t_lambda is None:
t_lambda = cfg.PARAMS['trapezoid_lambdas']
# Check input
_inv_function = _inversion_simple if fs == 0 else _inversion_poly
# Ice flow params
fd = 2. / (cfg.PARAMS['glen_n']+2) * glen_a
a3 = fs / fd
rho = cfg.PARAMS['ice_density']
# Inversion with shape factors?
sf_func = None
use_sf = cfg.PARAMS.get('use_shape_factor_for_inversion', None)
if use_sf == 'Adhikari' or use_sf == 'Nye':
sf_func = utils.shape_factor_adhikari
elif use_sf == 'Huss':
sf_func = utils.shape_factor_huss
# Clip the slope, in rad
min_slope = 'min_slope_ice_caps' if gdir.is_icecap else 'min_slope'
min_slope = np.deg2rad(cfg.PARAMS[min_slope])
out_volume = 0.
cls = gdir.read_pickle('inversion_input')
for cl in cls:
# Clip slope to avoid negative and small slopes
slope = cl['slope_angle']
slope = utils.clip_array(slope, min_slope, np.pi/2.)
# Glacier width
w = cl['width']
a0s = - cl['flux_a0'] / ((rho*cfg.G*slope)**3*fd)
sf = np.ones(slope.shape) # Default shape factor is 1
if sf_func is not None:
# Start iteration for shape factor with first guess of 1
i = 0
sf_diff = np.ones(slope.shape)
# Some hard-coded factors here
sf_tol = 1e-2
max_sf_iter = 20
while i < max_sf_iter and np.any(sf_diff > sf_tol):
out_thick = _compute_thick(a0s, a3, cl['flux_a0'], sf,
_inv_function)
sf_diff[:] = sf[:]
sf = sf_func(w, out_thick, cl['is_rectangular'])
sf_diff = sf_diff - sf
i += 1
log.info('Shape factor {:s} used, took {:d} iterations for '
'convergence.'.format(use_sf, i))
# TODO: possible shape factor optimisations
# thick update could be used as iteration end criterion instead
# we iterate for all grid points, even if some already converged
out_thick = _compute_thick(a0s, a3, cl['flux_a0'], sf, _inv_function)
# volume
is_rect = cl['is_rectangular']
fac = np.where(is_rect, 1, 2./3.)
volume = fac * out_thick * w * cl['dx']
# Now recompute thickness where parabola is too flat
is_trap = cl['is_trapezoid']
if cl['invert_with_trapezoid']:
min_shape = cfg.PARAMS['mixed_min_shape']
bed_shape = 4 * out_thick / w ** 2
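            # parabolic bed-shape parameter Ps = 4*H/w^2; too-flat parabolas
            # (Ps < mixed_min_shape) are re-inverted as trapezoids below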
is_trap = ((bed_shape < min_shape) & ~ cl['is_rectangular'] &
(cl['flux'] > 0)) | is_trap
for i in np.where(is_trap)[0]:
try:
out_thick[i] = sia_thickness_via_optim(slope[i], w[i],
cl['flux'][i],
shape='trapezoid',
t_lambda=t_lambda,
glen_a=glen_a,
fs=fs)
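                    # trapezoid area: (w_top + w_bottom)/2 * h, with
                    # w_bottom = w - t_lambda * h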
sect = (2*w[i] - t_lambda * out_thick[i]) / 2 * out_thick[i]
volume[i] = sect * cl['dx']
except ValueError:
# no solution error - we do with rect
out_thick[i] = sia_thickness_via_optim(slope[i], w[i],
cl['flux'][i],
shape='rectangular',
glen_a=glen_a,
fs=fs)
is_rect[i] = True
is_trap[i] = False
volume[i] = out_thick[i] * w[i] * cl['dx']
# Sanity check
if np.any(out_thick <= 0):
log.warning("Found zero or negative thickness: "
"this should not happen.")
if write:
cl['is_trapezoid'] = is_trap
cl['is_rectangular'] = is_rect
cl['thick'] = out_thick
cl['volume'] = volume
# volume below sl
try:
bed_h = cl['hgt'] - out_thick
bed_shape = 4 * out_thick / w ** 2
if np.any(bed_h < 0):
cl['volume_bsl'] = _vol_below_water(cl['hgt'], bed_h,
bed_shape, out_thick,
w,
cl['is_rectangular'],
cl['is_trapezoid'],
fac, t_lambda,
cl['dx'], 0)
if water_level is not None and np.any(bed_h < water_level):
cl['volume_bwl'] = _vol_below_water(cl['hgt'], bed_h,
bed_shape, out_thick,
w,
cl['is_rectangular'],
cl['is_trapezoid'],
fac, t_lambda,
cl['dx'],
water_level)
except KeyError:
# cl['hgt'] is not available on old prepro dirs
pass
out_volume += np.sum(volume)
if write:
gdir.write_pickle(cls, 'inversion_output', filesuffix=filesuffix)
gdir.add_to_diagnostics('inversion_glen_a', glen_a)
gdir.add_to_diagnostics('inversion_fs', fs)
return out_volume
def mass_conservation_inversion_tidewater(gdir, glen_a=None, fs=None, write=True,
filesuffix='', water_level=None, t_lambda=None):
""" Compute the glacier thickness along the flowlines
More or less following Farinotti et al., (2009).
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
glen_a : float
glen's creep parameter A. Defaults to cfg.PARAMS.
fs : float
sliding parameter. Defaults to cfg.PARAMS.
write: bool
default behavior is to compute the thickness and write the
results in the pickle. Set to False in order to spare time
during calibration.
filesuffix : str
add a suffix to the output file
water_level : float
to compute volume below water level - adds an entry to the output dict
t_lambda : float
defining the angle of the trapezoid walls (see documentation). Defaults
to cfg.PARAMS.
"""
# Defaults
if glen_a is None:
glen_a = cfg.PARAMS['inversion_glen_a']
if fs is None:
fs = cfg.PARAMS['inversion_fs']
if t_lambda is None:
t_lambda = cfg.PARAMS['trapezoid_lambdas']
# Check input
_inv_function = _inversion_simple if fs == 0 else _inversion_poly
# Ice flow params
fd = 2. / (cfg.PARAMS['glen_n']+2) * glen_a
a3 = fs / fd
rho = cfg.PARAMS['ice_density']
# Inversion with shape factors?
sf_func = None
use_sf = cfg.PARAMS.get('use_shape_factor_for_inversion', None)
if use_sf == 'Adhikari' or use_sf == 'Nye':
sf_func = utils.shape_factor_adhikari
elif use_sf == 'Huss':
sf_func = utils.shape_factor_huss
# Clip the slope, in rad
min_slope = 'min_slope_ice_caps' if gdir.is_icecap else 'min_slope'
min_slope = np.deg2rad(cfg.PARAMS[min_slope])
out_volume = 0.
cls = gdir.read_pickle('inversion_input')
for cl in cls:
# Clip slope to avoid negative and small slopes
slope = cl['slope_angle']
slope = utils.clip_array(slope, min_slope, np.pi/2.)
# Glacier width
a0s = - cl['flux_a0'] / ((rho*cfg.G*slope)**3*fd)
sf = np.ones(slope.shape) # Default shape factor is 1
out_thick = _compute_thick(a0s, a3, cl['flux_a0'], sf, _inv_function)
w = cl['width']
bed_h = cl['hgt'] - out_thick
if water_level is None:
water_depth = utils.clip_min(0,-bed_h)
else:
water_depth = utils.clip_min(0,-bed_h + water_level)
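        # scale the sliding coefficient by thickness over height above
        # buoyancy (1028 kg m-3 ~ sea-water density), clipped to 10 m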
a3s = ((fs / fd) * (out_thick / utils.clip_min(10,(out_thick -
(1028/rho)*water_depth))))
if sf_func is not None:
# Start iteration for shape factor with first guess of 1
i = 0
sf_diff = np.ones(slope.shape)
# Some hard-coded factors here
sf_tol = 1e-2
max_sf_iter = 20
while i < max_sf_iter and np.any(sf_diff > sf_tol):
out_thick = _compute_thick(a0s, a3s, cl['flux_a0'], sf,
_inv_function)
sf_diff[:] = sf[:]
sf = sf_func(w, out_thick, cl['is_rectangular'])
sf_diff = sf_diff - sf
i += 1
log.info('Shape factor {:s} used, took {:d} iterations for '
'convergence.'.format(use_sf, i))
# TODO: possible shape factor optimisations
# thick update could be used as iteration end criterion instead
# we iterate for all grid points, even if some already converged
out_thick = _compute_thick(a0s, a3s, cl['flux_a0'], sf, _inv_function)
# volume
is_rect = cl['is_rectangular']
fac = np.where(is_rect, 1, 2./3.)
volume = fac * out_thick * w * cl['dx']
# Now recompute thickness where parabola is too flat
is_trap = cl['is_trapezoid']
if cl['invert_with_trapezoid']:
min_shape = cfg.PARAMS['mixed_min_shape']
bed_shape = 4 * out_thick / w ** 2
is_trap = ((bed_shape < min_shape) & ~ cl['is_rectangular'] &
(cl['flux'] > 0)) | is_trap
for i in np.where(is_trap)[0]:
try:
out_thick[i] = sia_thickness_via_optim_tidewater(slope[i],
w[i],
cl['flux'][i],
water_depth[i],
shape='trapezoid',
t_lambda=t_lambda,
glen_a=glen_a,
fs=fs)
sect = (2*w[i] - t_lambda * out_thick[i]) / 2 * out_thick[i]
volume[i] = sect * cl['dx']
except ValueError:
# no solution error - we do with rect
out_thick[i] = sia_thickness_via_optim_tidewater(slope[i],
w[i],
cl['flux'][i],
water_depth[i],
shape='rectangular',
glen_a=glen_a,
fs=fs)
is_rect[i] = True
is_trap[i] = False
volume[i] = out_thick[i] * w[i] * cl['dx']
# Sanity check
if np.any(out_thick <= 0):
log.warning("Found zero or negative thickness: "
"this should not happen.")
if write:
cl['is_trapezoid'] = is_trap
cl['is_rectangular'] = is_rect
cl['thick'] = out_thick
cl['volume'] = volume
# volume below sl
try:
bed_h = cl['hgt'] - out_thick
bed_shape = 4 * out_thick / w ** 2
if np.any(bed_h < 0):
cl['volume_bsl'] = _vol_below_water(cl['hgt'], bed_h,
bed_shape, out_thick,
w,
cl['is_rectangular'],
cl['is_trapezoid'],
fac, t_lambda,
cl['dx'], 0)
if water_level is not None and np.any(bed_h < water_level):
cl['volume_bwl'] = _vol_below_water(cl['hgt'], bed_h,
bed_shape, out_thick,
w,
cl['is_rectangular'],
cl['is_trapezoid'],
fac, t_lambda,
cl['dx'],
water_level)
except KeyError:
# cl['hgt'] is not available on old prepro dirs
pass
out_volume += np.sum(volume)
if write:
gdir.write_pickle(cls, 'inversion_output', filesuffix=filesuffix)
gdir.add_to_diagnostics('inversion_glen_a', glen_a)
gdir.add_to_diagnostics('inversion_fs', fs)
return out_volume
@entity_task(log, writes=['inversion_output'])
def filter_inversion_output(gdir):
"""Filters the last few grid points after the physically-based inversion.
For various reasons (but mostly: the equilibrium assumption), the last few
grid points on a glacier flowline are often noisy and create unphysical
depressions. Here we try to correct for that. It is not volume conserving,
but area conserving.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
if gdir.is_tidewater:
# No need for filter in tidewater case
cls = gdir.read_pickle('inversion_output')
init_vol = np.sum([np.sum(cl['volume']) for cl in cls])
return init_vol
if not gdir.has_file('downstream_line'):
raise InvalidWorkflowError('filter_inversion_output now needs a '
'previous call to the '
'compute_dowstream_line and '
'compute_downstream_bedshape tasks')
dic_ds = gdir.read_pickle('downstream_line')
bs = np.average(dic_ds['bedshapes'][:3])
n = -5
cls = gdir.read_pickle('inversion_output')
cl = cls[-1]
# First guess thickness based on width
w = cl['width'][n:]
old_h = cl['thick'][n:]
s = w**3 * bs / 6
new_h = 3/2 * s / w
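    # parabolic section: area s = 2/3 * w * h (here s = bs * w^3 / 6),
    # hence h = 3/2 * s / w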
# Change only if it actually does what we want
new_h[old_h < new_h] = old_h[old_h < new_h]
# Smoothing things out a bit
hts = np.append(np.append(cl['thick'][n-3:n], new_h), 0)
h = utils.smooth1d(hts, 3)[n-1:-1]
# Recompute bedshape based on that
bs = utils.clip_min(4*h / w**2, cfg.PARAMS['mixed_min_shape'])
# OK, done
s = w**3 * bs / 6
# Change only if it actually does what we want
new_h = 3/2 * s / w
if np.any(new_h > old_h):
# No change in volume
return np.sum([np.sum(cl['volume']) for cl in cls])
cl['thick'][n:] = new_h
cl['volume'][n:] = s * cl['dx']
cl['is_trapezoid'][n:] = False
cl['is_rectangular'][n:] = False
gdir.write_pickle(cls, 'inversion_output')
# Return volume for convenience
return np.sum([np.sum(cl['volume']) for cl in cls])
@entity_task(log)
def get_inversion_volume(gdir):
    """Small utility task to get the volume of all glaciers."""
cls = gdir.read_pickle('inversion_output')
return np.sum([np.sum(cl['volume']) for cl in cls])
@entity_task(log, writes=['inversion_output'])
def compute_velocities(gdir, glen_a=None, fs=None, filesuffix=''):
"""Surface velocities along the flowlines from inverted ice thickness.
Computed following the methods described in
Cuffey and Paterson (2010) Eq. 8.35, pp 310:
    u_s = u_basal + (2A/(n+1)) * tau^n * H
In the case of no sliding:
u_z/u_s = [n+1]/[n+2] = 0.8 if n = 3.
The output is written in 'inversion_output.pkl' in m yr-1
You'll need to call prepare_for_inversion with the `add_debug_var=True`
kwarg for this to work!
Parameters
----------
gdir : Glacier directory
    glen_a : float
        Glen's creep parameter A. Defaults to cfg.PARAMS.
    fs : float
        sliding parameter. Defaults to cfg.PARAMS (no sliding if 0).
filesuffix : str
add a suffix to the output file
"""
# Defaults
if glen_a is None:
glen_a = cfg.PARAMS['inversion_glen_a']
if fs is None:
fs = cfg.PARAMS['inversion_fs']
rho = cfg.PARAMS['ice_density']
glen_n = cfg.PARAMS['glen_n']
# Getting the data for the main flowline
cls = gdir.read_pickle('inversion_output')
for cl in cls:
# vol in m3 and dx in m
section = cl['volume'] / cl['dx']
# this flux is in m3 per second
flux = cl['flux']
angle = cl['slope_angle']
thick = cl['thick']
if fs > 0:
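            # driving stress tau = rho * g * H * slope
            # (slope_angle ~ sin(alpha) for small angles)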
tau = rho * cfg.G * angle * thick
with warnings.catch_warnings():
# This can trigger a divide by zero Warning
warnings.filterwarnings("ignore", category=RuntimeWarning)
u_basal = fs * tau ** glen_n / thick
u_basal[~np.isfinite(u_basal)] = 0
u_deformation = (2 * glen_a / (glen_n + 1)) * (tau**glen_n) * thick
u_basal *= cfg.SEC_IN_YEAR
u_deformation *= cfg.SEC_IN_YEAR
u_surface = u_basal + u_deformation
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
velocity = flux / section
velocity *= cfg.SEC_IN_YEAR
else:
# velocity in cross section
fac = (glen_n + 1) / (glen_n + 2)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
velocity = flux / section
velocity *= cfg.SEC_IN_YEAR
u_surface = velocity / fac
u_basal = velocity * 0
u_deformation = velocity * 0
# output
cl['u_integrated'] = velocity
cl['u_surface'] = u_surface
cl['u_basal'] = u_basal
cl['u_deformation'] = u_deformation
gdir.write_pickle(cls, 'inversion_output', filesuffix=filesuffix)
@entity_task(log, writes=['gridded_data'])
def distribute_thickness_per_altitude(gdir, add_slope=True,
smooth_radius=None,
dis_from_border_exp=0.25,
varname_suffix=''):
"""Compute a thickness map by redistributing mass along altitudinal bands.
This is a rather cosmetic task, not relevant for OGGM but for ITMIX.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
add_slope : bool
whether a corrective slope factor should be used or not
smooth_radius : int
pixel size of the gaussian smoothing. Default is to use
cfg.PARAMS['smooth_window'] (i.e. a size in meters). Set to zero to
suppress smoothing.
dis_from_border_exp : float
the exponent of the distance from border mask
varname_suffix : str
add a suffix to the variable written in the file (for experiments)
"""
# Variables
grids_file = gdir.get_filepath('gridded_data')
# See if we have the masks, else compute them
with utils.ncDataset(grids_file) as nc:
has_masks = 'glacier_ext_erosion' in nc.variables
if not has_masks:
from oggm.core.gis import gridded_attributes
gridded_attributes(gdir)
with utils.ncDataset(grids_file) as nc:
topo_smoothed = nc.variables['topo_smoothed'][:]
glacier_mask = nc.variables['glacier_mask'][:]
dis_from_border = nc.variables['dis_from_border'][:]
if add_slope:
slope_factor = nc.variables['slope_factor'][:]
else:
slope_factor = 1.
# Along the lines
cls = gdir.read_pickle('inversion_output')
fls = gdir.read_pickle('inversion_flowlines')
hs, ts, vs, xs, ys = [], [], [], [], []
for cl, fl in zip(cls, fls):
hs = np.append(hs, fl.surface_h)
ts = np.append(ts, cl['thick'])
vs = np.append(vs, cl['volume'])
try:
x, y = fl.line.xy
except AttributeError:
# Squeezed flowlines, dummy coords
x = fl.surface_h * 0 - 1
y = fl.surface_h * 0 - 1
xs = np.append(xs, x)
ys = np.append(ys, y)
init_vol = np.sum(vs)
# Assign a first order thickness to the points
# very inefficient inverse distance stuff
thick = glacier_mask * np.NaN
for y in range(thick.shape[0]):
for x in range(thick.shape[1]):
phgt = topo_smoothed[y, x]
# take the ones in a 100m range
starth = 100.
while True:
starth += 10
pok = np.nonzero(np.abs(phgt - hs) <= starth)[0]
if len(pok) != 0:
break
sqr = np.sqrt((xs[pok]-x)**2 + (ys[pok]-y)**2)
pzero = np.where(sqr == 0)
if len(pzero[0]) == 0:
thick[y, x] = np.average(ts[pok], weights=1 / sqr)
elif len(pzero[0]) == 1:
thick[y, x] = ts[pzero]
else:
raise RuntimeError('We should not be there')
# Distance from border (normalized)
dis_from_border = dis_from_border**dis_from_border_exp
dis_from_border /= np.mean(dis_from_border[glacier_mask == 1])
thick *= dis_from_border
# Slope
thick *= slope_factor
# Smooth
dx = gdir.grid.dx
if smooth_radius != 0:
if smooth_radius is None:
smooth_radius = np.rint(cfg.PARAMS['smooth_window'] / dx)
thick = gaussian_blur(thick, int(smooth_radius))
thick = np.where(glacier_mask, thick, 0.)
# Re-mask
utils.clip_min(thick, 0, out=thick)
thick[glacier_mask == 0] = np.NaN
assert np.all(np.isfinite(thick[glacier_mask == 1]))
# Conserve volume
tmp_vol = np.nansum(thick * dx**2)
thick *= init_vol / tmp_vol
# write
with utils.ncDataset(grids_file, 'a') as nc:
vn = 'distributed_thickness' + varname_suffix
if vn in nc.variables:
v = nc.variables[vn]
else:
v = nc.createVariable(vn, 'f4', ('y', 'x', ), zlib=True)
v.units = '-'
v.long_name = 'Distributed ice thickness'
v[:] = thick
return thick
@entity_task(log, writes=['gridded_data'])
def distribute_thickness_interp(gdir, add_slope=True, smooth_radius=None,
varname_suffix=''):
"""Compute a thickness map by interpolating between centerlines and border.
IMPORTANT: this is NOT what has been used for ITMIX. We used
distribute_thickness_per_altitude for ITMIX and global ITMIX.
This is a rather cosmetic task, not relevant for OGGM but for ITMIX.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
add_slope : bool
whether a corrective slope factor should be used or not
smooth_radius : int
pixel size of the gaussian smoothing. Default is to use
cfg.PARAMS['smooth_window'] (i.e. a size in meters). Set to zero to
suppress smoothing.
varname_suffix : str
add a suffix to the variable written in the file (for experiments)
"""
# Variables
grids_file = gdir.get_filepath('gridded_data')
# See if we have the masks, else compute them
with utils.ncDataset(grids_file) as nc:
has_masks = 'ice_divides' in nc.variables
if not has_masks:
from oggm.core.gis import gridded_attributes
gridded_attributes(gdir)
with utils.ncDataset(grids_file) as nc:
glacier_mask = nc.variables['glacier_mask'][:]
glacier_ext = nc.variables['glacier_ext_erosion'][:]
ice_divides = nc.variables['ice_divides'][:]
if add_slope:
slope_factor = nc.variables['slope_factor'][:]
else:
slope_factor = 1.
# Thickness to interpolate
thick = glacier_ext * np.NaN
thick[(glacier_ext-ice_divides) == 1] = 0.
# TODO: domain border too, for convenience for a start
thick[0, :] = 0.
thick[-1, :] = 0.
thick[:, 0] = 0.
thick[:, -1] = 0.
# Along the lines
cls = gdir.read_pickle('inversion_output')
fls = gdir.read_pickle('inversion_flowlines')
vs = []
for cl, fl in zip(cls, fls):
vs.extend(cl['volume'])
x, y = utils.tuple2int(fl.line.xy)
thick[y, x] = cl['thick']
init_vol = np.sum(vs)
# Interpolate
xx, yy = gdir.grid.ij_coordinates
pnan = np.nonzero(~ np.isfinite(thick))
pok = np.nonzero(np.isfinite(thick))
points = np.array((np.ravel(yy[pok]), np.ravel(xx[pok]))).T
inter = np.array((np.ravel(yy[pnan]), np.ravel(xx[pnan]))).T
thick[pnan] = griddata(points, np.ravel(thick[pok]), inter, method='cubic')
utils.clip_min(thick, 0, out=thick)
# Slope
thick *= slope_factor
# Smooth
dx = gdir.grid.dx
if smooth_radius != 0:
if smooth_radius is None:
smooth_radius = np.rint(cfg.PARAMS['smooth_window'] / dx)
thick = gaussian_blur(thick, int(smooth_radius))
thick = np.where(glacier_mask, thick, 0.)
# Re-mask
thick[glacier_mask == 0] = np.NaN
assert np.all(np.isfinite(thick[glacier_mask == 1]))
# Conserve volume
tmp_vol = np.nansum(thick * dx**2)
thick *= init_vol / tmp_vol
# write
grids_file = gdir.get_filepath('gridded_data')
with utils.ncDataset(grids_file, 'a') as nc:
vn = 'distributed_thickness' + varname_suffix
if vn in nc.variables:
v = nc.variables[vn]
else:
v = nc.createVariable(vn, 'f4', ('y', 'x', ), zlib=True)
v.units = '-'
v.long_name = 'Distributed ice thickness'
v[:] = thick
return thick
def calving_flux_from_depth(gdir, k=None, water_level=None, water_depth=None,
thick=None, fixed_water_depth=False):
"""Finds a calving flux from the calving front thickness.
    Approach based on <NAME> Hock (2015) and Oerlemans and Nick (2005).
We take the initial output of the model and surface elevation data
to calculate the water depth of the calving front.
Parameters
----------
gdir : GlacierDirectory
k : float
calving constant
water_level : float
in case water is not at 0 m a.s.l
    water_depth : float
the default is to compute the water_depth from ice thickness
at the terminus and altitude. Set this to force the water depth
to a certain value
thick :
Set this to force the ice thickness to a certain value (for
sensitivity experiments).
fixed_water_depth :
If we have water depth from Bathymetry we fix the water depth
and forget about the free-board
Returns
-------
A dictionary containing:
- the calving flux in [km3 yr-1]
- the frontal width in m
- the frontal thickness in m
- the frontal water depth in m
- the frontal free board in m
"""
# Defaults
if k is None:
k = cfg.PARAMS['inversion_calving_k']
# Read necessary data
fl = gdir.read_pickle('inversion_flowlines')[-1]
# Altitude at the terminus and frontal width
free_board = utils.clip_min(fl.surface_h[-1], 0) - water_level
width = fl.widths[-1] * gdir.grid.dx
# Calving formula
if thick is None:
cl = gdir.read_pickle('inversion_output')[-1]
thick = cl['thick'][-1]
if water_depth is None:
water_depth = thick - free_board
elif not fixed_water_depth:
# Correct thickness with prescribed water depth
# If fixed_water_depth=True then we forget about t_altitude
thick = water_depth + free_board
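    # calving law: flux = k * H * d * w, with k in yr-1 and lengths in m;
    # dividing by 1e9 converts m3 yr-1 to km3 yr-1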
flux = k * thick * water_depth * width / 1e9
if fixed_water_depth:
# Recompute free board before returning
free_board = thick - water_depth
return {'flux': utils.clip_min(flux, 0),
'width': width,
'thick': thick,
'inversion_calving_k': k,
'water_depth': water_depth,
'water_level': water_level,
'free_board': free_board}
@entity_task(log, writes=['diagnostics'])
def find_inversion_calving(gdir, water_level=None, fixed_water_depth=None,
glen_a=None, fs=None, min_mu_star_frac=None):
"""Optimized search for a calving flux compatible with the bed inversion.
See Recinos et al 2019 for details.
Parameters
----------
water_level : float
the water level. It should be zero m a.s.l, but:
- sometimes the frontal elevation is unrealistically high (or low).
- lake terminating glaciers
- other uncertainties
With this parameter, you can produce more realistic values. The default
is to infer the water level from PARAMS['free_board_lake_terminating']
and PARAMS['free_board_marine_terminating']
fixed_water_depth : float
fix the water depth to an observed value and let the free board vary
instead.
glen_a : float, optional
fs : float, optional
min_mu_star_frac : float, optional
fraction of the original (non-calving) mu* you are ready to allow
for. Defaults cfg.PARAMS['calving_min_mu_star_frac'].
"""
from oggm.core import climate
from oggm.exceptions import MassBalanceCalibrationError
if not gdir.is_tidewater or not cfg.PARAMS['use_kcalving_for_inversion']:
# Do nothing
return
if min_mu_star_frac is None:
min_mu_star_frac = cfg.PARAMS['calving_min_mu_star_frac']
# Let's start from a fresh state
gdir.inversion_calving_rate = 0
with utils.DisableLogger():
climate.local_t_star(gdir)
climate.mu_star_calibration(gdir)
prepare_for_inversion(gdir)
v_ref = mass_conservation_inversion_tidewater(gdir,
water_level=water_level,
glen_a=glen_a, fs=fs)
# We have a stop condition on mu*
prev_params = gdir.read_json('local_mustar')
mu_star_orig = np.min(prev_params['mu_star_per_flowline'])
# Store for statistics
gdir.add_to_diagnostics('volume_before_calving', v_ref)
gdir.add_to_diagnostics('mu_star_before_calving', mu_star_orig)
# Get the relevant variables
cls = gdir.read_pickle('inversion_input')[-1]
slope = cls['slope_angle'][-1]
width = cls['width'][-1]
# Stupidly enough the slope is clipped in the OGGM inversion, not
# in prepro - clip here
min_slope = 'min_slope_ice_caps' if gdir.is_icecap else 'min_slope'
min_slope = np.deg2rad(cfg.PARAMS[min_slope])
slope = utils.clip_array(slope, min_slope, np.pi / 2.)
# Check that water level is within given bounds
if water_level is None:
th = cls['hgt'][-1]
if gdir.is_lake_terminating:
water_level = th - cfg.PARAMS['free_board_lake_terminating']
else:
vmin, vmax = cfg.PARAMS['free_board_marine_terminating']
water_level = utils.clip_scalar(0, th - vmax, th - vmin)
# The functions all have the same shape: they decrease, then increase
# We seek the absolute minimum first
def to_minimize(h):
if fixed_water_depth is not None:
fl = calving_flux_from_depth(gdir, thick=h,
water_level=water_level,
water_depth=fixed_water_depth,
fixed_water_depth=True)
else:
fl = calving_flux_from_depth(gdir, water_level=water_level,
water_depth=h)
flux = fl['flux'] * 1e9 / cfg.SEC_IN_YEAR
f_b = fl['free_board']
sia_thick = sia_thickness_calving(slope, width, flux, h, f_b,
glen_a=glen_a, fs=fs)
return fl['thick'] - sia_thick
abs_min = optimize.minimize(to_minimize, [1], bounds=((1e-4, 1e4), ),
tol=1e-1)
if not abs_min['success']:
raise RuntimeError('Could not find the absolute minimum in calving '
'flux optimization: {}'.format(abs_min))
if abs_min['fun'] > 0:
# This happens, and means that this glacier simply can't calve
        # This is an indicator for physics not matching, often an unrealistic
# slope of free-board. We just set the water level to the surface
# height at the front and try again to get an estimate.
        # use the frontal surface elevation (th may be undefined when a
        # water_level was passed explicitly)
        water_level = cls['hgt'][-1]
out = calving_flux_from_depth(gdir, water_level=water_level)
opt = out['thick']
# OK, we now find the zero between abs min and an arbitrary high front
else:
abs_min = abs_min['x'][0]
opt = optimize.brentq(to_minimize, abs_min, 1e4)
# This is the thick guaranteeing OGGM Flux = Calving Law Flux
# Let's see if it results in a meaningful mu_star
# Give the flux to the inversion and recompute
if fixed_water_depth is not None:
out = calving_flux_from_depth(gdir, water_level=water_level, thick=opt,
water_depth=fixed_water_depth,
fixed_water_depth=True)
f_calving = out['flux']
else:
out = calving_flux_from_depth(gdir, water_level=water_level,
water_depth=opt)
f_calving = out['flux']
gdir.inversion_calving_rate = f_calving
with utils.DisableLogger():
# We accept values down to zero before stopping
min_mu_star = mu_star_orig * min_mu_star_frac
# At this step we might raise a MassBalanceCalibrationError
try:
climate.local_t_star(gdir, clip_mu_star=False,
min_mu_star=min_mu_star,
continue_on_error=False,
add_to_log_file=False)
df = gdir.read_json('local_mustar')
except MassBalanceCalibrationError as e:
assert 'mu* out of specified bounds' in str(e)
# When this happens we clip mu*
climate.local_t_star(gdir, clip_mu_star=True,
min_mu_star=min_mu_star)
df = gdir.read_json('local_mustar')
climate.mu_star_calibration(gdir, min_mu_star=min_mu_star)
prepare_for_inversion(gdir)
mass_conservation_inversion_tidewater(gdir, water_level=water_level,
glen_a=glen_a, fs=fs)
if fixed_water_depth is not None:
out = calving_flux_from_depth(gdir, water_level=water_level,
water_depth=fixed_water_depth,
fixed_water_depth=True)
else:
out = calving_flux_from_depth(gdir, water_level=water_level)
fl = gdir.read_pickle('inversion_flowlines')[-1]
f_calving = (fl.flux[-1] * (gdir.grid.dx ** 2) * 1e-9 /
cfg.PARAMS['ice_density'])
    log.info('({}) find_inversion_calving: found calving flux of '
'{:.03f} km3 yr-1'.format(gdir.rgi_id, f_calving))
# Store results
odf = dict()
odf['calving_flux'] = f_calving
odf['calving_rate_myr'] = f_calving * 1e9 / (out['thick'] * out['width'])
odf['calving_mu_star'] = df['mu_star_glacierwide']
odf['calving_law_flux'] = out['flux']
odf['calving_water_level'] = out['water_level']
odf['calving_inversion_k'] = out['inversion_calving_k']
odf['calving_front_slope'] = slope
odf['calving_front_water_depth'] = out['water_depth']
odf['calving_front_free_board'] = out['free_board']
odf['calving_front_thick'] = out['thick']
odf['calving_front_width'] = out['width']
for k, v in odf.items():
gdir.add_to_diagnostics(k, v)
return odf
@entity_task(log, writes=['diagnostics'])
def find_inversion_calving_from_any_mb(gdir, mb_model=None, mb_years=None,
water_level=None,
glen_a=None, fs=None):
"""Optimized search for a calving flux compatible with the bed inversion.
See Recinos et al 2019 for details. This task is an update to
`find_inversion_calving` but acting upon a MB residual (i.e. a shift)
instead of the model temperature sensitivity.
Parameters
----------
mb_model : :py:class:`oggm.core.massbalance.MassBalanceModel`
the mass-balance model to use
mb_years : array
the array of years from which you want to average the MB for (for
mb_model only).
water_level : float
the water level. It should be zero m a.s.l, but:
- sometimes the frontal elevation is unrealistically high (or low).
- lake terminating glaciers
- other uncertainties
With this parameter, you can produce more realistic values. The default
is to infer the water level from PARAMS['free_board_lake_terminating']
and PARAMS['free_board_marine_terminating']
glen_a : float, optional
fs : float, optional
"""
from oggm.core import climate
if not gdir.is_tidewater or not cfg.PARAMS['use_kcalving_for_inversion']:
# Do nothing
return
# Let's start from a fresh state
gdir.inversion_calving_rate = 0
with utils.DisableLogger():
climate.apparent_mb_from_any_mb(gdir, mb_model=mb_model,
mb_years=mb_years)
prepare_for_inversion(gdir)
v_ref = mass_conservation_inversion(gdir, water_level=water_level,
glen_a=glen_a, fs=fs)
# Store for statistics
gdir.add_to_diagnostics('volume_before_calving', v_ref)
# Get the relevant variables
cls = gdir.read_pickle('inversion_input')[-1]
slope = cls['slope_angle'][-1]
width = cls['width'][-1]
# Stupidly enough the slope is clipped in the OGGM inversion, not
# in inversion prepro - clip here
min_slope = 'min_slope_ice_caps' if gdir.is_icecap else 'min_slope'
min_slope = np.deg2rad(cfg.PARAMS[min_slope])
slope = utils.clip_array(slope, min_slope, np.pi / 2.)
# Check that water level is within given bounds
if water_level is None:
th = cls['hgt'][-1]
if gdir.is_lake_terminating:
water_level = th - cfg.PARAMS['free_board_lake_terminating']
else:
vmin, vmax = cfg.PARAMS['free_board_marine_terminating']
water_level = utils.clip_scalar(0, th - vmax, th - vmin)
# The functions all have the same shape: they decrease, then increase
# We seek the absolute minimum first
def to_minimize(h):
fl = calving_flux_from_depth(gdir, water_level=water_level,
water_depth=h)
flux = fl['flux'] * 1e9 / cfg.SEC_IN_YEAR
sia_thick = sia_thickness(slope, width, flux, glen_a=glen_a, fs=fs)
return fl['thick'] - sia_thick
abs_min = optimize.minimize(to_minimize, [1], bounds=((1e-4, 1e4), ),
tol=1e-1)
if not abs_min['success']:
raise RuntimeError('Could not find the absolute minimum in calving '
'flux optimization: {}'.format(abs_min))
if abs_min['fun'] > 0:
# This happens, and means that this glacier simply can't calve
        # This is an indicator for physics not matching, often an unrealistic
# slope of free-board
out = calving_flux_from_depth(gdir, water_level=water_level)
log.warning('({}) find_inversion_calving_from_any_mb: could not find '
'calving flux.'.format(gdir.rgi_id))
odf = dict()
odf['calving_flux'] = 0
odf['calving_rate_myr'] = 0
odf['calving_law_flux'] = out['flux']
odf['calving_water_level'] = out['water_level']
odf['calving_inversion_k'] = out['inversion_calving_k']
odf['calving_front_slope'] = slope
odf['calving_front_water_depth'] = out['water_depth']
odf['calving_front_free_board'] = out['free_board']
odf['calving_front_thick'] = out['thick']
odf['calving_front_width'] = out['width']
for k, v in odf.items():
gdir.add_to_diagnostics(k, v)
return
# OK, we now find the zero between abs min and an arbitrary high front
abs_min = abs_min['x'][0]
opt = optimize.brentq(to_minimize, abs_min, 1e4)
# Give the flux to the inversion and recompute
# This is the thick guaranteeing OGGM Flux = Calving Law Flux
out = calving_flux_from_depth(gdir, water_level=water_level,
water_depth=opt)
f_calving = out['flux']
log.info('({}) find_inversion_calving_from_any_mb: found calving flux of '
'{:.03f} km3 yr-1'.format(gdir.rgi_id, f_calving))
gdir.inversion_calving_rate = f_calving
with utils.DisableLogger():
climate.apparent_mb_from_any_mb(gdir, mb_model=mb_model,
mb_years=mb_years)
prepare_for_inversion(gdir)
mass_conservation_inversion(gdir, water_level=water_level,
glen_a=glen_a, fs=fs)
out = calving_flux_from_depth(gdir, water_level=water_level)
fl = gdir.read_pickle('inversion_flowlines')[-1]
f_calving = (fl.flux[-1] * (gdir.grid.dx ** 2) * 1e-9 /
cfg.PARAMS['ice_density'])
# Store results
odf = dict()
odf['calving_flux'] = f_calving
odf['calving_rate_myr'] = f_calving * 1e9 / (out['thick'] * out['width'])
odf['calving_law_flux'] = out['flux']
odf['calving_water_level'] = out['water_level']
odf['calving_inversion_k'] = out['inversion_calving_k']
odf['calving_front_slope'] = slope
odf['calving_front_water_depth'] = out['water_depth']
odf['calving_front_free_board'] = out['free_board']
odf['calving_front_thick'] = out['thick']
odf['calving_front_width'] = out['width']
for k, v in odf.items():
gdir.add_to_diagnostics(k, v)
return odf
``` |
{
"source": "jmalles/water_terminating_module",
"score": 3
} |
#### File: water_terminating_module/model_code/FluxModel.py
```python
import logging
import numpy as np
import os
import warnings
from oggm.core.flowline import FlowlineModel
import oggm.cfg as cfg
from oggm import utils
# Constants
from oggm.cfg import SEC_IN_DAY, SEC_IN_YEAR
from oggm.cfg import G, GAUSSIAN_KERNEL
# Module logger
log = logging.getLogger(__name__)
class FluxBasedModelWaterFront(FlowlineModel):
"""The flowline model used by OGGM in production.
It solves for the SIA along the flowline(s) using a staggered grid. It
computes the *ice flux* between grid points and transports the mass
accordingly (also between flowlines).
This model is numerically less stable than fancier schemes, but it
is fast and works with multiple flowlines of any bed shape (rectangular,
parabolic, trapeze, and any combination of them).
We test that it conserves mass in most cases, but not on very stiff cliffs.
"""
def __init__(self, flowlines, mb_model=None, y0=0., glen_a=None,
fs=0., inplace=False, fixed_dt=None, cfl_number=None,
min_dt=None, flux_gate_thickness=None,
flux_gate=None, flux_gate_build_up=100,
do_kcalving=None, calving_k=None, calving_use_limiter=None,
calving_limiter_frac=None, water_level=None,
**kwargs):
"""Instanciate the model.
Parameters
----------
flowlines : list
the glacier flowlines
mb_model : MassBalanceModel
the mass-balance model
y0 : int
initial year of the simulation
glen_a : float
Glen's creep parameter
fs : float
Oerlemans sliding parameter
inplace : bool
whether or not to make a copy of the flowline objects for the run
setting to True implies that your objects will be modified at run
time by the model (can help to spare memory)
fixed_dt : float
set to a value (in seconds) to prevent adaptive time-stepping.
cfl_number : float
Defaults to cfg.PARAMS['cfl_number'].
For adaptive time stepping (the default), dt is chosen from the
CFL criterion (dt = cfl_number * dx / max_u).
To choose the "best" CFL number we would need a stability
analysis - we used an empirical analysis (see blog post) and
settled on 0.02 for the default cfg.PARAMS['cfl_number'].
min_dt : float
Defaults to cfg.PARAMS['cfl_min_dt'].
At high velocities, time steps can become very small and your
model might run very slowly. In production, it might be useful to
set a limit below which the model will just error.
is_tidewater: bool, default: False
is this a tidewater glacier?
is_lake_terminating: bool, default: False
is this a lake terminating glacier?
mb_elev_feedback : str, default: 'annual'
'never', 'always', 'annual', or 'monthly': how often the
mass-balance should be recomputed from the mass balance model.
'Never' is equivalent to 'annual' but without elevation feedback
at all (the heights are taken from the first call).
check_for_boundaries: bool, default: True
raise an error when the glacier grows bigger than the domain
boundaries
flux_gate_thickness : float or array
flux of ice from the left domain boundary (and tributaries).
Units of m of ice thickness. Note that unrealistic values won't be
met by the model, so this is really just a rough guidance.
It's better to use `flux_gate` instead.
flux_gate : float or function or array of floats or array of functions
flux of ice from the left domain boundary (and tributaries)
(unit: m3 of ice per second). If set to a high value, consider
changing the flux_gate_buildup time. You can also provide
a function (or an array of functions) returning the flux
(unit: m3 of ice per second) as a function of time.
This is overridden by `flux_gate_thickness` if provided.
flux_gate_buildup : int
number of years used to build up the flux gate to full value
do_kcalving : bool
switch on the k-calving parameterisation. Ignored if not a
tidewater glacier. Use the option from PARAMS per default
calving_k : float
the calving proportionality constant (units: yr-1). Use the
one from PARAMS per default
calving_use_limiter : bool
whether to switch on the calving limiter on the parameterisation
makes the calving fronts thicker but the model is more stable
calving_limiter_frac : float
limit the front slope to a fraction of the calving front.
"3" means 1/3. Setting it to 0 limits the slope to sea-level.
water_level : float
the water level. It should be zero m a.s.l, but:
- sometimes the frontal elevation is unrealistically high (or low).
- lake terminating glaciers
- other uncertainties
The default is 0. For lake terminating glaciers,
it is inferred from PARAMS['free_board_lake_terminating'].
The best way to set the water level for real glaciers is to use
the same as used for the inversion (this is what
`flowline_model_run` does for you)
"""
super(FluxBasedModelWaterFront, self).__init__(flowlines,
mb_model=mb_model, y0=y0,
glen_a=glen_a, fs=fs,
inplace=inplace,
water_level=water_level,
**kwargs)
self.fixed_dt = fixed_dt
if min_dt is None:
min_dt = cfg.PARAMS['cfl_min_dt']
if cfl_number is None:
cfl_number = cfg.PARAMS['cfl_number']
self.min_dt = min_dt
self.cfl_number = cfl_number
# Do we want to use shape factors?
self.sf_func = None
use_sf = cfg.PARAMS.get('use_shape_factor_for_fluxbasedmodel')
if use_sf == 'Adhikari' or use_sf == 'Nye':
self.sf_func = utils.shape_factor_adhikari
elif use_sf == 'Huss':
self.sf_func = utils.shape_factor_huss
# Calving params
if do_kcalving is None:
do_kcalving = cfg.PARAMS['use_kcalving_for_run']
self.do_calving = do_kcalving and self.is_tidewater
# if calving_k is None:
# calving_k = cfg.PARAMS['calving_k']
self.calving_k = calving_k / cfg.SEC_IN_YEAR
if calving_use_limiter is None:
calving_use_limiter = cfg.PARAMS['calving_use_limiter']
self.calving_use_limiter = calving_use_limiter
if calving_limiter_frac is None:
calving_limiter_frac = cfg.PARAMS['calving_limiter_frac']
if calving_limiter_frac > 0:
raise NotImplementedError('calving limiter other than 0 not '
'implemented yet')
self.calving_limiter_frac = calving_limiter_frac
self.rho_o = 1028 # Ocean density, must be >= ice density
# Flux gate
self.flux_gate = utils.tolist(flux_gate, length=len(self.fls))
self.flux_gate_m3_since_y0 = 0.
if flux_gate_thickness is not None:
# Compute the theoretical ice flux from the slope at the top
flux_gate_thickness = utils.tolist(flux_gate_thickness,
length=len(self.fls))
self.flux_gate = []
for fl, fgt in zip(self.fls, flux_gate_thickness):
# We set the thickness to the desired value so that
# the widths work ok
fl = copy.deepcopy(fl)
fl.thick = fl.thick * 0 + fgt
slope = (fl.surface_h[0] - fl.surface_h[1]) / fl.dx_meter
if slope == 0:
raise ValueError('I need a slope to compute the flux')
flux = find_sia_flux_from_thickness(slope,
fl.widths_m[0],
fgt,
shape=fl.shape_str[0],
glen_a=self.glen_a,
fs=self.fs)
self.flux_gate.append(flux)
# convert the floats to function calls
for i, fg in enumerate(self.flux_gate):
if fg is None:
continue
try:
# Do we have a function? If yes all good
fg(self.yr)
except TypeError:
# If not, make one
self.flux_gate[i] = partial(flux_gate_with_build_up,
flux_value=fg,
flux_gate_yr=(flux_gate_build_up +
self.y0))
# Optim
self.slope_stag = []
self.thick_stag = []
self.section_stag = []
self.depth_stag = []
self.u_drag = []
self.u_slide = []
self.u_stag = []
self.shapefac_stag = []
self.flux_stag = []
self.trib_flux = []
for fl, trib in zip(self.fls, self._tributary_indices):
nx = fl.nx
# This is not staggered
self.trib_flux.append(np.zeros(nx))
# We add an additional fake grid point at the end of tributaries
if trib[0] is not None:
nx = fl.nx + 1
# +1 is for the staggered grid
self.slope_stag.append(np.zeros(nx+1))
self.thick_stag.append(np.zeros(nx+1))
self.section_stag.append(np.zeros(nx+1))
self.depth_stag.append(np.zeros(nx+1))
self.u_stag.append(np.zeros(nx+1))
self.u_drag.append(np.zeros(nx+1))
self.u_slide.append(np.zeros(nx+1))
self.shapefac_stag.append(np.ones(nx+1)) # beware the ones!
self.flux_stag.append(np.zeros(nx+1))
def step(self, dt):
"""Advance one step."""
# Just a check to avoid useless computations
if dt <= 0:
raise InvalidParamsError('dt needs to be strictly positive')
# Simple container
mbs = []
# Loop over tributaries to determine the flux rate
for fl_id, fl in enumerate(self.fls):
# This is possibly less efficient than zip() but much clearer
trib = self._tributary_indices[fl_id]
slope_stag = self.slope_stag[fl_id]
thick_stag = self.thick_stag[fl_id]
section_stag = self.section_stag[fl_id]
depth_stag = self.depth_stag[fl_id]
sf_stag = self.shapefac_stag[fl_id]
flux_stag = self.flux_stag[fl_id]
trib_flux = self.trib_flux[fl_id]
u_stag = self.u_stag[fl_id]
u_drag = self.u_drag[fl_id]
u_slide = self.u_slide[fl_id]
flux_gate = self.flux_gate[fl_id]
# Flowline state
surface_h = fl.surface_h
thick = fl.thick
width = fl.widths_m
section = fl.section
dx = fl.dx_meter
depth = utils.clip_min(0,self.water_level - fl.bed_h)
# If it is a tributary, we use the branch it flows into to compute
# the slope of the last grid point
is_trib = trib[0] is not None
if is_trib:
fl_to = self.fls[trib[0]]
ide = fl.flows_to_indice
surface_h = np.append(surface_h, fl_to.surface_h[ide])
thick = np.append(thick, thick[-1])
section = np.append(section, section[-1])
width = np.append(width, width[-1])
depth = np.append(depth, depth[-1])
# elif self.do_calving and self.calving_use_limiter:
# We lower the max possible ice deformation
# by clipping the surface slope here. It is completely
# arbitrary but reduces ice deformation at the calving front.
# I think that in essence, it is also partly
# a "calving process", because this ice deformation must
# be less at the calving front. The result is that calving
# front "free boards" are quite high.
# Note that 0 is arbitrary, it could be any value below SL
#
# This is deprecated and should not have an effect anymore
# with the implementation of frontal dynamics below.
# surface_h = utils.clip_min(surface_h, self.water_level)
# Staggered gradient
stretch_dist_p = cfg.PARAMS.get('stretch_dist', 8e3)
min_l = cfg.PARAMS.get('min_ice_thick_for_length', 0)
slope_stag[0] = 0
slope_stag[1:-1] = (surface_h[0:-1] - surface_h[1:]) / dx
slope_stag[-1] = slope_stag[-2]
thick_stag[1:-1] = (thick[0:-1] + thick[1:]) / 2.
thick_stag[[0, -1]] = thick[[0, -1]]
depth_stag[1:-1] = (depth[0:-1] + depth[1:]) / 2.
depth_stag[[0, -1]] = depth[[0, -1]]
h=[]
d=[]
no_ice=[]
last_ice=[]
last_above_wl=[]
has_ice=[]
ice_above_wl=[]
A = self.glen_a
N = self.glen_n
if self.sf_func is not None:
# TODO: maybe compute new shape factors only every year?
sf = self.sf_func(fl.widths_m, fl.thick, fl.is_rectangular)
if is_trib:
# for inflowing tributary, the sf makes no sense
sf = np.append(sf, 1.)
sf_stag[1:-1] = (sf[0:-1] + sf[1:]) / 2.
sf_stag[[0, -1]] = sf[[0, -1]]
# Determine if and where ice bodies are; if there is no ice above
# water_level, we fall back to the standard version in the else
# below.
ice_above_wl = np.any((fl.surface_h > self.water_level) &
(fl.thick > min_l) & \
(fl.bed_h < self.water_level))
has_ice = np.any(fl.thick > min_l)
# We compute more complex dynamics when we have ice below water
if has_ice and ice_above_wl and self.do_calving:
last_above_wl = np.nonzero((fl.surface_h > self.water_level) &
(fl.bed_h < self.water_level) &
(fl.thick > min_l))[0][-1]
# last_above_wl = np.nonzero((fl.surface_h > self.water_level) &
# (fl.bed_h < self.water_level) &
# (fl.thick >= (self.rho_o/self.rho)*
# depth))[0][-1]
no_ice = np.nonzero((fl.thick < min_l))[0]
last_ice = np.where((fl.thick[no_ice-1] > min_l) & \
(fl.surface_h[no_ice-1] > self.water_level))[0]
last_ice = no_ice[last_ice]-1
if last_ice.size == 1:
first_ice = np.nonzero(fl.thick[0:last_above_wl+1]\
> min_l)[0][0]
elif last_ice.size > 1 and (last_ice[-2]+1 < last_above_wl+1):
first_ice = np.nonzero(fl.thick[(last_ice[-2]+1)\
:(last_above_wl+1)] > min_l)[0][0]
first_ice = last_ice[-2]+first_ice
elif last_ice.size > 1 :
first_ice = np.nonzero(fl.thick[0:(last_above_wl)]\
> min_l)[0][-1]
else:
first_ice = 0
# Determine water depth at the front
h = fl.thick[last_above_wl]
d = h - (fl.surface_h[last_above_wl] - self.water_level)
thick_stag[last_above_wl+1] = h
depth_stag[last_above_wl+1] = d
last_thick = np.nonzero((fl.thick > 0) &
(fl.surface_h >
self.water_level))[0][-1]
# Determine height above buoyancy
z_a_b = utils.clip_min(0,thick_stag - depth_stag *
(self.rho_o/self.rho))
# Compute net hydrostatic force at the front.
# One could think about incorporating ice melange / sea ice here
# as an additional term. (And also in the frontal ablation
# formulation.)
if last_above_wl == last_thick:
pull_last = utils.clip_min(0,0.5 * G * (self.rho * h**2 -
self.rho_o * d**2))
# Determine distance over which above stress is distributed
stretch_length = (last_above_wl - first_ice) * dx
stretch_length = utils.clip_min(stretch_length, dx)
stretch_dist = utils.clip_max(stretch_length,
stretch_dist_p)
n_stretch = np.rint(stretch_dist/dx).astype(int)
# Define stretch factor and add to driving stress
stretch_factor = np.zeros(n_stretch)
for j in range(n_stretch):
stretch_factor[j] = 2*(j+1)/(n_stretch+1)
if dx > stretch_dist:
stretch_factor = stretch_dist / dx
n_stretch = 1
stretch_first = utils.clip_min(0,(last_above_wl+2)-
n_stretch).astype(int)
stretch_last = last_above_wl+2
# Take slope for stress calculation at boundary to last grid
# cell as the mean over the "stretched" distance (see above)
if last_above_wl+1 < len(fl.bed_h) and \
stretch_first != stretch_last-1:
slope_stag[last_above_wl+1] = np.nanmean(slope_stag\
[stretch_first:\
stretch_last-1])
stress = self.rho*G*slope_stag*thick_stag
#Add "stretching stress" do basal shear stress
if last_above_wl == last_thick:
stress[stretch_first:stretch_last] = (stress[stretch_first:
stretch_last] +
stretch_factor *
(pull_last /
stretch_dist))
# Compute velocities
u_drag[:] = thick_stag * stress**N * self._fd * sf_stag**N
# Arbitrarily manipulating u_slide for grid cells
# approaching buoyancy in order to prevent it from going
# towards infinity...
u_slide[:] = (stress**N / z_a_b) * self.fs * sf_stag**N
u_slide = np.where(z_a_b < 0.1, 4*u_drag, u_slide)
# Inhibit flow out of grid cell adjacent to last above
# sea-level in order to prevent shelf dynamics. (Not sure if
# this is necessary though...)
#u_slide[last_above_wl+2:] = u_slide[last_above_wl+1]
#u_drag[last_above_wl+2:] = u_drag[last_above_wl+1]
u_stag[:] = u_drag + u_slide
# Staggered section
# For the flux out of the last grid cell, the staggered section
# is set to the cross section of the calving front.
section_stag[1:-1] = (section[0:-1] + section[1:]) / 2.
section_stag[[0, -1]] = section[[0, -1]]
section_stag[last_above_wl+1] = section[last_above_wl]
# Usual ice dynamics without water at the front
else:
rhogh = (self.rho*G*slope_stag)**N
u_stag[:] = ((thick_stag**(N+1)) * self._fd * rhogh
+ (thick_stag**(N-1)) * self.fs * rhogh) * \
sf_stag**N
# Staggered section
section_stag[1:-1] = (section[0:-1] + section[1:]) / 2.
section_stag[[0, -1]] = section[[0, -1]]
# Staggered flux rate
flux_stag[:] = u_stag * section_stag
# Add boundary condition
if flux_gate is not None:
flux_stag[0] = flux_gate(self.yr)
# CFL condition
if not self.fixed_dt:
maxu = np.max(np.abs(u_stag))
if maxu > cfg.FLOAT_EPS:
cfl_dt = self.cfl_number * dx / maxu
else:
cfl_dt = dt
# Update dt only if necessary
if cfl_dt < dt:
dt = cfl_dt
if cfl_dt < self.min_dt:
raise RuntimeError(
'CFL error: required time step smaller '
'than the minimum allowed: '
'{:.1f}s vs {:.1f}s. Happening at '
'simulation year {:.1f}, fl_id {}, '
'bin_id {} and max_u {:.3f} m yr-1.'
''.format(cfl_dt, self.min_dt, self.yr, fl_id,
np.argmax(np.abs(u_stag)),
maxu * cfg.SEC_IN_YEAR))
# Since we are in this loop, reset the tributary flux
trib_flux[:] = 0
# We compute MB in this loop, before mass-redistribution occurs,
# so that MB models which rely on glacier geometry to decide things
# (like PyGEM) can do so with a clean glacier state
mbs.append(self.get_mb(fl.surface_h, self.yr,
fl_id=fl_id, fls=self.fls))
# Time step
if self.fixed_dt:
# change only if step dt is larger than the chosen dt
if self.fixed_dt < dt:
dt = self.fixed_dt
# A second loop for the mass exchange
for fl_id, fl in enumerate(self.fls):
flx_stag = self.flux_stag[fl_id]
trib_flux = self.trib_flux[fl_id]
tr = self._tributary_indices[fl_id]
dx = fl.dx_meter
is_trib = tr[0] is not None
# For these we had an additional grid point
if is_trib:
flx_stag = flx_stag[:-1]
# Mass-balance
widths = fl.widths_m
mb = mbs[fl_id]
# Allow parabolic beds to grow
mb = dt * mb * np.where((mb > 0.) & (widths == 0), 10., widths)
# Prevent surface melt below water level
if self.do_calving:
bed_below_sl = (fl.bed_h < self.water_level) & (fl.thick > 0)
mb[bed_below_sl] = utils.clip_min(mb[bed_below_sl],
-(fl.surface_h[bed_below_sl]
- self.water_level) *
widths[bed_below_sl])
mb[fl.surface_h < self.water_level] = 0
# Update section with ice flow and mass balance
new_section = (fl.section + (flx_stag[0:-1] - flx_stag[1:])*dt/dx +
trib_flux*dt/dx + mb)
# Keep positive values only and store
fl.section = utils.clip_min(new_section, 0)
self.calving_rate_myr = 0.
# Prevent remnants of detached ice below water level
section = fl.section
ice_above_wl = np.any((fl.surface_h > self.water_level) &
(fl.bed_h < self.water_level) &
(fl.thick >= (self.rho_o/self.rho)*depth))
if ice_above_wl and self.do_calving:
last_last_wl = []
above_wl = np.nonzero((fl.surface_h > self.water_level) &
(fl.bed_h < self.water_level) &
(fl.thick > (self.rho_o/self.rho)*depth))[0]
for i in above_wl:
if i+1 < len(fl.bed_h) and fl.thick[i+1] <= \
(self.rho_o/self.rho)*depth[i+1]:
last_last_wl = np.append(last_last_wl, i)
if len(last_last_wl) > 0:
last_above_wl = int(last_last_wl[0])
else:
last_above_wl = above_wl[-1]
last_ice = np.nonzero(fl.thick > 0)[0][-1]
if last_ice > last_above_wl+1:
for i in range(last_above_wl+2, last_ice+1):
if section[i] > 0 and fl.bed_h[i] < self.water_level:
add_calving = section[i] * dx
#fl.calving_bucket_m3 -= add_calving
#fl.calving_bucket_m3 = utils.clip_min(0,
# fl.calving_bucket_m3)
self.calving_m3_since_y0 += add_calving
self.calving_rate_myr += (dx / cfg.SEC_IN_YEAR)
section[i] = 0
fl.section = section
# If we use a flux-gate, store the total volume that came in
self.flux_gate_m3_since_y0 += flx_stag[0] * dt
# Add the last flux to the tributary
# this works because the lines are sorted in order
if is_trib:
# tr tuple: line_index, start, stop, gaussian_kernel
self.trib_flux[tr[0]][tr[1]:tr[2]] += \
utils.clip_min(flx_stag[-1], 0) * tr[3]
# --- The rest is for calving only ---
# If tributary, do calving only if we are not transferring mass
if is_trib and flx_stag[-1] > 0:
continue
# No need to do calving in these cases either
if not self.do_calving or not fl.has_ice():
continue
# We do calving only if the last glacier bed pixel is below water
# (this is to avoid calving elsewhere than at the front)
if fl.bed_h[fl.thick > 0][-1] > self.water_level:
continue
# We do calving only if there is some ice below wl
# ice_above_wl = np.any((fl.surface_h > self.water_level) &
# (fl.bed_h < self.water_level) &
# (fl.thick > 0))
ice_above_wl = np.any((fl.surface_h > self.water_level) &
(fl.bed_h < self.water_level) &
(fl.thick >= (self.rho_o/self.rho)*depth))
to_remove=0
add_calving=0
first_below_sl = np.nonzero((fl.bed_h < self.water_level) &
(fl.thick > 0))[0][0]
if ice_above_wl:
# last_above_wl = np.nonzero((fl.surface_h > self.water_level) &
# (fl.bed_h < self.water_level) &
# (fl.thick > 0))[0][-1]
last_above_wl = np.nonzero((fl.surface_h > self.water_level) &
(fl.bed_h < self.water_level) &
(fl.thick >= (self.rho_o/self.rho)*
depth))[0][-1]
if fl.bed_h[last_above_wl+1] > self.water_level:
continue
# OK, we're really calving
section = fl.section
# Calving law
h = fl.thick[last_above_wl]
d = h - (fl.surface_h[last_above_wl] - self.water_level)
k = self.calving_k
q_calving = k * d * h * fl.widths_m[last_above_wl]
q_calving = utils.clip_min(0,q_calving)
# Add to the bucket and the diagnostics
fl.calving_bucket_m3 += q_calving * dt
self.calving_m3_since_y0 += q_calving * dt
self.calving_rate_myr += (q_calving / section[last_above_wl] *
cfg.SEC_IN_YEAR)
# See if we have ice below flotation to clean out first
below_sl = ((fl.bed_h < self.water_level) &
(fl.thick < (self.rho_o/self.rho)*depth) &
(fl.thick > 0))
to_remove = np.sum(section[below_sl]) * fl.dx_meter
bed_below_sl = (fl.bed_h < self.water_level) & (fl.thick > 0)
if 0 < to_remove < fl.calving_bucket_m3:
# This is easy, we remove everything
section[below_sl] = 0
fl.calving_bucket_m3 -= to_remove
elif to_remove > 0 and fl.calving_bucket_m3 > 0:
# We can only remove part of it
section[below_sl] = 0
section[last_above_wl+1] = ((to_remove - fl.calving_bucket_m3)
/ fl.dx_meter)
fl.calving_bucket_m3 = 0
elif to_remove > 0:
section[below_sl] = 0
section[last_above_wl+1] = to_remove / fl.dx_meter
# The rest of the bucket might calve an entire grid point
# (or more)
vol_last = section[last_above_wl] * fl.dx_meter
while fl.calving_bucket_m3 > vol_last and \
last_above_wl >= bed_below_sl[0]:
fl.calving_bucket_m3 -= vol_last
section[last_above_wl] = 0
# OK check if we need to continue (unlikely)
last_above_wl -= 1
vol_last = section[last_above_wl] * fl.dx_meter
fl.section = section
# Deal with surface height at front becoming too high because of
# the elif above, i.e. when too much volume falls below flotation and
# is then accumulated in the "last" grid cell. Everything that
# is higher than the previous grid point is therefore
# redistributed at the front or calved off.
section = fl.section
while ((last_above_wl+1 < len(fl.bed_h)) and
(fl.surface_h[last_above_wl+1] > fl.surface_h[last_above_wl])
and section[last_above_wl+1] > 0):
add_calving = ((fl.surface_h[last_above_wl+1] -
fl.surface_h[last_above_wl]) *
fl.widths_m[last_above_wl+1] * dx)
if ((last_above_wl+2 < len(fl.bed_h)) and
((add_calving / (fl.widths_m[last_above_wl+2] * dx)) > \
(self.rho_o/self.rho)*depth[last_above_wl+2])):
section[last_above_wl+2] += add_calving / dx
else:
#fl.calving_bucket_m3 -= add_calving
#fl.calving_bucket_m3 = utils.clip_min(0,
# fl.calving_bucket_m3)
self.calving_m3_since_y0 += add_calving
self.calving_rate_myr += ((add_calving /
section[last_above_wl+1]) /
cfg.SEC_IN_YEAR)
section[last_above_wl+1] -= add_calving / dx
section[last_above_wl+1] = utils.clip_min(0,
section[last_above_wl+1])
add_calving = 0
fl.section = section
section = fl.section
last_above_wl += 1
# We update the glacier with our changes
fl.section = section
# Here we remove detached ice that might be left
elif fl.thick[first_below_sl-1] == 0:
section = fl.section
leftover = ((fl.bed_h < self.water_level) &
(fl.thick <= (self.rho_o/self.rho)*depth) &
(fl.thick > 0))
add_calving = np.sum(section[leftover] * dx)
#fl.calving_bucket_m3 -= add_calving
#fl.calving_bucket_m3 = utils.clip_min(0, fl.calving_bucket_m3)
self.calving_m3_since_y0 += add_calving
self.calving_rate_myr += (np.size(section[leftover]) * dx /
cfg.SEC_IN_YEAR)
section[leftover] = 0
fl.section = section
# Next step
self.t += dt
return dt
``` |
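A worked example of the k-calving law applied at the front in `step()` above (q = k·d·h·w); the numbers are made up and `SEC_IN_YEAR` is assumed to equal OGGM's constant.
```python
# q_calving = k * d * h * w, with k in yr-1 converted to s-1 as in __init__
SEC_IN_YEAR = 31536000.0           # assumed value; OGGM stores this in cfg.SEC_IN_YEAR
calving_k = 0.6 / SEC_IN_YEAR      # yr-1 -> s-1, as done for self.calving_k
h = 250.0                          # frontal ice thickness [m]
d = 180.0                          # frontal water depth [m]
w = 600.0                          # flowline width at the front [m]
q_calving = calving_k * d * h * w  # m3 s-1
print(q_calving * SEC_IN_YEAR / 1e9, 'km3 yr-1')   # ~0.0162
```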
{
"source": "jmalliaros/CDL-Hackathon",
"score": 2
} |
#### File: CDL-Hackathon/app/routes.py
```python
import matplotlib
matplotlib.use("Agg")
import traceback
from flask import render_template, jsonify, request
from app import app
import app.optimusform as opform
import sys
sys.path.append("..")
from hardware_router import route
from stats import get_dwave_plot, get_networkx_plot_of_qubo
from optimus_parser import refresh_globals
from stats_ibm import plot_this as plot_this_ibm
from stats_rigetti import plot_this_rigetti
@app.route('/')
def compute():
return render_template('base.html')
@app.route('/submit_program', methods=['POST'])
def submit_program():
program_string = request.form['program_string']
refresh_globals()
e = None
base64 = None
shots = None
base64_2 = None
ibm_base64 = None
rig_base64 = None
try:
res, shots, qubo, old_qubo, res_ibm, res_rig = route(program_string)
if res:
base64 = get_dwave_plot(res).decode('utf8')
else:
base64 = None
base64_2 = get_networkx_plot_of_qubo(qubo, old_qubo).decode('utf8')
if res_ibm:
ibm_base64 = plot_this_ibm(res_ibm).decode('utf8')
else:
ibm_base64 = None
if res_rig:
rig_base64 = plot_this_rigetti(res_rig).decode('utf8')
else:
rig_base64 = None
except Exception:
e = traceback.format_exc()
print("dsadas", e)
return jsonify({"results_block": render_template("results_block.html", e=e, shots=shots, ibm_base64=ibm_base64, rig_base64=rig_base64, result=base64, image_2=base64_2), "status": "success"})
```
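A hedged client-side sketch for the `/submit_program` route above; the host, port and the program text are assumptions, since the DSL it accepts is defined by `optimus_parser`, which is not shown here.
```python
import requests

resp = requests.post(
    "http://localhost:5000/submit_program",           # assumed dev server address
    data={"program_string": "min: x + y\nx + y = 1"}  # placeholder program text
)
print(resp.json()["status"])   # 'success' when the route completes
```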
#### File: jmalliaros/CDL-Hackathon/convertToH.py
```python
from optimus_parser import parse_optimization_model
from sympy import *
def problemToH(obj, conlist):
nc = len(conlist)
Hlist = []
for i in range(0,nc):
Hlist.append((conlist[i][0]-conlist[i][2])**2)
Hlist.append(obj)
return sum(Hlist)
```
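A quick usage sketch for `problemToH`: each constraint triple contributes a squared penalty `(lhs - rhs)**2` on top of the objective. It assumes the file above is importable as `convertToH`; the symbols and constraint are illustrative.
```python
from sympy import symbols, expand
from convertToH import problemToH   # assumes the module above is on the path

x, y = symbols('x y')
objective = x + 2*y
constraints = [(x + y, '==', 1)]    # (lhs, op, rhs); the op string is ignored

H = problemToH(objective, constraints)
print(expand(H))   # objective plus the penalty (x + y - 1)**2, expanded
```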
#### File: jmalliaros/CDL-Hackathon/hardware_router.py
```python
import random
import string
from dwave.system import CutOffComposite
from optimus_dwave import run_dwave
from optimus_ibm import run_IBM
from optimus_rigetti import run_Rigetti
from convertToH import problemToH
from optimus_parser import parse_optimization_model
from pyqubo import Binary
def randomString(stringLength=10):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
def route(input_string):
##Get string from webapp and store here
d = input_string
objective_function, constraints, variables, solve_parameters = parse_optimization_model(d.strip())
print("solve_parameters2", solve_parameters)
H = problemToH(objective_function, constraints)
# random_string_to_variable = {}
# for v in variables:
# rs = randomString()
# objective_function = objective_function.subs(v, rs)
# random_string_to_variable[rs] = v
# objective_function = str(objective_function)
# for v, k in random_string_to_variable.items():
# objective_function = objective_function.replace(v, "Binary('%s')" % k)
# print("objective_function", objective_function)
# H = eval(objective_function)
if "ibm" in variables:
del variables["ibm"]
if "dwave" in variables:
del variables["dwave"]
if "rigetti" in variables:
del variables["rigetti"]
try:
run_on = solve_parameters["run_on"]
run_on = list(map(lambda a: a.label, run_on))
except KeyError:
run_on = ["dwave"]
print("run_on", run_on)
res, qubo, old_qubo = run_dwave(H, solve_parameters=solve_parameters)
if "dwave" not in run_on:
res = None
qubo_to_use = old_qubo
print("res, qubo, old_qubo", res, qubo, old_qubo)
if qubo:
qubo_to_use = qubo
if "ibm" in run_on:
res_ibm = run_IBM(qubo_to_use, variables=variables.keys())
else:
res_ibm = None
if "rigetti" in run_on:
res_rig = run_Rigetti(qubo_to_use, variables=variables.keys())
else:
res_rig = None
shots = []
# for i,(smpl, energy) in enumerate(res.data(['sample','energy'])):
# shots.append([smpl, energy])
return res, shots, qubo, old_qubo, res_ibm, res_rig
```
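A stripped-down illustration of the backend dispatch inside `route()` above: which solvers run is driven by the `run_on` entry of the parsed solve parameters, defaulting to D-Wave. Plain strings stand in for the parser's label objects.
```python
def pick_backends(solve_parameters):
    # Mirrors the try/except KeyError fallback in route()
    try:
        run_on = list(solve_parameters["run_on"])
    except KeyError:
        run_on = ["dwave"]
    return {name: name in run_on for name in ("dwave", "ibm", "rigetti")}

print(pick_backends({}))                              # D-Wave only (default)
print(pick_backends({"run_on": ["ibm", "rigetti"]}))  # D-Wave result discarded
```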
#### File: jmalliaros/CDL-Hackathon/stats_ibm.py
```python
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 03:40:03 2019
@author: Michele
"""
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from plotnine import *
def plot_this(result):
plt.clf()
custom_cols = ["#3D9CD9", "#33DAB0", "#486BDD","#3D9CD9", "#33DAB0", "#486BDD","#3D9CD9", "#33DAB0", "#486BDD","#3D9CD9", "#33DAB0", "#486BDD"]
df_ibm = pd.DataFrame([result.keys(), result.values()]).T
df_ibm.columns = ['sol','num_occurrences']
df_ibm['solnum'] = '[' + df_ibm['sol'].apply(lambda x: ' '.join(x)) + ']: ' + df_ibm['num_occurrences'].astype(str)
df_ibm['rel_occ'] = df_ibm['num_occurrences']/df_ibm['num_occurrences'].sum()
p = ggplot(df_ibm,aes(x='sol', y='rel_occ', fill='solnum',label='num_occurrences'))
p = p + geom_bar(stat='identity',width=0.5, show_legend=False)
p = p + geom_text(position = position_stack(vjust=0.5),size=10)
# p = p + scale_fill_manual(values = custom_cols)
p = p + labs(x = 'Solution', y = 'Relative frequency', title="IBM: Frequency of the different solutions")
p = p + theme(axis_text_x=element_text(rotation=45, hjust=1))
p.draw()
plt.savefig("temp.png", format="PNG")
import base64
with open("temp.png", "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
return encoded_string
```
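Usage sketch for `plot_this` above, assuming the function is in scope: IBM results arrive as a mapping from bitstring to counts, and the call returns the rendered bar chart as base64-encoded PNG bytes. The counts below are made up.
```python
fake_counts = {'00': 512, '11': 488, '01': 16, '10': 8}   # bitstring -> shots
png_b64 = plot_this(fake_counts)
print(png_b64[:40])   # base64 bytes, ready to embed in an <img> tag
```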
#### File: jmalliaros/CDL-Hackathon/xanadu_gbs.py
```python
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import sys
sys.path.append("/Users/mat/Desktop/Xanadu/FlashDrive/src/gbsapps")
adj = np.array(
[
[0, 1, 0, 0, 1, 1],
[1, 0, 1, 0, 1, 1],
[0, 1, 0, 1, 1, 0],
[0, 0, 1, 0, 1, 0],
[1, 1, 1, 1, 0, 1],
[1, 1, 0, 0, 1, 0],
]
)
graph2 = nx.Graph(adj)
import random
def find_dense_subgraph(graph, nodes=2):
import gbsapps.graph.dense_subgraph as dense
print("graph.edges", graph.edges)
result = dense.find_dense_subgraph(
graph=graph, number_of_nodes=nodes, iterations=1, method="random-search"
)
subgraph = graph.subgraph(result[1])
# labels = {0: "Quantum", 1: "Toronto", 2: "<NAME>", 3: "Government", 4: "CDL", 5: "Advantage"}
# pos = nx.spring_layout(graph, seed=1)
# nx.draw_networkx_edges(
# graph, pos, edgelist=subgraph.edges, width=8, alpha=0.5, edge_color="#ff3300"
# )
# nx.draw_networkx_nodes(graph, pos, nodelist=subgraph.nodes, node_size=500, node_color="#ff3300")
# nx.draw_networkx_nodes(graph, pos, node_color="#63AC9A")
# nx.draw_networkx_edges(graph, pos, width=2, edge_color="#63AC9A")
# nx.draw_networkx_labels(graph, pos, labels, font_size=16)
# l, r = plt.xlim()
# plt.xlim(l - 0.35, r + 0.35)
# plt.axis("off")
density = np.round(result[0], 2)
print("Density: {}".format(density))
return subgraph, density
if __name__ == "__main__":
g = nx.Graph()
g.add_edge("x", "y", weight=0.4)
g.add_edge("z", "y", weight=0.4)
g.add_edge("c", "x", weight=0.0)
print("dadas", find_dense_subgraph(g))
##############################################################################
# Extensions
# ----------
#
# This tutorial has focused on a relatively simple :math:`6`-node graph. Here, the densest
# :math:`4`-node subgraph is simple to find: a brute force search requires only :math:`15`
# possibilities. To really appreciate the difficulty of the densest-:math:`k` subgraph problem we
# need to increase the size of the graph. Suppose we want to search for the densest
# :math:`10`-node subgraph of a :math:`30`-node graph like the one shown below. There are now
# just over thirty million possibilities!
#
# .. image:: ../_static/graph.png
# :align: center
# :width: 50%
# :alt: Example graph
#
# The densest :math:`10`-node subgraph is highlighted in red. As a challenge, see if you can use
# the tools learned in this tutorial to write a script for finding dense :math:`10`-node
# subgraphs. The graph shown above is available as an `adjacency matrix
# <https://en.wikipedia.org/wiki/Adjacency_matrix>`__ in CSV format :download:`here
# <../_static/graph.csv>`.
#
# .. warning::
#
# Through `Strawberry Fields <https://strawberryfields.readthedocs.io/en/latest/>`__,
# GBSApps uses a combination of algorithms available in the `Hafnian
# <https://hafnian.readthedocs.io/en/latest/>`__ library to carry out sampling from GBS.
# Nevertheless, simulating GBS is a computationally tough task, and smaller PCs may exhibit a
# slower sample rate for :func:`~gbsapps.graph.dense_subgraph.dense_subgraph_sampler_gbs` with
# increasing graph and target subgraph size.
``` |
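Usage sketch for `find_dense_subgraph` above. It only runs where the non-public `gbsapps` package referenced via the hard-coded `sys.path` entry is actually available; the random graph is illustrative.
```python
import networkx as nx

g = nx.gnp_random_graph(6, 0.6, seed=3)          # small random test graph
subgraph, density = find_dense_subgraph(g, nodes=4)
print(sorted(subgraph.nodes), density)
```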
{
"source": "jmalliaros/qcbm",
"score": 3
} |
#### File: qcbm/examples/plot_qcbm_opt_history.py
```python
import json
import matplotlib
# matplotlib.use("Agg")
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
from matplotlib.cbook import get_sample_data
import matplotlib.gridspec as gridspec
import numpy as np
def get_ordered_list_of_bitstrings(num_qubits):
bitstrings = []
for i in range(2**num_qubits):
bitstring = "{0:b}".format(i)
while len(bitstring) < num_qubits:
bitstring = "0" + bitstring
bitstrings.append(bitstring)
return bitstrings
# Insert the path to your JSON file here
with open('./examples/qcbm-example.json') as f:
data = json.load(f)
# Extract lists of energies, bond lengths, and basis sets.
distances = []
minimum_distances = []
bistring_distributions = []
current_minimum = 100000
for step_id in data:
step = data[step_id]
if step["class"] == "optimize-variational-qcbm-circuit":
ordered_bitstrings = get_ordered_list_of_bitstrings(int(step["inputParam:n-qubits"]))
for evaluation in step["optimization-results"]["history"]:
distances.append(evaluation["value"])
current_minimum = min(current_minimum, evaluation["value"])
minimum_distances.append(current_minimum)
bitstring_dist = []
for key in ordered_bitstrings:
try:
bitstring_dist.append(evaluation["bitstring_distribution"][key])
except:
bitstring_dist.append(0)
bistring_distributions.append(bitstring_dist)
fig = plt.figure(figsize=(16,8))
gs = gridspec.GridSpec(nrows=8, ncols=3, width_ratios=[16,1,1])
ax1 = fig.add_subplot(gs[:4,0])
ax2 = fig.add_subplot(gs[5:,0])
axs = [fig.add_subplot(gs[i,1]) for i in range(8)] + [fig.add_subplot(gs[i,2]) for i in range(8)]
evals = []
plotted_distances = []
plotted_min_distances = []
line_widths = []
images = [np.array([0,0,0,0]),
np.array([0,0,0,1]),
np.array([0,0,1,0]),
np.array([0,0,1,1]),
np.array([0,1,0,0]),
np.array([0,1,0,1]),
np.array([0,1,1,0]),
np.array([0,1,1,1]),
np.array([1,0,0,0]),
np.array([1,0,0,1]),
np.array([1,0,1,0]),
np.array([1,0,1,1]),
np.array([1,1,0,0]),
np.array([1,1,0,1]),
np.array([1,1,1,0]),
np.array([1,1,1,1])]
def animate(i):
evals.append(i)
plotted_distances.append(distances[i])
plotted_min_distances.append(minimum_distances[i])
line_widths.append(1)
ax1.clear()
ax1.set(xlabel='Evaluation Index', ylabel='Clipped negative log-likelihood cost function')
ax1.set_ylim([1.5, 3.5])
ax1.scatter(evals, plotted_distances, color="green", linewidths=line_widths, marker=".")
ax1.plot(evals, plotted_min_distances, color="purple", linewidth=2)
ax2.clear()
ax2.set(xlabel='Bitstring', ylabel='Measured Probability')
ax2.set_ylim([0, .25])
ax2.bar(ordered_bitstrings, bistring_distributions[i], facecolor='green')
if distances[i] == minimum_distances[i]:
normalized_distribution = np.array(bistring_distributions[i])/max(bistring_distributions[i])
for j in range(len(ordered_bitstrings)):
axs[j].clear()
axs[j].set_xticks(np.arange(-.5, 2, 1), minor=True)
axs[j].set_yticks(np.arange(-.5, 2, 1), minor=True)
axs[j].tick_params(axis='x', colors=(0,0,0,0))
axs[j].tick_params(axis='y', colors=(0,0,0,0))
axs[j].grid(which='minor', color='k', linestyle='-', linewidth=2)
fading_factor = normalized_distribution[j]
axs[j].imshow((images[j].reshape((2,2))), alpha=fading_factor, vmin=0, vmax=1, cmap='PiYG')
return tuple([ax1, ax2] + axs)
anim = FuncAnimation(fig, animate, frames=700, interval=1)
# # Set up formatting for the movie files
# Writer = animation.writers['ffmpeg']
# writer = Writer(fps=10, metadata=dict(artist='Me'), bitrate=1800)
# anim.save('qcbm_opt_700_iterations.mp4', writer=writer)
plt.show()
```
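The `get_ordered_list_of_bitstrings` helper above enumerates computational-basis labels in counting order, zero-padded to the qubit count; an equivalent one-liner is shown for comparison.
```python
n = 2
print(get_ordered_list_of_bitstrings(n))                    # ['00', '01', '10', '11']
print([format(i, '0{}b'.format(n)) for i in range(2**n)])   # same result
```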
#### File: zquantum/qcbm/ansatz_test.py
```python
import unittest
import numpy as np
import cirq
import itertools
from pyquil import Program
import pyquil.gates
from zquantum.core.circuit import Circuit, Qubit, Gate
from zquantum.core.utils import compare_unitary, RNDSEED
from .ansatz import ( get_qcbm_ansatz, build_qcbm_circuit_ion_trap, get_single_qubit_layer,
get_all_topology, get_line_topology, get_entangling_layer, generate_random_initial_params )
class TestQCBMAnsatz(unittest.TestCase):
def test_get_qcbm_ansatz(self):
# Given
n_qubits = 5
n_layers = 2
topology = 'all'
target_ansatz = {'ansatz_type': 'QCBM_ion_trap',
'ansatz_module': 'zquantum.qcbm.ansatz',
'ansatz_func' : 'build_qcbm_circuit_ion_trap',
'ansatz_kwargs' : {
'n_qubits' : n_qubits,
'n_layers' : n_layers,
'topology' : topology}}
# When
ansatz = get_qcbm_ansatz(n_qubits, n_layers, topology)
# Then
self.assertEqual(ansatz, target_ansatz)
def test_get_single_qubit_layer_wrong_num_params(self):
# Given
single_qubit_gates = ["Ry"]
n_qubits = 2
params = np.ones(3)
# When/Then
self.assertRaises(AssertionError, lambda: get_single_qubit_layer(params, n_qubits, single_qubit_gates))
def test_get_single_qubit_layer_one_gate(self):
# Given
single_qubit_gates = ["Ry"]
n_qubits_list = [2,3,4,10]
for n_qubits in n_qubits_list:
# Given
params = [x for x in range(0, n_qubits)]
test = cirq.Circuit()
qubits = [cirq.LineQubit(x) for x in range(0, n_qubits)]
for i in range(0, n_qubits):
test.append(cirq.Ry(params[i]).on(qubits[i]))
u_cirq = test._unitary_()
# When
circ = get_single_qubit_layer(params, n_qubits, single_qubit_gates)
unitary = circ.to_cirq()._unitary_()
# Then
self.assertEqual(circ.n_multiqubit_gates, 0)
self.assertEqual(compare_unitary(unitary, u_cirq, tol=1e-10), True)
def test_get_single_qubit_layer_multiple_gates(self):
# Given
single_qubit_gates = ["Ry", "Rx", "Rz"]
n_qubits_list = [2,3,4,10]
for n_qubits in n_qubits_list:
# Given
params = [x for x in range(0, 3*n_qubits)]
test = cirq.Circuit()
qubits = [cirq.LineQubit(x) for x in range(0, n_qubits)]
for i in range(0, n_qubits):
test.append(cirq.Ry(params[i]).on(qubits[i]))
for i in range(0, n_qubits):
test.append(cirq.Rx(params[n_qubits + i]).on(qubits[i]))
for i in range(0, n_qubits):
test.append(cirq.Rz(params[2*n_qubits + i]).on(qubits[i]))
u_cirq = test._unitary_()
# When
circ = get_single_qubit_layer(params, n_qubits, single_qubit_gates)
unitary = circ.to_cirq()._unitary_()
# Then
self.assertEqual(circ.n_multiqubit_gates, 0)
self.assertEqual(compare_unitary(unitary, u_cirq, tol=1e-10), True)
def test_get_all_topology(self):
# Given
n_qubits = 4
static_entangler = "XX"
topology = "all"
params = np.asarray([0,0,0,0,0,0])
# When
ent_layer = get_all_topology(params, n_qubits, static_entangler)
# Then
for gate in ent_layer.gates:
self.assertTrue(gate.name, "XX")
# XX on 0, 1
self.assertEqual(ent_layer.gates[0].qubits[0].index, 0)
self.assertEqual(ent_layer.gates[0].qubits[1].index, 1)
# XX on 0, 2
self.assertEqual(ent_layer.gates[1].qubits[0].index, 0)
self.assertEqual(ent_layer.gates[1].qubits[1].index, 2)
# XX on 0, 3
self.assertEqual(ent_layer.gates[2].qubits[0].index, 0)
self.assertEqual(ent_layer.gates[2].qubits[1].index, 3)
# XX on 1, 2
self.assertEqual(ent_layer.gates[3].qubits[0].index, 1)
self.assertEqual(ent_layer.gates[3].qubits[1].index, 2)
# XX on 1, 3
self.assertEqual(ent_layer.gates[4].qubits[0].index, 1)
self.assertEqual(ent_layer.gates[4].qubits[1].index, 3)
# XX on 2, 3
self.assertEqual(ent_layer.gates[5].qubits[0].index, 2)
self.assertEqual(ent_layer.gates[5].qubits[1].index, 3)
def test_get_line_topology(self):
# Given
n_qubits = 4
single_qubit_gate = "Rx"
static_entangler = "XX"
topology = "line"
params = np.asarray([0,0,0])
# When
ent_layer = get_line_topology(params, n_qubits, static_entangler)
# Then
for gate in ent_layer.gates:
self.assertTrue(gate.name, "XX")
# XX on 0, 1
self.assertEqual(ent_layer.gates[0].qubits[0].index, 0)
self.assertEqual(ent_layer.gates[0].qubits[1].index, 1)
# XX on 1, 2
self.assertEqual(ent_layer.gates[1].qubits[0].index, 1)
self.assertEqual(ent_layer.gates[1].qubits[1].index, 2)
# XX on 2, 3
self.assertEqual(ent_layer.gates[2].qubits[0].index, 2)
self.assertEqual(ent_layer.gates[2].qubits[1].index, 3)
def test_get_entangling_layer_toplogy_supported(self):
# Given
n_qubits_list = [2,3,4,5]
single_qubit_gate = "Rx"
static_entangler = "XX"
topology = "all"
for n_qubits in n_qubits_list:
# Given
params = np.zeros((int((n_qubits*(n_qubits-1))/2)))
all_topology_layer = get_all_topology(params, n_qubits, static_entangler)
# When
entangling_layer = get_entangling_layer(params, n_qubits, static_entangler, topology)
# Then
self.assertEqual(all_topology_layer, entangling_layer)
# Given
topology = "line"
for n_qubits in n_qubits_list:
# Given
params = np.zeros((n_qubits-1))
line_topology_layer = get_line_topology(params, n_qubits, static_entangler)
# When
entangling_layer = get_entangling_layer(params, n_qubits, static_entangler, topology)
# Then
self.assertEqual(line_topology_layer, entangling_layer)
def test_get_entangling_layer_toplogy_not_supported(self):
# Given
n_qubits = 2
single_qubit_gate = "Rx"
static_entangler = "XX"
topology = "NOT SUPPORTED"
params = np.zeros(1)
# When
self.assertRaises(RuntimeError, lambda: get_entangling_layer(params, n_qubits, static_entangler, topology))
def test_build_qcbm_circuit_iontrap_too_many_parameters(self):
# Given
n_qubits = 4
params = [np.ones(2*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(2*n_qubits)]
params = np.concatenate(params)
topology = "all"
n_layers = 2
# When/Then
self.assertRaises(RuntimeError, lambda: build_qcbm_circuit_ion_trap(params, n_qubits, n_layers, topology))
def test_build_qcbm_circuit_iontrap_one_layer(self):
# Given
n_qubits = 4
params = [np.ones(n_qubits)]
topology = "all"
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[0][i], i))
expected_circuit = Circuit(expected_pycircuit)
params = np.concatenate(params)
n_layers = 1
# When
circuit = build_qcbm_circuit_ion_trap(params, n_qubits, n_layers, topology)
# Then
self.assertEqual(circuit, expected_circuit)
def test_build_qcbm_circuit_iontrap_two_layers(self):
# Given
n_qubits = 4
params = [np.ones(2*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2))]
topology = "all"
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[0][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[0][i+n_qubits], i))
expected_circuit = Circuit(expected_pycircuit)
expected_circuit += get_entangling_layer(params[1], n_qubits, "XX", topology)
params = np.concatenate(params)
n_layers = 2
# When
circuit = build_qcbm_circuit_ion_trap(params, n_qubits, n_layers, topology)
# Then
self.assertEqual(circuit, expected_circuit)
def test_build_qcbm_circuit_iontrap_three_layers(self):
# Given
n_qubits = 4
params = [np.ones(2*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(2*n_qubits)]
topology = "all"
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[0][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[0][i+n_qubits], i))
expected_first_layer = Circuit(expected_pycircuit)
expected_second_layer = get_entangling_layer(params[1], n_qubits, "XX", topology)
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[2][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[2][i+n_qubits], i))
expected_third_layer = Circuit(expected_pycircuit)
expected_circuit = expected_first_layer + expected_second_layer + expected_third_layer
params = np.concatenate(params)
n_layers = 3
# When
circuit = build_qcbm_circuit_ion_trap(params, n_qubits, n_layers, topology)
# Then
self.assertEqual(circuit, expected_circuit)
def test_build_qcbm_circuit_iontrap_four_layers(self):
# Given
n_qubits = 4
params = [np.ones(2*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(3*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2))]
topology = "all"
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[0][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[0][i+n_qubits], i))
expected_first_layer = Circuit(expected_pycircuit)
expected_second_layer = get_entangling_layer(params[1], n_qubits, "XX", topology)
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[2][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[2][i+n_qubits], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[2][i+2*n_qubits], i))
expected_third_layer = Circuit(expected_pycircuit)
expected_fourth_layer = get_entangling_layer(params[3], n_qubits, "XX", topology)
expected_circuit = expected_first_layer + expected_second_layer + expected_third_layer + expected_fourth_layer
params = np.concatenate(params)
n_layers = 4
# When
circuit = build_qcbm_circuit_ion_trap(params, n_qubits, n_layers, topology)
# Then
self.assertEqual(circuit, expected_circuit)
def test_build_qcbm_circuit_iontrap_five_layers(self):
# Given
n_qubits = 4
params = [np.ones(2*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(3*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(2*n_qubits)]
topology = "all"
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[0][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[0][i+n_qubits], i))
expected_first_layer = Circuit(expected_pycircuit)
expected_second_layer = get_entangling_layer(params[1], n_qubits, "XX", topology)
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[2][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[2][i+n_qubits], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[2][i+2*n_qubits], i))
expected_third_layer = Circuit(expected_pycircuit)
expected_fourth_layer = get_entangling_layer(params[3], n_qubits, "XX", topology)
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[4][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[4][i+n_qubits], i))
expected_fifth_layer = Circuit(expected_pycircuit)
expected_circuit = expected_first_layer + expected_second_layer + expected_third_layer + expected_fourth_layer + expected_fifth_layer
params = np.concatenate(params)
n_layers = 5
# When
circuit = build_qcbm_circuit_ion_trap(params, n_qubits, n_layers, topology)
# Then
self.assertEqual(circuit, expected_circuit)
def test_build_qcbm_circuit_iontrap_six_layers(self):
# Given
n_qubits = 4
params = [np.ones(2*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(2*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(3*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2))]
topology = "all"
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[0][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[0][i+n_qubits], i))
expected_first_layer = Circuit(expected_pycircuit)
expected_second_layer = get_entangling_layer(params[1], n_qubits, "XX", topology)
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[2][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[2][i+n_qubits], i))
expected_third_layer = Circuit(expected_pycircuit)
expected_fourth_layer = get_entangling_layer(params[3], n_qubits, "XX", topology)
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[4][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[4][i+n_qubits], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[4][i+2*n_qubits], i))
expected_fifth_layer = Circuit(expected_pycircuit)
expected_sixth_layer = get_entangling_layer(params[5], n_qubits, "XX", topology)
expected_circuit = expected_first_layer + expected_second_layer + expected_third_layer + expected_fourth_layer + expected_fifth_layer + expected_sixth_layer
params = np.concatenate(params)
n_layers = 6
# When
circuit = build_qcbm_circuit_ion_trap(params, n_qubits, n_layers, topology)
# Then
self.assertEqual(circuit, expected_circuit)
def test_build_qcbm_circuit_iontrap_seven_layers(self):
# Given
n_qubits = 4
params = [np.ones(2*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(2*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(3*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(2*n_qubits)]
topology = "all"
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[0][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[0][i+n_qubits], i))
expected_first_layer = Circuit(expected_pycircuit)
expected_second_layer = get_entangling_layer(params[1], n_qubits, "XX", topology)
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[2][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[2][i+n_qubits], i))
expected_third_layer = Circuit(expected_pycircuit)
expected_fourth_layer = get_entangling_layer(params[3], n_qubits, "XX", topology)
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[4][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[4][i+n_qubits], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[4][i+2*n_qubits], i))
expected_fifth_layer = Circuit(expected_pycircuit)
expected_sixth_layer = get_entangling_layer(params[5], n_qubits, "XX", topology)
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[6][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[6][i+n_qubits], i))
expected_seventh_layer = Circuit(expected_pycircuit)
expected_circuit = expected_first_layer + expected_second_layer + expected_third_layer + expected_fourth_layer + expected_fifth_layer + expected_sixth_layer + expected_seventh_layer
params = np.concatenate(params)
n_layers = 7
# When
circuit = build_qcbm_circuit_ion_trap(params, n_qubits, n_layers, topology)
# Then
self.assertEqual(circuit, expected_circuit)
def test_build_qcbm_circuit_iontrap_eight_layers(self):
# Given
n_qubits = 4
params = [np.ones(2*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(2*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(2*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(3*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2))]
topology = "all"
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[0][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[0][i+n_qubits], i))
expected_first_layer = Circuit(expected_pycircuit)
expected_second_layer = get_entangling_layer(params[1], n_qubits, "XX", topology)
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[2][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[2][i+n_qubits], i))
expected_third_layer = Circuit(expected_pycircuit)
expected_fourth_layer = get_entangling_layer(params[3], n_qubits, "XX", topology)
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[4][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[4][i+n_qubits], i))
expected_fifth_layer = Circuit(expected_pycircuit)
expected_sixth_layer = get_entangling_layer(params[5], n_qubits, "XX", topology)
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[6][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[6][i+n_qubits], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[6][i+2*n_qubits], i))
expected_seventh_layer = Circuit(expected_pycircuit)
expected_eigth_layer = get_entangling_layer(params[7], n_qubits, "XX", topology)
expected_circuit = expected_first_layer + expected_second_layer + expected_third_layer + expected_fourth_layer + expected_fifth_layer + expected_sixth_layer + expected_seventh_layer + expected_eigth_layer
params = np.concatenate(params)
n_layers = 8
# When
circuit = build_qcbm_circuit_ion_trap(params, n_qubits, n_layers, topology)
# Then
self.assertEqual(circuit, expected_circuit)
def test_build_qcbm_circuit_iontrap_nine_layers(self):
# Given
n_qubits = 4
params = [np.ones(2*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(2*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(2*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(3*n_qubits), np.ones(int((n_qubits*(n_qubits-1))/2)), np.ones(2*n_qubits)]
topology = "all"
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[0][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[0][i+n_qubits], i))
expected_first_layer = Circuit(expected_pycircuit)
expected_second_layer = get_entangling_layer(params[1], n_qubits, "XX", topology)
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[2][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[2][i+n_qubits], i))
expected_third_layer = Circuit(expected_pycircuit)
expected_fourth_layer = get_entangling_layer(params[3], n_qubits, "XX", topology)
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[4][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[4][i+n_qubits], i))
expected_fifth_layer = Circuit(expected_pycircuit)
expected_sixth_layer = get_entangling_layer(params[5], n_qubits, "XX", topology)
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[6][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[6][i+n_qubits], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[6][i+2*n_qubits], i))
expected_seventh_layer = Circuit(expected_pycircuit)
expected_eigth_layer = get_entangling_layer(params[7], n_qubits, "XX", topology)
expected_pycircuit = Program()
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RZ(params[8][i], i))
for i in range(n_qubits):
expected_pycircuit += Program(pyquil.gates.RX(params[8][i+n_qubits], i))
expected_ninth_layer = Circuit(expected_pycircuit)
expected_circuit = expected_first_layer + expected_second_layer + expected_third_layer + expected_fourth_layer + expected_fifth_layer + expected_sixth_layer + expected_seventh_layer + expected_eigth_layer + expected_ninth_layer
params = np.concatenate(params)
n_layers = 9
# When
circuit = build_qcbm_circuit_ion_trap(params, n_qubits, n_layers, topology)
# Then
self.assertEqual(circuit, expected_circuit)
def test_generate_random_params_all_toplogy(self):
# Given
n_qubits = 4
n_layers = 2
topology = "all"
# When
params = generate_random_initial_params(n_qubits, n_layers, topology, seed=RNDSEED)
# Then
self.assertEqual(len(params), 2*n_qubits+int((n_qubits*(n_qubits-1))/2))
def test_generate_random_params_line_toplogy(self):
# Given
n_qubits = 4
n_layers = 2
topology = "line"
# When
params = generate_random_initial_params(n_qubits, n_layers, topology, seed=RNDSEED)
# Then
self.assertEqual(len(params), 2*n_qubits+n_qubits-1)
def test_generate_random_params_toplogy_not_supported(self):
# Given
n_qubits = 4
n_layers = 2
topology = "NOT SUPPORTED"
# When/Then
self.assertRaises(RuntimeError, lambda: generate_random_initial_params(n_qubits, n_layers, topology, seed=RNDSEED))
def test_generate_random_params_one_layer(self):
# Given
n_qubits = 4
topology = "line"
n_layers = 1
# When
params = generate_random_initial_params(n_qubits, n_layers, topology, seed=RNDSEED)
# Then
self.assertEqual(len(params), n_qubits)
def test_generate_random_params_two_layers(self):
# Given
n_qubits = 4
topology = "line"
n_layers = 2
# When
params = generate_random_initial_params(n_qubits, n_layers, topology, seed=RNDSEED)
# Then
self.assertEqual(len(params), 2*n_qubits+n_qubits-1)
def test_generate_random_params_three_layers(self):
# Given
n_qubits = 4
topology = "line"
n_layers = 3
# When
params = generate_random_initial_params(n_qubits, n_layers, topology, seed=RNDSEED)
# Then
self.assertEqual(len(params), 4*n_qubits+n_qubits-1)
def test_generate_random_params_even_layers(self):
# Given
n_qubits = 4
topology = "line"
n_layers_list = [4, 6, 8, 10, 12]
for n_layers in n_layers_list:
expected_num_params = n_qubits*n_layers + (n_qubits-1)*int(n_layers/2) + n_qubits
# When
params = generate_random_initial_params(n_qubits, n_layers, topology, seed=RNDSEED)
# Then
self.assertEqual(len(params), expected_num_params)
def test_generate_random_params_odd_layers(self):
# Given
n_qubits = 4
topology = "line"
n_layers_list = [5, 7, 9, 11]
for n_layers in n_layers_list:
expected_num_params = n_qubits*n_layers + (n_qubits-1)*int(n_layers/2) + 2*n_qubits
# When
params = generate_random_initial_params(n_qubits, n_layers, topology, seed=RNDSEED)
# Then
self.assertEqual(len(params), expected_num_params)
``` |
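The tests above pin down how many variational parameters the ion-trap QCBM ansatz takes for the line topology. The helper below simply restates those expected counts as a reading aid; it is not part of the library.
```python
def expected_n_params_line(n_qubits, n_layers):
    # Expected parameter counts taken directly from the test cases above
    if n_layers == 1:
        return n_qubits
    if n_layers == 2:
        return 2 * n_qubits + (n_qubits - 1)
    if n_layers == 3:
        return 4 * n_qubits + (n_qubits - 1)
    entangling = (n_qubits - 1) * (n_layers // 2)
    final_rotations = n_qubits if n_layers % 2 == 0 else 2 * n_qubits
    return n_qubits * n_layers + entangling + final_rotations

print(expected_n_params_line(4, 2))   # 11, as in test_generate_random_params_two_layers
print(expected_n_params_line(4, 6))   # 37, as in test_generate_random_params_even_layers
```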
{
"source": "Jmallone/CompiladorTPP",
"score": 2
} |
#### File: BCC__BCC36B__P[1]__Michel_1858351/implementacao/lexica.py
```python
import ply.lex as lex
import sys
import re
tokens = (
'NUM_INTEIRO',
'MAIS',
'MENOS',
'MULTIPLICACAO',
'DIVISAO',
'DOIS_PONTOS',
'VIRGULA',
'MENOR',
'MAIOR',
'IGUAL',
'DIFERENTE',
'FIM',
'MENOR_IGUAL',
'MAIOR_IGUAL',
'E_LOGICO',
'OU_LOGICO',
'NEGACAO',
'ABRE_PARENTESE',
'FECHA_PARENTESE',
'ABRE_COLCHETE',
'FECHA_COLCHETE',
'SE',
'ENTAO',
'SENAO',
'REPITA',
'ATE',
'ATRIBUICAO',
'LEIA',
'ESCREVA',
'RETORNA',
'INTEIRO',
'FLUTUANTE',
'NUM_PONTO_FLUTUANTE',
'NUM_NOTACAO_CIENTIFICA',
'ID',
'ABRE_CHAVES',
'FECHA_CHAVES',
'COMENTARIO'
)
t_FIM = r'fim'
t_SE = r'(se)'
t_SENAO = r'(senão)'
t_ATE = r'até'
t_LEIA = r'leia'
t_ENTAO = r'então'
t_REPITA = r'repita'
t_ESCREVA = r'escreva'
t_RETORNA = r'retorna'
t_INTEIRO = r'inteiro'
t_FLUTUANTE = r'flutuante'
#t_MAIS = r'\+'
#t_MENOS = r'-'
t_MENOR = r'<'
t_MAIOR = r'>'
t_IGUAL = r'=='
t_DIVISAO = r'/'
t_VIRGULA = r','
t_NEGACAO = r'!'
t_E_LOGICO = r'&&'
t_DIFERENTE = r'<>'
t_OU_LOGICO = r'\|\|'
t_ATRIBUICAO = r':='
t_DOIS_PONTOS = r':'
t_MENOR_IGUAL = r'<='
t_MAIOR_IGUAL = r'>='
#t_FECHA_CHAVES = r'\}'
#t_ABRE_CHAVES = r'\{'
t_COMENTARIO = r'\{[\w\n\r\s\t]+.*\}'
t_MULTIPLICACAO = r'\*'
t_ABRE_COLCHETE = r'\['
t_FECHA_COLCHETE = r'\]'
t_ABRE_PARENTESE = r'\('
t_FECHA_PARENTESE = r'\)'
t_ID = r'\w+'
def t_NUM_NOTACAO_CIENTIFICA(t):
r'\d+\^+\d+'
t.value = t.value
return t
def t_NUM_PONTO_FLUTUANTE(t):
r'\d+\.\d*'
t.value = float(t.value)
return t
def t_NUM_INTEIRO(t):
#r'\d+'
#r'^[-+]?\d+$'
#r'[^\d][-+]?\d+'
#r'[^\d|^(][-]?\d+|[^\d+(\+)]\d+'
#r'[^\d+|^(][-]?\d+|\d+'
#r'((:=)?(\s)?(\+|-)?(\d+))'
#t.value = t.value.replace(":=","")
#r'((\D)[+-]\d+)|\d+'
r'((?<=\D)[+-]\d+)|\d+'
t.value = int(t.value)
return t
def t_MAIS(t):
#r'\+'
r'\+'
return t
def t_MENOS(t):
r'-'
return t
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
t_ignore = ' \t'
def t_error(t):
print("Caractere Ilegal '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
def proxToken(data):
lexer.input(data)
while True:
tok = lexer.token()
if not tok:
break
print(tok.type, tok.value)
#print(tok.type)
def main():
f = open(sys.argv[1])
data = str(f.read())
proxToken(data)
if __name__ == '__main__':
main()
``` |
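One note on the lexer above: giving every keyword its own string rule works, but the PLY documentation recommends routing keywords through the identifier rule with a reserved-word lookup, so that identifiers which merely start with a keyword (for example `seletor`, which begins with `se`) are not split into two tokens. The sketch below shows that pattern adapted to the token names used here; the identifier regex is tightened so it cannot start with a digit. This is an alternative layout under those assumptions, not the author's implementation.

```python
import ply.lex as lex

# Keywords are looked up from the identifier rule instead of having their own
# string rules, as recommended in the PLY documentation.
reserved = {
    'se': 'SE', 'senão': 'SENAO', 'então': 'ENTAO', 'repita': 'REPITA',
    'até': 'ATE', 'leia': 'LEIA', 'escreva': 'ESCREVA', 'retorna': 'RETORNA',
    'inteiro': 'INTEIRO', 'flutuante': 'FLUTUANTE', 'fim': 'FIM',
}

tokens = ['ID', 'NUM_INTEIRO'] + list(reserved.values())

def t_NUM_INTEIRO(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_ID(t):
    r'[^\W\d]\w*'
    # Reclassify keywords; everything else stays an ID.
    t.type = reserved.get(t.value, 'ID')
    return t

t_ignore = ' \t'

def t_error(t):
    print("Caractere Ilegal '%s'" % t.value[0])
    t.lexer.skip(1)

lexer = lex.lex()

if __name__ == '__main__':
    # 'seletor' stays a single ID token instead of being split into SE + ID.
    lexer.input('se seletor 10 fim')
    while True:
        tok = lexer.token()
        if not tok:
            break
        print(tok.type, tok.value)
```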
{
"source": "jmalobicky/azure-sdk-for-python",
"score": 2
} |
#### File: batch/models/cloud_service_configuration.py
```python
from msrest.serialization import Model
class CloudServiceConfiguration(Model):
"""The configuration for nodes in a pool based on the Azure Cloud Services
platform.
Variables are only populated by the server, and will be ignored when
sending a request.
:param os_family: The Azure Guest OS family to be installed on the virtual
machines in the pool. Possible values are: 2 - OS Family 2, equivalent to
Windows Server 2008 R2 SP1. 3 - OS Family 3, equivalent to Windows Server
2012. 4 - OS Family 4, equivalent to Windows Server 2012 R2. 5 - OS Family
5, equivalent to Windows Server 2016. For more information, see Azure
Guest OS Releases
(https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases).
:type os_family: str
:param target_os_version: The Azure Guest OS version to be installed on
the virtual machines in the pool. The default value is * which specifies
the latest operating system version for the specified OS family.
:type target_os_version: str
:ivar current_os_version: The Azure Guest OS Version currently installed
on the virtual machines in the pool. This may differ from targetOSVersion
if the pool state is Upgrading. In this case some virtual machines may be
on the targetOSVersion and some may be on the currentOSVersion during the
upgrade process. Once all virtual machines have upgraded, currentOSVersion
is updated to be the same as targetOSVersion.
:vartype current_os_version: str
"""
_validation = {
'os_family': {'required': True},
'current_os_version': {'readonly': True},
}
_attribute_map = {
'os_family': {'key': 'osFamily', 'type': 'str'},
'target_os_version': {'key': 'targetOSVersion', 'type': 'str'},
'current_os_version': {'key': 'currentOSVersion', 'type': 'str'},
}
def __init__(self, os_family, target_os_version=None):
super(CloudServiceConfiguration, self).__init__()
self.os_family = os_family
self.target_os_version = target_os_version
self.current_os_version = None
```
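As a quick construction sketch for the class above (assuming the `azure.batch.models` namespace referenced in the docstrings is installed), only `os_family` is required and the current OS version is always populated by the service:

```python
from azure.batch.models import CloudServiceConfiguration

# OS Family 5 corresponds to Windows Server 2016. Per the docstring, omitting
# target_os_version means the service treats it as '*' (latest for the family).
config = CloudServiceConfiguration(os_family='5')
print(config.os_family, config.target_os_version, config.current_os_version)
```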
#### File: batch/models/job_release_task.py
```python
from msrest.serialization import Model
class JobReleaseTask(Model):
"""A Job Release task to run on job completion on any compute node where the
job has run.
The Job Release task runs when the job ends, because of one of the
following: The user calls the Terminate Job API, or the Delete Job API
while the job is still active, the job's maximum wall clock time constraint
is reached, and the job is still active, or the job's Job Manager task
completed, and the job is configured to terminate when the Job Manager
completes. The Job Release task runs on each compute node where tasks of
the job have run and the Job Preparation task ran and completed. If you
reimage a compute node after it has run the Job Preparation task, and the
job ends without any further tasks of the job running on that compute node
(and hence the Job Preparation task does not re-run), then the Job Release
task does not run on that node. If a compute node reboots while the Job
Release task is still running, the Job Release task runs again when the
compute node starts up. The job is not marked as complete until all Job
Release tasks have completed. The Job Release task runs in the background.
It does not occupy a scheduling slot; that is, it does not count towards
the maxTasksPerNode limit specified on the pool.
:param id: A string that uniquely identifies the Job Release task within
the job. The ID can contain any combination of alphanumeric characters
including hyphens and underscores and cannot contain more than 64
characters. If you do not specify this property, the Batch service assigns
a default value of 'jobrelease'. No other task in the job can have the
same ID as the Job Release task. If you try to submit a task with the same
id, the Batch service rejects the request with error code
TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the
HTTP status code is 409 (Conflict).
:type id: str
:param command_line: The command line of the Job Release task. The command
line does not run under a shell, and therefore cannot take advantage of
shell features such as environment variable expansion. If you want to take
advantage of such features, you should invoke the shell in the command
line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c
MyCommand" in Linux. If the command line refers to file paths, it should
use a relative path (relative to the task working directory), or use the
Batch provided environment variable
(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
:type command_line: str
:param container_settings: The settings for the container under which the
Job Release task runs. When this is specified, all directories recursively
below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on
the node) are mapped into the container, all task environment variables
are mapped into the container, and the task command line is executed in
the container.
:type container_settings: ~azure.batch.models.TaskContainerSettings
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. Files listed
under this element are located in the task's working directory.
:type resource_files: list[~azure.batch.models.ResourceFile]
:param environment_settings: A list of environment variable settings for
the Job Release task.
:type environment_settings: list[~azure.batch.models.EnvironmentSetting]
:param max_wall_clock_time: The maximum elapsed time that the Job Release
task may run on a given compute node, measured from the time the task
starts. If the task does not complete within the time limit, the Batch
service terminates it. The default value is 15 minutes. You may not
specify a timeout longer than 15 minutes. If you do, the Batch service
rejects it with an error; if you are calling the REST API directly, the
HTTP status code is 400 (Bad Request).
:type max_wall_clock_time: timedelta
:param retention_time: The minimum time to retain the task directory for
the Job Release task on the compute node. After this time, the Batch
service may delete the task directory and all its contents. The default is
infinite, i.e. the task directory will be retained until the compute node
is removed or reimaged.
:type retention_time: timedelta
:param user_identity: The user identity under which the Job Release task
runs. If omitted, the task runs as a non-administrative user unique to the
task.
:type user_identity: ~azure.batch.models.UserIdentity
"""
_validation = {
'command_line': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'},
'retention_time': {'key': 'retentionTime', 'type': 'duration'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
}
def __init__(self, command_line, id=None, container_settings=None, resource_files=None, environment_settings=None, max_wall_clock_time=None, retention_time=None, user_identity=None):
super(JobReleaseTask, self).__init__()
self.id = id
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.environment_settings = environment_settings
self.max_wall_clock_time = max_wall_clock_time
self.retention_time = retention_time
self.user_identity = user_identity
```
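A minimal construction sketch for the Job Release task (same `azure.batch.models` namespace assumption): only `command_line` is required, and the docstring's advice about invoking a shell explicitly is followed here.

```python
from datetime import timedelta
from azure.batch.models import JobReleaseTask

# Runs a cleanup command on each node where the job ran; if the id is omitted,
# the service assigns the default 'jobrelease'.
release_task = JobReleaseTask(
    command_line='/bin/sh -c "echo cleanup complete"',
    retention_time=timedelta(hours=1),   # keep the task directory for one hour
)
```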
#### File: batch/models/job_update_parameter.py
```python
from msrest.serialization import Model
class JobUpdateParameter(Model):
"""The set of changes to be made to a job.
:param priority: The priority of the job. Priority values can range from
-1000 to 1000, with -1000 being the lowest priority and 1000 being the
highest priority. If omitted, it is set to the default value 0.
:type priority: int
:param constraints: The execution constraints for the job. If omitted, the
constraints are cleared.
:type constraints: ~azure.batch.models.JobConstraints
:param pool_info: The pool on which the Batch service runs the job's
tasks. You may change the pool for a job only when the job is disabled.
The Update Job call will fail if you include the poolInfo element and the
job is not disabled. If you specify an autoPoolSpecification specification
in the poolInfo, only the keepAlive property can be updated, and then only
if the auto pool has a poolLifetimeOption of job.
:type pool_info: ~azure.batch.models.PoolInformation
:param metadata: A list of name-value pairs associated with the job as
metadata. If omitted, it takes the default value of an empty list; in
effect, any existing metadata is deleted.
:type metadata: list[~azure.batch.models.MetadataItem]
:param on_all_tasks_complete: The action the Batch service should take
when all tasks in the job are in the completed state. If omitted, the
completion behavior is set to noaction. If the current value is
terminatejob, this is an error because a job's completion behavior may not
be changed from terminatejob to noaction. You may not change the value
from terminatejob to noaction - that is, once you have engaged automatic
job termination, you cannot turn it off again. If you try to do this, the
request fails and Batch returns status code 400 (Bad Request) and an
'invalid property value' error response. If you do not specify this
element in a PUT request, it is equivalent to passing noaction. This is an
error if the current value is terminatejob. Possible values include:
'noAction', 'terminateJob'
:type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete
"""
_validation = {
'pool_info': {'required': True},
}
_attribute_map = {
'priority': {'key': 'priority', 'type': 'int'},
'constraints': {'key': 'constraints', 'type': 'JobConstraints'},
'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
}
def __init__(self, pool_info, priority=None, constraints=None, metadata=None, on_all_tasks_complete=None):
super(JobUpdateParameter, self).__init__()
self.priority = priority
self.constraints = constraints
self.pool_info = pool_info
self.metadata = metadata
self.on_all_tasks_complete = on_all_tasks_complete
```
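A sketch of an update payload for the class above; note that `PoolInformation(pool_id=...)` is assumed here for illustration and is not defined in this file.

```python
from azure.batch.models import JobUpdateParameter, PoolInformation

# pool_info is the only required field; priority ranges from -1000 (lowest)
# to 1000 (highest).
update = JobUpdateParameter(
    pool_info=PoolInformation(pool_id='my-pool'),   # assumed signature
    priority=100,
    on_all_tasks_complete='terminateJob',
)
```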
#### File: batch/models/pool_endpoint_configuration.py
```python
from msrest.serialization import Model
class PoolEndpointConfiguration(Model):
"""The endpoint configuration for a pool.
:param inbound_nat_pools: A list of inbound NAT pools that can be used to
address specific ports on an individual compute node externally. The
maximum number of inbound NAT pools per Batch pool is 5. If the maximum
number of inbound NAT pools is exceeded the request fails with HTTP status
code 400.
:type inbound_nat_pools: list[~azure.batch.models.InboundNATPool]
"""
_validation = {
'inbound_nat_pools': {'required': True},
}
_attribute_map = {
'inbound_nat_pools': {'key': 'inboundNATPools', 'type': '[InboundNATPool]'},
}
def __init__(self, inbound_nat_pools):
super(PoolEndpointConfiguration, self).__init__()
self.inbound_nat_pools = inbound_nat_pools
```
#### File: batch/models/task_id_range.py
```python
from msrest.serialization import Model
class TaskIdRange(Model):
"""A range of task IDs that a task can depend on. All tasks with IDs in the
range must complete successfully before the dependent task can be
scheduled.
The start and end of the range are inclusive. For example, if a range has
start 9 and end 12, then it represents tasks '9', '10', '11' and '12'.
:param start: The first task ID in the range.
:type start: int
:param end: The last task ID in the range.
:type end: int
"""
_validation = {
'start': {'required': True},
'end': {'required': True},
}
_attribute_map = {
'start': {'key': 'start', 'type': 'int'},
'end': {'key': 'end', 'type': 'int'},
}
def __init__(self, start, end):
super(TaskIdRange, self).__init__()
self.start = start
self.end = end
```
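The inclusive-range semantics described in the docstring can be shown directly (sketch, same `azure.batch.models` namespace):

```python
from azure.batch.models import TaskIdRange

# Depends on tasks '9', '10', '11' and '12' (both ends inclusive).
id_range = TaskIdRange(start=9, end=12)
```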
#### File: batch/models/upload_batch_service_logs_result.py
```python
from msrest.serialization import Model
class UploadBatchServiceLogsResult(Model):
"""The result of uploading Batch service log files from a specific compute
node.
:param virtual_directory_name: The virtual directory within Azure Blob
Storage container to which the Batch Service log file(s) will be uploaded.
The virtual directory name is part of the blob name for each log file
uploaded, and it is built based on poolId, nodeId and a unique identifier.
:type virtual_directory_name: str
:param number_of_files_uploaded: The number of log files which will be
uploaded.
:type number_of_files_uploaded: int
"""
_validation = {
'virtual_directory_name': {'required': True},
'number_of_files_uploaded': {'required': True},
}
_attribute_map = {
'virtual_directory_name': {'key': 'virtualDirectoryName', 'type': 'str'},
'number_of_files_uploaded': {'key': 'numberOfFilesUploaded', 'type': 'int'},
}
def __init__(self, virtual_directory_name, number_of_files_uploaded):
super(UploadBatchServiceLogsResult, self).__init__()
self.virtual_directory_name = virtual_directory_name
self.number_of_files_uploaded = number_of_files_uploaded
```
#### File: batch/models/user_identity.py
```python
from msrest.serialization import Model
class UserIdentity(Model):
"""The definition of the user identity under which the task is run.
Specify either the userName or autoUser property, but not both. On
CloudServiceConfiguration pools, this user is logged in with the
INTERACTIVE flag. On Windows VirtualMachineConfiguration pools, this user
is logged in with the BATCH flag.
:param user_name: The name of the user identity under which the task is
run. The userName and autoUser properties are mutually exclusive; you must
specify one but not both.
:type user_name: str
:param auto_user: The auto user under which the task is run. The userName
and autoUser properties are mutually exclusive; you must specify one but
not both.
:type auto_user: ~azure.batch.models.AutoUserSpecification
"""
_attribute_map = {
'user_name': {'key': 'username', 'type': 'str'},
'auto_user': {'key': 'autoUser', 'type': 'AutoUserSpecification'},
}
def __init__(self, user_name=None, auto_user=None):
super(UserIdentity, self).__init__()
self.user_name = user_name
self.auto_user = auto_user
```
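Because `user_name` and `auto_user` are mutually exclusive, a typical construction sets exactly one of them; the named-user variant needs nothing beyond this file (sketch, same namespace assumption):

```python
from azure.batch.models import UserIdentity

# Run the task as a named user account defined on the pool; auto_user is left
# unset because the two properties are mutually exclusive.
identity = UserIdentity(user_name='task-runner')
```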
#### File: contentmoderator/models/classification.py
```python
from msrest.serialization import Model
class Classification(Model):
"""The classification details of the text.
:param category1:
:type category1:
~azure.cognitiveservices.vision.contentmoderator.models.Score
:param category2:
:type category2:
~azure.cognitiveservices.vision.contentmoderator.models.Score
:param category3:
:type category3:
~azure.cognitiveservices.vision.contentmoderator.models.Score
:param review_recommended: The review recommended flag.
:type review_recommended: bool
"""
_attribute_map = {
'category1': {'key': 'Category1', 'type': 'Score'},
'category2': {'key': 'Category2', 'type': 'Score'},
'category3': {'key': 'Category3', 'type': 'Score'},
'review_recommended': {'key': 'ReviewRecommended', 'type': 'bool'},
}
def __init__(self, category1=None, category2=None, category3=None, review_recommended=None):
super(Classification, self).__init__()
self.category1 = category1
self.category2 = category2
self.category3 = category3
self.review_recommended = review_recommended
```
#### File: datamigration/models/connect_to_source_sql_server_task_input_py3.py
```python
from msrest.serialization import Model
class ConnectToSourceSqlServerTaskInput(Model):
"""Input for the task that validates connection to SQL Server and also
validates source server requirements.
All required parameters must be populated in order to send to Azure.
:param source_connection_info: Required. Connection information for Source
SQL Server
:type source_connection_info:
~azure.mgmt.datamigration.models.SqlConnectionInfo
:param check_permissions_group: Permission group for validations. Possible
values include: 'Default', 'MigrationFromSqlServerToAzureDB'
:type check_permissions_group: str or
~azure.mgmt.datamigration.models.ServerLevelPermissionsGroup
:param collect_logins: Flag for whether to collect logins from source
server. Default value: False .
:type collect_logins: bool
:param collect_agent_jobs: Flag for whether to collect agent jobs from
source server. Default value: False .
:type collect_agent_jobs: bool
"""
_validation = {
'source_connection_info': {'required': True},
}
_attribute_map = {
'source_connection_info': {'key': 'sourceConnectionInfo', 'type': 'SqlConnectionInfo'},
'check_permissions_group': {'key': 'checkPermissionsGroup', 'type': 'str'},
'collect_logins': {'key': 'collectLogins', 'type': 'bool'},
'collect_agent_jobs': {'key': 'collectAgentJobs', 'type': 'bool'},
}
def __init__(self, *, source_connection_info, check_permissions_group=None, collect_logins: bool=False, collect_agent_jobs: bool=False, **kwargs) -> None:
super(ConnectToSourceSqlServerTaskInput, self).__init__(**kwargs)
self.source_connection_info = source_connection_info
self.check_permissions_group = check_permissions_group
self.collect_logins = collect_logins
self.collect_agent_jobs = collect_agent_jobs
```
#### File: datamigration/models/database_py3.py
```python
from msrest.serialization import Model
class Database(Model):
"""Information about a single database.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Unique identifier for the database
:vartype id: str
:ivar name: Name of the database
:vartype name: str
:ivar compatibility_level: SQL Server compatibility level of database.
Possible values include: 'CompatLevel80', 'CompatLevel90',
'CompatLevel100', 'CompatLevel110', 'CompatLevel120', 'CompatLevel130',
'CompatLevel140'
:vartype compatibility_level: str or
~azure.mgmt.datamigration.models.DatabaseCompatLevel
:ivar collation: Collation name of the database
:vartype collation: str
:ivar server_name: Name of the server
:vartype server_name: str
:ivar fqdn: Fully qualified name
:vartype fqdn: str
:ivar install_id: Install id of the database
:vartype install_id: str
:ivar server_version: Version of the server
:vartype server_version: str
:ivar server_edition: Edition of the server
:vartype server_edition: str
:ivar server_level: Product level of the server (RTM, SP, CTP).
:vartype server_level: str
:ivar server_default_data_path: Default path of the data files
:vartype server_default_data_path: str
:ivar server_default_log_path: Default path of the log files
:vartype server_default_log_path: str
:ivar server_default_backup_path: Default path of the backup folder
:vartype server_default_backup_path: str
:ivar server_core_count: Number of cores on the server
:vartype server_core_count: int
:ivar server_visible_online_core_count: Number of cores on the server that
have VISIBLE ONLINE status
:vartype server_visible_online_core_count: int
:ivar database_state: State of the database. Possible values include:
'Online', 'Restoring', 'Recovering', 'RecoveryPending', 'Suspect',
'Emergency', 'Offline', 'Copying', 'OfflineSecondary'
:vartype database_state: str or
~azure.mgmt.datamigration.models.DatabaseState
:ivar server_id: The unique Server Id
:vartype server_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'compatibility_level': {'readonly': True},
'collation': {'readonly': True},
'server_name': {'readonly': True},
'fqdn': {'readonly': True},
'install_id': {'readonly': True},
'server_version': {'readonly': True},
'server_edition': {'readonly': True},
'server_level': {'readonly': True},
'server_default_data_path': {'readonly': True},
'server_default_log_path': {'readonly': True},
'server_default_backup_path': {'readonly': True},
'server_core_count': {'readonly': True},
'server_visible_online_core_count': {'readonly': True},
'database_state': {'readonly': True},
'server_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'compatibility_level': {'key': 'compatibilityLevel', 'type': 'str'},
'collation': {'key': 'collation', 'type': 'str'},
'server_name': {'key': 'serverName', 'type': 'str'},
'fqdn': {'key': 'fqdn', 'type': 'str'},
'install_id': {'key': 'installId', 'type': 'str'},
'server_version': {'key': 'serverVersion', 'type': 'str'},
'server_edition': {'key': 'serverEdition', 'type': 'str'},
'server_level': {'key': 'serverLevel', 'type': 'str'},
'server_default_data_path': {'key': 'serverDefaultDataPath', 'type': 'str'},
'server_default_log_path': {'key': 'serverDefaultLogPath', 'type': 'str'},
'server_default_backup_path': {'key': 'serverDefaultBackupPath', 'type': 'str'},
'server_core_count': {'key': 'serverCoreCount', 'type': 'int'},
'server_visible_online_core_count': {'key': 'serverVisibleOnlineCoreCount', 'type': 'int'},
'database_state': {'key': 'databaseState', 'type': 'str'},
'server_id': {'key': 'serverId', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(Database, self).__init__(**kwargs)
self.id = None
self.name = None
self.compatibility_level = None
self.collation = None
self.server_name = None
self.fqdn = None
self.install_id = None
self.server_version = None
self.server_edition = None
self.server_level = None
self.server_default_data_path = None
self.server_default_log_path = None
self.server_default_backup_path = None
self.server_core_count = None
self.server_visible_online_core_count = None
self.database_state = None
self.server_id = None
```
#### File: datamigration/models/migration_validation_database_summary_result.py
```python
from msrest.serialization import Model
class MigrationValidationDatabaseSummaryResult(Model):
"""Migration Validation Database level summary result.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Result identifier
:vartype id: str
:ivar migration_id: Migration Identifier
:vartype migration_id: str
:ivar source_database_name: Name of the source database
:vartype source_database_name: str
:ivar target_database_name: Name of the target database
:vartype target_database_name: str
:ivar started_on: Validation start time
:vartype started_on: datetime
:ivar ended_on: Validation end time
:vartype ended_on: datetime
:ivar status: Current status of validation at the database level. Possible
values include: 'Default', 'NotStarted', 'Initialized', 'InProgress',
'Completed', 'CompletedWithIssues', 'Failed', 'Stopped'
:vartype status: str or ~azure.mgmt.datamigration.models.ValidationStatus
"""
_validation = {
'id': {'readonly': True},
'migration_id': {'readonly': True},
'source_database_name': {'readonly': True},
'target_database_name': {'readonly': True},
'started_on': {'readonly': True},
'ended_on': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'migration_id': {'key': 'migrationId', 'type': 'str'},
'source_database_name': {'key': 'sourceDatabaseName', 'type': 'str'},
'target_database_name': {'key': 'targetDatabaseName', 'type': 'str'},
'started_on': {'key': 'startedOn', 'type': 'iso-8601'},
'ended_on': {'key': 'endedOn', 'type': 'iso-8601'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(self, **kwargs):
super(MigrationValidationDatabaseSummaryResult, self).__init__(**kwargs)
self.id = None
self.migration_id = None
self.source_database_name = None
self.target_database_name = None
self.started_on = None
self.ended_on = None
self.status = None
```
#### File: datamigration/models/query_execution_result.py
```python
from msrest.serialization import Model
class QueryExecutionResult(Model):
"""Describes query analysis results for execution in source and target.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar query_text: Query text retrieved from the source server
:vartype query_text: str
:ivar statements_in_batch: Total no. of statements in the batch
:vartype statements_in_batch: long
:ivar source_result: Query analysis result from the source
:vartype source_result:
~azure.mgmt.datamigration.models.ExecutionStatistics
:ivar target_result: Query analysis result from the target
:vartype target_result:
~azure.mgmt.datamigration.models.ExecutionStatistics
"""
_validation = {
'query_text': {'readonly': True},
'statements_in_batch': {'readonly': True},
'source_result': {'readonly': True},
'target_result': {'readonly': True},
}
_attribute_map = {
'query_text': {'key': 'queryText', 'type': 'str'},
'statements_in_batch': {'key': 'statementsInBatch', 'type': 'long'},
'source_result': {'key': 'sourceResult', 'type': 'ExecutionStatistics'},
'target_result': {'key': 'targetResult', 'type': 'ExecutionStatistics'},
}
def __init__(self, **kwargs):
super(QueryExecutionResult, self).__init__(**kwargs)
self.query_text = None
self.statements_in_batch = None
self.source_result = None
self.target_result = None
```
#### File: datamigration/models/schema_comparison_validation_result_type.py
```python
from msrest.serialization import Model
class SchemaComparisonValidationResultType(Model):
"""Description about the errors happen while performing migration validation.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar object_name: Name of the object that has the difference
:vartype object_name: str
:ivar object_type: Type of the object that has the difference. e.g
(Table/View/StoredProcedure). Possible values include: 'StoredProcedures',
'Table', 'User', 'View', 'Function'
:vartype object_type: str or ~azure.mgmt.datamigration.models.ObjectType
:ivar update_action: Update action type with respect to target. Possible
values include: 'DeletedOnTarget', 'ChangedOnTarget', 'AddedOnTarget'
:vartype update_action: str or
~azure.mgmt.datamigration.models.UpdateActionType
"""
_validation = {
'object_name': {'readonly': True},
'object_type': {'readonly': True},
'update_action': {'readonly': True},
}
_attribute_map = {
'object_name': {'key': 'objectName', 'type': 'str'},
'object_type': {'key': 'objectType', 'type': 'str'},
'update_action': {'key': 'updateAction', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SchemaComparisonValidationResultType, self).__init__(**kwargs)
self.object_name = None
self.object_type = None
self.update_action = None
```
#### File: datamigration/models/validation_error_py3.py
```python
from msrest.serialization import Model
class ValidationError(Model):
"""Description about the errors happen while performing migration validation.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar text: Error Text
:vartype text: str
:ivar severity: Severity of the error. Possible values include: 'Message',
'Warning', 'Error'
:vartype severity: str or ~azure.mgmt.datamigration.models.Severity
"""
_validation = {
'text': {'readonly': True},
'severity': {'readonly': True},
}
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'severity': {'key': 'severity', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(ValidationError, self).__init__(**kwargs)
self.text = None
self.severity = None
```
#### File: datamigration/models/wait_statistics.py
```python
from msrest.serialization import Model
class WaitStatistics(Model):
"""Wait statistics gathered during query batch execution.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar wait_type: Type of the Wait
:vartype wait_type: str
:ivar wait_time_ms: Total wait time in millisecond(s). Default value: 0 .
:vartype wait_time_ms: float
:ivar wait_count: Total no. of waits
:vartype wait_count: long
"""
_validation = {
'wait_type': {'readonly': True},
'wait_time_ms': {'readonly': True},
'wait_count': {'readonly': True},
}
_attribute_map = {
'wait_type': {'key': 'waitType', 'type': 'str'},
'wait_time_ms': {'key': 'waitTimeMs', 'type': 'float'},
'wait_count': {'key': 'waitCount', 'type': 'long'},
}
def __init__(self, **kwargs):
super(WaitStatistics, self).__init__(**kwargs)
self.wait_type = None
self.wait_time_ms = None
self.wait_count = None
```
#### File: media/models/akamai_signature_header_authentication_key.py
```python
from msrest.serialization import Model
class AkamaiSignatureHeaderAuthenticationKey(Model):
"""Akamai Signature Header authentication key.
:param identifier: identifier of the key
:type identifier: str
:param base64_key: authentication key
:type base64_key: str
:param expiration: The expiration time of the authentication key.
:type expiration: datetime
"""
_attribute_map = {
'identifier': {'key': 'identifier', 'type': 'str'},
'base64_key': {'key': 'base64Key', 'type': 'str'},
'expiration': {'key': 'expiration', 'type': 'iso-8601'},
}
def __init__(self, **kwargs):
super(AkamaiSignatureHeaderAuthenticationKey, self).__init__(**kwargs)
self.identifier = kwargs.get('identifier', None)
self.base64_key = kwargs.get('base64_key', None)
self.expiration = kwargs.get('expiration', None)
```
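A construction sketch for the key above (assuming the `azure.mgmt.media.models` namespace from the docstrings); the class takes everything through keyword arguments:

```python
from datetime import datetime, timedelta
from azure.mgmt.media.models import AkamaiSignatureHeaderAuthenticationKey

# The identifier and base64-encoded key come from the Akamai configuration;
# expiration is an absolute timestamp. The key value here is a placeholder.
key = AkamaiSignatureHeaderAuthenticationKey(
    identifier='key-1',
    base64_key='dGVzdC1rZXk=',
    expiration=datetime.utcnow() + timedelta(days=365),
)
```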
#### File: media/models/audio_analyzer_preset.py
```python
from .preset import Preset
class AudioAnalyzerPreset(Preset):
"""The Audio Analyzer preset applies a pre-defined set of AI-based analysis
operations, including speech transcription. Currently, the preset supports
processing of content with a single audio track.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: VideoAnalyzerPreset
All required parameters must be populated in order to send to Azure.
:param odatatype: Required. Constant filled by server.
:type odatatype: str
:param audio_language: The language for the audio payload in the input
using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). The list
of supported languages are, 'en-US', 'en-GB', 'es-ES', 'es-MX', 'fr-FR',
'it-IT', 'ja-JP', 'pt-BR', 'zh-CN'.
:type audio_language: str
"""
_validation = {
'odatatype': {'required': True},
}
_attribute_map = {
'odatatype': {'key': '@odata\\.type', 'type': 'str'},
'audio_language': {'key': 'audioLanguage', 'type': 'str'},
}
_subtype_map = {
'odatatype': {'#Microsoft.Media.VideoAnalyzerPreset': 'VideoAnalyzerPreset'}
}
def __init__(self, **kwargs):
super(AudioAnalyzerPreset, self).__init__(**kwargs)
self.audio_language = kwargs.get('audio_language', None)
self.odatatype = '#Microsoft.Media.AudioAnalyzerPreset'
```
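Callers normally supply only the audio language; the discriminator is filled in by the class itself, as this sketch against the same `azure.mgmt.media.models` namespace shows:

```python
from azure.mgmt.media.models import AudioAnalyzerPreset

# 'en-US' is one of the supported BCP-47 tags listed in the docstring; the
# odatatype discriminator is set automatically by __init__.
preset = AudioAnalyzerPreset(audio_language='en-US')
```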
#### File: media/models/job_output.py
```python
from msrest.serialization import Model
class JobOutput(Model):
"""Describes all the properties of a JobOutput.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: JobOutputAsset
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar error: If the JobOutput is in the Error state, it contains the
details of the error.
:vartype error: ~azure.mgmt.media.models.JobError
:ivar state: Describes the state of the JobOutput. Possible values
include: 'Canceled', 'Canceling', 'Error', 'Finished', 'Processing',
'Queued', 'Scheduled'
:vartype state: str or ~azure.mgmt.media.models.JobState
:ivar progress: If the JobOutput is in a Processing state, this contains
the job completion percentage. The value is an estimate and not intended
to be used to predict job completion times. To determine if the JobOutput
is complete, use the State property.
:vartype progress: int
:param odatatype: Required. Constant filled by server.
:type odatatype: str
"""
_validation = {
'error': {'readonly': True},
'state': {'readonly': True},
'progress': {'readonly': True},
'odatatype': {'required': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'JobError'},
'state': {'key': 'state', 'type': 'JobState'},
'progress': {'key': 'progress', 'type': 'int'},
'odatatype': {'key': '@odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odatatype': {'#Microsoft.Media.JobOutputAsset': 'JobOutputAsset'}
}
def __init__(self, **kwargs):
super(JobOutput, self).__init__(**kwargs)
self.error = None
self.state = None
self.progress = None
self.odatatype = None
```
#### File: web/models/abnormal_time_period.py
```python
from msrest.serialization import Model
class AbnormalTimePeriod(Model):
"""Class representing Abnormal Time Period identified in diagnosis.
:param start_time: Start time of the downtime
:type start_time: datetime
:param end_time: End time of the downtime
:type end_time: datetime
:param events: List of Possible Cause of downtime
:type events: list[~azure.mgmt.web.models.DetectorAbnormalTimePeriod]
:param solutions: List of proposed solutions
:type solutions: list[~azure.mgmt.web.models.Solution]
"""
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'events': {'key': 'events', 'type': '[DetectorAbnormalTimePeriod]'},
'solutions': {'key': 'solutions', 'type': '[Solution]'},
}
def __init__(self, start_time=None, end_time=None, events=None, solutions=None):
super(AbnormalTimePeriod, self).__init__()
self.start_time = start_time
self.end_time = end_time
self.events = events
self.solutions = solutions
```
#### File: web/models/auto_heal_custom_action.py
```python
from msrest.serialization import Model
class AutoHealCustomAction(Model):
"""Custom action to be executed
when an auto heal rule is triggered.
:param exe: Executable to be run.
:type exe: str
:param parameters: Parameters for the executable.
:type parameters: str
"""
_attribute_map = {
'exe': {'key': 'exe', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'str'},
}
def __init__(self, exe=None, parameters=None):
super(AutoHealCustomAction, self).__init__()
self.exe = exe
self.parameters = parameters
```
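A sketch of the custom action above (assuming `azure.mgmt.web.models` as referenced in the surrounding docstrings); both fields are plain strings and the path shown is hypothetical:

```python
from azure.mgmt.web.models import AutoHealCustomAction

# Hypothetical executable and arguments; the model itself does not validate paths.
action = AutoHealCustomAction(
    exe='D:\\home\\tools\\collect_dump.exe',
    parameters='--full',
)
```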
#### File: web/models/backup_request.py
```python
from .proxy_only_resource import ProxyOnlyResource
class BackupRequest(ProxyOnlyResource):
"""Description of a backup which will be performed.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param backup_request_name: Name of the backup.
:type backup_request_name: str
:param enabled: True if the backup schedule is enabled (must be included
in that case), false if the backup schedule should be disabled.
:type enabled: bool
:param storage_account_url: SAS URL to the container.
:type storage_account_url: str
:param backup_schedule: Schedule for the backup if it is executed
periodically.
:type backup_schedule: ~azure.mgmt.web.models.BackupSchedule
:param databases: Databases included in the backup.
:type databases: list[~azure.mgmt.web.models.DatabaseBackupSetting]
:param backup_request_type: Type of the backup. Possible values include:
'Default', 'Clone', 'Relocation', 'Snapshot'
:type backup_request_type: str or
~azure.mgmt.web.models.BackupRestoreOperationType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'backup_request_name': {'required': True},
'storage_account_url': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'backup_request_name': {'key': 'properties.name', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'storage_account_url': {'key': 'properties.storageAccountUrl', 'type': 'str'},
'backup_schedule': {'key': 'properties.backupSchedule', 'type': 'BackupSchedule'},
'databases': {'key': 'properties.databases', 'type': '[DatabaseBackupSetting]'},
'backup_request_type': {'key': 'properties.type', 'type': 'BackupRestoreOperationType'},
}
def __init__(self, backup_request_name, storage_account_url, kind=None, enabled=None, backup_schedule=None, databases=None, backup_request_type=None):
super(BackupRequest, self).__init__(kind=kind)
self.backup_request_name = backup_request_name
self.enabled = enabled
self.storage_account_url = storage_account_url
self.backup_schedule = backup_schedule
self.databases = databases
self.backup_request_type = backup_request_type
```
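For the backup request above, the two required fields are the backup name and a SAS URL to the storage container; the remaining properties are optional. The sketch below uses the `azure.mgmt.web.models` namespace and a placeholder SAS URL.

```python
from azure.mgmt.web.models import BackupRequest

# The SAS URL is a placeholder; a real request needs a container URL with
# write permissions.
request = BackupRequest(
    backup_request_name='nightly-backup',
    storage_account_url='https://example.blob.core.windows.net/backups?sv=...',
)
```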
#### File: web/models/host_name.py
```python
from msrest.serialization import Model
class HostName(Model):
"""Details of a hostname derived from a domain.
:param name: Name of the hostname.
:type name: str
:param site_names: List of apps the hostname is assigned to. This list
will have more than one app only if the hostname is pointing to a Traffic
Manager.
:type site_names: list[str]
:param azure_resource_name: Name of the Azure resource the hostname is
assigned to. If it is assigned to a Traffic Manager then it will be the
Traffic Manager name otherwise it will be the app name.
:type azure_resource_name: str
:param azure_resource_type: Type of the Azure resource the hostname is
assigned to. Possible values include: 'Website', 'TrafficManager'
:type azure_resource_type: str or ~azure.mgmt.web.models.AzureResourceType
:param custom_host_name_dns_record_type: Type of the DNS record. Possible
values include: 'CName', 'A'
:type custom_host_name_dns_record_type: str or
~azure.mgmt.web.models.CustomHostNameDnsRecordType
:param host_name_type: Type of the hostname. Possible values include:
'Verified', 'Managed'
:type host_name_type: str or ~azure.mgmt.web.models.HostNameType
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'site_names': {'key': 'siteNames', 'type': '[str]'},
'azure_resource_name': {'key': 'azureResourceName', 'type': 'str'},
'azure_resource_type': {'key': 'azureResourceType', 'type': 'AzureResourceType'},
'custom_host_name_dns_record_type': {'key': 'customHostNameDnsRecordType', 'type': 'CustomHostNameDnsRecordType'},
'host_name_type': {'key': 'hostNameType', 'type': 'HostNameType'},
}
def __init__(self, name=None, site_names=None, azure_resource_name=None, azure_resource_type=None, custom_host_name_dns_record_type=None, host_name_type=None):
super(HostName, self).__init__()
self.name = name
self.site_names = site_names
self.azure_resource_name = azure_resource_name
self.azure_resource_type = azure_resource_type
self.custom_host_name_dns_record_type = custom_host_name_dns_record_type
self.host_name_type = host_name_type
```
#### File: web/models/metric_specification.py
```python
from msrest.serialization import Model
class MetricSpecification(Model):
"""Definition of a single resource metric.
:param name:
:type name: str
:param display_name:
:type display_name: str
:param display_description:
:type display_description: str
:param unit:
:type unit: str
:param aggregation_type:
:type aggregation_type: str
:param supports_instance_level_aggregation:
:type supports_instance_level_aggregation: bool
:param enable_regional_mdm_account:
:type enable_regional_mdm_account: bool
:param source_mdm_account:
:type source_mdm_account: str
:param source_mdm_namespace:
:type source_mdm_namespace: str
:param metric_filter_pattern:
:type metric_filter_pattern: str
:param fill_gap_with_zero:
:type fill_gap_with_zero: bool
:param is_internal:
:type is_internal: bool
:param dimensions:
:type dimensions: list[~azure.mgmt.web.models.Dimension]
:param category:
:type category: str
:param availabilities:
:type availabilities: list[~azure.mgmt.web.models.MetricAvailability]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'display_description': {'key': 'displayDescription', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
'supports_instance_level_aggregation': {'key': 'supportsInstanceLevelAggregation', 'type': 'bool'},
'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},
'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},
'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
'metric_filter_pattern': {'key': 'metricFilterPattern', 'type': 'str'},
'fill_gap_with_zero': {'key': 'fillGapWithZero', 'type': 'bool'},
'is_internal': {'key': 'isInternal', 'type': 'bool'},
'dimensions': {'key': 'dimensions', 'type': '[Dimension]'},
'category': {'key': 'category', 'type': 'str'},
'availabilities': {'key': 'availabilities', 'type': '[MetricAvailability]'},
}
def __init__(self, name=None, display_name=None, display_description=None, unit=None, aggregation_type=None, supports_instance_level_aggregation=None, enable_regional_mdm_account=None, source_mdm_account=None, source_mdm_namespace=None, metric_filter_pattern=None, fill_gap_with_zero=None, is_internal=None, dimensions=None, category=None, availabilities=None):
super(MetricSpecification, self).__init__()
self.name = name
self.display_name = display_name
self.display_description = display_description
self.unit = unit
self.aggregation_type = aggregation_type
self.supports_instance_level_aggregation = supports_instance_level_aggregation
self.enable_regional_mdm_account = enable_regional_mdm_account
self.source_mdm_account = source_mdm_account
self.source_mdm_namespace = source_mdm_namespace
self.metric_filter_pattern = metric_filter_pattern
self.fill_gap_with_zero = fill_gap_with_zero
self.is_internal = is_internal
self.dimensions = dimensions
self.category = category
self.availabilities = availabilities
``` |