metadata | text
---|---
{
"source": "jpape/daoc-log-parser",
"score": 3
} |
#### File: jpape/daoc-log-parser/app.py
```python
from flask import Flask, request, render_template, Response
from flask_cors import CORS
import json
import combat_parser
import pve_parser
import craft_parser
app = Flask(__name__, static_url_path='', static_folder='dist')
CORS(app)
ALLOWED_EXTENSIONS = set(['txt', 'log'])
API_VERSION = 'v2.0'
@app.route('/')
def index():
return app.send_static_file('index.html')
@app.route('/test', methods=['GET'])
def test_endpoint():
return render_template('test.html')
@app.route('/upload/<string:version>', methods=['POST'])
def capture_upload(version):
if version != API_VERSION:
resp = {}
resp['Errors'] = ['Version mismatch']
return json.dumps(resp)
if 'logfile' not in request.files:
return 'Missing logfile'
file = request.files['logfile']
if file.filename == '' or not allowed_file(file.filename):
return 'No selected file or incorrect file type'
else:
return Response(handle_uploaded_file(file), mimetype='application/json')
def handle_uploaded_file(upload):
error_messages = []
result = {}
result['Combat'] = combat_parser.parse_uploaded_file(upload, error_messages)
result['Crafting'] = craft_parser.parse_crafting(upload, error_messages)
result['PvE'] = pve_parser.parse_pve(upload, error_messages)
result['Messages'] = error_messages
j_result = json.dumps(result)
return j_result
def allowed_file(filename):
return '.' in filename and filename.rsplit('.',1)[1].lower() in ALLOWED_EXTENSIONS
if __name__ == '__main__':
app.run(debug=True)
``` |
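The `/upload/<version>` endpoint above expects a multipart form field named `logfile` and answers with the combined parser output as JSON. A minimal client-side sketch, assuming the app is running locally on Flask's default port 5000 (the file name `chat.log` is just a placeholder):
```python
# Client-side sketch only; host, port and the log file name are assumptions.
import requests

API_VERSION = 'v2.0'

with open('chat.log', 'rb') as f:
    resp = requests.post(
        'http://localhost:5000/upload/%s' % API_VERSION,
        files={'logfile': ('chat.log', f, 'text/plain')},
    )

print(resp.status_code)
print(resp.json())  # {'Combat': ..., 'Crafting': ..., 'PvE': ..., 'Messages': [...]}
```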
{
"source": "jpapejr/zoo-HttpAnimal",
"score": 3
} |
#### File: zoo-HttpAnimal/src/main.py
```python
from flask import Flask
import os
import random
app = Flask(__name__)
@app.route('/')
def hello():
return ("(animals can't talk, right?)", 200)
@app.route('/exhibit')
def ex():
if os.getenv('IMGURL') is not None:
return ('<img src="' + os.getenv('IMGURL') + '" height="25%" width="25%" />', 200)
else:
return ('<img src="https://zoologyfoundation.org/wp-content/uploads/woocommerce-placeholder.png" height="25%" width="25%" />', 200)
@app.route('/action')
def action():
if os.getenv('NAME') is not None:
name = os.getenv("NAME")
else:
name = 'animal'
if os.getenv('ACTIONS') is not None:
actionList = os.getenv('ACTIONS').split(',')
random.seed()
selection = random.randint(0, len(actionList) -1)
else:
return ('The animal is not visible right now.', 200)
return ('The ' + name + ' ' + actionList[selection], 200)
``` |
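The `/action` route above builds its reply from the `NAME` and `ACTIONS` environment variables, while `/exhibit` serves whatever `IMGURL` points at. A quick sketch of exercising it, assuming the app is served on Flask's default port; the environment values below are illustrative:
```python
# Illustrative only: start the app with, for example,
#   NAME=lion ACTIONS="roars,sleeps,paces" IMGURL=https://example.com/lion.jpg \
#   FLASK_APP=src/main.py flask run
# then query it; host/port assume Flask defaults.
import requests

print(requests.get('http://localhost:5000/exhibit').text)  # <img src="..." ... />
print(requests.get('http://localhost:5000/action').text)   # e.g. "The lion roars"
```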
{
"source": "jpapon/minimal_ros_nodes",
"score": 2
} |
#### File: src/cnn_classifier/cnn_classifier.py
```python
import sys, time, os
# numpy
import numpy as np
from scipy.misc import imsave
# OpenCV
import cv2
import tensorflow as tf
import tensorvision.utils as tv_utils
import tensorvision.core as core
# Ros libraries
import rospy
import rospkg
# Ros Messages
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class cnn_classifier:
def __init__(self, save_output = False):
'''Initialize ros publisher, ros subscriber'''
self.save_output_ = save_output
# topic where we publish RGB version of classified result
self.pub_rgb_labels = rospy.Publisher("rgb_labels",Image,queue_size=1)
self.pub_labels = rospy.Publisher("labels",Image,queue_size=1)
self.bridge = CvBridge()
# subscribed Topic
self.subscriber = rospy.Subscriber("image", Image, self.image_callback,
queue_size=1)
rospy.loginfo ("Initializing Network...")
rospack = rospkg.RosPack()
network_path = rospack.get_path('cnn_weights')
self.network_path_ = os.path.join(network_path , 'networks','segnet')
self.hypes_ = tv_utils.load_hypes_from_logdir(self.network_path_)
self.num_classes_ = self.hypes_['arch']['num_classes']
rospy.loginfo("Hypes loaded successfully.")
# Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
self.modules_ = tv_utils.load_modules_from_logdir(self.network_path_)
rospy.loginfo("Modules loaded, building tf graph.")
# Create tf graph and build module.
with tf.Graph().as_default():
# Create placeholder for input
self.image_pl_ = tf.placeholder(tf.float32)
image = tf.expand_dims(self.image_pl_, 0)
self.hypes_['dirs']['data_dir'] = self.network_path_
# build Tensorflow graph using the model from logdir
self.prediction_ = core.build_inference_graph(self.hypes_, self.modules_,
image=image)
rospy.loginfo("Graph built successfully.")
# Create a session for running Ops on the Graph.
self.sess_ = tf.Session()
self.saver_ = tf.train.Saver()
# Load weights from logdir
core.load_weights(self.network_path_, self.sess_, self.saver_)
rospy.loginfo("Weights loaded successfully.")
#Build map for colorizing
self.label_colors_ = {}
self.label_colors_alpha_ = {}
for key in self.hypes_['data'].keys():
if ('_color' in key):
color = np.array(self.hypes_['data'][key])
self.label_colors_[color[0]] = (color[1],color[2],color[3],255)
self.label_colors_alpha_[color[0]] = (color[1],color[2],color[3],128)
rospy.logwarn ("Done loading neural network!")
self.image_count_ = 0
self.output_dir_ = "CNN_test_output"
if not os.path.exists(self.output_dir_):
os.makedirs(self.output_dir_)
#---------------------------------------------------------------------------
# Image subscriber callback.
#---------------------------------------------------------------------------
def image_callback(self, data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "rgb8")
except CvBridgeError as e:
rospy.logerr( "Error =" + e)
label_im, label_overlay_im = self.classify_image(cv_image)
if (self.save_output_):
imsave (os.path.join(self.output_dir_,
"input_{:05d}.png".format(self.image_count_)),
cv_image)
imsave (os.path.join(self.output_dir_,
"overlay_{:05d}.png".format(self.image_count_)),
label_overlay_im)
rospy.loginfo ("Saved frame {:05d}".format (self.image_count_))
self.image_count_ += 1
try:
self.pub_rgb_labels.publish(self.bridge.cv2_to_imgmsg(label_overlay_im, "rgb8"))
self.pub_labels.publish(self.bridge.cv2_to_imgmsg(label_im, "mono8"))
except CvBridgeError as e:
rospy.logerr( "Error =" + e)
def classify_image (self, input_img):
start = time.time()
#rospy.loginfo ("Classifying image size={}".format(input_img.shape))
# Run KittiSeg model on image
feed = {self.image_pl_: input_img}
softmax = self.prediction_['softmax']
output = self.sess_.run([softmax], feed_dict=feed)
# Reshape output from flat vector to 2D Image
shape = input_img.shape
output = output[0].reshape(shape[0], shape[1], self.num_classes_)
label_im = np.argmax(output, axis = 2).astype(np.uint8)
label_overlay_im = tv_utils.overlay_segmentation(input_img, label_im, self.label_colors_alpha_)
rospy.loginfo ("Time to run neural net on image = {:.3f}s".format(time.time()-start))
return label_im, label_overlay_im
def main(args):
'''Initializes and cleanup ros node'''
rospy.init_node('cnn_classifier', anonymous=True)
save_output = rospy.get_param('save_output', False)
classifier = cnn_classifier(save_output = save_output)
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down CNN Classifier module")
if __name__ == '__main__':
main(sys.argv)
``` |
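The node above subscribes to an `image` topic, runs the TensorVision model, and republishes `labels` and `rgb_labels`. A minimal test-publisher sketch for feeding it frames, assuming a running `roscore` and that the classifier node is up; the random frames are placeholders for real camera images:
```python
# Test publisher sketch; assumes roscore is running and the classifier node above
# is subscribed to the "image" topic. Frames here are random placeholder data.
import numpy as np
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image

rospy.init_node('test_image_publisher')
pub = rospy.Publisher('image', Image, queue_size=1)
bridge = CvBridge()
rate = rospy.Rate(1)  # 1 Hz
while not rospy.is_shutdown():
    frame = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)  # dummy RGB frame
    pub.publish(bridge.cv2_to_imgmsg(frame, encoding='rgb8'))
    rate.sleep()
```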
{
"source": "jparajuli/game_theory_models",
"score": 3
} |
#### File: jparajuli/game_theory_models/brd.py
```python
from __future__ import division
import numpy as np
from normal_form_game import Player
class BRD(object):
def __init__(self, payoff_matrix, N):
A = np.asarray(payoff_matrix)
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('payoff matrix must be square')
self.num_actions = A.shape[0] # Number of actions
self.N = N # Number of players
self.player = Player(A) # "Representative player"
self.tie_breaking = 'smallest'
# Current action distribution
self.current_action_dist = np.zeros(self.num_actions, dtype=int)
self.current_action_dist[0] = self.N # Initialization
def set_init_action_dist(self, init_action_dist=None):
"""
Set the attribute `current_action_dist` to `init_action_dist`.
Parameters
----------
init_action_dist : array_like(float, ndim=1),
optional(default=None)
Array containing the initial action distribution. If not
supplied, randomly chosen uniformly over the set of possible
action distributions.
"""
if init_action_dist is None: # Randomly choose an action distribution
cutoffs = np.empty(self.num_actions, dtype=int)
cutoffs[-1] = self.N + self.num_actions - 1
cutoffs[:-1] = np.random.choice(self.N+self.num_actions-1,
self.num_actions-1, replace=False)
cutoffs[:-1].sort()
cutoffs[1:] -= cutoffs[:-1] + 1
init_action_dist = cutoffs
self.current_action_dist[:] = init_action_dist
def play(self, current_action):
self.current_action_dist[current_action] -= 1
opponent_action_dist = self.current_action_dist
next_action = self.player.best_response(opponent_action_dist,
tie_breaking=self.tie_breaking)
self.current_action_dist[next_action] += 1
def simulate(self, ts_length, init_action_dist=None):
action_dist_sequence = \
np.empty((ts_length, self.num_actions), dtype=int)
action_dist_sequence_iter = \
self.simulate_iter(ts_length, init_action_dist=init_action_dist)
for t, action_dist in enumerate(action_dist_sequence_iter):
action_dist_sequence[t] = action_dist
return action_dist_sequence
def simulate_iter(self, ts_length, init_action_dist=None):
self.set_init_action_dist(init_action_dist=init_action_dist)
# Sequence of randomly chosen players to revise
player_ind_sequence = np.random.randint(self.N, size=ts_length)
for t in range(ts_length):
yield self.current_action_dist
action = np.searchsorted(
self.current_action_dist.cumsum(), player_ind_sequence[t],
side='right'
) # Action the revising player is playing
self.play(current_action=action)
def replicate(self, T, num_reps, init_action_dist=None):
out = np.empty((num_reps, self.num_actions), dtype=int)
for j in range(num_reps):
action_dist_sequence_iter = \
self.simulate_iter(T+1, init_action_dist=init_action_dist)
for action_dist in action_dist_sequence_iter:
x = action_dist
out[j] = x
return out
class KMR(BRD):
def __init__(self, payoff_matrix, N, epsilon=0.1):
BRD.__init__(self, payoff_matrix, N)
# Mutation probability
self.epsilon = epsilon
def play(self, current_action):
if np.random.random() < self.epsilon: # Mutation
self.current_action_dist[current_action] -= 1
next_action = self.player.random_choice()
self.current_action_dist[next_action] += 1
else: # Best response
BRD.play(self, current_action)
class SamplingBRD(BRD):
def __init__(self, payoff_matrix, N, k=2):
BRD.__init__(self, payoff_matrix, N)
# Sample size
self.k = k
def play(self, current_action):
self.current_action_dist[current_action] -= 1
opponent_action_dist = self.current_action_dist
actions = np.random.choice(self.num_actions, size=self.k, replace=True,
p=opponent_action_dist/(self.N-1))
sample_action_dist = np.bincount(actions, minlength=self.num_actions)
next_action = self.player.best_response(sample_action_dist,
tie_breaking=self.tie_breaking)
self.current_action_dist[next_action] += 1
```
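A short usage sketch for `BRD` and `KMR`, assuming `normal_form_game.Player` from the same repository is importable; the 2x2 coordination game below mirrors the one used in the tests that follow:
```python
# Usage sketch; depends on normal_form_game.Player being available as in this repo.
from brd import BRD, KMR

payoff_matrix = [[4, 0],
                 [3, 2]]

brd = BRD(payoff_matrix, N=10)
print(brd.simulate(ts_length=5, init_action_dist=[5, 5]))

kmr = KMR(payoff_matrix, N=10, epsilon=0.05)
print(kmr.replicate(T=50, num_reps=3, init_action_dist=[10, 0]))
```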
#### File: jparajuli/game_theory_models/test_brd.py
```python
from __future__ import division
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import eq_, ok_, raises
from brd import BRD
class TestBRD:
'''Test the methods of BRD'''
def setUp(self):
'''Setup a BRD instance'''
# 2x2 coordination game with action 1 risk-dominant
payoff_matrix = [[4, 0],
[3, 2]]
self.N = 4 # 4 players
self.brd = BRD(payoff_matrix, self.N)
def test_set_init_action_dist_with_given_init_action_dist(self):
self.brd.set_init_action_dist([1, 3])
assert_array_equal(self.brd.current_action_dist, [1, 3])
def test_set_init_action_dist_when_init_action_dist_None(self):
self.brd.set_init_action_dist() # Action dist randomly chosen
ok_(all(self.brd.current_action_dist >= 0))
ok_(self.brd.current_action_dist.sum() == self.N)
def test_play(self):
self.brd.set_init_action_dist([2, 2])
self.brd.play(current_action=1) # Player playing 1 revises
ok_(np.array_equal(self.brd.current_action_dist, [3, 1]) or
np.array_equal(self.brd.current_action_dist, [2, 2]))
def test_simulate_rest_point(self):
assert_array_equal(
self.brd.simulate(ts_length=3, init_action_dist=[4, 0]),
[[4, 0],
[4, 0],
[4, 0]]
)
def test_simulate(self):
np.random.seed(22)
assert_array_equal(
self.brd.simulate(ts_length=3, init_action_dist=[2, 2]),
[[2, 2],
[1, 3],
[0, 4]]
)
# Invalid inputs #
@raises(ValueError)
def test_brd_invalid_input_nonsquare_payoff_matrix():
brd = BRD(payoff_matrix=np.zeros((2, 3)), N=5)
if __name__ == '__main__':
import sys
import nose
argv = sys.argv[:]
argv.append('--verbose')
argv.append('--nocapture')
nose.main(argv=argv, defaultTest=__file__)
```
#### File: jparajuli/game_theory_models/test_localint.py
```python
from __future__ import division
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import eq_, ok_, raises
from localint import LocalInteraction
class TestLocalInteraction:
'''Test the methods of LocalInteraction'''
def setUp(self):
'''Setup a LocalInteraction instance'''
# Circle network with 5 players
adj_matrix = [[0, 1, 0, 0, 1],
[1, 0, 1, 0, 0],
[0, 1, 0, 1, 0],
[0, 0, 1, 0, 1],
[1, 0, 0, 1, 0]]
# 2x2 coordination game with action 1 risk-dominant
payoff_matrix = [[4, 0],
[2, 3]]
self.li = LocalInteraction(payoff_matrix, adj_matrix)
def test_set_init_actions_with_given_init_actions(self):
self.li.set_init_actions([0, 1, 1, 0, 0])
assert_array_equal(self.li.current_actions, [0, 1, 1, 0, 0])
def test_set_init_actions_when_init_actions_None(self):
self.li.set_init_actions() # Actions randomly assigned
ok_(all(
action in list(range(2)) for action in self.li.current_actions)
)
def test_play_when_player_ind_None(self):
self.li.set_init_actions([1, 0, 0, 0, 0])
self.li.play() # All players revise
assert_array_equal(self.li.current_actions, [0, 1, 0, 0, 1])
def test_play_when_player_ind_int(self):
self.li.set_init_actions([1, 0, 0, 0, 0])
self.li.play(player_ind=1) # Player 1 revises
assert_array_equal(self.li.current_actions, [1, 1, 0, 0, 0])
def test_play_when_player_ind_list(self):
self.li.set_init_actions([1, 0, 0, 0, 0])
self.li.play(player_ind=[0, 1, 2]) # Players 0, 1, and 2 revises
assert_array_equal(self.li.current_actions, [0, 1, 0, 0, 0])
def test_simulate_with_simultaneous_revision(self):
assert_array_equal(
self.li.simulate(ts_length=3, init_actions=[1, 0, 0, 0, 1]),
[[1, 0, 0, 0, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 1, 1]]
)
def test_simulate_with_sequential_revison(self):
np.random.seed(60)
assert_array_equal(
self.li.simulate(ts_length=4, init_actions=[1, 0, 0, 0, 1],
revision='sequential'),
[[1, 0, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 1, 1, 0, 1],
[1, 1, 1, 1, 1]]
)
# Invalid inputs #
@raises(ValueError)
def test_localint_invalid_input_nonsquare_adj_matrix():
li = LocalInteraction(payoff_matrix=np.zeros((2, 2)),
adj_matrix=np.zeros((2, 3)))
@raises(ValueError)
def test_localint_invalid_input_nonsquare_payoff_matrix():
li = LocalInteraction(payoff_matrix=np.zeros((2, 3)),
adj_matrix=np.zeros((2, 2)))
if __name__ == '__main__':
import sys
import nose
argv = sys.argv[:]
argv.append('--verbose')
argv.append('--nocapture')
nose.main(argv=argv, defaultTest=__file__)
``` |
{
"source": "jpardobl/cacti_rest",
"score": 2
} |
#### File: cacti_rest/cacti_rest/models.py
```python
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db import models
import simplejson
class Host(models.Model):
id = models.IntegerField(primary_key=True)
host_template_id = models.IntegerField()
description = models.CharField(max_length=150L)
hostname = models.CharField(max_length=250L, blank=True)
notes = models.TextField(blank=True)
snmp_community = models.CharField(max_length=100L, blank=True)
snmp_version = models.IntegerField()
snmp_username = models.CharField(max_length=50L, blank=True)
snmp_password = models.CharField(max_length=50L, blank=True)
snmp_auth_protocol = models.CharField(max_length=5L, blank=True)
snmp_priv_passphrase = models.CharField(max_length=200L, blank=True)
snmp_priv_protocol = models.CharField(max_length=6L, blank=True)
snmp_context = models.CharField(max_length=64L, blank=True)
snmp_port = models.IntegerField()
snmp_timeout = models.IntegerField()
availability_method = models.IntegerField()
ping_method = models.IntegerField(null=True, blank=True)
ping_port = models.IntegerField(null=True, blank=True)
ping_timeout = models.IntegerField(null=True, blank=True)
ping_retries = models.IntegerField(null=True, blank=True)
max_oids = models.IntegerField(null=True, blank=True)
device_threads = models.IntegerField()
disabled = models.CharField(max_length=2L, blank=True)
thold_send_email = models.IntegerField()
thold_host_email = models.IntegerField()
monitor = models.CharField(max_length=3L)
monitor_text = models.TextField()
status = models.IntegerField()
status_event_count = models.IntegerField()
status_fail_date = models.DateTimeField()
status_rec_date = models.DateTimeField()
status_last_error = models.CharField(max_length=255L, blank=True)
min_time = models.DecimalField(null=True, max_digits=12, decimal_places=5, blank=True)
max_time = models.DecimalField(null=True, max_digits=12, decimal_places=5, blank=True)
cur_time = models.DecimalField(null=True, max_digits=12, decimal_places=5, blank=True)
avg_time = models.DecimalField(null=True, max_digits=12, decimal_places=5, blank=True)
total_polls = models.IntegerField(null=True, blank=True)
failed_polls = models.IntegerField(null=True, blank=True)
availability = models.DecimalField(max_digits=10, decimal_places=5)
class Meta:
db_table = 'host'
def to_json(self):
return simplejson.dumps({
"resource_type": "host",
"hostname": self.hostname,
"id": self.id,
"description": self.description,
"datasources": reverse('resource_host_datasource', args=[self.id, ])})
class DataTemplateData(models.Model):
id = models.IntegerField(primary_key=True)
local_data_template_data_id = models.IntegerField()
local_data_id = models.ForeignKey("DataLocal", db_column="local_data_id", related_name="template")
data_template_id = models.ForeignKey("DataTemplate", db_column="data_template_id")
data_input_id = models.IntegerField()
t_name = models.CharField(max_length=2L, blank=True)
name = models.CharField(max_length=250L)
name_cache = models.CharField(max_length=255L)
data_source_path = models.CharField(max_length=255L, blank=True)
t_active = models.CharField(max_length=2L, blank=True)
active = models.CharField(max_length=2L, blank=True)
t_rrd_step = models.CharField(max_length=2L, blank=True)
rrd_step = models.IntegerField()
t_rra_id = models.CharField(max_length=2L, blank=True)
class Meta:
db_table = 'data_template_data'
def __unicode__(self):
return u"%s" % self.name
class DataTemplate(models.Model):
id = models.IntegerField(primary_key=True)
hash = models.CharField(max_length=32L)
name = models.CharField(max_length=150L)
class Meta:
db_table = 'data_template'
class DataLocal(models.Model):
id = models.IntegerField(primary_key=True)
data_template_id = models.ForeignKey(DataTemplateData, db_column="data_template_id")
host_id = models.ForeignKey(Host,db_column="host_id")
snmp_query_id = models.IntegerField()
snmp_index = models.CharField(max_length=255L)
class Meta:
db_table = 'data_local'
def to_json(self):
return simplejson.dumps({
"resource_type": "datasource",
"id": self.id,
"host_id": self.host_id.description,
"template": u"%s" % self.template.get().data_template_id.name,
"data": reverse('resource_datasource', args=[self.id, ]),
})
def get_rra_path(self):
return self.data_template_id.data_source_path
class Settings(models.Model):
name = models.CharField(max_length=50L, primary_key=True)
value = models.CharField(max_length=1024L)
class Meta:
db_table = 'settings'
```
#### File: cacti_rest/cacti_rest/resource_datasource.py
```python
from cacti_rest.utils import retrieve_param
from django.http import HttpResponseServerError
from django.shortcuts import render_to_response
from cacti_rest.models import DataTemplateData, DataLocal
import logging, simplejson#, traceback
from cacti_rest.rra import extract_data, convert_to_json
from cacti_rest.settings import RRA_PATH
from django.core.urlresolvers import reverse
def get(request, id_ds):
try:
obj = DataTemplateData.objects.get(local_data_id=id_ds)
path = obj.data_source_path.replace("<path_rra>", RRA_PATH)
if path is None:
raise("Can't find data related to datasouce %s" % data)
start = retrieve_param(request, "start", "-1h")
data = convert_to_json(extract_data(path, 300, start))
response = render_to_response(
"cacti_rest/json/datasource.json",
{
"resource_type": "datasource",
"data": data,
"id": id_ds,
"url": reverse("resource_datasource", args=[id_ds, ]),
"host_resource": reverse("resource_host", args=[obj.local_data_id.host_id.id, ]),
},
content_type="application/json")
response['Cache-Control'] = 'no-cache'
return response
except Exception, ex:
logging.error("Resource get error: %s" % ex)
response = HttpResponseServerError(
content=simplejson.dumps({"errors": str(ex)}),
content_type="application/json")
response['Cache-Control'] = 'no-cache'
return response
#@csrf_exempt
#@access_required
def entrance(request, *args, **kwargs):
if request.method == "GET":
return get(request, *args, **kwargs)
``` |
{
"source": "jpardobl/django-hautomation-suite",
"score": 2
} |
#### File: management/commands/backup_config.py
```python
import logging, os, shutil
from ha_cfg import paths
from django.core.management.base import BaseCommand
logger = logging.getLogger("backup_config")
class Command(BaseCommand):
args = ''
help = 'Create a ready to run Django settings file for Home Automation Python Project'
def handle(self, *args, **options):
cwd = os.getcwd()
logger.info("Backing up settings from every involved module to %s" % cwd)
shutil.copyfile(paths.x10_plugin_settings(), os.path.join(cwd, "x10_plugin_settings.bak"))
logger.info("Backed up file: %s" % os.path.join(cwd, "x10_plugin_settings.bak"))
shutil.copyfile(paths.django_thermostat_settings(), os.path.join(cwd, "django_thermostat_settings.bak"))
logger.info("Backed up file: %s" % os.path.join(cwd, "django_thermostat_settings.bak"))
```
#### File: management/commands/ha_apply_config.py
```python
import logging, re, shutil, pytz
from ha_cfg import paths
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
args = ''
help = 'Create a ready to run Django settings file for Home Automation Python Project'
DISTRO_SETTINGS_PATH = "django_hautomation_suite/distro_settings.py"
RUNNING_SETTINGS_PATH = "django_hautomation_suite/settings.py"
CONFIGURED_SETTINGS_PATH = "django_hautomation_suite/settings_configured.py"
HAUTOMATION_X10_PATH = "django_hautomation_suite/distro_hautomation_x10_settings.py"
THERMOSTAT_PATH = "django_hautomation_suite/distro_thermostat_settings.py"
FQDN_PATTERN = r"(?=^.{4,255}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\.)+[a-zA-Z]{2,63}$)"
IP_PATTERN = r""
def gen_settings(self):
shutil.copyfile(Command.DISTRO_SETTINGS_PATH, Command.CONFIGURED_SETTINGS_PATH)
def handle(self, *args, **options):
logging.basicConfig(level=logging.DEBUG)
logging.info("Applying settings from %s to %s" % (self.CONFIGURED_SETTINGS_PATH, self.RUNNING_SETTINGS_PATH))
shutil.copyfile(self.CONFIGURED_SETTINGS_PATH, self.RUNNING_SETTINGS_PATH)
``` |
{
"source": "jpardobl/django-thermostat",
"score": 2
} |
#### File: backends/rawfile/RAWFile.py
```python
import os
import sys
import time
import copy
try:
import cPickle as pickle
except:
import pickle
from threading import Thread, Lock
from pypelib.resolver.Resolver import Resolver
'''
@author: lbergesio,omoya,cbermudo
@organization: i2CAT, OFELIA FP7
RAWFile
Implementes persistence engine to a raw file for RuleTables
'''
class RAWFile():
#XXX: lbergesio: Is it necessary to use a mutex here?
_mutex = Lock()
@staticmethod
def save(obj, parser, **kwargs):
if "fileName" not in kwargs:
raise Exception("FileName is required")
with RAWFile._mutex:
fileObj = open(kwargs["fileName"], "wb" )
try:
cObj = obj.clone()
except Exception,e:
print "Could not clone original obj %s\n%s" %(str(obj),str(e))
pickle.dump(cObj,fileObj)
fileObj.close()
@staticmethod
def load(tableName, resolverMappings, parser, **kwargs):
with RAWFile._mutex:
if not kwargs["fileName"]:
raise Exception("FileName is required")
fileObj = open(kwargs["fileName"], "r" )
table = pickle.load(fileObj)
table._mutex = Lock()
table._mappings = resolverMappings
table._resolver = Resolver(table._mappings)
fileObj.close()
if table.name != tableName:
raise Exception("Table name mismatch; did you specify the correct file?")
return table
```
#### File: pypelib/persistence/PersistenceEngine.py
```python
import os
import sys
import time
'''
@author: msune,omoya,CarolinaFernandez
@@organization: i2CAT, OFELIA FP7
Persistence engine
Implementes driver-based persistence backend selection
'''
class PersistenceEngine():
#Default Class Attributes
_defaultParser = "RegexParser"
_defaultPersistence = "Django"
#Drivers
_drivers = ["Django","RAWFile"]
#Fill with appropiate path
PATH_TO_DRIVERS="backends"
def __init__(self):
raise Exception("Static class cannot be instanciated")
@staticmethod
def _getDriver(driverName):
print "driver name: %s" %driverName
if driverName == "Django":
PATH = PersistenceEngine.PATH_TO_DRIVERS + '.django.Django'
try:
exec('from ' + PATH + ' import Django')
return Django
except:
raise Exception(driverName + ' persistence driver not found in ' + PersistenceEngine.PATH_TO_DRIVERS)
elif driverName == "RAWFile":
PATH = PersistenceEngine.PATH_TO_DRIVERS + '.rawfile.RAWFile'
try:
exec('from ' + PATH + ' import RAWFile')
return RAWFile
except:
raise Exception(driverName + ' persistence driver not found in ' + PersistenceEngine.PATH_TO_DRIVERS)
else:
raise Exception(driverName + ' not supported')
@staticmethod
def save(obj, pBackend, parser=None, **kwargs):
return PersistenceEngine._getDriver(pBackend).save(obj, parser, **kwargs)
@staticmethod
def load(tableName, pBackend, resolverMappings, parser=None, **kwargs):
return PersistenceEngine._getDriver(pBackend).load(tableName, resolverMappings, parser, **kwargs)
'''
Retrieves every Driver's PolicyRuleTable object for a given name.
This method should be seldom used.
'''
@staticmethod
def loadAll(tableName, pBackend):
return PersistenceEngine._getDriver(pBackend).loadAll(tableName)
'''
Deletes a Driver's PolicyRuleTable object for a given ID.
This method should be seldom used.
'''
@staticmethod
def delete(tableID, pBackend):
return PersistenceEngine._getDriver(pBackend).delete(tableID)
```
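`PersistenceEngine` only dispatches on the backend name and forwards to the driver's static `save`/`load`. A heavily hedged round-trip sketch through the `RAWFile` backend from a Django shell of this project, assuming `RuleTable` exposes the `addRule`/`clone` interface the drivers expect and that the `backends` package resolves on the import path; the pickle path is arbitrary:
```python
# Hypothetical round-trip; import paths follow rules.py and Rule.py in this repo.
from django_thermostat.mappings import get_mappings
from pypelib.RuleTable import RuleTable
from django_thermostat.pypelib.persistence.PersistenceEngine import PersistenceEngine

mappings = get_mappings()
table = RuleTable("Demo table", mappings, "RegexParser", "RAWFile", None)
table.addRule("if 1 = 1 then accept")

PersistenceEngine.save(table, "RAWFile", fileName="/tmp/rules.pickle")
restored = PersistenceEngine.load("Demo table", "RAWFile", mappings,
                                  fileName="/tmp/rules.pickle")
```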
#### File: pypelib/resolver/Resolver.py
```python
import os
import sys
import time
from threading import Thread, Lock
'''
@author: msune
@organization: i2CAT, OFELIA FP7
Resolver class
Attemps to resolve keywords agains values/actions through mappings
'''
#Resolvers dictionary
_resolvers = {}
'''Resolver class'''
class Resolver():
#Class attributes
_mappings = None
_mutex = None
#Constructor
def __init__(self,mappings):
self._mutex=Lock()
self._mappings =mappings
pass
#set mappings
#@mappings: dictionary containing name <-> mapping (object...)
def setMappings(self,mappings):
with self._mutex:
self._mappings = mappings
#Get or generate the resolver
@staticmethod
def getOrGenerateResolver(theId,mappings=None):
if theId in _resolvers:
return _resolvers[theId]
if mappings == None:
raise Exception("Could not find the resolver with id:"+theId)
instance = Resolver(mappings)
#Save instance
_resolvers[theId] = instance
return instance
#Try to parse as a number
def _getNumericValue(self,string):
try:
#Try to parse integer
return int(string)
except:
#Try a floating point
try:
return float(string)
except:
return string
#Resolve a key
def resolve(self, key, metaObj):
with self._mutex:
#print "mappings: %s\n mmmmmmmmmmmmmmmmmmmmmmmm" % self._mappings
if not (isinstance(key,str) or isinstance(key,unicode)):
raise Exception("Only string keys are able to be resolved")
if key not in self._mappings:
#print "[DEBUG] "+str(self._getNumericValue(key))
return self._getNumericValue(key)
if isinstance(self._mappings[key],str):
#print "aaaaaaaaaaaaaaaaaa[DEBUG] resolved"+str(eval(self._mappings[key]))
return eval(self._mappings[key])
else:
#print "[DEBUG] Action: %s" % self._mappings[key]
return self._mappings[key](metaObj)
#resolver = Resolver.getOrGenerateResolver("hola",{"test":"metaObj","test2":2})
#metaObj = 3
#print "Value:"+str(resolver.resolve("test",metaObj))
#print "Value:"+str(resolver.resolve("test2",metaObj))
```
#### File: django_thermostat/pypelib/Rule.py
```python
import os
import sys
import time
import exceptions
import uuid
import logging
'''
@author: msune,lbergesio,omoya,CarolinaFernandez
@organization: i2CAT, OFELIA FP7
PolicyEngine Rule class
Encapsulates logic of a simple Rule
'''
from django_thermostat.pypelib.Condition import Condition
from django_thermostat.pypelib.persistence.PersistenceEngine import PersistenceEngine
from django_thermostat.pypelib.utils.Logger import Logger
class TerminalMatch(exceptions.Exception):
value = None
desc = None
def __init__(self,rType,desc):
if isinstance(rType['value'],bool):
self.value = rType['value']
else:
raise Exception("Unknown rule type")
self.desc = desc
def __str__(self):
return "%s "%self.desc
class Rule():
logger = Logger.getLogger()
#Class Attributes
_condition = None
_description = None
_errorMsg = None
_uuid = None #uuid.uuid4().hex
_defaultParser = "RegexParser"
_defaultPersistence = "Django"
#Types of rule
POSITIVE_TERMINAL={'value':True,'terminal':True}
POSITIVE_NONTERMINAL={'value':True,'terminal':False}
NEGATIVE_TERMINAL={'value':False,'terminal':True}
NEGATIVE_NONTERMINAL={'value':False,'terminal':False}
_types = [POSITIVE_TERMINAL,POSITIVE_NONTERMINAL,NEGATIVE_TERMINAL, NEGATIVE_NONTERMINAL]
#Rule type
_type = None
#Rule match Action
_matchAction=None
#Getters
def getCondition(self):
return self._condition
def getDescription(self):
return self._description
def getType(self):
return self._type
def getErrorMsg(self):
return self._errorMsg
def getMatchAction(self):
return self._matchAction
def getUUID(self):
return self._uuid
#setters
def setUUID(self,UUID):
self._uuid = UUID
#Constructor
def __init__(self,condition,description,errorMsg,ruleType=POSITIVE_TERMINAL,action=None,uuid=None):
if not isinstance(condition,Condition):
raise Exception("Object must be an instance of Condition")
if ruleType not in self._types:
raise Exception("Unknown rule type")
if action == None and (ruleType == self.NEGATIVE_NONTERMINAL or ruleType == self.POSITIVE_NONTERMINAL):
raise Exception("You cannot create non-terminal actionless rules")
self._condition = condition
self._matchAction = action
self._type = ruleType
self._description = description
self._errorMsg = errorMsg
self._uuid = uuid
def dump(self):
#Debug dump
toReturn = self._condition.dump()
toReturn+="=> %s "%str(self._type['value'])
if self._matchAction != None:
toReturn += "(%s) "%str(self._matchAction)
if self._type['terminal']:
toReturn += "[TERM] "
if self._description:
toReturn+=" #"+self._description
return toReturn
#Resolver is passed at evaluation time to be able to dynamically redirect actions
def evaluate(self,metaObj,resolver):
try:
Rule.logger.setLevel(logging.DEBUG)
result = self._condition.evaluate(metaObj,resolver)
Rule.logger.debug('Result was: %s rule: [%s]' % (str(result), self.dump()))
except Exception as e:
Rule.logger.error('Error on rule: %s',self.dump())
Rule.logger.error('Exception: %s', str(e))
Rule.logger.error('Rule will be skipped!')
result = False
if result:
if self._matchAction != None:
resolver.resolve(self._matchAction,metaObj)
#If is terminal raise TerminalMatch
if self._type['terminal']:
raise TerminalMatch(self._type,self._errorMsg)
#return whatever
return
def getConditionDump(self):
return self.getCondition().dump()
```
#### File: pypelib/utils/Logger.py
```python
import logging
from django.conf import settings
class Logger():
@staticmethod
def getLogger():
logger = logging.getLogger("thermostat.rules")
logger.setLevel(settings.LOG_LEVEL)
return logger
```
#### File: django-thermostat/django_thermostat/rules.py
```python
from django_thermostat.mappings import get_mappings
from pypelib.RuleTable import RuleTable
from django_thermostat.utils import gen_comparing_time
from django_thermostat.models import Rule
import logging
from django.conf import settings
logger = logging.getLogger("thermostat.rules")
logger.setLevel(settings.LOG_LEVEL)
def evaluate_non_themp():
mappings = get_mappings()
table = RuleTable(
"Non thermostat rules",
mappings,
"RegexParser",
#rawfile,
"RAWFile",
None)
table.setPolicy(False)
for rule in Rule.objects.filter(active=True, thermostat=False).order_by("pk"):
table.addRule(rule.to_pypelib())
if settings.DEBUG:
table.dump(logger)
try:
table.evaluate({})
logger.debug("Table NONTHERM evaluated True")
except Exception as ex:
logger.debug("Table NONTHERM evaluated False")
def evaluate():
mappings = get_mappings()
table = RuleTable(
"Decide tunned temp",
mappings,
"RegexParser",
#rawfile,
"RAWFile",
None)
logger.debug("current time: %s " % mappings["current_time"]())
logger.debug("current day of week: %s" % mappings["current_day_of_week"]())
logger.debug("current temp %s" % mappings["current_internal_temperature"]())
logger.debug("economic %s" % mappings["economic_temperature"]())
logger.debug("confort %s" % mappings["confort_temperature"]())
logger.debug("tuned %s" % mappings["tuned_temperature"]())
logger.debug("flame %s" % mappings["flame_on"]())
logger.debug("heat on %s" % mappings["heater_on"]())
table.setPolicy(False)
table.addRule("if heater_manual = 1 then ACCEPT")
for rule in Rule.objects.filter(active=True, thermostat=True).order_by("pk"):
table.addRule(rule.to_pypelib())
if settings.DEBUG:
table.dump(logger)
metaObj = {}
try:
table.evaluate(metaObj)
logger.debug("Table THERM1 evaluated True")
mappings["tune_to_confort"]()
except Exception:
logger.debug("Table THERM1 evaluated False")
mappings["tune_to_economic"]()
table1 = RuleTable(
"Decide flame status",
mappings,
"RegexParser",
#rawfile,
"RAWFile",
None)
table1.addRule("if heater_on = 0 then deny")
table1.addRule("if current_internal_temperature < tuned_temperature then accept")
if settings.DEBUG:
table1.dump(logger)
try:
table1.evaluate(metaObj)
logger.debug("Table THERM2 evaluated True")
try:
mappings["start_flame"]()
except Exception as ex:
logger.critical("ERROR: Cant start flame: %s" % ex)
except Exception as e:
logger.debug("Table THERM2 evaluated False")
try:
mappings["stop_flame"]()
except Exception as ex:
logger.critical("ERROR: Cant stop flame: %s" % ex)
```
#### File: django-thermostat/django_thermostat/tests.py
```python
import unittest, os, subprocess, logging
from django_thermostat.models import *
from django_thermostat.mappings.weather import log_flame_stats
from django_thermostat.mappings.timings import is_at_night
from django_thermostat import settings
from time import sleep
class Testaws(unittest.TestCase):
def test_sns_msg(self):
from django_thermostat.aws import ses_send_email
print ses_send_email("subject", "arn:aws:sns:eu-west-1:837355510129:JavierPardo", "hola")
class TestMappings(unittest.TestCase):
def test_is_at_night(self, ):
from mappings.timings import is_at_night
self.assertTrue(is_at_night() == 1,
"Not properly calculating is_at_night")
class TestFlameStatsParser(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
def test_regular(self):
print "Testing regular, 5 seconds flaming..."
os.remove(settings.FLAME_STATS_PATH)
log_flame_stats(True)
sleep(5)
log_flame_stats(False)
out = subprocess.check_output(["python", "manage.py", "parse_flame_stats", "1"])
#self.assertRegexpMatches(text, expected_regexp, msg)
self.assertRegexpMatches(
out,
"5\n$",
"not properly parsing regular example of flame stats: %s" % out)
def test_without_final_off(self):
print "testing without final off plus and flame started 5 s before parsing..."
os.remove(settings.FLAME_STATS_PATH)
log_flame_stats(True)
sleep(5)
out = subprocess.check_output(["python", "manage.py", "parse_flame_stats", "1"])
self.assertRegexpMatches(
out,
"5\n$",
"not properly parsing without final_off: %s" % out)
def test_starts_before_time_range(self):
print "Testing if flamings starts before time range, it takes 1 minute..."
os.remove(settings.FLAME_STATS_PATH)
log_flame_stats(True)
sleep(61)
out = subprocess.check_output(["python", "manage.py", "parse_flame_stats", "1"])
self.assertRegexpMatches(
out,
"60\n$",
"not properly parsing when flaming starts before time range: %s" % out)
def test_first_data_is_an_off(self):
print "Testing when first data line included in time range is OFF"
os.remove(settings.FLAME_STATS_PATH)
log_flame_stats(False)
sleep(10)
out = subprocess.check_output(["python", "manage.py", "parse_flame_stats", "1"])
self.assertRegexpMatches(
out,
"49\n$",
"not properly parsing when flaming starts before time range: %s" % out)
class TestRules(unittest.TestCase):
def setup(self, ):
Day(name="Mon", value="Mon").save()
Day(name="Tue", value="Tue").save()
Day(name="Wed", value="Wed").save()
Day(name="Thu", value="Thu").save()
Day(name="Fri", value="Fri").save()
def test_to_pyplib(self, ):
self.setup()
d = Day.objects.get(name="Mon")
tr = TimeRange(start="6:00:00", end="7:00:00")
tr.save()
r = Rule()
r.active = True
r.action = "confort_temperature"
r.save()
r.days.add(d)
r.ranges.add(tr)
self.assertEquals(
r.to_pypelib(),
"if ( (current_day_of_week = Mon) ) && ( (1385528400.0 > current_time && current_time < 1385532000.0) ) then do confort_temperature",
"Not properly trasnsforming to pypelib, got: %s" % r.to_pypelib()
)
r.days.add(Day.objects.get(name="Tue"))
self.assertEquals(
r.to_pypelib(),
"if ( (current_day_of_week = Mon) || (current_day_of_week = Tue) ) && ( (1385528400.0 > current_time && current_time < 1385532000.0) ) then do confort_temperature",
"Not properly trasnsforming to pypelib, got: %s" % r.to_pypelib()
)
r.days.add(Day.objects.get(name="Fri"))
self.assertEquals(
r.to_pypelib(),
"if ( (current_day_of_week = Mon) || (current_day_of_week = Tue) || (current_day_of_week = Fri) ) && ( (1385528400.0 > current_time && current_time < 1385532000.0) ) then do confort_temperature",
"Not properly trasnsforming to pypelib, got: %s" % r.to_pypelib()
)
r.days.all().delete()
self.assertEquals(
r.to_pypelib(),
"if ( (1385528400.0 > current_time && current_time < 1385532000.0) ) then do confort_temperature",
"Not properly trasnsforming to pypelib, got: %s" % r.to_pypelib()
)
r.ranges.all().delete()
self.assertEquals(
r.to_pypelib(),
"if 1 = 1 then do confort_temperature",
"Not properly trasnsforming to pypelib, got: %s" % r.to_pypelib()
)
class TestMappingsIsAtNight(unittest.TestCase):
def test_is_at_night(self):
logging.basicConfig(level=logging.DEBUG)
print ( "Is at nigh? it says: %s" % is_at_night())
def main():
unittest.main()
if __name__ == "__main__":
unittest.main()
```
#### File: jpardobl/django-thermostat/parse_flame.py
```python
import requests, logging, re
from django_thermostat import settings
from time import localtime, time, mktime
def time_range(self, length):
current = time()
return [float(current - length * 60), float(current)]
def handle(self, *args, **kwargs):
# logging.basicConfig(level=logging.DEBUG)
if len(args) != 1:
self.stderr.write("Please send argument of how many minutes")
exit(1)
with open(settings.FLAME_STATS_PATH) as f:
contents = f.readlines()
t_range = time_range(int(args[0]))
# logging.debug("Time range %s" % t_range)
cont = 0
last_start = None
data = []
for line in contents:
cont = cont + 1
m = re.search("(\d+\.\d+)\n$", line)
if m is None:
logging.warn("Format of line %d not correct, cannot find time from epoch at the end" % cont)
continue
time = float(m.groups(0)[0])
m = re.search("^(ON|OFF)", line)
if m is None:
logging.warn("Format of line %d not correct, cannot find ON|OFF action at the beginning" % cont)
continue
action = m.groups(0)[0]
if time < t_range[0] or time > t_range[1]:
logging.debug("Dejamos la linea %d fuera porque se sale del rango pedido" % cont)
#initialize the last_start so we know it even if it occur before the time range
#if the line is OFF we put last_start = None so the state of the flame is off
if action == "ON":
last_start = time
else:
last_start = None
continue
data.append([action, time])
print data
print "last_Start: %s " % last_start
last_heating_period = None
total_heating_period = 0
for action, time in data:
if action == "ON":
last_start = time
last_heating_period = None
if action == "OFF" and last_start is not None:
#TODO que pasa si last_start es None
last_heating_period = time - last_start
last_start = None
total_heating_period = int(total_heating_period) + int((last_heating_period if last_heating_period is not None else 0))
"""
logging.debug("action: %s time: %d; %d %d %d" % (
action,
time,
last_heating_period if last_heating_period is not None else 0,
last_start if last_start is not None else 0,
total_heating_period))
"""
#if there is no data (for example: every line was out of the time range), we need to know
#the status of the flame. Therefore we use last_start, which is set above when the time range is checked
if len(data) == 0 and last_start is not None:
print "all lines are earlier than the requested period and the flame was already on, so count the time from the start of the period"
total_heating_period = int(args[0]) * 60
print total_heating_period
#if the first action in data is OFF, means the flame was on by the starting of the time range
#it is needed to add this time
if len(data) and data[0][0] == "OFF":
# print data[0][1]
print "como la pimera linea es OFF tenemos que add el tiempo desde el principip del periodo hasta el OFF, ates: %s" % total_heating_period
total_heating_period = int(total_heating_period) + int(data[0][1] - t_range[0])
print "despuest %s " % total_heating_period
#if the last action is ON, need to add the time from the instance of that last action,
#to the end of the range
if len(data) and data[len(data)-1][0] == "ON":
total_heating_period = int(total_heating_period) + int(t_range[1] - data[0][1])
try:
#calculate the percent
total_seconds = int(args[1]) * 60
print("absolute:%d percent:%.2f" % (total_heating_period, (100 * total_heating_period ) / total_seconds))
exit(0)
except Exception, er:
logging.error("Exception while trying to return the value: %s " % er)
``` |
{
"source": "jpardobl/hautomation_x10",
"score": 2
} |
#### File: hautomation_x10/hautomation_x10/cmds.py
```python
from utils import validate_address
from driver import netcat
try:
from django.conf import settings
except ImportError:
import settings
import logging
def pl_switch(address, value):
if value not in ["on", "off"]:
raise ValueError("Switch value must be 'on' or 'off'")
validate_address(address)
cmd = b"pl %s %s\n" % (address, value)
netcat(settings.MOCHAD_HOST, settings.MOCHAD_PORT, cmd)
def pl_dim(address, value):
if int(value) not in range(0, 32):
raise ValueError("Dim value must be in the range(0, 32)")
validate_address(address)
cmd = "pl %s dim %s\n" % (address, value)
netcat(settings.MOCHAD_HOST, settings.MOCHAD_PORT, cmd)
def pl_bri(address, value):
if int(value) not in range(0, 32):
raise ValueError("Dim value must be in the range(0, 32)")
validate_address(address)
cmd = "pl %s bright %s\n" % (address, value)
netcat(settings.MOCHAD_HOST, settings.MOCHAD_PORT, cmd)
def pl_all_lights_on(group):
cmd = "pl %s1 extended_code_1 0 5\n" % group
netcat(settings.MOCHAD_HOST, settings.MOCHAD_PORT, cmd)
def pl_all_lights_off(group):
cmd = "pl %s1 extended_code_1 0 11\n" % group
netcat(settings.MOCHAD_HOST, settings.MOCHAD_PORT, cmd)
```
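Each helper above formats a mochad power-line command string and ships it over TCP through `netcat()`. A minimal usage sketch, assuming `settings.MOCHAD_HOST`/`settings.MOCHAD_PORT` point at a running mochad daemon and that an X10 module answers at the placeholder address `a1`:
```python
# Usage sketch; mochad host/port come from settings and "a1" is a placeholder address.
from hautomation_x10.cmds import pl_switch, pl_dim, pl_all_lights_off

pl_switch("a1", "on")      # sends "pl a1 on"
pl_dim("a1", 16)           # sends "pl a1 dim 16"
pl_switch("a1", "off")     # sends "pl a1 off"
pl_all_lights_off("a")     # sends "pl a1 extended_code_1 0 11"
```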
#### File: hautomation_x10/hautomation_x10/deploy.py
```python
import os
import sys
def populate_db():
pwd = os.getcwd()
sys.path.append(pwd)
try:
if "DJANGO_SETTINGS_MODULE" not in os.environ:
raise ImportError("No DJANGO_SETTINGS_MODULE env variable found")
from hacore.models import Protocol
if Protocol.objects.filter(name="X10").count() == 0:
Protocol(
name="X10",
gobj_name="driver_X10",
module="hautomation_x10",
validate_address_module="hautomation_x10.utils").save()
sys.stdout.writelines("Protocoll successfully populated into db")
else:
sys.stdout.writelines("Protocol is already at the db. No changes made.")
sys.path.remove(os.getcwd())
sys.exit(0)
except Exception, ex:
sys.path.remove(os.getcwd())
sys.stderr.writelines(ex.message)
sys.exit(1)
```
#### File: hautomation_x10/hautomation_x10/driver.py
```python
import socket
try:
from django.conf import settings
except ImportError:
import settings
import logging
driver_logger = logging.getLogger("driver")
driver_logger.setLevel(settings.LOG_LEVEL)
def netcat(hostname, port, content):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
driver_logger.debug("Trying connection to: %s:%s" % (hostname, port))
s.connect((hostname, port))
driver_logger.debug("Connected to: %s:%s" % (hostname, port))
s.sendall(b"%s\n" % content)
driver_logger.debug("sent: %s" % content)
s.shutdown(socket.SHUT_WR)
buff = ""
while True:
data = s.recv(1024)
if data == "":
break
buff = "%s%s" % (buff, data)
driver_logger.debug("Received: %s" % repr(buff))
s.close()
driver_logger.debug("Connection closed.")
return repr(buff)
except Exception as ex:
driver_logger.error("ERROR: %s" % ex)
raise ex
``` |
{
"source": "jpardobl/odoo_file_export",
"score": 2
} |
#### File: odoo_file_export/modules/blob.py
```python
import logging
import os
import re
import pandas as pd
from odoo import api, fields, models
from odoo.tools.config import config
from azure.core.exceptions import ResourceExistsError
from azure.storage.blob import BlobClient
_logger = logging.getLogger(__name__)
class BlobUpload(models.Model):
_name = 'odoo_file_export.blob'
_description = 'Blob File Upload'
name = fields.Char(required=True)
file = fields.Char(required=True)
storage_account_url = fields.Char(required=True)
container = fields.Char(required=True)
blob_name = fields.Char(required=True)
credential = fields.Char(required=True)
def _do_upload(self, blob, file_full_path):
with open(file_full_path, "rb") as data:
blob.upload_blob(data)
_logger.info("Local file {} uploaded to Blob Storage: {}".format(file_full_path, self.blob_name))
def upload(self, remove_local_data_file=True, overwrite=True):
for record in self:
try:
file_full_path = os.path.join(config.get('data_dir'), record.file)
if not os.path.exists(file_full_path):
_logger.error("Cannot find file ({}), thus not uploading".format(file_full_path))
continue
blob = BlobClient(
account_url=record.storage_account_url,
container_name=record.container,
blob_name=record.blob_name,
credential=record.credential)
try:
record._do_upload(blob, file_full_path)
except ResourceExistsError as ex:
if not overwrite:
_logger.info("El fichero existe, no sd sobreescribe por que overwrite=False")
continue
_logger.debug("El fichero existe, hay que sobreescribirlo")
blob.delete_blob()
_logger.debug("El fichero se ha borrado para sobreescribirlo")
record._do_upload(blob, file_full_path)
if remove_local_data_file: os.unlink(file_full_path)
except Exception as ex:
_logger.error("Error uploading to Blob: type({}), {}".format(type(ex), ex))
continue
```
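A sketch of how an `odoo_file_export.blob` record might be created and pushed from an Odoo shell; every value below is a placeholder, and the file path is resolved relative to Odoo's `data_dir` as in `upload()` above:
```python
# Hypothetical Odoo shell usage; all values below are placeholders.
rec = env['odoo_file_export.blob'].create({
    'name': 'daily export',
    'file': 'exports/daily.csv',  # relative to the configured data_dir
    'storage_account_url': 'https://myaccount.blob.core.windows.net',
    'container': 'exports',
    'blob_name': 'daily.csv',
    'credential': '<sas-token-or-account-key>',
})
rec.upload(remove_local_data_file=False, overwrite=True)
```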
#### File: odoo_file_export/modules/google_drive.py
```python
import logging
import os
import re
import pandas as pd
from odoo import api, fields, models
from odoo.tools.config import config
from odoo.tools.safe_eval import safe_eval
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from pydrive.files import ApiRequestError, FileNotUploadedError
from google.cloud import storage
_logger = logging.getLogger(__name__)
#TODO post messages to the screen
#TODO add credentials tool to the module
class GoogleDriveUpload(models.Model):
_name = 'odoo_file_export.google_drive'
_description = 'Google Drive File Upload'
name = fields.Char(required=True)
file = fields.Char(required=True)
file_mime_type = fields.Char(required=True, default='text/csv')
target_folder_id = fields.Char(required=False)
target_file_name = fields.Char(required=True)
convet_to_google_format = fields.Boolean(default=True)
google_drive_file_id = fields.Char()
def _delete_gdrive_file(self, gdrive, file_id):
try:
_logger.debug("Trying to delete gdrive file: {}".format(file_id))
gdrive.CreateFile({'id': file_id}).Delete()
_logger.debug("File deleted")
except ApiRequestError as aex:
_logger.error("Error while deleting gdrive file: {}".format(aex))
except FileNotUploadedError:
pass
def upload(self, overwrite=True, remove_local_data_file=True):
settings_file = self.env['res.config.settings'].browse().gdrive_settings_file_name()
_logger.debug("Reading gdrive settings: {}".format(settings_file))
gauth = GoogleAuth(settings_file=settings_file)
gdrive_client = GoogleDrive(gauth)
for record in self:
try:
file_full_path = os.path.join(config.get('data_dir'), record.file)
if not os.path.exists(file_full_path):
_logger.error("Cannot find extract file ({}), thus not uploading".format(file_full_path))
continue
if overwrite and record.google_drive_file_id:
record._delete_gdrive_file(gdrive_client, record.google_drive_file_id)
params = {
'title': record.target_file_name,
'mimeType': record.file_mime_type,
}
if record.target_folder_id:
params['parents'] = [{'id': record.target_folder_id}]
_logger.debug("Uploading file to Google Drive with params: {}".format(params))
f = gdrive_client.CreateFile(params)
f.SetContentFile(file_full_path)
f.Upload(param={
'convert': record.convet_to_google_format
})
record.google_drive_file_id = f['id']
_logger.info("Uploaded extract: {}".format(file_full_path))
if remove_local_data_file: os.unlink(file_full_path)
except Exception as ex:
_logger.error("Error uploading extract to gdrive: {}".format(ex))
continue
```
#### File: odoo_file_export/tests/test_gdrive_res_config_settings.py
```python
import os
import unittest
import yaml
from odoo.tests.common import TransactionCase
from odoo.tools.config import config
class TestResConfigSettings(TransactionCase):
def setUp(self):
super(TestResConfigSettings, self).setUp()
self.config = self.env['res.config.settings'].with_user(self.env.user.id)
def off_test_config_save_in_file(self):
cc = self.config.create({
'google_drive_client_id': 'client_id_testing',
'google_drive_client_secret': 'client_secret_testing'
})
cc.set_values()
assert os.path.isfile(cc.gdrive_settings_file_name())
with open(cc.gdrive_settings_file_name(), 'r') as reader:
data = yaml.load(reader, Loader=yaml.FullLoader)
assert 'client_id_testing' == data['client_config']['client_id']
assert 'client_secret_testing' == data['client_config']['client_secret']
self.assertIn('https://www.googleapis.com/auth/drive', data['oauth_scope'])
self.assertIn('https://www.googleapis.com/auth/drive.install', data['oauth_scope'])
assert data['save_credentials']
assert 'file' == data['save_credentials_backend']
credentials_file = os.path.join(config.get('data_dir'), "{}.json".format(cc.gdrive_settings_file_name()))
assert credentials_file == data['save_credentials_file']
def off_test_config_read_from_file(self):
cc = self.config.create({
'google_drive_client_id': 'client_id_testing_reading',
'google_drive_client_secret': 'client_secret_testing_reading'
})
cc.set_values()
values = cc.get_values()
assert 'client_id_testing_reading' == values['google_drive_client_id']
assert 'client_secret_testing_reading' == values['google_drive_client_secret']
"""
assert values['save_credentials']
assert 'file' == 'save_credentials_backend'
credentials_file = os.path.join(config.get('data_dir'), "{}.json".format(cc.gdrive_settings_file_name()))
assert credentials_file == values['save_credentials_file']
"""
``` |
{
"source": "jparenas/Hack-a-Ton",
"score": 3
} |
#### File: app/api/database.py
```python
import os
import sqlite3 as lite
def get_database():
connection = lite.connect('app.db', check_same_thread=False)
cursor = connection.cursor()
if os.getenv('DELETE_TABLES'):
print('Restarted Tables!')
cursor.execute("DROP TABLE PLAN")
cursor.execute("DROP TABLE USERS")
cursor.execute("DROP TABLE QUERIES")
cursor.execute("DROP TABLE CITIES")
cursor.execute("DROP TABLE IMAGES")
cursor.execute("""
CREATE TABLE IF NOT EXISTS QUERIES
(
query_id INT NOT NULL,
query_hash BINARY(32) NOT NULL,
uuid INT NOT NULL,
time DATETIME NOT NULL,
response_code INT,
departure CHAR(3),
budget INT,
start_day CHAR(10),
end_day CHAR(10),
num_passengers INT,
PRIMARY KEY (query_id),
FOREIGN KEY (uuid) REFERENCES USERS(uuid))
""")
cursor.execute("""
CREATE TABLE IF NOT EXISTS USERS
(
uuid INT NOT NULL,
last_query INT,
PRIMARY KEY (uuid),
FOREIGN KEY (last_query) REFERENCES QUERIES(query_id))
""")
cursor.execute("""
CREATE TABLE IF NOT EXISTS CITIES
(
iata_name CHAR(3) NOT NULL,
city_name VARCHAR(255),
latitude VARCHAR(255),
longitude VARCHAR(255),
PRIMARY KEY (iata_name))
""")
cursor.execute("""
CREATE TABLE IF NOT EXISTS IMAGES
(
iata_name CHAR(3) NOT NULL,
image VARCHAR(1024) NOT NULL,
FOREIGN KEY (iata_name) REFERENCES CITIES(iata_name))
""")
cursor.execute("""
CREATE TABLE IF NOT EXISTS PLAN
(
start_date DATE,
end_date DATE,
origin CHAR(3),
destination CHAR(3),
price INT,
url VARCHAR(1024),
like BIT,
query_id INT,
FOREIGN KEY (query_id) REFERENCES QUERIES(query_id),
FOREIGN KEY (destination) REFERENCES CITIES(iata_name))
""")
return connection, cursor
```
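`get_database()` hands back both the connection and a cursor on `app.db`, creating the tables on first use. A minimal usage sketch; the import path assumes the same package layout used by `resources.py`:
```python
# Minimal usage sketch of the helper above.
from app.api.database import get_database

connection, cursor = get_database()
cursor.execute("SELECT COUNT(*) FROM QUERIES")
print(cursor.fetchone()[0])
connection.close()
```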
#### File: app/api/resources.py
```python
import os
import json
import re
import datetime
import requests
import hashlib
import random
import bisect
import requests
from amadeus import Client, Location, ResponseError, NotFoundError, ServerError
import googlemaps
from datetime import datetime, timedelta
from flask import request, Response, jsonify
from flask_restplus import Resource
from sklearn.neighbors import KNeighborsRegressor
from .security import require_auth
from .database import get_database
from . import api_rest
amadeus = Client(
client_id=os.getenv('AMADEUS_API_KEY'),
client_secret=os.getenv('AMADEUS_SECRET_KEY'),
#log_level='debug'
)
gmaps = googlemaps.Client(key=os.getenv('GOOGLE_MAPS_SERVER_KEY'))
cache_timeout = os.getenv('CACHE_TIMEOUT', 30)
db_connection, db_cursor = get_database()
def check_date(date):
if re.match(r'\d{4}-\d{2}-\d{2}', date):
return True
else:
return False
class SecureResource(Resource):
""" Calls require_auth decorator on all requests """
method_decorators = [require_auth]
@api_rest.route('/get_flights')
@api_rest.param('origin', 'Origin of the flight')
@api_rest.param('uuid', 'UUID of the user')
@api_rest.param('budget', 'Budget of the flight')
@api_rest.param('start_date', 'Start date of the flight')
@api_rest.param('end_date', 'End date of the flight')
@api_rest.param('num_passengers', 'Number of passengers')
class FlightResource(Resource):
def get(self):
arguments = {'currency': 'USD', 'nonStop': False}
if not request.args.get('origin'):
return {'error':'Origin city is mandatory', 'status':400}, 400
if not request.args.get('uuid'):
return {'error':'UUID is mandatory', 'status':400}, 400
arguments['origin'] = request.args.get('origin')
uuid = request.args.get('uuid')
if request.args.get('budget'):
arguments['maxPrice'] = abs(int(request.args.get('budget')))
if request.args.get('start_date'):
if not check_date(request.args.get('start_date')):
return {'error':'Start date is not using the right format', 'status':400}, 400
arguments['departureDate'] = request.args.get('start_date')
if request.args.get('end_date') and request.args.get('start_date'):
if not check_date(request.args.get('end_date')):
return {'error':'End date is not using the right format', 'status':400}, 400
start_date = datetime.strptime(request.args.get('start_date'), '%Y-%m-%d').date()
end_date = datetime.strptime(request.args.get('end_date'), '%Y-%m-%d').date()
if start_date > end_date:
return {'error':'End date is earlier than the start day', 'status':400}, 400
difference = end_date - start_date
arguments['duration'] = difference.days
if request.args.get('num_passengers'):
num_passengers = abs(int(request.args.get('num_passengers')))
else:
num_passengers = 1
arguments_hash = hashlib.sha256(str(arguments).encode('ascii')).hexdigest()
db_cursor.execute("SELECT query_id, time FROM QUERIES WHERE query_hash=? AND uuid==?", (arguments_hash, uuid))
result = []
query_cache_result = db_cursor.fetchone()
if query_cache_result and datetime.strptime(query_cache_result[1], '%Y-%m-%d %H-%M-%S') + timedelta(minutes=cache_timeout) > datetime.utcnow():
db_cursor.execute("SELECT PLAN.start_date, PLAN.end_date, PLAN.origin, PLAN.destination, PLAN.price FROM PLAN WHERE PLAN.query_id=?", (query_cache_result[0],))
for query_result in db_cursor.fetchall():
flight = {
'departureDate': query_result[0],
'returnDate': query_result[1],
'origin': query_result[2],
'destination': query_result[3],
'price': {
'total': query_result[4],
}
}
db_cursor.execute('SELECT image FROM IMAGES WHERE iata_name=?', (flight['destination'],))
flight['image'] = random.choice(db_cursor.fetchall())[0]
result.append(flight)
else:
try:
flights = amadeus.shopping.flight_destinations.get(**arguments).result
status_code = 200
except NotFoundError:
return {'flights': []}, 201
except ServerError:
return {'error':500, 'status':'Server Error', 'message':'Probably the city does not exist'}, 500
            # Keep the id within SQLite's signed 64-bit range [0, 2**63 - 1].
            query_id = random.getrandbits(256) % (2 ** 63)
db_cursor.execute("INSERT INTO QUERIES VALUES(?,?,?,strftime('%Y-%m-%d %H-%M-%S','now'),?,?,?,?,?,?)",
(
query_id,
arguments_hash,
uuid,
status_code,
arguments['origin'],
request.args.get('budget') if request.args.get('budget') else None,
request.args.get('start_date') if request.args.get('start_date') else None,
request.args.get('end_date') if request.args.get('end_date') else None,
num_passengers
))
db_cursor.execute("INSERT OR IGNORE INTO USERS (uuid, last_query) VALUES (?,?)", (uuid, query_id))
db_cursor.execute("UPDATE USERS SET last_query=? WHERE uuid=?", (query_id, uuid))
for flight in flights['data']:
db_cursor.execute('INSERT INTO PLAN VALUES(?,?,?,?,?,?,?,?)', (
flight['departureDate'],
flight['returnDate'],
flight['origin'],
flight['destination'],
flight['price']['total'],
flight['links']['flightOffers'],
None,
query_id,
))
db_cursor.execute('SELECT image FROM IMAGES WHERE iata_name=?', (flight['destination'],))
query_result = db_cursor.fetchall()
if query_result == []:
"""
destination_name = amadeus.reference_data.locations.get(
keyword=flight['destination'],
subType=Location.CITY
)
if len(destination_name.result['data']) > 0:
destination_name = destination_name.result['data'][0]['address']['cityName'].lower()
else:
destination_name = flight['destination']
"""
destination_name = requests.get("https://iatacodes.org/api/v6/cities?api_key=" + os.getenv('IATA_API') + "&code=" + flight['destination'], verify=False).json()
if 'response' in destination_name:
destination_name = destination_name['response'][0]['name'].lower()
else:
destination_name = flight['destination']
"""
json_response = requests.get(f'https://api.teleport.org/api/urban_areas/slug:{destination_name}/images/')
try:
json_response = json_response.json()
if 'status' not in json_response:
if len(json_response['photos']) > 0:
image_url = json_response['photos'][0]['image']['mobile']
else:
image_url = json_response['photos']['image']['mobile']
else:
image_url = ''
except json.decoder.JSONDecodeError:
image_url = ''
"""
place_id = gmaps.find_place(destination_name, 'textquery')['candidates']
images = []
if len(place_id) > 0:
place_id = place_id[0]['place_id']
place_details = gmaps.place(place_id, random.getrandbits(256), ['photo', 'rating', 'geometry'])
if place_details['result'] != {}:
if 'photos' in place_details['result']:
for photo in place_details['result']['photos']:
image_url = 'https://maps.googleapis.com/maps/api/place/photo?maxheight=400&photoreference=' + photo['photo_reference'] + '&key=' + os.getenv('GOOGLE_MAPS_SERVER_KEY')
images.append(image_url)
else:
images.append('')
db_cursor.execute('INSERT INTO CITIES VALUES(?,?,?,?)', (
flight['destination'],
destination_name,
place_details['result']['geometry']['location']['lat'],
place_details['result']['geometry']['location']['lng'],
))
else:
images.append('')
db_cursor.execute('INSERT INTO CITIES VALUES(?,?,?,?)', (
flight['destination'],
destination_name,
None,
None,
))
else:
images.append('')
db_cursor.execute('INSERT INTO CITIES VALUES(?,?,?,?)', (
flight['destination'],
destination_name,
None,
None,
))
for image in images:
db_cursor.execute('INSERT INTO IMAGES VALUES(?,?)', (flight['destination'], image))
image_url = random.choice(images)
else:
image_url = random.choice(query_result)[0]
flight['image'] = image_url
del flight['type']
del flight['links']
if image_url != '':
result.append(flight)
for flight in result:
flight['price']['passenger'] = float(flight['price']['total'])
flight['price']['total'] = round(float(flight['price']['total']) * num_passengers, 2)
db_cursor.execute('SELECT city_name FROM CITIES WHERE iata_name=?', (flight['destination'],))
city_name = db_cursor.fetchone()[0]
flight['destination_name'] = city_name.title()
db_connection.commit()
return {'flights': result}
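# Hedged example request for the endpoint above (values are illustrative only):
#   GET /get_flights?origin=ORD&uuid=<user-uuid>&budget=500
#       &start_date=2020-06-01&end_date=2020-06-10&num_passengers=2
# Results are cached per (arguments, uuid) hash in the QUERIES/PLAN tables for
# `cache_timeout` minutes before the Amadeus API is queried again.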
@api_rest.route('/like_place')
class CityLikeResource(Resource):
def post(self):
data = request.json if request.json else request.form
if 'uuid' not in data:
            return {'error': 'UUID is required', 'status': 400}, 400
if 'destination' not in data:
return {'error': 'destination is required', 'status': 400}, 400
if 'like' not in data:
return {'error': 'like status is required', 'status': 400}, 400
        like = bool(data['like'])
db_cursor.execute("SELECT last_query FROM USERS WHERE uuid=?", (data['uuid'],))
query_id = db_cursor.fetchone()
if query_id:
db_cursor.execute("UPDATE PLAN SET like=? WHERE query_id=? AND destination=?", (like, query_id[0], data['destination']))
db_cursor.execute("SELECT PLAN.destination, PLAN.price, CITIES.latitude, CITIES.longitude, PLAN.like FROM PLAN INNER JOIN CITIES ON PLAN.destination = CITIES.iata_name WHERE PLAN.like IS NOT NULL AND PLAN.query_id=?", (query_id[0],))
places = db_cursor.fetchall()
if len(places) > 4:
x = []
y = []
def get_features(place):
return [float(place[1]), float(place[2]), float(place[3])]
for place in places:
x.append(get_features(place))
y.append(float(place[4]))
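                # Fit a small k-NN regressor on (price, latitude, longitude) -> like,
                # then use its predictions to order the candidate destinations.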
neigh = KNeighborsRegressor(n_neighbors=2)
neigh.fit(x, y)
def calculate_number(place):
return neigh.predict([get_features(place)])[0]
places.sort(key=calculate_number)
db_connection.commit()
result = []
for i in range(len(places)):
if int(places[i][4]) == 1:
result.append(places[i][0])
return {'destinations':places}, 200
else:
return {'error': 'User does not exist', 'status': 404}, 404
@api_rest.route('/retrieve_previous_search')
@api_rest.param('uuid', 'UUID of the user')
class PreviousSearchResource(Resource):
def get(self):
if not request.args.get('uuid'):
            return {'error': 'UUID is required', 'status': 400}, 400
db_cursor.execute("SELECT last_query FROM USERS WHERE uuid=?", (request.args.get('uuid'),))
query_results = db_cursor.fetchone()
if not query_results:
result = {
'error': 'User not found',
'status': 404
}
return result, 404
db_cursor.execute("SELECT departure, budget, start_day, end_day, num_passengers FROM QUERIES WHERE query_id=?", (query_results[0],))
query_results = db_cursor.fetchone()
if query_results:
result = {
'departure': query_results[0],
'budget': query_results[1],
'start_day': query_results[2],
'end_day': query_results[3],
'num_passengers': query_results[4],
}
return result
else:
result = {
'error': 'No search found',
'status': 404
}
return result, 404
@api_rest.route('/get_tickets')
@api_rest.param('num_passengers', 'the number of passengers')
@api_rest.param('returnDate', 'the date of arrival')
@api_rest.param('departureDate', 'the date of departure')
@api_rest.param('destination', 'the destination')
@api_rest.param('origin', 'the origin')
class TicketResource(Resource):
def get(self):
arguments = {}
arguments['origin'] = request.args.get('origin')
arguments['destination'] = request.args.get('destination')
arguments['departureDate'] = request.args.get('departureDate')
arguments['returnDate'] = request.args.get('returnDate')
if request.args.get('num_passengers'):
num_passengers = int(request.args.get('num_passengers'))
else:
num_passengers = 1
try:
flights = amadeus.shopping.flight_offers.get(**arguments).result
status_code = 200
except NotFoundError:
return {'flights': []}, 201
except ServerError:
return {'error': 500, 'status': 'Server Error', 'message': 'Probably the city does not exist'}, 500
extracted_flight_list = []
for offer_item in flights['data']:
flight_data = {}
flight_data['price_per_passenger'] = float(offer_item['offerItems'][0]['price']['total']) + float(offer_item['offerItems'][0]['price']['totalTaxes'])
flight_data['price_total'] = round(flight_data['price_per_passenger'] * num_passengers, 2)
flight_data['layovers'] = len(offer_item['offerItems'][0]['services'][0]['segments']) - 1
extracted_flight_list.append(flight_data)
print(extracted_flight_list)
return extracted_flight_list, status_code
```
#### File: Hack-a-Ton/server/places.py
```python
from googleplaces import GooglePlaces, types, lang
import os
# if query_result.has_attributions:
# print query_result.html_attributions
def get_places():
API_KEY = os.getenv("GOOGLE_API_KEY")
google_places = GooglePlaces(API_KEY)
# You may prefer to use the text_search API, instead.
query_result = google_places.text_search(location='DePauw University', radius=5)
# If types param contains only 1 item the request to Google Places API
    # will be sent as a type param to fulfill:
# http://googlegeodevelopers.blogspot.com.au/2016/02/changes-and-quality-improvements-in_16.html
places = query_result.places
return places
``` |
{
"source": "jparga/py4web-gae-example",
"score": 3
} |
#### File: apps/_default/controllers.py
```python
from py4web import action, request, abort, redirect, URL
from yatl.helpers import A
from .common import (
db,
session,
T,
cache,
auth,
logger,
authenticated,
unauthenticated,
flash,
)
@unauthenticated("index", "index.html")
def index():
user = auth.get_user()
flash.set("Hello myappforgae")
message = T("Hello {first_name}".format(**user) if user else "Hello")
return dict(message=message)
``` |
{
"source": "jparise/nosecomplete",
"score": 2
} |
#### File: jparise/nosecomplete/nosecomplete.py
```python
import os
import sys
import re
import ast
from optparse import OptionParser
class PythonTestFinder(object):
def find_functions(self, ast_body, matcher):
for obj in ast_body:
if not matcher(obj):
continue
if isinstance(obj, ast.FunctionDef):
yield obj.name
if isinstance(obj, ast.ClassDef):
for func in self.find_functions(obj.body, matcher):
yield '%s.%s' % (obj.name, func)
def get_module_tests(self, module):
with open(module) as f:
data = f.read()
result = ast.parse(data)
def matcher(obj):
if isinstance(obj, ast.FunctionDef):
return re.search('test', obj.name, re.IGNORECASE)
# Unlike nose, we're not able to determine whether this class
# inherits from unittest.TestCase
# So it may be the case that this class name lacks 'test'. As a
# compromise, match all classes
return isinstance(obj, ast.ClassDef)
tests = list(
self.find_functions(result.body, matcher)
)
return tests
class NoseTestFinder(object):
def _generate_tests(self, suite):
from nose.suite import ContextSuite
from nose.case import Test
for context in suite._tests:
if isinstance(context, Test):
yield context
continue
assert isinstance(context, ContextSuite)
for test in self._generate_tests(context):
yield test
def _get_test_name(self, test_wrapper):
from nose.case import FunctionTestCase
test = test_wrapper.test
if isinstance(test, FunctionTestCase):
return test.test.__name__
return test.__class__.__name__ + '.' + test._testMethodName
def _generate_test_names(self, suite):
return map(self._get_test_name, self._generate_tests(suite))
def get_module_tests(self, module):
import nose
loader = nose.loader.defaultTestLoader()
return self._generate_test_names(loader.loadTestsFromName(module))
def _get_prefixed(strings, prefix):
for string in strings:
if string.startswith(prefix):
yield string.replace(prefix, '', 1)
def _get_py_or_dirs(directory, prefix):
for entry in os.listdir(directory or '.'):
path = os.path.join(directory, entry)
if entry.startswith(prefix):
leftover = entry.replace(prefix, '', 1)
if os.path.isdir(path):
yield leftover + '/'
elif leftover.endswith('.py'):
yield leftover + ':'
def _complete(test_finder, thing):
if ':' in thing:
# complete a test
module, test_part = thing.split(':')
tests = list(test_finder.get_module_tests(module))
if '.' in test_part:
# complete a method
return _get_prefixed(strings=tests, prefix=test_part)
funcs = [test for test in tests if test.count('.') == 0]
classes = [test.split('.')[0] for test in tests if '.' in test]
if test_part in classes:
# indicate a method should be completed
return ['.']
return _get_prefixed(strings=funcs + classes, prefix=test_part)
if os.path.isdir(thing):
# complete directory contents
if thing != '.' and not thing.endswith('/'):
return ['/']
return _get_py_or_dirs(thing, '')
if os.path.exists(thing):
# add a colon to indicate search for specific class/func
return [':']
# path not exists, complete a partial path
directory, file_part = os.path.split(thing)
return _get_py_or_dirs(directory, file_part)
def complete(test_finder, thing):
for option in set(_complete(test_finder, thing)):
sys.stdout.write(thing + option + ' ') # avoid print for python 3
def main():
methods = {
'nose': NoseTestFinder,
'python': PythonTestFinder,
}
parser = OptionParser(usage='usage: %prog [options] ')
parser.add_option(
"-s",
"--search-method",
help="Search method to use when locating tests",
choices=list(methods.keys()),
default='python',
)
(options, args) = parser.parse_args()
finder_class = methods[options.search_method]
finder_instance = finder_class()
complete(finder_instance, './' if len(args) == 0 else args[0])
if __name__ == '__main__':
main()
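# Hedged usage sketch (invoked from a shell completion hook; the paths refer to
# the illustrative fixture below, not to any required layout):
#   $ python nosecomplete.py tests/           -> lists .py files and sub-directories
#   $ python nosecomplete.py tests/basic.py:  -> lists test functions / TestCase classes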
```
#### File: tests/fixtures/basic.py
```python
import unittest
def test_red():
pass
class AwesomeTestCase(unittest.TestCase):
def test_yellow(self):
pass
def test_green(self):
pass
def test_blue():
pass
``` |
{
"source": "jparise/vesta",
"score": 2
} |
#### File: vesta/vesta/client.py
```python
from __future__ import annotations
from typing import Any
from typing import Dict
from typing import List
from typing import Mapping
from typing import Optional
from typing import Union
from urllib.parse import urljoin
import requests
from .chars import COLS
from .chars import ROWS
from .chars import Rows
__all__ = ["Client"]
class Session(requests.Session):
def __init__(self, base_url: str):
super().__init__()
self.base_url = base_url
def request(
self,
method: Union[str, bytes],
url: Union[str, bytes],
*args,
**kwargs,
) -> requests.Response:
url = urljoin(self.base_url, url if isinstance(url, str) else url.decode())
return super().request(method, url, *args, **kwargs)
class Client:
"""Provides a Vestaboard API client interface.
Credentials must be provided as an ``api_key`` and ``api_secret``.
    Optionally, an alternate ``base_url`` can be specified, as well as any
additional HTTP ``headers`` that should be sent with every request
(such as a custom `User-Agent` header).
"""
def __init__(
self,
api_key: str,
api_secret: str,
*,
base_url: str = "https://platform.vestaboard.com",
headers: Optional[Mapping[str, str]] = None,
):
self.session = Session(base_url)
self.session.headers.update(
{
"X-Vestaboard-Api-Key": api_key,
"X-Vestaboard-Api-Secret": api_secret,
}
)
if headers:
self.session.headers.update(headers)
def __repr__(self):
return f"{type(self).__name__}(base_url={self.session.base_url!r})"
def get_subscriptions(self) -> List[Dict[str, Any]]:
"""Lists all subscriptions to which the viewer has access."""
r = self.session.get("/subscriptions")
r.raise_for_status()
return r.json().get("subscriptions", [])
def get_viewer(self) -> Dict[str, Any]:
"""Describes the currently authenticated viewer."""
r = self.session.get("/viewer")
r.raise_for_status()
return r.json()
def post_message(
self,
subscription_id: str,
message: Union[str, Rows],
) -> Dict[str, Any]:
"""Post of a new message to a subscription.
The authenticated viewer must have access to the subscription.
`message` can be either a string of text or a two-dimensional (6, 22)
array of character codes representing the exact positions of characters
on the board.
If text is specified, lines will be centered horizontally and
vertically if possible. Character codes will be inferred for
alphanumeric and punctuation, or can be explicitly specified in-line in
the message with curly braces containing the character code.
:raises ValueError: if `message` is a list with unsupported dimensions
"""
data: Dict[str, Union[str, Rows]]
if isinstance(message, str):
data = {"text": message}
elif isinstance(message, list):
if len(message) != ROWS or not all(len(row) == COLS for row in message):
raise ValueError(
f"expected a ({ROWS}, {COLS}) array of encoded characters"
)
data = {"characters": message}
else:
raise TypeError(f"unsupported message type: {type(message)}")
r = self.session.post(
f"/subscriptions/{subscription_id}/message",
json=data,
)
r.raise_for_status()
return r.json()
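
# Minimal usage sketch; the credentials, the subscription-id field name, and the
# message text below are placeholders (assumptions), not values from this module.
if __name__ == "__main__":  # pragma: no cover
    client = Client("my-api-key", "my-api-secret")
    subs = client.get_subscriptions()
    if subs:
        sub_id = subs[0].get("_id", "")  # field name assumed
        client.post_message(sub_id, "HELLO FROM VESTA")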
``` |
{
"source": "jpark2111/rasaX",
"score": 3
} |
#### File: rasaX/actions/service.py
```python
import requests
address = "https://int-api.mx.com/users/USR-8fe5e260-fe63-47dd-b8cd-438cd5a48b94/accounts?page=1&records_per_page=10"
headers = {
"Accept": "application/vnd.mx.api.v1+json",
"Content-Type": "application/json",
}
userName = "6<PASSWORD>"
password = "<PASSWORD>"
def get_mx_balance():
response = requests.get(
address, verify=False, headers=headers, auth=(userName, password)
).json()["accounts"][0]["balance"]
return response
``` |
{
"source": "jpark2111/rasaXazure",
"score": 2
} |
#### File: rasaXazure/actions/handoff.py
```python
from rasa_sdk import Tracker, Action
from rasa_sdk.executor import CollectingDispatcher
import ruamel.yaml
import pathlib
from typing import Dict, Text, Any, List
from rasa_sdk.events import EventType
here = pathlib.Path(__file__).parent.absolute()
handoff_config = (
ruamel.yaml.safe_load(open(f"{here}/handoff_config.yml", "r")) or {}
).get("handoff_hosts", {})
class ActionHandoffOptions(Action):
def name(self) -> Text:
return "action_handoff_options"
async def run(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[EventType]:
if not any([config.get("url") for bot, config in handoff_config.items()]):
dispatcher.utter_message(template="utter_no_handoff")
else:
buttons = [
{
"title": config.get("title"),
"payload": f'/trigger_handoff{{"handoff_to":"{bot}"}}',
}
for bot, config in handoff_config.items()
]
dispatcher.utter_message(
text=(
"I can't transfer you to a human, "
"but I can transfer you to one of these bots"
),
buttons=buttons,
)
return []
class ActionHandoff(Action):
def name(self) -> Text:
return "action_handoff"
async def run(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[EventType]:
dispatcher.utter_message(template="utter_handoff")
handoff_to = tracker.get_slot("handoff_to")
handoff_bot = handoff_config.get(handoff_to, {})
url = handoff_bot.get("url")
if url:
if tracker.get_latest_input_channel() == "rest":
dispatcher.utter_message(
json_message={
"handoff_host": url,
"title": handoff_bot.get("title"),
}
)
else:
dispatcher.utter_message(
template="utter_wouldve_handed_off", handoffhost=url
)
else:
dispatcher.utter_message(template="utter_no_handoff")
return []
``` |
{
"source": "jpark2320/bbalmu",
"score": 2
} |
#### File: usone/posts/views.py
```python
from django.db.models import Q
from rest_framework import status, pagination
from rest_framework.decorators import (
authentication_classes, permission_classes
)
from rest_framework.generics import (
ListAPIView,
RetrieveAPIView,
CreateAPIView,
DestroyAPIView,
UpdateAPIView
)
from .models import Post, Image
from . import serializers
class BasicSizePagination(pagination.PageNumberPagination):
page_size = 25
page_size_query_param = 'page_size'
max_page_size = 1000
class PostByFilters(ListAPIView):
serializer_class = serializers.PostSerializer
pagination_class = BasicSizePagination
authentication_classes = ([])
permission_classes = ([])
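    # Supported query parameters (hedged example; the URL prefix is an assumption):
    #   GET /posts/?reg=seoul&cat=food&sort-by=created_at&order-by=asc
    # `reg` and `cat` filter the queryset; `sort-by`/`order-by` control ordering,
    # defaulting to newest-first (-created_at) when no sort is given.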
def get_queryset(self):
queryset = Post.objects.all()
reg = self.request.query_params.get('reg', None)
cat = self.request.query_params.get('cat', None)
limit = self.request.query_params.get('limit', None)
sort_by = self.request.query_params.get('sort-by', None)
order_by = self.request.query_params.get('order-by', None)
# By region
if reg:
queryset = queryset.filter(region__iexact=reg)
# By category
if cat:
queryset = queryset.filter(category__iexact=cat)
# Sort by a given field, otherwise sorty by -created_at
sort_field = None
if sort_by:
sort_field = '-{0}'.format(sort_by)
else:
sort_field = '-created_at'
# Order by descending order as default, othewise order by ascending
if order_by and sort_by:
if order_by == 'asc':
sort_field = sort_field[1:]
# Order queryset based on sort_field if it is set
if sort_field:
queryset = queryset.order_by(sort_field)
return queryset
class ListAllImages(ListAPIView):
serializer_class = serializers.ImageSerializer
queryset = Image.objects.all()
authentication_classes = ([])
permission_classes = ([])
class CreatePost(CreateAPIView):
serializer_class = serializers.CreatePostSerializer
pagination_class = BasicSizePagination
queryset = Post.objects.all()
authentication_classes = ([])
permission_classes = ([])
class UpdatePost(UpdateAPIView):
serializer_class = serializers.CreatePostSerializer
pagination_class = BasicSizePagination
queryset = Post.objects.all()
authentication_classes = ([])
permission_classes = ([])
class DeletePost(DestroyAPIView):
serializer_class = serializers.CreatePostSerializer
pagination_class = BasicSizePagination
queryset = Post.objects.all()
authentication_classes = ([])
permission_classes = ([])
class ViewPost(RetrieveAPIView):
serializer_class = serializers.PostSerializer
pagination_class = BasicSizePagination
authentication_classes = ([])
permission_classes = ([])
queryset = Post.objects.all()
```
#### File: management/commands/ebuser.py
```python
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
# This management command was used to create the superuser.
class Command(BaseCommand):
def handle(self, *args, **options):
User = get_user_model()
User.objects.create_superuser('jpark2320', '<EMAIL>', '1234567890')
``` |
{
"source": "jpark9013/aiocodeforces",
"score": 3
} |
#### File: aiocodeforces/aiocodeforces/enum.py
```python
from enum import Enum
class CEnum(Enum):
def __str__(self):
return self.name
class ContestType(CEnum):
CF = 0
IOI = 1
ICPC = 2
class ContestPhase(CEnum):
BEFORE = 0
CODING = 1
PENDING_SYSTEM_TEST = 2
SYSTEM_TEST = 3
FINISHED = 4
class PartyParticipantType(CEnum):
CONTESTANT = 0
PRACTICE = 1
VIRTUAL = 2
MANAGER = 3
class ProblemType(CEnum):
PROGRAMMING = 0
QUESTION = 1
class SubmissionVerdict(CEnum):
FAILED = 0
OK = 1
PARTIAL = 2
COMPILATION_ERROR = 3
RUNTIME_ERROR = 4
WRONG_ANSWER = 5
PRESENTATION_ERROR = 6
TIME_LIMIT_EXCEEDED = 7
MEMORY_LIMIT_EXCEEDED = 8
IDLENESS_LIMIT_EXCEEDED = 9
SECURITY_VIOLATED = 10
CRASHED = 11
INPUT_PREPARATION_CRASHED = 12
CHALLENGED = 13
SKIPPED = 14
TESTING = 15
REJECTED = 16
class SubmissionTestSet(CEnum):
SAMPLES = 0
PRETESTS = 1
TESTS = 2
CHALLENGES = 3
TESTS1 = 4
TESTS2 = 5
TESTS3 = 6
TESTS4 = 7
TESTS5 = 8
TESTS6 = 9
TESTS7 = 10
TESTS8 = 11
TESTS9 = 12
TESTS10 = 13
class HackVerdict(CEnum):
HACK_SUCCESSFUL = 0
HACK_UNSUCCESSFUL = 1
INVALID_INPUT = 2
GENERATOR_INCOMPATIBLE = 3
GENERATOR_CRASHED = 4
IGNORED = 5
TESTING = 6
OTHER = 7
class ProblemResultType(CEnum):
PRELIMINARY = 0
FINAL = 1
```
#### File: aiocodeforces/aiocodeforces/party.py
```python
from aiocodeforces.enum import PartyParticipantType
from aiocodeforces.member import Member
class Party:
__slots__ = ["contest_id", "members", "participant_type", "team_id", "team_name", "ghost", "room",
"start_time_seconds"]
def __init__(self, dic):
self.contest_id: int = dic["contestId"]
self.members: list = [Member(i) for i in dic["members"]] # of Members
self.participant_type: PartyParticipantType = PartyParticipantType[dic["participantType"]]
self.team_id: int = dic.get("teamId") # Can be none
self.team_name: str = dic.get("teamName") # Can be none
self.ghost: bool = dic["ghost"]
self.room: int = dic.get("room") # Can be none
self.start_time_seconds: int = dic.get("startTimeSeconds") # Can be none
def __eq__(self, other):
return isinstance(other, Party) and self.contest_id == other.contest_id and self.members == other.members
def __ne__(self, other):
return not self.__eq__(other)
```
#### File: aiocodeforces/aiocodeforces/problem_result.py
```python
from aiocodeforces.enum import ProblemResultType
class ProblemResult:
__slots__ = ["points", "penalty", "rejected_attempt_count", "type", "best_submission_time_seconds"]
def __init__(self, dic):
self.points: float = dic["points"]
self.penalty: int = dic["penalty"]
self.rejected_attempt_count: int = dic["rejectedAttemptCount"]
self.type: ProblemResultType = ProblemResultType[dic["type"]] # Enum: PRELIMINARY, FINAL
self.best_submission_time_seconds: int = dic["bestSubmissionTimeSeconds"]
``` |
{
"source": "jpark96/capacity-of-metric-learners",
"score": 3
} |
#### File: jpark96/capacity-of-metric-learners/dataset.py
```python
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import pandas as pd
import skimage.io as io
import numpy as np
import matplotlib.pyplot as plt
# Load data
class LandmarksDataset(Dataset):
"""Landmarks dataset."""
def __init__(self, csv_file, root_dir, transform=None):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.landmarks_metadata = pd.read_csv(csv_file)
self.root_dir = root_dir
self.transform = transform
# Bidict from ids to labels to keep labels within [0, num_classes]
self.id_to_label = dict()
self.label_to_id = dict()
self.num_classes = 0
def __len__(self):
return len(self.landmarks_metadata)
def __getitem__(self, idx):
landmark_id = self.landmarks_metadata['landmark_id'][idx]
id = self.landmarks_metadata['id'][idx]
img_name = self.root_dir + str(landmark_id) + "/" + str(id) + ".jpg"
image = io.imread(img_name)
# If id is not seen, add to id2label bidict
if landmark_id not in self.id_to_label:
self.id_to_label[landmark_id] = self.num_classes
self.label_to_id[self.num_classes] = landmark_id
self.num_classes += 1
if self.transform:
image = self.transform(image)
sample = {'image': image, 'label': self.id_to_label[landmark_id]}
return sample
class RandomDataset(Dataset):
"""Random dataset with input dimensions input_dims."""
def __init__(self, num_samples=1, input_dims=1, useLabels=False, labels=[]):
self.input_dims = input_dims
# Initialize dataset
self.dataset = np.random.normal(size=(num_samples, input_dims))
# Initialize labels
self.labels = labels if useLabels else np.random.randint(0, 2, size=num_samples)
def __len__(self):
return self.dataset.shape[0]
def __getitem__(self, idx):
return {'image': torch.FloatTensor(self.dataset[idx]), 'label': torch.from_numpy(np.array(self.labels[idx])).float()}
### SIAMESE DATA SAMPLER ###
class SiameseDataset(Dataset):
"""Landmarks dataset."""
def __init__(self, dataset):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
# Get two items, with 50% chance similarity/dissimilarity
landmark_id0, landmark_id1 = None, None
should_be_similar = np.random.randint(2)
for i in range(10):
idx0, idx1 = np.random.choice(len(self.dataset), size=2)
landmark_id0 = self.dataset[idx0]['label']
landmark_id1 = self.dataset[idx1]['label']
if (should_be_similar and (landmark_id0 == landmark_id1)): break
if (not should_be_similar and (landmark_id0 != landmark_id1)): break
# Return sample
sample = {'image0': self.dataset[idx0]['image'], 'image1': self.dataset[idx1]['image'],
'label': torch.from_numpy(np.array(int(landmark_id0 != landmark_id1))).float()}
return sample
if __name__ == "__main__":
landmark_dataset = LandmarksDataset(csv_file='small-dataset/small-dataset.csv',
root_dir='small-dataset/',
transform=transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(256),
transforms.RandomCrop(244),
transforms.ToTensor()]))
print("Dataset size: " + str(len(landmark_dataset)))
print("Row 0: " + str(landmark_dataset[0]))
siamese_landmark_dataset = SiameseDataset(dataset=landmark_dataset)
sample = next(iter(siamese_landmark_dataset))
image0, image1, label = sample['image0'], sample['image1'], sample['label']
plt.imshow(image0.transpose(0, 2).transpose(0, 1))
plt.show()
plt.imshow(image1.transpose(0, 2).transpose(0, 1))
plt.show()
print(label)
print(landmark_dataset[0]['label'])
random_dataset = RandomDataset(input_dims=1)
print(random_dataset[0]['label'])
``` |
{
"source": "jparkerholder/ASEBO",
"score": 3
} |
#### File: ASEBO/asebo/policies.py
```python
import numpy as np
from scipy.linalg import toeplitz
import gym
from copy import copy
# Toeplitz policy from Choromanski (2018)
# Can only have 2 layers
class ToeplitzPolicy(object):
def __init__(self, policy_params):
self.init_seed = policy_params['seed']
self.ob_dim = policy_params['ob_dim']
self.h_dim = policy_params['h_dim']
self.ac_dim = policy_params['ac_dim']
self.w1 = self.weight_init(self.ob_dim + self.h_dim -1, policy_params['zeros'])
self.w2 = self.weight_init(self.h_dim * 2 - 1, policy_params['zeros'])
self.w3 = self.weight_init(self.ac_dim + self.h_dim - 1, policy_params['zeros'])
self.W1 = self.build_layer(self.h_dim, self.ob_dim, self.w1)
self.W2 = self.build_layer(self.h_dim, self.h_dim, self.w2)
self.W3 = self.build_layer(self.ac_dim, self.h_dim, self.w3)
self.b1 = self.weight_init(self.h_dim, policy_params['zeros'])
self.b2 = self.weight_init(self.h_dim, policy_params['zeros'])
self.params = np.concatenate([self.w1, self.b1, self.w2, self.b2, self.w3])
self.N = len(self.params)
def weight_init(self, d, zeros):
if zeros:
w = np.zeros(d)
else:
np.random.seed(self.init_seed)
w = np.random.rand(d) / np.sqrt(d)
return(w)
def build_layer(self, d1, d2, v):
# len v = d1 + d2 - 1
col = v[:d1]
row = v[(d1-1):]
W = toeplitz(col, row)
return(W)
def update(self, vec):
self.params += vec
self.w1 += vec[:len(self.w1)]
vec = vec[len(self.w1):]
self.b1 += vec[:len(self.b1)]
vec = vec[len(self.b1):]
self.w2 += vec[:len(self.w2)]
vec = vec[len(self.w2):]
self.b2 += vec[:len(self.b2)]
vec = vec[len(self.b2):]
self.w3 += vec
self.W1 = self.build_layer(self.h_dim, self.ob_dim, self.w1)
self.W2 = self.build_layer(self.h_dim, self.h_dim, self.w2)
self.W3 = self.build_layer(self.ac_dim, self.h_dim, self.w3)
def evaluate(self, X):
#if len(X.shape) == 1:
# X = X.reshape(X.shape[0], 1)
z1 = np.tanh(np.dot(self.W1, X) + self.b1)
z2 = np.tanh(np.dot(self.W2, z1) + self.b2)
return(np.tanh(np.dot(self.W3, z2)))
class LinearPolicy(object):
def __init__(self, policy_params):
self.init_seed = policy_params['seed']
self.ob_dim = policy_params['ob_dim']
self.h_dim = policy_params['h_dim']
self.ac_dim = policy_params['ac_dim']
self.w = self.weight_init(self.ob_dim * self.ac_dim, policy_params['zeros'])
self.W = self.w.reshape(self.ac_dim, self.ob_dim)
self.params = copy(self.w)
self.N = len(self.params)
def weight_init(self, d, zeros):
if zeros:
w = np.zeros(d)
else:
np.random.seed(self.init_seed)
w = np.random.rand(d) / np.sqrt(d)
return(w)
def update(self, vec):
self.w += vec
self.W = self.w.reshape(self.ac_dim, self.ob_dim)
self.params = copy(self.w)
def evaluate(self, X):
X = X.reshape(X.size, 1)
return(np.tanh(np.dot(self.W, X)))
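
# Minimal usage sketch; the `policy_params` keys mirror those read in __init__,
# and the dimensions/seed below are arbitrary illustrative values.
if __name__ == '__main__':
    params = {'seed': 0, 'ob_dim': 4, 'h_dim': 8, 'ac_dim': 2, 'zeros': False}
    policy = ToeplitzPolicy(params)
    action = policy.evaluate(np.ones(params['ob_dim']))  # shape (ac_dim,)
    policy.update(np.zeros(policy.N))  # perturb the flattened parameter vector
    print(action)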
``` |
{
"source": "jparker/therminator_server",
"score": 2
} |
#### File: migrations/versions/a59cf08dedf3_.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'a59cf08dedf3'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('email', sa.String(length=255), nullable=False),
sa.Column('password', sa.String(length=255), nullable=False),
sa.Column('api_key', sa.String(length=255), server_default=sa.text("encode(gen_random_bytes(32), 'hex')"), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('api_key'),
sa.UniqueConstraint('email')
)
op.create_table('homes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('timezone', sa.String(length=255), server_default='UTC', nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('user_id', 'name', name='user_id_name_unq')
)
op.create_table('sensors',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('home_id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('uuid', postgresql.UUID(), server_default=sa.text('gen_random_uuid()'), nullable=False),
sa.ForeignKeyConstraint(['home_id'], ['homes.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('home_id', 'name', name='home_id_name_unq'),
sa.UniqueConstraint('uuid')
)
op.create_table('readings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('sensor_id', sa.Integer(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=False),
sa.Column('int_temp', sa.Float(), server_default='0.0', nullable=False),
sa.Column('ext_temp', sa.Float(), nullable=False),
sa.Column('humidity', sa.Float(), server_default='0.0', nullable=False),
sa.Column('resistance', sa.Float(), server_default='0.0', nullable=False),
sa.CheckConstraint('humidity >= 0 AND humidity <= 100', name='humidity_between_0_and_100'),
sa.CheckConstraint('resistance >= 0', name='resistance_must_be_positive'),
sa.ForeignKeyConstraint(['sensor_id'], ['sensors.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('sensor_id', 'timestamp', name='sensor_id_timestamp_unq')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('readings')
op.drop_table('sensors')
op.drop_table('homes')
op.drop_table('users')
# ### end Alembic commands ###
```
#### File: therminator_server/therminator/exc.py
```python
from flask import jsonify
from http import HTTPStatus
from . import app
class ApiError(Exception):
def __init__(self, message, status_code=HTTPStatus.BAD_REQUEST):
self.message = message
self.status_code = status_code
def as_dict(self):
return dict(error=self.message)
@app.errorhandler(ApiError)
def handle_api_error(error):
response = jsonify(error.as_dict())
response.status_code = error.status_code
return response
```
#### File: therminator_server/therminator/views.py
```python
from datetime import datetime, time, timedelta
from flask import flash, redirect, render_template, request, url_for
from flask_login import (
confirm_login,
current_user,
fresh_login_required,
login_required,
login_user,
logout_user,
)
import pytz
from sqlalchemy.exc import IntegrityError
from urllib.parse import urljoin, urlparse
from . import app, db, login_manager
from .forms import SignInForm, RefreshSessionForm, SensorForm
from .models import User, Home, Sensor, Reading
@login_manager.user_loader
def load_user(user_id):
return User.query.get(user_id)
@app.context_processor
def expose_timedelta():
def _timedelta(**kwargs):
return timedelta(**kwargs)
return dict(timedelta=_timedelta)
@app.context_processor
def expose_utcnow():
def utcnow():
return datetime.utcnow()
return dict(utcnow=utcnow)
@app.template_filter('localtime')
def localtime(timestamp, timezone, fmt='%Y-%m-%d %H:%M %Z'):
tz = pytz.timezone(timezone)
local = pytz.utc.localize(timestamp, is_dst=None).astimezone(tz)
return local.strftime(fmt)
@app.template_filter('numerify')
def numerify(number, prec=1):
return '{:,.{prec}f}'.format(number, prec=prec)
@app.route('/sign-in', methods=['GET', 'POST'])
def sign_in():
form = SignInForm()
target = get_redirect_target()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and user.is_correct_password(form.password.data):
app.logger.info('User {!r} signed in'.format(user.email))
login_user(user, remember=form.remember.data)
flash('You have successfully signed in.', 'success')
return redirect_back('list_homes')
else:
app.logger.warning('User {!r} failed to sign in'.format(form.email.data))
flash('Invalid email address or password.', 'danger')
return render_template('sign_in.html', form=form, target=target)
@app.route('/sign-out', methods=['GET', 'DELETE'])
def sign_out():
app.logger.info('User {!r} signed out'.format(current_user.email))
logout_user()
flash('You have successfully signed out.', 'info')
return redirect(url_for('list_homes'))
@app.route('/refresh-session', methods=['GET', 'POST'])
def refresh_session():
form = RefreshSessionForm()
target = get_redirect_target()
if form.validate_on_submit():
if current_user.is_correct_password(form.password.data):
confirm_login()
flash('You have successfully reauthenticated.', 'success')
return redirect_back('list_homes')
else:
flash('Invalid password.', 'danger')
return render_template('refresh_session.html', form=form, target=target)
@app.route('/')
@login_required
def list_homes():
return render_template('homes/index.html', homes=current_user.homes)
@app.route('/homes/<int:home_id>')
@login_required
def show_home(home_id):
home = current_user.homes.filter_by(id=home_id).first_or_404()
return render_template('homes/show.html', home=home)
@app.route('/sensors/<int:sensor_id>', defaults={'date': None})
@app.route('/sensors/<int:sensor_id>/<date:date>')
@login_required
def show_sensor(sensor_id, date):
sensor = db.session.query(Sensor).filter_by(id=sensor_id) \
.join(Home).filter_by(user_id=current_user.id).first_or_404()
timezone = pytz.timezone(sensor.home.timezone)
if not date:
date = datetime.now(timezone).date()
midnight = timezone.localize(datetime.combine(date, time())) \
.astimezone(pytz.utc).replace(tzinfo=None)
readings = sensor.readings.filter(Reading.timestamp.between(
midnight,
midnight + timedelta(days=1),
)).order_by(Reading.timestamp)
return render_template(
'sensors/show.html',
current_sensor=sensor,
home=sensor.home,
readings=readings,
date=date,
)
@app.route('/homes/<int:home_id>/sensors/new')
@fresh_login_required
def new_sensor(home_id):
home = current_user.homes.filter_by(id=home_id).first_or_404()
form = SensorForm()
return render_template('sensors/new.html', form=form, home=home)
@app.route('/homes/<int:home_id>/sensors', methods=['POST'])
@fresh_login_required
def create_sensor(home_id):
home = current_user.homes.filter_by(id=home_id).first_or_404()
form = SensorForm()
if form.validate_on_submit():
sensor = Sensor(home=home, name=form.name.data)
db.session.add(sensor)
db.session.commit()
flash('Sensor {} created successfully.'.format(sensor.name), 'success')
return redirect(url_for('show_sensor', sensor_id=sensor.id))
flash('Sensor could not be created.', 'danger')
return render_template('sensors/new.html', form=form, home=home)
@app.route('/sensors/<int:sensor_id>/edit')
@fresh_login_required
def edit_sensor(sensor_id):
sensor = db.session.query(Sensor).filter_by(id=sensor_id) \
.join(Home).filter_by(user_id=current_user.id).first_or_404()
form = SensorForm()
form.process(obj=sensor)
return render_template(
'sensors/edit.html',
form=form,
current_sensor=sensor,
home=sensor.home,
)
@app.route('/sensors/<int:sensor_id>', methods=['POST', 'PATCH'])
@fresh_login_required
def update_sensor(sensor_id):
sensor = db.session.query(Sensor).filter_by(id=sensor_id) \
.join(Home).filter_by(user_id=current_user.id).first_or_404()
form = SensorForm()
if form.validate_on_submit():
sensor.name = form.name.data
db.session.add(sensor)
db.session.commit()
flash('Sensor {} updated successfully.'.format(sensor.name), 'success')
return redirect(url_for('show_sensor', sensor_id=sensor.id))
flash('Sensor could not be updated.', 'danger')
return render_template(
'sensors/edit.html',
form=form,
current_sensor=sensor,
home=sensor.home,
)
def get_redirect_target():
for target in request.values.get('next'), request.referrer:
if not target:
continue
if is_safe_url(target):
return target
return None
def redirect_back(default, **params):
target = request.form['next']
if not target or not is_safe_url(target):
target = url_for(default, **params)
return redirect(target)
def is_safe_url(url):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, url))
return test_url.scheme in ('http', 'https') \
and ref_url.netloc == test_url.netloc
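# Together, get_redirect_target(), redirect_back(), and is_safe_url() implement
# the usual "safe next-URL" pattern: only redirect targets that resolve to this
# application's own host are honored.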
``` |
{
"source": "jparkgeo/GEOG489",
"score": 3
} |
#### File: GEOG489/Week11/utils.py
```python
from tqdm import tqdm, trange
import osmnx as ox
import networkx as nx
from shapely.ops import cascaded_union
import geopandas as gpd
import pandas as pd
from shapely.geometry import Point
import numpy as np
def assign_max_speed_with_highway_type(row_):
"""
Assign the maximum speed of an edge based on its attribute 'highway'
# https://wiki.openstreetmap.org/wiki/Key:highway
Args:
row_: (dict) a row of OSMnx network data
Returns:
temp_speed_: (int) the maximum speed of an edge
"""
max_speed_per_type = {'motorway': 50,
'motorway_link': 30,
'trunk': 50,
'trunk_link': 30,
'primary': 40,
'primary_link': 30,
'secondary': 40,
'secondary_link': 30,
'tertiary': 40,
'tertiary_link': 20,
'residential': 30,
'living_street': 20,
'unclassified': 20
}
# if the variable is a list, obtain just the first one.
if type(row_['highway']) == list:
road_type = row_['highway'][0]
else:
road_type = row_['highway']
# If the maximum speed of the road_type is predefined.
if road_type in max_speed_per_type.keys():
temp_speed_ = max_speed_per_type[road_type]
else: # If not defined, just use 20 mph.
temp_speed_ = 20
return temp_speed_
def network_settings(network):
for u, v, k, data in network.edges(data=True, keys=True):
if 'maxspeed' in data.keys():
if type(data['maxspeed']) == list:
temp_speed = data['maxspeed'][0] # extract only numbers
else:
temp_speed = data['maxspeed']
temp_speed = temp_speed.split(' ')[0]
else:
temp_speed = assign_max_speed_with_highway_type(data)
data['maxspeed'] = temp_speed
data['maxspeed_meters'] = int(data['maxspeed']) * 26.8223 # MPH * 1.6 * 1000 / 60; meter per minute
data['time'] = float(data['length'] / data['maxspeed_meters'])
# create point geometries for the entire graph
for node, data in network.nodes(data=True):
data['geometry'] = Point(data['x'], data['y'])
return network
def step1_E2SFCA(supply, supply_attr, demand, demand_attr, mobility, thresholds, weights):
"""
Input:
- supply (GeoDataFrame): stores locations and attributes of supply
- supply_attr (str): the column of `supply` to be used for the analysis
- demand (GeoDataFrame): stores locations and attributes of demand
- demand_attr (str): the column of `demand` to be used for the analysis
- mobility (NetworkX MultiDiGraph): Network Dataset obtained from OSMnx
- thresholds (list): the list of threshold travel times e.g., [5, 10, 15]
    - weights (dict): keys are threshold travel times, values are the distance-decay weights
      e.g., {5: 1, 10: 0.68, 15: 0.22}
Output:
- supply_ (GeoDataFrame):
a copy of supply and it stores supply-to-demand ratio of each supply at `ratio` column
"""
# Your code here (Change the name of the variable according to the inputs)
supply_ = supply.copy(deep=True)
supply_['ratio'] = 0
for i in trange(supply.shape[0]):
# Create catchment areas from a given location
ctmt_area = calculate_catchment_area(mobility, supply.loc[i, 'nearest_osm'], thresholds)
# Calculate the population within the catchment area based on distance decay
ctmt_area_pops = 0
for c_idx, c_row in ctmt_area.iterrows():
temp_pop = demand.loc[demand['geometry'].centroid.within(c_row['geometry']), demand_attr].sum()
ctmt_area_pops += temp_pop * weights[c_idx]
# Calculate the number of hospital beds in each hospital
temp_supply = supply.loc[i, supply_attr]
# Calculate the number of hospital beds available for 100,000 people
supply_.at[i, 'ratio'] = temp_supply / ctmt_area_pops * 100000
return supply_
def step2_E2SFCA(result_step1, demand, mobility, thresholds, weights):
"""
Input:
- result_step1 (GeoDataFrame): stores locations and 'ratio' attribute that resulted in step1
- demand (GeoDataFrame): stores locations and attributes of demand
- mobility (NetworkX MultiDiGraph): Network Dataset obtained from OSMnx
- thresholds (list): the list of threshold travel times e.g., [5, 10, 15]
- weights (dict): keys: threshold travel time, values: weigths according to the threshold travel times
e.g., [5: 1, 10: 0.68, 15: 0.22]
Output:
- demand_ (GeoDataFrame):
a copy of demand and it stores the final accessibility measures of each demand location at `ratio` column
"""
# Your code here (Change the name of the variable according to the inputs)
demand_ = demand.copy(deep=True)
demand_['access'] = 0
for j in trange(demand.shape[0]):
# Create catchment areas from a given location
ctmt_area = calculate_catchment_area(mobility, demand.loc[j, 'nearest_osm'], thresholds)
# Sum the ratio within the catchment areas based on distance decay
ctmt_area_ratio = 0
for c_idx, c_row in ctmt_area.iterrows():
temp_ratio = result_step1.loc[result_step1['geometry'].centroid.within(c_row['geometry']), 'ratio'].sum()
ctmt_area_ratio += temp_ratio * weights[c_idx]
# Assign the summed ratio for each demand location
demand_.at[j, 'access'] = ctmt_area_ratio
return demand_
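# Hedged end-to-end sketch of the E2SFCA workflow; the place name, column names,
# thresholds, and weights below are illustrative assumptions:
#
#   G = network_settings(ox.graph_from_place('Champaign County, Illinois, USA',
#                                            network_type='drive'))
#   thresholds = [5, 10, 15]
#   weights = {5: 1, 10: 0.68, 15: 0.22}
#   step1 = step1_E2SFCA(hospitals, 'beds', tracts, 'pop', G, thresholds, weights)
#   access = step2_E2SFCA(step1, tracts, G, thresholds, weights)
#
# `hospitals` and `tracts` are GeoDataFrames that already carry a 'nearest_osm'
# column (the nearest network node for each feature).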
def calculate_catchment_area(network, nearest_osm, minutes, distance_unit='time'):
polygons = gpd.GeoDataFrame(crs="EPSG:4326")
# Create convex hull for each travel time (minutes), respectively.
for minute in minutes:
access_nodes = nx.single_source_dijkstra_path_length(network, nearest_osm, minute, weight=distance_unit)
convex_hull = gpd.GeoSeries(nx.get_node_attributes(network.subgraph(access_nodes), 'geometry')).unary_union.convex_hull
polygon = gpd.GeoDataFrame({'minutes': [minute], 'geometry': [convex_hull]}, crs="EPSG:4326")
polygon = polygon.set_index('minutes')
polygons = polygons.append(polygon)
# Calculate the differences between convex hulls which created in the previous section.
polygons_ = polygons.copy(deep=True)
for idx, minute in enumerate(minutes):
if idx != 0:
current_polygon = polygons.loc[[minute]]
previous_polygons = cascaded_union(polygons.loc[minutes[:idx], 'geometry'])
previous_polygons = gpd.GeoDataFrame({'geometry': [previous_polygons]}, crs="EPSG:4326")
diff_polygon = gpd.overlay(current_polygon, previous_polygons, how="difference")
if diff_polygon.shape[0] != 0:
polygons_.at[minute, 'geometry'] = diff_polygon['geometry'].values[0]
return polygons_.copy(deep=True)
def extract_edges_nodes_from_networkx(network):
nodes, edges = ox.graph_to_gdfs(network, nodes=True, edges=True, node_geometry=True)
return nodes, edges
def step1_2SFCA(supply, supply_attr, demand, demand_attr, mobility, threshold):
"""
Input:
- supply (GeoDataFrame): stores locations and attributes of supply
- supply_attr (str): the column of `supply` to be used for the analysis
- demand (GeoDataFrame): stores locations and attributes of demand
- demand_attr (str): the column of `demand` to be used for the analysis
- mobility (NetworkX MultiDiGraph): Network Dataset obtained from OSMnx
- threshold (int): threshold travel distance
Output:
- supply_ (GeoDataFrame):
a copy of supply and it stores supply-to-demand ratio of each supply at `ratio` column
"""
# Extract the nodes and edges of the network dataset for the future analysis.
nodes, edges = extract_edges_nodes_from_networkx(mobility)
supply_ = supply.copy(deep=True)
supply_['ratio'] = 0
for i in trange(supply.shape[0]):
# Create a catchment area from a given location
temp_nodes = nx.single_source_dijkstra_path_length(mobility, supply.loc[i, 'nearest_osm'], threshold,
weight='length')
access_nodes = nodes.loc[nodes.index.isin(temp_nodes.keys()), 'geometry']
access_nodes = gpd.GeoSeries(access_nodes.unary_union.convex_hull, crs="EPSG:5070")
# Calculate the population within the catchment area
temp_demand = demand.loc[demand['geometry'].centroid.within(access_nodes[0]), demand_attr].sum()
# Calculate the number of hospital beds in each hospital
temp_supply = supply.loc[i, supply_attr]
# Calculate the number of hospital beds available for 100,000 people
supply_.at[i, 'ratio'] = temp_supply / temp_demand * 100000
supply_['ratio'].replace(np.inf, 0, inplace=True)
return supply_
def step2_2SFCA(result_step1, demand, mobility, threshold):
"""
Input:
- result_step1 (GeoDataFrame): stores locations and 'ratio' attribute that resulted in step1
- demand (GeoDataFrame): stores locations and attributes of demand
- mobility (NetworkX MultiDiGraph): Network Dataset obtained from OSMnx
- threshold (int): threshold travel distance
Output:
- demand_ (GeoDataFrame):
a copy of demand and it stores the final accessibility measures of each demand location at `ratio` column
"""
# Extract the nodes and edges of the network dataset for the future analysis.
nodes, edges = extract_edges_nodes_from_networkx(mobility)
demand_ = demand.copy(deep=True)
demand_['access'] = 0
for j in trange(demand.shape[0]):
temp_nodes = nx.single_source_dijkstra_path_length(mobility, demand.loc[j, 'nearest_osm'], threshold,
weight='length')
access_nodes = nodes.loc[nodes.index.isin(temp_nodes.keys()), 'geometry']
access_nodes = gpd.GeoSeries(access_nodes.unary_union.convex_hull, crs="EPSG:5070")
accum_ratio = result_step1.loc[result_step1['geometry'].within(access_nodes[0]), 'ratio'].sum()
demand_.at[j, 'access'] = accum_ratio
return demand_
``` |
{
"source": "jparkhill/notebook-molecular-visualization",
"score": 2
} |
#### File: notebook-molecular-visualization/nbmolviz/__init__.py
```python
from __future__ import print_function
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os as _os
# package metadata
from . import _version
__version__ = _version.get_versions()['version']
__copyright__ = "Copyright 2017 Autodesk Inc."
__license__ = "Apache 2.0"
PACKAGE_PATH = _os.path.dirname(_os.path.abspath(__file__))
def _jupyter_nbextension_paths():
return [{
'section': 'notebook',
'src': 'static',
'dest': 'nbmolviz-js',
'require': 'nbmolviz-js/extension'
}]
def find_static_assets():
from warnings import warn
warn("""To use the nbmolviz-js nbextension, you'll need to update
the Jupyter notebook to version 4.2 or later.""")
return []
```
#### File: nbmolviz/mdtconfig/images.py
```python
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import subprocess
import sys
import os
import ipywidgets as ipy
import moldesign as mdt
MISSING = u'\u274C'
INSTALLED = u"\u2705"
WARNING = u"⚠️"
class DockerImageStatus(ipy.VBox):
def __init__(self, client):
self.client = client
images = self._get_images()
self.header = ipy.HTML(
'<span class="nbv-table-header" style="width:950px"">Image status</span>',
layout=ipy.Layout(align_items='flex-end'))
super().__init__([self.header] + [DockerImageView(im, client) for im in sorted(images)])
def _get_images(self):
return set(p.get_docker_image_path()
for p in mdt.compute.packages.executables + mdt.compute.packages.packages)
class DockerImageView(ipy.HBox):
LOADER = "<div class='nbv-loader' />"
DMKDIR = os.path.join(os.path.dirname(os.path.dirname(mdt.__file__)), 'DockerMakefiles')
def __init__(self, image, client):
self._err = False
self._client = client
self.image = image
self.status = ipy.HTML(layout=ipy.Layout(width="20px"))
self.html = ipy.HTML(value=image, layout=ipy.Layout(width="400px"))
self.html.add_class('nbv-monospace')
self.msg = ipy.HTML(layout=ipy.Layout(width='300px'))
self.button = ipy.Button(layout=ipy.Layout(width='100px'))
if mdt.compute.config.devmode:
self.button.on_click(self.rebuild)
else:
self.button.on_click(self.pull)
self._reactivate_button()
self._set_status_value()
super().__init__(children=[self.status, self.html, self.button, self.msg])
def rebuild(self, *args):
if not os.path.isdir(self.DMKDIR):
raise ValueError('Could not locate the docker makefiles. '
'To run MDT in development mode, '
'clone the molecular-design-toolkit repository and install it using '
'`pip install -e`.')
namefields = self.image.split(':')
assert len(namefields) == 2 and namefields[1] == 'dev'
self._disable_button('Rebuilding...')
thread = threading.Thread(target=self._run_rebuild, args=[namefields[0]])
thread.start()
def _run_rebuild(self, targetname):
try:
self.msg.value = 'Running <code>docker-make</code>'
            cmd = ['docker-make', '--tag', 'dev', targetname]
self.status.value = self.LOADER
print('> %s' % ' '.join(cmd))
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
cwd=self.DMKDIR)
for line in process.stdout:
sys.stdout.write(line)
except Exception as e:
self._err = True
self.msg.value = str(e)
raise
finally:
self._reactivate_button()
self._set_status_value()
if not self._err:
self.msg.value = 'Rebuilt <code>%s</code> successfully.' % self.image
def pull(self, *args):
self.button.disabled = True
self._err = False
thread = threading.Thread(target=self._run_pull)
thread.start()
def _run_pull(self):
from docker import errors
try:
self._disable_button('Pulling...')
self.status.value = self.LOADER
self.msg.value = 'Starting download.'
try:
response = self._client.pull(self.image, stream=True, decode=True)
self._watch_pull_logs(response)
except errors.NotFound as exc:
self._err = True
self.msg.value = 'ERROR: %s' % exc.explanation
except Exception as e:
self._err = True
            self.msg.value = str(e)
raise
finally:
self._set_status_value()
self._reactivate_button()
if not self._err:
self.msg.value = 'Pull successful.'
def _disable_button(self, description):
self.button.disabled = True
self.button.description = description
self.button.style.font_weight = '100'
self.button.style.button_color = 'lightgray'
def _reactivate_button(self):
self.button.disabled = False
if self._client is None:
self.button.description = 'no connection'
self.button.disabled = True
self.button.style.button_color = '#FAFAFA'
else:
if mdt.compute.config.devmode:
self.button.description = 'Rebuild image'
else:
self.button.description = 'Pull image'
self.button.style.font_weight = '400'
self.button.style.button_color = '#9feeb2'
def _set_status_value(self):
from docker import errors
if self._client is None:
self.status.value = 'n/a'
else:
try:
imginfo = self._client.inspect_image(self.image)
except errors.ImageNotFound:
if self._err:
self.status.value = WARNING
else:
self.status.value = MISSING
else:
self.status.value = INSTALLED
def _watch_pull_logs(self, stream):
found = set()
inprogress = set()
done = set()
for item in stream:
if 'errorDetail' in item or 'error' in item:
self.msg.value = item
self._err = True
elif 'status' in item and 'id' in item: # for pulling images
imgid = item['id']
found.add(imgid)
stat = item['status'].strip()
fields = stat.split()
if fields[0:2] in (['Pull','complete'], ['Already','exists']):
done.add(imgid)
self.msg.value = 'Pulling from repository: %s/%s layers complete' % (len(done), len(found))
elif fields[0] in ('Pulling','Extracting', 'Downloading'):
inprogress.add(imgid)
```
#### File: nbmolviz/uielements/plotting.py
```python
from builtins import zip
from copy import deepcopy
from moldesign.units import *
def grid_map(f,v,dims,grids):
"""
Map function values along a grid
:param f: function to be evaluated, call signature f(v)
:param v: vector that sets the static coordinates
:param dims: ndims-length list of dimensions to vary
:param grids: ndims-length list of grid values for each dimension
:return: function value grid
"""
vmod = deepcopy(v)
for idx, vals in enumerate(zip(*[g.flat for g in grids])):
for idim, val in zip(dims, vals): vmod[idim] = val
if idx == 0:
firstf = f(vmod)
gridZ = np.zeros(grids[0].shape) * firstf
gridZ.flat[0] = firstf
else:
gridZ.flat[idx] = f(vmod)
return gridZ
def function_slice(f,v,dims,ranges):
"""
Return an arbitrary dimensional slice of function values
:param f: function to be evaluated, call signature f(v)
:param v: vector that sets the static coordinates
:param dims: ndims-length list of dimensions to vary
:param ranges: ndims-list of values along those dimensions
:return: gridpoints, function values
"""
assert len(dims)==len(ranges)
if len(ranges)>1:
grids = np.meshgrid(*ranges)
else:
grids=list(ranges)
for igrid,(r,g) in enumerate(zip(ranges,grids)):
grids[igrid] = units_transfer(r,g)
gridZ = grid_map(f,v,dims,grids)
return grids,gridZ
```
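A minimal usage sketch for `function_slice` (not part of the original module): the toy quadratic function, the dimension indices, and the unitless grid ranges below are invented for illustration, and it is assumed that `units_transfer` passes plain NumPy arrays through unchanged.
```python
# Hypothetical example of slicing a toy function with function_slice.
import numpy as np

def energy(v):
    # toy quadratic "energy" of a 3-component vector (illustrative only)
    return float(np.dot(v, v))

v0 = np.zeros(3)                        # static coordinates
dims = [0, 1]                           # vary the first two components
ranges = [np.linspace(-1.0, 1.0, 25),
          np.linspace(-1.0, 1.0, 25)]
grids, z = function_slice(energy, v0, dims, ranges)
# grids[0], grids[1] hold the meshgrid coordinates; z holds f evaluated on the slice
```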
#### File: nbmolviz/viewers/common.py
```python
from past.builtins import basestring
from ..base.base_widget import MessageWidget
from moldesign import utils
from .. import colormaps
class BaseViewer(MessageWidget):
def colormap(self, atomvalues, atoms=None, mplmap='auto', categorical=None, save=True):
""" Color atoms according to categorical or numeric data
Args:
atomvalues (callable OR list or str): Either:
              - a callable that takes an atom and returns a value,
              - a list of values, one per atom
- the name of an atomic property (e.g., 'residue' or 'mass')
atoms (moldesign.molecules.AtomContainer): atoms to color (default: self.mol.atoms)
            mplmap (str): name of the matplotlib colormap to use if colors aren't explicitly
                specified
categorical (bool): If None (the default), automatically detect whether the
data is categorical or numerical. Otherwise, use this flag to force
interpretation of the data as categorical (True) or numerical (False)
            save (bool): permanently color these atoms this way (until self.unset_color is called)
Returns:
dict: mapping of categories to colors
"""
atoms = utils.if_not_none(atoms, self.mol.atoms)
if isinstance(atomvalues, basestring):
# shortcut to use strings to access atom attributes, i.e. "ff.partial_charge"
attrs = atomvalues.split('.')
atomvalues = []
for atom in atoms:
obj = atom
for attr in attrs:
obj = getattr(obj, attr)
atomvalues.append(obj)
elif callable(atomvalues):
atomvalues = list(map(atomvalues, atoms))
colors = colormaps.colormap(atomvalues, mplmap=mplmap, categorical=categorical)
self.set_colors(colors, atoms=atoms, save=save)
return {v:c for v,c in zip(atomvalues, colors)}
color_by = colormap
```
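A sketch of the three ways `colormap`/`color_by` can be fed values (the `mol.draw3d()` helper, `mol.num_atoms`, `atom.atnum`, and the `'residue.resname'` attribute chain are assumptions for illustration, not guaranteed names):
```python
# Hypothetical usage of BaseViewer.colormap / color_by on an existing viewer instance.
viewer = mol.draw3d()                                   # any BaseViewer subclass (assumed helper)

viewer.color_by('mass')                                 # 1) name of an atomic property
viewer.color_by('residue.resname', categorical=True)    #    nested attribute lookup also works

viewer.color_by(lambda atom: atom.atnum)                # 2) callable evaluated for each atom

charges = [0.1 * i for i in range(mol.num_atoms)]
mapping = viewer.colormap(charges, mplmap='viridis')    # 3) explicit per-atom values
print(mapping)                                          # value -> color dictionary
```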
#### File: nbmolviz/viewers/orbital_viewer.py
```python
from __future__ import print_function, absolute_import, division, unicode_literals
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from IPython.display import display as display_now
import numpy as np
import ipywidgets as ipy
import traitlets
import io
from moldesign import units as u
from moldesign.mathutils import padded_grid
from ..viewers import GeometryViewer, translate_color
from ..widget_utils import process_widget_kwargs
from ..uielements.components import HBox, VBox
from . import ViewerContainer
class OrbitalViewer(ViewerContainer):
"""
Subclass of the standard geometry viewer with added UI for rendering orbitals
Args:
mol (mdt.Molecule): a molecule with A) orbitals, and
B) an energy model with calculate_orbital_grid
display (bool): immediately draw the viewer in the notebook
**kwargs (dict): kwargs for the viewer
"""
current_orbital = traitlets.Any() # reference to the currently displayed orbital
isoval = traitlets.Float(0.01)
orb_opacity = traitlets.Float(0.8)
    negative_color = traitlets.Union([traitlets.Integer(), traitlets.Unicode()], default_value='red')
    positive_color = traitlets.Union([traitlets.Integer(), traitlets.Unicode()], default_value='blue')
    numpoints = traitlets.Integer(40, max=120, min=10)
def __init__(self, mol, display=False, **kwargs):
self.type_dropdown = None
self.orblist = None
self.isoval_selector = None
self.opacity_selector = None
self.viewer = GeometryViewer(mol=mol, **process_widget_kwargs(kwargs))
self.mol = mol
self.wfn = mol.wfn # cache this directly because the molecule's state may change
self._restyle_orbital() # sets defaults for orbital spec
self._cached_cubefiles = {}
self.uipane = self._make_ui_pane(self.viewer.layout.height)
hb = HBox([self.viewer, self.uipane])
super().__init__([hb], viewer=self.viewer)
if display:
display_now(self)
def draw_orbital(self, orbital):
"""Display a molecular orbital
Args:
orbital (moldesign.orbitals.Orbital): orbital to draw
"""
# This triggers self._redraw_orbital
self.current_orbital = orbital
@traitlets.observe('current_orbital', 'numpoints')
def _redraw_orbital(self, *args):
self.status_element.value = '<div class="nbv-loader"/>'
try:
if self.current_orbital is None:
self.viewer.cubefile = ''
return
orbkey = (id(self.current_orbital), self.numpoints)
if orbkey not in self._cached_cubefiles:
grid, values = self._calc_orb_grid(self.current_orbital)
cubefile = self._grid_to_cube(grid, values)
self._cached_cubefiles[orbkey] = cubefile
else:
cubefile = self._cached_cubefiles[orbkey]
self.viewer.cubefile = cubefile
except Exception as e:
self.status_element.value = u'⚠ %s' % e
else:
self.status_element.value = ''
@traitlets.observe('negative_color',
'positive_color',
'isoval',
'orb_opacity')
def _restyle_orbital(self, *args):
# this triggers the redraw
self.viewer.volumetric_style = {
'iso_val': self.isoval,
'opacity': self.orb_opacity,
'negativeVolumetricColor': self.negative_color,
'positiveVolumetricColor': self.positive_color}
def _calc_orb_grid(self, orbital):
""" Calculate grid of values for this orbital
Args:
            orbital (moldesign.Orbital): orbital to calculate grid for
        Returns:
            VolumetricGrid: grid that amplitudes were computed on
Vector[1/length**1.5]: list of orbital amplitudes at each point on grid
"""
# NEWFEATURE: limit grid size based on the non-zero atomic centers. Useful for localized
# orbitals, which otherwise require high resolution
grid = padded_grid(self.wfn.positions,
padding=3.0 * u.angstrom,
npoints=self.numpoints)
with np.errstate(under='ignore'):
values = orbital(grid.allpoints())
return grid, values
@staticmethod
def _grid_to_cube(grid, values):
""" Given a grid of values, create a gaussian cube file
Args:
grid (utils.VolumetricGrid): grid of points
values (Iterable): iterator over grid values, in the same order as grid points
Returns:
str: contents of the cube file
"""
fobj = io.StringIO()
# First two header lines
print('CUBE File\nGenerated by nbmolviz', file=fobj)
# third line: number of atoms (0, here) + origin of grid
print('-1 %f %f %f' % tuple(grid.origin.value_in(u.angstrom)), file=fobj)
# lines 4-7: number of points in each direction and basis vector for each
# basis vectors are negative to indicate angstroms
print('%d %f 0.0 0.0' % (-grid.xpoints, grid.dx.value_in(u.angstrom)), file=fobj)
print('%d 0.0 %f 0.0' % (-grid.ypoints, grid.dy.value_in(u.angstrom)), file=fobj)
print('%d 0.0 0.0 %f' % (-grid.zpoints, grid.dz.value_in(u.angstrom)), file=fobj)
# Next is a line per atom
# We put just one atom here - it shouldn't be rendered
print('6 0.000 0.0 0.0 0.0', file=fobj)
# Next, indicate that there's just one orbital
print('1 1', file=fobj)
# finally, write out all the grid values
# ival = 0
valueiter = iter(values)
for ix in range(grid.xpoints):
for iy in range(grid.ypoints):
for iz in range(grid.zpoints):
print(str(next(valueiter)), end=' ', file=fobj)
# ival += 1
# if ival%6 == 0: print >> fobj #newline
if iz % 6 == 5:
fobj.write('\n')
fobj.write('\n')
v = fobj.getvalue()
fobj.close()
return v
def _make_ui_pane(self, hostheight):
layout = ipy.Layout(width='325px',
height=str(int(hostheight.rstrip('px')) - 50) + 'px')
#element_height = str(int(hostheight.rstrip('px')) - 125) + 'px'
element_height = None
# NOTE - element_height was used for the listbox-style orblist.
# HOWEVER ipywidgets 6.0 only displays those as a dropdown.
# This is therefore disabled until we can display listboxes again. -- AMV 7/16
# Orbital set selector
self.status_element = ipy.HTML(layout=ipy.Layout(width='inherit', height='20px'))
orbtype_label = ipy.Label("Orbital set:")
self.type_dropdown = ipy.Dropdown(options=list(self.wfn.orbitals.keys()))
initialtype = 'canonical'
if initialtype not in self.type_dropdown.options:
            initialtype = next(iter(self.type_dropdown.options))
self.type_dropdown.value = initialtype
self.type_dropdown.observe(self.new_orb_type, 'value')
# List of orbitals in this set
orblist_label = ipy.Label("Orbital:")
self.orblist = ipy.Dropdown(options={None: None},
layout=ipy.Layout(width=layout.width, height=element_height))
traitlets.link((self.orblist, 'value'), (self, 'current_orbital'))
# Isovalue selector
isoval_label = ipy.Label('Isovalue:')
self.isoval_selector = ipy.FloatSlider(min=0.0, max=0.075,
value=0.01, step=0.00075,
readout_format='.4f',
layout=ipy.Layout(width=layout.width))
traitlets.link((self.isoval_selector, 'value'), (self, 'isoval'))
# Opacity selector
opacity_label = ipy.Label('Opacity:')
self.opacity_selector = ipy.FloatSlider(min=0.0, max=1.0,
value=0.8, step=0.01,
readout_format='.2f',
layout=ipy.Layout(width=layout.width))
traitlets.link((self.opacity_selector, 'value'), (self, 'orb_opacity'))
# Resolution selector
resolution_label = ipy.Label("Grid resolution:", layout=ipy.Layout(width=layout.width))
self.orb_resolution = ipy.Text(layout=ipy.Layout(width='75px',
positioning='bottom'))
self.orb_resolution.value = str(self.numpoints)
self.resolution_button = ipy.Button(description='Update resolution')
self.resolution_button.on_click(self.change_resolution)
traitlets.directional_link((self, 'numpoints'), (self.orb_resolution, 'value'),
transform=str)
self.uipane = ipy.VBox([self.status_element,
orbtype_label, self.type_dropdown,
orblist_label, self.orblist,
isoval_label, self.isoval_selector,
opacity_label, self.opacity_selector,
resolution_label, self.orb_resolution, self.resolution_button])
self.new_orb_type()
return self.uipane
def new_orb_type(self, *args):
"""Create list of available orbitals when user selects a new type
"""
wfn = self.wfn
newtype = self.type_dropdown.value
neworbs = wfn.orbitals[newtype]
orblist = collections.OrderedDict()
orblist[None] = None
for i, orb in enumerate(neworbs):
if hasattr(orb, 'unicode_name'):
orbname = orb.unicode_name
else:
orbname = orb.name
meta = ''
if orb.energy is not None:
meta = '{:.02fP}'.format(orb.energy.defunits())
if orb.occupation is not None:
if meta: meta += ', '
meta += 'occ %.2f' % orb.occupation
if meta:
desc = '%d. %s (%s)' % (i, orbname, meta)
else:
desc = '%d. %s' % (i, orbname)
orblist[desc] = orb
self.orblist.value = None
self.orblist.options = orblist
def change_resolution(self, *args):
self.numpoints = int(self.orb_resolution.value)
```
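A usage sketch for `OrbitalViewer`, assuming `mol` already carries a populated `mol.wfn` (the `mol.homo` index and the `'canonical'` orbital set name are assumptions here):
```python
# Hypothetical example: drive the orbital viewer programmatically instead of via the UI.
viewer = OrbitalViewer(mol, display=True)

homo = mol.wfn.orbitals['canonical'][mol.homo]   # assumed attribute names
viewer.draw_orbital(homo)                        # triggers _redraw_orbital via traitlets

viewer.isoval = 0.02          # restyle only, no recomputation
viewer.orb_opacity = 0.6
viewer.numpoints = 60         # changes the grid, so the cube file is recomputed
```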
#### File: nbmolviz/viewers/viewercontainer.py
```python
from __future__ import print_function
from future.builtins import *
from future.standard_library import install_aliases
install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipywidgets as ipy
from moldesign import utils
from ..uielements.components import VBox
from . import GeometryViewer
class ViewerContainer(VBox):
"""
Container for one or more viewers. Delegates calls to the component viewers
"""
def __reduce__(self):
"""These don't gat passed around,
so it reduces to NOTHING"""
return utils.make_none, tuple()
def __init__(self, children, viewer=None, graphviewer=None, **kwargs):
if 'layout' not in kwargs:
kwargs['layout'] = ipy.Layout(flex_flow='column', width='100%')
super().__init__(children=children, **kwargs)
self.viewer = viewer
self.graphviewer = graphviewer
@utils.args_from(GeometryViewer.set_color)
def set_color(self, *args, **kwargs):
if self.graphviewer: self.graphviewer.set_color(*args, **kwargs)
if self.viewer: self.viewer.set_color(*args, **kwargs)
@utils.args_from(GeometryViewer.set_color)
def color_by(self, *args, **kwargs):
if self.graphviewer: self.graphviewer.color_by(*args, **kwargs)
if self.viewer: self.viewer.color_by(*args, **kwargs)
@utils.args_from(GeometryViewer.set_color)
def set_colors(self, *args, **kwargs):
if self.graphviewer: self.graphviewer.set_colors(*args, **kwargs)
if self.viewer: self.viewer.set_colors(*args, **kwargs)
@utils.args_from(GeometryViewer.unset_color)
def unset_color(self, *args, **kwargs):
if self.graphviewer: self.graphviewer.unset_color(*args, **kwargs)
if self.viewer: self.viewer.unset_color(*args, **kwargs)
def __getattr__(self, item):
if item != 'viewer' and self.viewer is not None:
return getattr(self.viewer, item)
else:
raise AttributeError(item)
``` |
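A minimal sketch of the `__getattr__` delegation: any attribute that is not defined on the container falls through to the wrapped viewer (`ToyViewer` below is invented for illustration).
```python
# Hypothetical illustration of attribute delegation in ViewerContainer.
class ToyViewer:
    def spin(self, on=True):
        return 'spinning' if on else 'stopped'

container = ViewerContainer(children=[], viewer=ToyViewer())
print(container.spin())   # not found on the container, so it resolves to ToyViewer.spin
```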
{
"source": "jparkie/cookiecutter-c99",
"score": 2
} |
#### File: functional/{{cookiecutter.lib_slug}}_cffi/compile_cffi.py
```python
import subprocess
import sys
import cffi
if __name__ == "__main__":
path_include_dir = sys.argv[1]
path_library_dir = sys.argv[2]
lib_cffi = cffi.FFI()
preprocess = subprocess.run(
"gcc -E -P %s/{{cookiecutter.lib_slug}}/*.h" % path_include_dir,
check=True,
shell=True,
stdout=subprocess.PIPE,
universal_newlines=True,
)
csource = preprocess.stdout
csource = csource[csource.find("typedef void {{cookiecutter.lib_slug.upper()}}_UNUSED;"):]
csource = "\n".join([
csource,
"""
"""
])
lib_cffi.cdef(csource)
lib_cffi.set_source(
"_{{cookiecutter.lib_slug}}_cffi",
"""
#include "{{cookiecutter.lib_slug}}/{{cookiecutter.lib_slug}}.h"
""",
libraries=["{{cookiecutter.lib_slug}}"],
include_dirs=[path_include_dir, ],
library_dirs=[path_library_dir, ],
)
lib_cffi.compile()
``` |
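The build script expects the header include directory and the compiled library directory as positional arguments; a sketch of driving it from Python (the paths are placeholders, and the generated module name depends on the cookiecutter `lib_slug`):
```python
# Hypothetical invocation of the CFFI build step.
import subprocess
import sys

subprocess.run(
    [sys.executable, "compile_cffi.py", "/path/to/include", "/path/to/lib"],
    check=True,
)
# The generated extension is then importable as:
#   from _<lib_slug>_cffi import ffi, lib
```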
{
"source": "jparklev/jesse",
"score": 2
} |
#### File: jesse/research/__init__.py
```python
from .get_candles import get_candles
def init():
"""
"""
from pydoc import locate
import os
import sys
import jesse.helpers as jh
# Python version validation.
if jh.python_version() < 3.6:
print(
jh.color(
                'Jesse has not been tested with your Python version ({}), hence it may not work properly. Consider upgrading to >= 3.7'.format(
jh.python_version()),
'red'
)
)
# fix directory issue
sys.path.insert(0, os.getcwd())
ls = os.listdir('.')
is_jesse_project = 'strategies' in ls and 'config.py' in ls and 'storage' in ls and 'routes.py' in ls
if not is_jesse_project:
print(
jh.color(
'Invalid directory. To use Jesse inside notebooks, create notebooks inside the root of a Jesse project.',
'red'
)
)
if is_jesse_project:
local_config = locate('config.config')
from jesse.config import set_config
set_config(local_config)
```
#### File: strategies/Test29/__init__.py
```python
from jesse.strategies import Strategy
# test_on_route_increased_position_and_on_route_reduced_position_and_strategy_vars part 1 - BTCUSD
class Test29(Strategy):
"""
"""
def __init__(self):
super().__init__()
self.vars['should_short'] = False
self.vars['should_long'] = False
def should_long(self):
return self.vars['should_long']
def should_short(self):
return self.vars['should_short']
def go_long(self):
self.buy = 1, self.price
self.take_profit = 1, self.price + 10
def go_short(self):
self.sell = 1, self.price
self.stop_loss = 1, self.price + 10
def on_route_increased_position(self, strategy):
"""
:param strategy:
"""
# setting it to True means we'll open a position on NEXT candle
self.vars['should_long'] = True
def on_route_reduced_position(self, strategy):
"""
:param strategy:
"""
# setting it to True means we'll open a position on NEXT candle
self.vars['should_short'] = True
def should_cancel(self):
return False
def on_take_profit(self):
self.vars['should_long'] = False
self.vars['should_short'] = False
def on_stop_loss(self):
self.vars['should_long'] = False
self.vars['should_short'] = False
``` |
{
"source": "jparra5/dra_ut_decision_gate",
"score": 2
} |
#### File: jparra5/dra_ut_decision_gate/sauce.py
```python
import requests
import sys
import json
import time
import os
import base64
import urllib2
import logging
import logging.handlers
import hmac
from hashlib import md5
#from prettytable import PrettyTable
#ascii color codes for output
LABEL_GREEN = '\033[0;32m'
LABEL_YELLOW = '\033[4;33m'
LABEL_RED = '\033[0;31m'
LABEL_COLOR = '\033[0;33m'
LABEL_NO_COLOR = '\033[0m'
STARS = "**********************************************************************"
#test result url
TEST_URL = "https://saucelabs.com/jobs/%s?auth=%s"
#environment saucelabs variables
SAUCE_URL = "https://saucelabs.com/rest/v1/"
SAUCE_USER = os.environ.get('SAUCE_USERNAME')
SAUCE_ACCESS_KEY = os.environ.get('SAUCE_ACCESS_KEY')
START_TIME = os.environ.get('INIT_START_TIME')
DOWNLOAD_ASSETS = os.environ.get('DOWNLOAD_ASSETS')
chunk_size = 1024
exit_flag = 0
#browser test stat vars
FIREFOX_PASS = 0
FIREFOX_TOTAL = 0
CHROME_PASS = 0
CHROME_TOTAL = 0
IE_PASS = 0
IE_TOTAL = 0
SAFARI_PASS = 0
SAFARI_TOTAL = 0
JOB_DATA = "job_data_collection.json"
def request(url):
base64string = base64.encodestring('%s:%s' % (SAUCE_USER, SAUCE_ACCESS_KEY)).replace('\n', '')
headers = {'Authorization': 'Basic %s' % base64string}
return requests.get(url, headers=headers)
def download_log(url, job):
base64string = base64.encodestring('%s:%s' % (SAUCE_USER, SAUCE_ACCESS_KEY)).replace('\n', '')
headers = {'Authorization': 'Basic %s' % base64string}
r = requests.get(url, headers=headers, stream=True)
with open("selenium-server-" + job + ".log", 'wb') as fd:
for chunk in r.iter_content(chunk_size):
fd.write(chunk)
def download_video(url, job):
base64string = base64.encodestring('%s:%s' % (SAUCE_USER, SAUCE_ACCESS_KEY)).replace('\n', '')
headers = {'Authorization': 'Basic %s' % base64string}
r = requests.get(url, headers=headers, stream=True)
with open("video-" + job + ".flv", 'wb') as fd:
for chunk in r.iter_content(chunk_size):
fd.write(chunk)
def get_jobs():
try:
response = request(SAUCE_URL + SAUCE_USER + "/jobs?from=" + START_TIME)
response.raise_for_status()
return response
except requests.exceptions.RequestException as e:
print e
sys.exit(1)
def get_job_status(job):
try:
response = request(SAUCE_URL + SAUCE_USER + "/jobs/" + job)
response.raise_for_status()
response_json = response.json()
append_job_json(response.content)
return response_json
except requests.exceptions.RequestException as e:
print e
sys.exit(1)
def get_job_assets(job):
try:
LOGGER.info("Getting selenium log for job: " + job)
download_log(SAUCE_URL + SAUCE_USER + "/jobs/" + job + "/assets/selenium-server.log", job)
LOGGER.info("Getting video for job: " + job)
download_video(SAUCE_URL + SAUCE_USER + "/jobs/" + job + "/assets/video.flv", job)
except requests.exceptions.RequestException as e:
print e
sys.exit(1)
def output_job(job):
global exit_flag
auth_key = hmac.new(SAUCE_USER + ":" + SAUCE_ACCESS_KEY, job, md5).hexdigest()
test_info = get_job_status(job)
browser = test_info["browser"]
test_status = test_info["consolidated_status"]
if test_status == "passed":
print LABEL_GREEN
LOGGER.info("Job %s passed successfully." % job)
LOGGER.info("See details at: " + TEST_URL % (job, auth_key))
print LABEL_NO_COLOR
analyze_browser_results(0, browser)
elif test_status == "complete":
print LABEL_GREEN
LOGGER.info("Job %s completed successfully." % job)
LOGGER.info("See details at: " + TEST_URL % (job, auth_key))
print LABEL_NO_COLOR
analyze_browser_results(0, browser)
#for some reason job is still running
elif test_status == "in progress":
print LABEL_YELLOW
LOGGER.info("Job %s is still in progress." % job)
LOGGER.info("See details at: " + TEST_URL % (job, auth_key))
print LABEL_NO_COLOR
#job failed
else:
print LABEL_RED
LOGGER.error("There was problem with job %s." % job)
LOGGER.info("See details at: " + TEST_URL % (job, auth_key))
print LABEL_NO_COLOR
analyze_browser_results(1, browser)
exit_flag = 1
#download assets
if DOWNLOAD_ASSETS == "true":
get_job_assets(job)
def append_job_json(job_json):
with open(JOB_DATA, 'a') as fd:
fd.write(job_json + ",")
fd.close()
def analyze_browser_results(status, browser):
global FIREFOX_PASS
global FIREFOX_TOTAL
global CHROME_PASS
global CHROME_TOTAL
global IE_PASS
global IE_TOTAL
global SAFARI_PASS
global SAFARI_TOTAL
if browser == "firefox":
if status == 0:
FIREFOX_PASS += 1
FIREFOX_TOTAL += 1
if browser == "googlechrome":
if status == 0:
CHROME_PASS += 1
CHROME_TOTAL += 1
if browser == "iexplore":
if status == 0:
IE_PASS += 1
IE_TOTAL += 1
if browser == "safari":
if status == 0:
SAFARI_PASS += 1
SAFARI_TOTAL += 1
def setup_logging():
logger = logging.getLogger('pipeline')
logger.setLevel(logging.INFO)
# if logmet is enabled, send the log through syslog as well
if os.environ.get('LOGMET_LOGGING_ENABLED'):
handler = logging.handlers.SysLogHandler(address='/dev/log')
logger.addHandler(handler)
# don't send debug info through syslog
handler.setLevel(logging.INFO)
# in any case, dump logging to the screen
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
return logger
#Start
logging.captureWarnings(True)
LOGGER = setup_logging()
LOGGER.info("Getting jobs...")
jobs_json = get_jobs().json()
#loop through each job in the list and process its assets
with open(JOB_DATA, 'wb') as fd:
fd.write("[")
fd.close()
LOGGER.info("Processing jobs...")
for key in jobs_json:
output_job(key["id"])
with open(JOB_DATA, 'a') as fd:
fd.write("{}]")
fd.close()
#log test results
#print LABEL_GREEN
#print STARS
#results_table = PrettyTable(["Browser", "Jobs Succeeded", "Jobs Failed", "Total Jobs"])
#results_table.align["Browser"] = "l"
#results_table.add_row(["Firefox", FIREFOX_PASS, FIREFOX_TOTAL - FIREFOX_PASS, FIREFOX_TOTAL])
#results_table.add_row(["Google Chrome", CHROME_PASS, CHROME_TOTAL - CHROME_PASS, CHROME_TOTAL])
#results_table.add_row(["Internet Explorer", IE_PASS, IE_TOTAL - IE_PASS, IE_TOTAL])
#results_table.add_row(["Safari", SAFARI_PASS, SAFARI_TOTAL - SAFARI_PASS, SAFARI_TOTAL])
#print results_table
#print STARS
#print LABEL_NO_COLOR
#log test results
print STARS
print LABEL_GREEN
print '%d out of %d jobs succeeded on Firefox.' % (FIREFOX_PASS, FIREFOX_TOTAL)
if FIREFOX_TOTAL - FIREFOX_PASS > 0:
print LABEL_RED
print '%d jobs failed.' % (FIREFOX_TOTAL - FIREFOX_PASS)
print LABEL_NO_COLOR
print STARS
print LABEL_GREEN
print '%d out of %d jobs succeeded on Google Chrome.' % (CHROME_PASS, CHROME_TOTAL)
if CHROME_TOTAL - CHROME_PASS > 0:
print LABEL_RED
print '%d jobs failed.' % (CHROME_TOTAL - CHROME_PASS)
print LABEL_NO_COLOR
print STARS
print LABEL_GREEN
print '%d out of %d jobs succeeded on Internet Explorer.' % (IE_PASS, IE_TOTAL)
if IE_TOTAL - IE_PASS > 0:
print LABEL_RED
print '%d jobs failed.' % (IE_TOTAL - IE_PASS)
print LABEL_NO_COLOR
print STARS
print LABEL_GREEN
print '%d out of %d jobs succeeded on Safari.' % (SAFARI_PASS, SAFARI_TOTAL)
if SAFARI_TOTAL - SAFARI_PASS > 0:
print LABEL_RED
print '%d jobs failed.' % (SAFARI_TOTAL - SAFARI_PASS)
print LABEL_NO_COLOR
print STARS
#exit with appropriate status
sys.exit(exit_flag)
``` |
{
"source": "JParramore/search-engine",
"score": 2
} |
#### File: alembic/versions/a9422f5dde7c_add_page_location_word.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a9422f5dde7c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('page',
sa.Column('created_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('url', sa.String(), nullable=True),
sa.Column('title', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('word',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('stem', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('location',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('position', sa.String(), nullable=True),
sa.Column('word_id', sa.Integer(), nullable=True),
sa.Column('page_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['page_id'], ['page.id'], ),
sa.ForeignKeyConstraint(['word_id'], ['word.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('location')
op.drop_table('word')
op.drop_table('page')
# ### end Alembic commands ###
```
#### File: JParramore/search-engine/indexer.py
```python
import asyncio
from db.services import PageService, LocationService, WordService
from db.session import get_session
# https://stackoverflow.com/a/53256058
# allow functions to be 'fired and forgotten'
def background(f):
from functools import wraps
@wraps(f)
def wrapped(*args, **kwargs):
loop = asyncio.get_event_loop()
if callable(f):
return loop.run_in_executor(None, f, *args, **kwargs)
else:
raise TypeError('Task must be a callable')
return wrapped
@background
def add_to_index(url, title, text, description):
'''
Add a page to our index. Add any new words as well as their locations
on the page. If the page already exists in our index, presume it is stale.
'''
session = get_session()
page_service = PageService(session)
location_service = LocationService(session)
word_service = WordService(session)
existing_page = page_service.find(url=url)
if existing_page:
# seen this page before? keep it up to date by removing its locations
location_service.clean_up(existing_page)
location_service.save()
page = existing_page
else:
page = page_service.new(url=url, title=title, description=description)
for index, stem in enumerate(text.lower().split()):
word = word_service.find(stem=stem)
if not word:
word = word_service.new(stem=stem)
location_service.new(page=page, word=word, position=index)
page_service.save()
```
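Because `add_to_index` is wrapped with `@background`, a call returns immediately and the indexing runs on the event loop's default executor; a usage sketch (the page contents below are invented):
```python
# Hypothetical fire-and-forget indexing of one crawled page.
future = add_to_index(
    url='https://example.org/',
    title='Example Domain',
    text='example domain for use in illustrative examples',
    description='An example page.',
)
# `future` is a Future tied to the current event loop; the database work itself
# happens on a worker thread. Tests can bypass the executor entirely with
# add_to_index.__wrapped__(...), as the project's own test suite does.
```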
#### File: JParramore/search-engine/server.py
```python
import time
from flask import Flask, request, jsonify, render_template
from query import query
app = Flask(__name__,
static_url_path='',
static_folder='build',
template_folder='build')
@app.route("/")
def index():
return render_template('index.html')
@app.route('/search')
def search():
q = request.args.get('q')
if q:
start = time.time()
results = query(q.lower())
        t = (time.time() - start) * 1000
if len(results) > 0:
if t < 0.0001:
t = 0.0001
stats = f'{len(results)} results found in {t:.4f}ms'
else:
stats = '0 results found.'
return jsonify(
{
'results': results,
'stats': stats,
}
)
else:
return jsonify([])
if __name__ == "__main__":
'''
In development, allow CORS requests so create-react-app
can use hot reload and still hit us.
'''
from flask_cors import CORS
CORS(app)
app.run(debug=True)
```
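Once the server is running, the `/search` endpoint returns JSON with `results` and `stats`; a client sketch using `requests` (the default Flask development port 5000 is an assumption):
```python
# Hypothetical client for the /search endpoint.
import requests

resp = requests.get('http://127.0.0.1:5000/search', params={'q': 'python tutorials'})
payload = resp.json()
print(payload.get('stats'))
for result in payload.get('results', []):
    print(result)
```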
#### File: search-engine/tests/test_crawler.py
```python
from crawler import extract_base, process_website, scrape_url_for_links, stream_seeds_into_queue
import unittest
from unittest.mock import patch
from bs4 import BeautifulSoup
from collections import deque
class DotDict(dict):
def __getattr__(self, item):
if item in self:
return self[item]
raise AttributeError
def __setattr__(self, key, value):
if key in self:
self[key] = value
return
raise AttributeError
class TestCrawlerMethods(unittest.TestCase):
def test_extract_base_url(self):
url = 'https://www.duolingo.com/'
expected = {
'strip_base': 'duolingo.com',
'base_url': 'https://www.duolingo.com',
'path': 'https://www.duolingo.com/'
}
actual = extract_base(url)
self.assertEqual(actual['strip_base'], expected['strip_base'])
self.assertEqual(actual['base_url'], expected['base_url'])
self.assertEqual(actual['path'], expected['path'])
def test_scrape_url_for_links(self):
url = 'https://www.testcase.com/'
with open('tests/data/test.html', 'r', encoding='utf-8') as stream:
html_doc = stream.read()
soup = BeautifulSoup(html_doc, 'lxml')
base = extract_base(url)
actual_internal_urls = scrape_url_for_links(base, soup)
expected_internal_urls = set()
expected_internal_urls.add('http://www.testcase.com/good-site-path')
expected_internal_urls.add('https://www.testcase.com/good-site-path2')
expected_internal_urls.add('https://www.testcase.com/goodpath3')
expected_internal_urls.add('https://www.testcase.com/')
self.assertCountEqual(actual_internal_urls, expected_internal_urls)
@patch('crawler.process_website')
def test_stream_seeds_into_queue(self, mock_process_website):
test_yaml = 'tests/data/test.yaml'
test_yaml_urls = ['https://facebook.com',
'https://google.com',
'https://www.test.com']
stream_seeds_into_queue(test_yaml)
for test_url in test_yaml_urls:
mock_process_website.assert_any_call(test_url)
@patch('crawler.requests')
@patch('crawler.add_to_index')
def test_process_website(self, add_to_index, mock_requests):
test_url = 'https://example.org'
mock_requests.get.return_value = DotDict({"text": "<html></html>"})
process_website(test_url)
mock_requests.get.assert_called_once_with('https://example.org')
```
#### File: search-engine/tests/test_indexer.py
```python
from indexer import add_to_index
from db.models import Page, Location, Word
import unittest
from unittest.mock import patch
from setup.session import get_testing_session
class TestIndexerData(unittest.TestCase):
@patch('indexer.get_session')
def test_add_data(self, mock_get_session):
mock_session = get_testing_session()
mock_get_session.return_value = mock_session
url = 'http://google.com'
title = 'Google'
text = 'apple banana orange apple'
description = 'Some description.'
add_to_index.__wrapped__(url, title, text, description)
page = mock_session.query(Page).first()
words = mock_session.query(Word).all()
# page saved
self.assertEqual(page.url, url)
self.assertEqual(page.title, title)
self.assertEqual(page.description, description)
# correct amount of locations
locations = mock_session.query(
Location).filter_by(page_id=page.id).all()
self.assertEqual(len(locations), 4)
# use existing words when necessary
self.assertEqual(len(words), 3)
# clear up stale locations
old_locations = locations
add_to_index.__wrapped__(url, title, text, description)
locations = mock_session.query(
Location).filter_by(page_id=page.id).all()
for new_location in locations:
self.assertNotIn(new_location, old_locations)
``` |
{
"source": "jparras/fed-baselines",
"score": 3
} |
#### File: fed-baselines/admm/admm_toolbox.py
```python
import numpy as np
import matplotlib.pyplot as plt
import cvxpy as cp
'''
In order to append / modify a problem, you should:
(a) Add a new target function and modify the target method
(b) Add its analytical solution and modify the solve_analytical method
(c) Add the new problem to AdmmCentralized class (if you want to use ADMM centralized), i.e., modify updates of x and z
(d) Add the new problem to AdmmDistributedAgent class (if you want to use ADMM distributed), i.e., modify updates of x and z
'''
class AdmmDistributed(object): # Class that prepares data for distributed training
'''
Note that this class is a convenient way to test a distributed ADMM implementation. In real deployments, no agent has
access to all data (as this class does) and hence, it is not possible to compute the global loss unless we split the
regularizer term among all distributed agents. In a real deployment, also, the analytical solution is not available.
Note that this function is provided just for illustration and testing purposes.
'''
def __init__(self, data_in, data_out, problem, lam=0.1, rho=10, grad_steps=10, grad_mu=0.1):
if type(data_in) is not list or type(data_out) is not list:
raise RuntimeError('Data must be provided as a list of numpy arrays per agent')
        if len(data_in) != len(data_out):
raise RuntimeError('Input and output data lists must have the same number of elements')
self.na = len(data_in)
self.problem = problem # Problem to be solved
# To store training values
self.f = None # To store function values (global)
# ADMM parameters
self.lam = lam # Regularizer
self.rho = rho # Quadratic weight
self.grad_steps = grad_steps # Number of steps per iteration in gradient / subgradient method (if needed)
self.grad_mu = grad_mu # Step size in gradient / subgradient method (if needed)
# Store global data
self.global_data_in = np.vstack(data_in)
self.global_data_out = np.hstack(data_out)
self.global_data_out = self.global_data_out.reshape([self.global_data_out.size, 1])
# Build ADMM agents
self.agents = [AdmmDistributedAgent(data_in[i], data_out[i], self.global_data_in.shape[0], self.na,
self.problem, lam=self.lam, rho=self.rho, grad_steps=self.grad_steps,
grad_mu=self.grad_mu) for i in range(self.na)]
# Analytical solution (for comparison purposes)
self.fopt, self.xopt = self.solve_analytical()
def function(self, x): # Global target function (this function is only for illustration purposes)
return target(self.global_data_in, self.global_data_out, self.lam, x, self.problem, z=None, na=1)
def solve_analytical(self):
return solve_analytical(self.global_data_in, self.global_data_out, self.lam, self.problem)
def train(self, niter): # Distributed ADMM training!
for agent in self.agents:
agent.initialize() # Initialize local values
self.f = [] # Initialize global f value
for iter in range(niter):
# Update x (locally)
for agent in self.agents:
agent.x_update(agent.x[-1], agent.y[-1], agent.z[-1])
# Update z (globally!)
sum_x = np.zeros_like(self.agents[0].x[-1])
sum_y = np.zeros_like(self.agents[0].y[-1])
for agent in self.agents:
sum_x += agent.x[-1]
sum_y += agent.y[-1]
for agent in self.agents:
agent.z_update(sum_x / self.na, sum_y / self.na, agent.z[-1])
# Update y (locally)
for agent in self.agents:
agent.y_update(agent.x[-1], agent.y[-1], agent.z[-1])
# Update global f: make use of z (global value, shared by all agents)
self.f.append(self.function(self.agents[0].z[-1]))
def plot(self):
# Plot the losses using the global variable z and all the data
plt.plot(10 * np.log10(np.square(np.array(self.f) - self.fopt) + np.finfo(float).eps), 'b', label='global')
# Plot also the losses using the local terms, x and z (the actual values obtained: the gap is due to x != z)
sum_f_local = np.sum(np.array([np.array(agent.f) for agent in self.agents]), axis=0)
plt.plot(10 * np.log10(np.square(sum_f_local - self.fopt) + np.finfo(float).eps), 'r', label='local')
plt.title('ADMM distributed global loss')
plt.xlabel('Iteration')
plt.ylabel('MSE')
plt.legend(loc='best')
plt.show()
'''
for i, agent in enumerate(self.agents):
plt.plot(agent.f, label=str(i))
plt.plot(self.f, label='Global value')
plt.title('ADMM distributed function values: local and global')
plt.xlabel('Iteration')
plt.ylabel('Value')
plt.legend(loc='best')
plt.show()
for i, agent in enumerate(self.agents):
plt.plot(np.squeeze(np.array(agent.x)), label=str(i))
plt.title('ADMM distributed x')
plt.xlabel('Iteration')
plt.ylabel('x value')
plt.legend(loc='best')
plt.show()
for i, agent in enumerate(self.agents):
plt.plot(np.squeeze(np.array(agent.y)), label=str(i))
plt.title('ADMM distributed y')
plt.xlabel('Iteration')
plt.ylabel('y value')
plt.legend(loc='best')
plt.show()
for i, agent in enumerate(self.agents):
plt.plot(np.squeeze(np.array(agent.z)), label=str(i))
plt.title('ADMM distributed z')
plt.xlabel('Iteration')
plt.ylabel('z value')
plt.legend(loc='best')
plt.show()
for i, agent in enumerate(self.agents):
plt.plot(10 * np.log10(
np.sum(np.square(np.squeeze(np.array(agent.z)) - np.squeeze(np.array(agent.x))), axis=1) + np.finfo(
float).eps), label=str(i))
plt.title('ADMM distributed x-z convergence')
plt.xlabel('Iteration')
plt.ylabel('MSE')
plt.legend(loc='best')
plt.show()
'''
class AdmmDistributedAgent(object):
def __init__(self, local_data_in, local_data_out, d_tot, na, problem, lam=0.1, rho=10, grad_steps=10, grad_mu=0.001):
self.ndata = local_data_in.shape[0] # Number of data points (local dataset)
self.ndata_tot = d_tot # Number of data points (global dataset)
self.data_dim = local_data_in.shape[1] # Number of features per data point
self.data_in = local_data_in # Feature matrix
self.data_out = local_data_out.reshape([self.ndata, 1]) # Labels / regression targets
self.na = na # Number of agents cooperating
self.problem = problem # Problem to be solved
# To store training values
self.x = None # To store x values (local)
self.y = None # To store y values (local)
self.z = None # To store z values (global)
self.f = None # To store function values (local)
# ADMM parameters
self.lam = lam # Regularizer
self.rho = rho # Quadratic weight
self.grad_steps = grad_steps # Number of steps per iteration in gradient / subgradient method (if needed)
self.grad_mu = grad_mu # Step size in gradient / subgradient method (if needed)
def function(self, x, z): # Local target function
return target(self.data_in, self.data_out, self.lam, x, self.problem, z=z, na=self.na, ntot=self.ndata_tot)
def x_update(self, x, y, z):
if self.problem is "lasso" or self.problem is "ridge":
term1 = np.linalg.inv(2 / self.ndata_tot * self.data_in.T @ self.data_in + self.rho * np.eye(self.data_dim))
term2 = 2 / self.ndata_tot * self.data_in.T @ self.data_out + self.rho * (z - y)
xnew = term1 @ term2
elif self.problem is "svm": # In this case, we use a subgradient approach for the hinge function
for it in range(self.grad_steps):
d = np.diag(np.squeeze(self.data_out)) @ self.data_in
term1 = -1 / self.ndata_tot * np.sum(d[np.squeeze(d @ x < 1), :], axis=0).reshape([self.data_dim, 1])
x = x - self.grad_mu * (term1 + self.rho * (x - z + y))
xnew = x
elif self.problem is "logistic": ## We use a gradient method for the logistic function
for it in range(self.grad_steps):
d = np.diag(np.squeeze(self.data_out)) @ self.data_in
denominator = np.repeat(1 + np.exp(d @ x), self.data_dim, axis=1)
term1 = -1 / self.ndata_tot * np.sum(d / denominator, axis=0).reshape([self.data_dim, 1])
x = x - self.grad_mu * (term1 + self.rho * (x - z + y))
xnew = x
else:
raise RuntimeError('Problem not recognized')
self.x.append(xnew)
def y_update(self, x, y, z):
ynew = y + x - z
self.y.append(ynew)
# Update also the function value!
self.f.append(self.function(x, z))
def z_update(self, x, y, z): # In this case, x and y are the average of local x and y values!!
if self.problem is "lasso":
q = x + y
v = self.lam / (self.na * self.rho)
znew = np.maximum(np.zeros_like(q), q - v) - np.maximum(np.zeros_like(q), - q - v)
elif self.problem is "ridge" or self.problem is "svm" or self.problem is "logistic":
znew = (x+y) * self.rho * self.na / (self.lam + self.rho * self.na)
else:
raise RuntimeError('Problem not recognized')
self.z.append(znew)
def initialize(self):
# Initialize values
self.x = [] # To store x values
self.y = [] # To store y values
self.z = [] # To store z values
self.f = [] # To store target function values
self.x.append(np.zeros((self.data_dim, 1)))
self.y.append(np.zeros((self.data_dim, 1)))
self.z.append(np.zeros((self.data_dim, 1)))
self.f.append(self.function(self.x[-1], self.z[-1]))
class AdmmCentralized(object):
def __init__(self, data_in, data_out, problem, lam=0.1, rho=10, grad_steps=10, grad_mu=0.001):
self.ndata = data_in.shape[0] # Number of data points
self.data_dim = data_in.shape[1] # Number of features per data point
self.data_in = data_in # Feature matrix
self.data_out = data_out.reshape([self.ndata, 1]) # Labels / regression targets
self.problem = problem # Problem to be solved
# To store training values
self.x = None # To store x values
self.y = None # To store y values
self.z = None # To store z values
self.f = None # To store function values
# ADMM parameters
self.lam = lam # Regularizer
self.rho = rho # Quadratic weight
self.grad_steps = grad_steps # Number of steps per iteration in gradient / subgradient method (if needed)
self.grad_mu = grad_mu # Step size in gradient / subgradient method (if needed)
# Analytical solution (for comparison purposes)
self.fopt, self.xopt = self.solve_analytical()
def function(self, x): # Target function
return target(self.data_in, self.data_out, self.lam, x, self.problem, z=None, na=1)
def solve_analytical(self):
return solve_analytical(self.data_in, self.data_out, self.lam, self.problem)
def x_update(self, x, y, z):
if self.problem is "lasso" or self.problem is "ridge":
term1 = np.linalg.inv(2 / self.ndata * self.data_in.T @ self.data_in + self.rho * np.eye(self.data_dim))
term2 = 2 / self.ndata * self.data_in.T @ self.data_out + self.rho*(z-y)
xnew = term1 @ term2
elif self.problem is "svm": # In this case, we use a subgradient approach for the hinge function
for it in range(self.grad_steps):
d = np.diag(np.squeeze(self.data_out)) @ self.data_in
term1 = -1 / self.ndata * np.sum(d[np.squeeze(d @ x < 1), :], axis=0).reshape([self.data_dim, 1])
x = x - self.grad_mu * (term1 + self.rho * (x - z + y))
xnew = x
elif self.problem is "logistic": ## We use a gradient method for the logistic function
for it in range(self.grad_steps):
d = np.diag(np.squeeze(self.data_out)) @ self.data_in
denominator = np.repeat(1 + np.exp(d @ x), self.data_dim, axis=1)
term1 = -1 / self.ndata * np.sum(d / denominator, axis=0).reshape([self.data_dim, 1])
x = x - self.grad_mu * (term1 + self.rho * (x - z + y))
xnew = x
else:
raise RuntimeError('Problem not recognized')
self.x.append(xnew)
def y_update(self, x, y, z): # Always the same, we update the function value here!
ynew = y + x - z
self.y.append(ynew)
# Update also the function value!
self.f.append(self.function(x))
def z_update(self, x, y, z):
if self.problem is "lasso":
q = x + y
v = self.lam / self.rho
znew = np.maximum(np.zeros_like(q), q - v) - np.maximum(np.zeros_like(q), - q - v)
elif self.problem is "ridge" or self.problem is "svm" or self.problem is "logistic":
znew = (x + y) * self.rho / (self.lam + self.rho)
else:
raise RuntimeError('Problem not recognized')
self.z.append(znew)
def initialize(self):
self.x = [] # To store x values
self.y = [] # To store y values
self.z = [] # To store z values
self.f = [] # To store target function values
self.x.append(np.zeros((self.data_dim, 1)))
self.y.append(np.zeros((self.data_dim, 1)))
self.z.append(np.zeros((self.data_dim, 1)))
self.f.append(self.function(self.x[-1]))
def train(self, niter): # Train centralized ADMM
# Initialize values
self.initialize()
# Iterate ADMM
for iter in range(niter):
self.x_update(self.x[-1], self.y[-1], self.z[-1]) # Update x
self.z_update(self.x[-1], self.y[-1], self.z[-1]) # Update z
self.y_update(self.x[-1], self.y[-1], self.z[-1]) # Update y (and store the function value!)
def plot(self):
'''
plt.plot(np.squeeze(np.array(self.x)), 'b', label='x')
plt.plot(np.squeeze(np.array(self.y)), 'r', label='y')
plt.plot(np.squeeze(np.array(self.z)), 'g', label='z')
plt.title('ADMM centralized values')
plt.xlabel('Iteration')
plt.ylabel('Value')
plt.legend(loc='best')
plt.show()
plt.plot(10 * np.log10(np.square(np.array(self.f))))
plt.title('ADMM centralized function')
plt.xlabel('Iteration')
plt.ylabel('MSE')
plt.show()
'''
plt.plot(10 * np.log10(np.square(np.array(self.f) - self.fopt) + np.finfo(float).eps))
plt.title('ADMM centralized loss')
plt.xlabel('Iteration')
plt.ylabel('MSE')
plt.show()
# Target functions (in distributed, x is local and z is global)
def target(data_in, data_out, lam, x, problem, z=None, na=1, ntot=None):
if problem is "lasso":
return target_lasso(data_in, data_out, lam, x, z, na=na, ntot=ntot)
elif problem is "svm":
return target_svm(data_in, data_out, lam, x, z, na=na, ntot=ntot)
elif problem is "ridge":
return target_ridge(data_in, data_out, lam, x, z, na=na, ntot=ntot)
elif problem is "logistic":
return target_logistic(data_in, data_out, lam, x, z, na=na, ntot=ntot)
else:
raise RuntimeError('Problem not recognized')
def target_lasso(data_in, data_out, lam, x, z, na=1, ntot=None):
if ntot is None:
ntot = data_in.shape[0]
if z is None:
return np.sum(np.square(data_in @ x - data_out)) / ntot + lam * np.sum(np.abs(x)) / na
else:
return np.sum(np.square(data_in @ x - data_out)) / ntot + lam * np.sum(np.abs(z)) / na
def target_ridge(data_in, data_out, lam, x, z, na=1, ntot=None):
if ntot is None:
ntot = data_in.shape[0]
if z is None:
return np.sum(np.square(data_in @ x - data_out)) / ntot + lam / 2 * np.sum(np.square(x)) / na
else:
return np.sum(np.square(data_in @ x - data_out)) / ntot + lam / 2 * np.sum(np.square(z)) / na
def target_svm(data_in, data_out, lam, x, z, na=1, ntot=None):
if ntot is None:
ntot = data_in.shape[0]
cost = np.sum(np.maximum(np.zeros((data_in.shape[0], 1)),
np.ones((data_in.shape[0], 1)) - np.diag(np.squeeze(data_out)) @ (data_in @ x)))
if z is None:
return cost / ntot + lam / 2 * np.sum(np.square(x)) / na
else:
return cost / ntot + lam / 2 * np.sum(np.square(z)) / na
def target_logistic(data_in, data_out, lam, x, z, na=1, ntot=None):
if ntot is None:
ntot = data_in.shape[0]
cost = np.sum(np.log(1 + np.exp(- np.diag(np.squeeze(data_out)) @ (data_in @ x))))
if z is None:
return cost / ntot + lam / 2 * np.sum(np.square(x)) / na
else:
return cost / ntot + lam / 2 * np.sum(np.square(z)) / na
# Analytical solutions to classical problems using CVX: this data can be later used for our problems as REFERENCE
def solve_analytical(data_in, data_out, lam, problem):
if problem is "lasso":
return solve_lasso_analytical(data_in, data_out, lam)
elif problem is "svm":
return solve_svm_analytical(data_in, data_out, lam)
elif problem is "ridge":
return solve_ridge_analytical(data_in, data_out, lam)
elif problem is "logistic":
return solve_logistic_analytical(data_in, data_out, lam)
else:
raise RuntimeError('Problem not recognized')
def solve_lasso_analytical(data_in, data_out, lam):
x = cp.Variable((data_in.shape[1], 1))
cost = cp.sum_squares(data_in @ x - data_out)
prob = cp.Problem(cp.Minimize(cost/data_in.shape[0] + lam * cp.sum(cp.abs(x))))
prob.solve()
return prob.value, x.value
def solve_ridge_analytical(data_in, data_out, lam):
x = cp.Variable((data_in.shape[1], 1))
cost = cp.sum_squares(data_in @ x - data_out)
prob = cp.Problem(cp.Minimize(cost/data_in.shape[0] + lam / 2 * cp.sum_squares(x)))
prob.solve()
return prob.value, x.value
def solve_svm_analytical(data_in, data_out, lam):
x = cp.Variable((data_in.shape[1], 1))
cost = cp.sum(cp.maximum(np.zeros((data_in.shape[0], 1)), np.ones((data_in.shape[0], 1)) - cp.diag(data_out) @ (data_in @ x)))
prob = cp.Problem(cp.Minimize(cost/data_in.shape[0] + lam / 2 * cp.sum_squares(x)))
prob.solve()
return prob.value, x.value
def solve_logistic_analytical(data_in, data_out, lam):
x = cp.Variable((data_in.shape[1], 1))
cost = cp.sum(cp.logistic(-cp.diag(data_out) @ (data_in @ x)))
prob = cp.Problem(cp.Minimize(cost / data_in.shape[0] + lam / 2 * cp.sum_squares(x)))
prob.solve()
return prob.value, x.value
```
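An end-to-end sketch of the toolbox on synthetic ridge-regression data, comparing centralized and distributed ADMM against the CVXPY reference stored in `fopt` (the data generation below is made up for illustration):
```python
# Hypothetical driver for AdmmCentralized / AdmmDistributed on synthetic data.
import numpy as np

np.random.seed(0)
n, d = 200, 5
A = np.random.randn(n, d)
x_true = np.random.randn(d, 1)
b = (A @ x_true + 0.1 * np.random.randn(n, 1)).squeeze()

central = AdmmCentralized(A, b, problem="ridge", lam=0.1, rho=10)
central.train(niter=100)
print("ADMM loss:", central.f[-1], "CVXPY reference:", central.fopt)
central.plot()

# Distributed variant: split the same data among 4 agents.
distributed = AdmmDistributed(list(np.array_split(A, 4)),
                              list(np.array_split(b, 4)),
                              problem="ridge")
distributed.train(niter=100)
distributed.plot()
```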
#### File: fed-baselines/BNNcent/bnn.py
```python
import os
import csv
import torch
import urllib
import argparse
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from bnn_toolbox import *
from torchvision import datasets
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
torch.manual_seed(0)
def data_preparation(hyperparameters):
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x * 255. / 126.), # Divide as in paper
])
train_data = datasets.MNIST(root='data', train=True, download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False, download=True, transform=transform)
valid_size = 1 / 6
num_train = len(train_data)
indices = list(range(num_train))
split = int(valid_size * num_train)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx) # Samples elements randomly from a given list of indices, without replacement.
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=hyperparameters.batch_size,
sampler=train_sampler,
num_workers=1)
valid_loader = torch.utils.data.DataLoader(
train_data,
batch_size=hyperparameters.eval_batch_size,
sampler=valid_sampler,
num_workers=1)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=hyperparameters.eval_batch_size,
num_workers=1)
return train_loader, valid_loader, test_loader, 28*28, 10
def parse_arguments():
parser = argparse.ArgumentParser(description='Train Bayesian Neural Net on MNIST with Variational Inference')
parser.add_argument('--model', type=str, nargs='?', action='store', default='mixture_prior',
                        help='Model to run. Default is mixture_prior. Options are \'gaussian_prior\', \'mixture_prior\'.')
parser.add_argument('--hidd_units', type=int, nargs='?', action='store', default=1200,
help='Neural network hidden units. Default 1200.')
args = parser.parse_args()
return args
if __name__ == '__main__':
# Configuration
print('[INFO] Environment configuration...')
args = parse_arguments()
mixture = True
if args.model != 'mixture_prior':
mixture = False
hyperparameters = HyperparametersInitialization(hidden_units=args.hidd_units, mixture=mixture)
# Data preparation
print('[INFO] Preparing data...')
    train_loader, valid_loader, test_loader, n_input, n_output = data_preparation(hyperparameters)
# Test parameters
print('[INFO] Model hyperparameters:')
print(hyperparameters.__dict__)
# Initialize network
print('[INFO] Initializing network...')
    model = BNN(n_input, n_output, hyperparameters).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=hyperparameters.lr, momentum=hyperparameters.momentum)
train_losses = np.zeros(hyperparameters.max_epoch)
valid_accs = np.zeros(hyperparameters.max_epoch)
test_accs = np.zeros(hyperparameters.max_epoch)
test_errs = np.zeros(hyperparameters.max_epoch)
# Training
print('[INFO] Training network for', hyperparameters.max_epoch, 'epochs...')
for epoch in range(hyperparameters.max_epoch):
train_loss = train(model, optimizer, train_loader, hyperparameters)
valid_acc = evaluate(model, valid_loader, samples=hyperparameters.n_test_samples)
test_acc = evaluate(model, test_loader, samples=hyperparameters.n_test_samples)
print('Epoch', epoch + 1, 'Loss', float(train_loss),
'Valid Error', round(100 * (1 - valid_acc / hyperparameters.eval_batch_size), 3), '%',
'Test Error', round(100 * (1 - test_acc / hyperparameters.eval_batch_size), 3), '%')
valid_accs[epoch] = valid_acc
test_accs[epoch] = test_acc
train_losses[epoch] = train_loss
test_errs[epoch] = round(100 * (1 - test_acc / hyperparameters.eval_batch_size), 3)
# Save results
if not os.path.exists('results'):
os.makedirs('results')
path = 'results/BBB_' + 'mnist' + '_' + str(hyperparameters.hidden_units) + '_' + str(hyperparameters.lr) + '_samples' + str(hyperparameters.n_samples) + '_' + str(args.model)
wr = csv.writer(open(path + '.csv', 'w'), delimiter=',', lineterminator='\n')
wr.writerow(['epoch', 'valid_acc', 'test_acc', 'train_losses'])
for i in range(hyperparameters.max_epoch):
        wr.writerow((i + 1, str(round(valid_accs[i] / hyperparameters.eval_batch_size * 100, 3)) + "%", str(round(test_accs[i] / hyperparameters.eval_batch_size * 100, 3)) + "%", train_losses[i]))
torch.save(model.state_dict(), path + '.pth')
# Plot test error
plt.plot(test_errs)
plt.xlabel('Epochs')
plt.ylabel('Error (%)')
plt.title('Test data error prediction')
plt.grid(True)
plt.savefig('results/BBB_' + 'mnist' + '_' + str(hyperparameters.hidden_units) + '_' + str(hyperparameters.lr) + '_samples' + str(hyperparameters.n_samples) + '_' + str(args.model), format='png')
plt.show()
print('[INFO] Done')
```
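After training, the saved state dict can be reloaded for further evaluation; a sketch that follows the call signatures used in the script above (the checkpoint path is a placeholder):
```python
# Hypothetical re-evaluation of a saved checkpoint on the MNIST test set.
import torch
from bnn_toolbox import BNN, HyperparametersInitialization, evaluate

hyper = HyperparametersInitialization(hidden_units=1200, mixture=True)
_, _, test_loader, n_in, n_out = data_preparation(hyper)

model = BNN(n_in, n_out, hyper).cuda()
model.load_state_dict(torch.load('results/some_checkpoint.pth'))   # placeholder path

acc = evaluate(model, test_loader, samples=hyper.n_test_samples)
print('test error: %.3f%%' % (100 * (1 - acc / hyper.eval_batch_size)))
```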
#### File: BNNfed/source/fed_process.py
```python
import logging
import random
from operator import itemgetter
from pathlib import Path
import numpy as np
import tensorflow as tf
from source.constants import ROOT_LOGGER_STR
from source.tfp_utils import loc_prod_from_locprec
from source.utils import CustomTensorboard, avg_dict, avg_dict_eval
eps = 1 / tf.float32.max
logger = logging.getLogger(ROOT_LOGGER_STR + '.' + __name__)
class FedProcess:
def __init__(self, model_fn, num_clients):
self.model_fn = model_fn
self.num_clients = num_clients
self.clients_indx = range(self.num_clients)
self.clients = []
self.server = None
self.train_summary_writer = None
self.test_summary_writer = None
self.valid_summary_writer = None
def build(self, *args, **kwargs):
pass
def aggregate_deltas_multi_layer(self, deltas, client_weight=None):
aggregated_deltas = []
deltas = list(map(list, zip(*deltas)))
for delta_layer in deltas:
aggregated_deltas.append(
self.aggregate_deltas_single_layer(delta_layer, client_weight))
return aggregated_deltas
def aggregate_deltas_single_layer(self, deltas, client_weight=None):
for i, delta_client in enumerate(deltas):
for key, el in delta_client.items():
if isinstance(el, tuple):
(loc, prec) = el
if client_weight:
prec = prec*client_weight[i]*self.num_clients
loc = tf.math.multiply(loc, prec)
delta_client[key] = (loc, prec)
else:
if client_weight:
delta_client[key] = (el*client_weight[i], )
else:
delta_client[key] = (el/self.num_clients, )
deltas = {key: [dic[key] for dic in deltas] for key in deltas[0]}
for key, lst in deltas.items():
lst = zip(*lst)
sum_el = []
for i, el in enumerate(lst):
add = tf.math.add_n(el)
sum_el.append(add)
if len(sum_el) == 2:
loc = loc_prod_from_locprec(*sum_el)
deltas[key] = (loc, sum_el[1])
else:
deltas[key] = sum_el[0]
return deltas
def fit(self,
federated_train_data,
num_rounds,
clients_per_round,
epochs_per_round,
federated_test_data=None,
tensorboard_updates=1,
logdir=Path(),
callbacks=None,
train_size=None,
test_size=None,
hierarchical=False,
server_learning_rate=1.,
verbose=0,
MTL=False):
print('fed_process ' + str(logdir))
self.summary_writer = tf.summary.create_file_writer(str(logdir))
if MTL:
self.build(train_size, hierarchical)
deltas = [client.compute_delta() for client in self.clients]
aggregated_deltas = self.aggregate_deltas_multi_layer(
deltas, [size / sum(train_size) for size in train_size])
self.server.apply_delta(aggregated_deltas)
else:
self.build()
history_test = [None] * len(self.clients)
max_train_accuracy = -1.0
max_train_acc_round = None
max_server_accuracy = -1.0
max_server_acc_round = None
max_client_all_accuracy = -1.0
max_client_all_round = None
max_client_selected_accuracy = -1.0
max_client_selected_acc_round = None
server_test_accs = np.zeros(num_rounds)
all_client_test_accs = np.zeros(num_rounds)
selected_client_test_accs = np.zeros(num_rounds)
training_accs = np.zeros(num_rounds)
server_test_losses = np.zeros(num_rounds)
all_client_test_losses = np.zeros(num_rounds)
selected_client_test_losses = np.zeros(num_rounds)
training_losses = np.zeros(num_rounds)
overall_tensorboard = CustomTensorboard(log_dir=str(logdir)+'/selected_client',
histogram_freq=max(0, verbose - 2),
profile_batch=max(0, verbose - 2))
if verbose >= 2:
if callbacks:
callbacks.append(overall_tensorboard)
else:
callbacks = [overall_tensorboard]
for round_i in range(num_rounds):
clients_sampled = random.sample(self.clients_indx,
clients_per_round)
deltas = []
history_train = []
for indx in clients_sampled:
self.clients[indx].receive_and_save_weights(self.server)
self.clients[indx].renew_center(round_i > 0)
if MTL:
if self.fed_avg_init == 2 or (
self.fed_avg_init
and round_i > 0):
print('initialize posterior with server')
self.clients[indx].initialize_kernel_posterior()
history_single = self.clients[indx].fit(
federated_train_data[indx],
verbose=0,
validation_data=federated_test_data[indx],
epochs=epochs_per_round,
callbacks=callbacks
)
if MTL:
self.clients[indx].apply_damping(self.damping_factor)
delta = self.clients[indx].compute_delta()
deltas.append(delta)
if verbose >= 1:
with self.summary_writer.as_default():
for layer in self.clients[indx].layers:
layer_to_check = layer
if hasattr(layer, 'cell'):
layer_to_check = layer.cell
for weight in layer_to_check.trainable_weights:
if 'natural' in weight.name + layer.name:
tf.summary.histogram(layer.name + '/' + weight.name + '_gamma',
weight[..., 0], step=round_i)
tf.summary.histogram(layer.name + '/' + weight.name + '_prec',
weight[..., 1], step=round_i)
else:
tf.summary.histogram(layer.name + '/' + weight.name, weight, step=round_i)
if hasattr(layer_to_check, 'kernel_posterior'):
tf.summary.histogram(
layer.name + '/kernel_posterior' + '_gamma_reparametrized',
layer_to_check.kernel_posterior.distribution.gamma,
step=round_i)
tf.summary.histogram(
layer.name + '/kernel_posterior' + '_prec_reparametrized',
layer_to_check.kernel_posterior.distribution.prec,
step=round_i)
if hasattr(layer_to_check, 'recurrent_kernel_posterior'):
tf.summary.histogram(
layer.name + '/recurrent_kernel_posterior' + '_gamma_reparametrized',
layer_to_check.recurrent_kernel_posterior.distribution.gamma,
step=round_i)
tf.summary.histogram(
layer.name + '/recurrent_kernel_posterior' + '_prec_reparametrized',
layer_to_check.recurrent_kernel_posterior.distribution.prec,
step=round_i)
for layer in self.server.layers:
layer_to_check = layer
if hasattr(layer, 'cell'):
layer_to_check = layer.cell
if hasattr(layer_to_check, 'server_variable_dict'):
for key, value in layer_to_check.server_variable_dict.items():
if 'natural' in layer_to_check.name + value.name:
tf.summary.histogram(
layer.name + '/server_gamma',
value[..., 0], step=round_i)
tf.summary.histogram(
layer.name + '/server_prec',
value[..., 1], step=round_i)
else:
tf.summary.histogram(layer.name, value, step=round_i)
history_train.append({key: history_single.history[key]
for key in history_single.history.keys()
if 'val' not in key})
history_test[indx] = \
{key.replace('val_', ''): history_single.history[key]
for key in history_single.history.keys()
if 'val' in key}
train_size_sampled = itemgetter(*clients_sampled)(train_size)
if clients_per_round == 1:
train_size_sampled = [train_size_sampled]
if MTL:
client_weights = [server_learning_rate * train_size[client] / sum(train_size)
for client in clients_sampled]
else:
client_weights = [server_learning_rate * train_size[client] / sum(train_size_sampled)
for client in
clients_sampled]
aggregated_deltas = self.aggregate_deltas_multi_layer(deltas, client_weights)
self.server.apply_delta(aggregated_deltas)
server_test = [self.server.evaluate(test_data, verbose=0)
for test_data in federated_test_data]
all_client_test = [self.clients[indx].evaluate(test_data, verbose=0)
for indx, test_data in enumerate(federated_test_data)]
all_client_avg_test = avg_dict_eval(
all_client_test, [size / sum(test_size) for size in test_size])
all_client_test_accs[round_i] = all_client_avg_test[1]
all_client_test_losses[round_i] = all_client_avg_test[0]
avg_train = avg_dict(history_train,
[train_size[client]
for client in clients_sampled])
selected_client_test = avg_dict(history_test, test_size)
server_avg_test = avg_dict_eval(
server_test, [size / sum(test_size) for size in test_size])
if server_avg_test[1] > max_server_accuracy:
max_server_accuracy = server_avg_test[1]
max_server_acc_round = round_i
if avg_train['sparse_categorical_accuracy'] > max_train_accuracy:
max_train_accuracy = avg_train['sparse_categorical_accuracy']
max_train_acc_round = round_i
if selected_client_test['sparse_categorical_accuracy'] > max_client_selected_accuracy:
max_client_selected_accuracy = selected_client_test['sparse_categorical_accuracy']
max_client_selected_acc_round = round_i
if all_client_avg_test[1] > max_client_all_accuracy:
max_client_all_accuracy = all_client_avg_test[1]
max_client_all_round = round_i
server_test_accs[round_i] = server_avg_test[1]
training_accs[round_i] = avg_train['sparse_categorical_accuracy']
selected_client_test_accs[round_i] = selected_client_test['sparse_categorical_accuracy']
server_test_losses[round_i] = server_avg_test[0]
selected_client_test_losses[round_i] = selected_client_test['loss']
training_losses[round_i] = avg_train['loss']
            debug_string = (f"round: {round_i}, "
                            f"avg_train: {avg_train}, "
                            f"selected_client_test: {selected_client_test}, "
                            f"server_avg_test on whole test data: {server_avg_test} "
                            f"server max accuracy so far: {max_server_accuracy} reached at "
f"round {max_server_acc_round} "
f"all clients max accuracy so far: {max_client_all_accuracy} reached at "
f"round {max_client_all_round} "
f"all clients avg test: {all_client_avg_test}")
logger.debug(debug_string)
if round_i % tensorboard_updates == 0:
for i, key in enumerate(avg_train.keys()):
with self.summary_writer.as_default():
tf.summary.scalar('train/' + key, avg_train[key], step=round_i)
tf.summary.scalar('server/' + key, server_avg_test[i], step=round_i)
tf.summary.scalar('client_selected/' + key, selected_client_test[key], step=round_i)
tf.summary.scalar('client_all/' + key, all_client_avg_test[i], step=round_i)
if key == 'sparse_categorical_accuracy':
tf.summary.scalar('train/max_' + key, max_train_accuracy, step=round_i)
tf.summary.scalar('server/max_' + key, max_server_accuracy, step=round_i)
tf.summary.scalar('client_selected/max_' + key, max_client_selected_accuracy, step=round_i)
tf.summary.scalar('client_all/max_' + key, max_client_all_accuracy, step=round_i)
# Do this at every round to make sure to keep the data even if
# the training is interrupted
np.save(Path(logdir).parent / 'server_accs.npy', server_test_accs)
np.save(Path(logdir).parent / 'training_accs.npy', training_accs)
np.save(Path(logdir).parent / 'selected_client_accs.npy', selected_client_test_accs)
np.save(Path(logdir).parent / 'server_losses.npy', server_test_losses)
np.save(Path(logdir).parent / 'training_losses.npy', training_losses)
np.save(Path(logdir).parent / 'selected_client_losses.npy', selected_client_test_losses)
np.save(Path(logdir).parent / 'all_client_accs.npy', all_client_test_accs)
np.save(Path(logdir).parent / 'all_client_losses.npy', all_client_test_losses)
for i, client in enumerate(self.clients):
client.save_weights(str(Path(logdir) / f'weights_{i}.h5'))
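            # Note (added comment): at this point the parent of `logdir` holds the
            # per-round .npy arrays saved above (server/client accuracies and losses),
            # while `logdir` itself holds the TensorBoard event files and one
            # weights_<i>.h5 file per client.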
``` |
{
"source": "jparrax/datascience_middle_course",
"score": 2
} |
#### File: 02_linear_models/solutions/solution_07.py
```python
def huber_loss(y_true, y_pred, *, epsilon):
mask_greater_epsilon = np.abs(y_true - y_pred) > epsilon
loss = np.zeros_like(y_pred)
loss[mask_greater_epsilon] = np.abs(y_true - y_pred)[mask_greater_epsilon]
loss[~mask_greater_epsilon] = se_loss(y_true, y_pred)[~mask_greater_epsilon]
return loss
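# Hedged usage sketch (not part of the original solution): `se_loss` is the
# squared-error helper defined earlier in the exercise; assuming
#   se_loss = lambda y_true, y_pred: (y_true - y_pred) ** 2
# then
#   huber_loss(np.array([3.0, -0.5, 2.0]), np.array([2.5, 0.0, 8.0]), epsilon=1.0)
# returns array([0.25, 0.25, 6.0]): small residuals are squared, while the
# large residual (6.0) is penalised linearly.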
``` |
{
"source": "jparrent/Insight",
"score": 3
} |
#### File: Project/app/RankRest.py
```python
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
# import math
def RankIt(df, model, model_hs1, model_hs1_neg0, allergens, nutrition_data, big8):
all_allergens = []
all_allergens.extend(big8)
all_allergens.extend(allergens)
print(all_allergens)
num_of_allerg = len(all_allergens)
g = {k: list(s) for k, s in df.groupby(
['restid', 'restname', 'address'])['sentence']}
scores = []
for key, llist in g.items():
key_score = 100
num_of_items = len(llist)
        # guard against empty menus / empty allergen lists (avoids ZeroDivisionError)
        strike = key_score / num_of_items / num_of_allerg if num_of_items and num_of_allerg else 0
if len(allergens) > 0:
for allerg in allergens:
for sen in llist:
sen_list = sen.split()
for word in sen_list:
# step 1: check for direct word match
if fuzz.ratio(word.lower(), allerg) > 80:
key_score -= strike
print(word, allerg, strike)
break
# if none, step 2: check for indirect word match
else:
try:
test1 = model.similar_by_word(
word.lower(), topn=20)
test2 = model_hs1.similar_by_word(
word.lower(), topn=20)
test3 = model_hs1_neg0.similar_by_word(
word.lower(), topn=20)
except KeyError:
continue
check1 = [i[0].lower() for i in test1 if i[1] >
0.40 and fuzz.ratio(i[0].lower(), allerg) > 80]
check2 = [i[0].lower() for i in test2 if i[1] >
0.40 and fuzz.ratio(i[0].lower(), allerg) > 80]
check3 = [i[0].lower() for i in test3 if i[1] >
0.30 and fuzz.ratio(i[0].lower(), allerg) > 80]
if allerg in check1 and allerg in check2 and allerg in check3:
print(word, allerg, strike)
key_score -= strike
break
# step 3: check for match with big8 list
if len(big8) > 0:
for item in big8:
for sen in llist:
sen_list = sen.split()
for word in sen_list:
if fuzz.ratio(word.lower(), item) > 80:
key_score -= strike
print(word, item, strike)
break
                        if word.lower() in nutrition_data:
if item in nutrition_data[word.lower()]:
key_score -= strike
print(word, item, strike)
break
key_score = int(round(key_score))
scores.append([key_score, key])
scores.sort(reverse=True)
return scores, all_allergens
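# Hedged usage sketch (illustrative only; the concrete values are assumptions):
# `df` must carry the columns used in the groupby above
# ('restid', 'restname', 'address', 'sentence'), the three model arguments are
# gensim-style word-vector models exposing similar_by_word(), and
# `nutrition_data` maps menu words to ingredient lists, e.g.:
#   scores, found = RankIt(df, model, model_hs1, model_hs1_neg0,
#                          allergens=['peanut'], nutrition_data={}, big8=['milk'])
#   scores[0]  ->  [92, (restid, restname, address)]   # best-scoring restaurant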
``` |
{
"source": "jparris/enso",
"score": 2
} |
#### File: enso/graphics/__init__.py
```python
import enso.providers
_graphics = enso.providers.getInterface( "graphics" )
from enso.graphics.measurement import pointsToPixels, pixelsToPoints
def getDesktopSize():
width, height = _graphics.getDesktopSize()
width = pixelsToPoints( width )
height = pixelsToPoints( height )
return ( width, height )
``` |
{
"source": "jparrpearson/python-chinese-pinyin-translator",
"score": 3
} |
#### File: jparrpearson/python-chinese-pinyin-translator/translate.py
```python
import codecs
import getopt
import io
import os
import re
import shutil
import sys
import time
def main(argv):
# Defaults
dictionaryFile = "resources/cedict_ts.u8"
inputFile = ""
inputDir = ""
inputString = ""
process = "filename"
tones = False
capitalize = True
backup = True
# Allow for unicode output to a non-unicode console
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding=sys.stdout.encoding, errors="replace")
# Get program arguments
usage = ("Example usage:\n translate.py -h --file # --dir <directory> --string <string>\n"
" --process <filename|text|both> --tones <true|false>\n"
" --capitalize <true|false> --backup <true|false>")
options = ("Options:\n -h, --help - Shows the script usage and help options.\n"
" -f, --file - The input file to translate. One of 'dir', 'file', or 'string' is required.\n"
" -d, --dir - The input directory to translate (translates all files in nested directories). One of 'dir', 'file', or 'string' is required.\n"
" -s, --string - The string to translate (displays in the console). One of 'dir', 'file', or 'string' is required.\n"
" -p, --process - Determines what is processed - 'filename' (default), 'text', or 'both'.\n"
" -t, --tones - Output the pinyin tone numbers. Defaults to 'false'.\n"
" -c, --capitalize - Capitalize the pinyin (otherwise all lower case). Defaults to 'true'.\n"
" -b, --backup - Backup each translated file (e.g. 'filename.ext.BAK'). Defaults to 'true'.\n")
try:
opts, args = getopt.getopt(argv, "?hf:d:s:p:t:c:b:", ["help", "file=", "dir=", "string=", "process=", "tones=", "capitalize=", "backup="])
except getopt.GetoptError as err:
        print(str(err))
print(usage)
sys.exit(2)
for opt, arg in opts:
if opt in ("-?", "-h", "--help"):
print(usage)
print()
print(options)
sys.exit(0)
elif opt in ("-f", "--file"):
inputFile = arg
elif opt in ("-d", "--dir"):
inputDir = arg
elif opt in ("-s", "--string"):
inputString = arg
elif opt in ("-p", "--process"):
if arg not in ("filename", "text", "both"):
print("Invalid process option")
print(usage)
sys.exit(2)
process = arg
elif opt in ("-t", "--tones"):
tones = True if arg.upper() == "TRUE" else False
elif opt in ("-c", "--capitalize"):
capitalize = True if arg.upper() == "TRUE" else False
elif opt in ("-b", "--backup"):
backup = True if arg.upper() == "TRUE" else False
if not inputFile and not inputDir and not inputString:
print("Must provide either a file, directory, or string to translate")
print(usage)
sys.exit(2)
# Parse the dictionary file into a local dictionary
start = time.time()
dict = {}
with codecs.open(dictionaryFile, "r", encoding="utf-8") as file:
        pattern = r"(.*?) (.*?) \[(.*?)\] /(.*?)/"
for line in file:
if not line.startswith('#') and not line.startswith('%'):
match = re.match(pattern, line)
if match:
pinyin = match.group(3)
# Determine tone numbers and capitalization
pinyin = pinyin if tones else re.sub("[1234567890]", "", pinyin)
pinyin = pinyin.lower() if not capitalize else pinyin.capitalize()
# Add keys for traditional and simplified characters
dict[match.group(1)] = pinyin
dict[match.group(2)] = pinyin
print("Loaded " + str(len(dict)) + " dictionary entries (" + getTime(start) + "s)")
# Perform translation actions
if inputFile:
translateFile(inputFile, dict, process, backup)
elif inputDir:
count = translateDir(inputDir, dict, process, backup)
print("Translated " + str(count) + " files")
elif inputString:
outputString = translateLine(inputString, dict)
print("Translated string:\n" + outputString)
def translateFile(inputFile, dict, process, backup):
"""Translates the file (filename, text, or both)."""
print("Translating file " + inputFile + "...")
# Translate the file contents
with codecs.open(inputFile, "r+", encoding="utf-8") as file:
# Backup the file
if backup:
filename = file.name + ".BAK"
if not os.path.isfile(filename):
print("Backing up to file " + filename)
shutil.copy2(file.name, filename)
else:
print("Skipping backup to " + filename + " (file already exists)")
# Translate the file contents (text)
if process in ("text", "both"):
text = ""
for line in file:
text += translateLine(line, dict)
file.seek(0)
file.write(text)
file.truncate()
# Translate the filename
if process in ("filename", "both"):
filename = translateLine(os.path.basename(file.name), dict)
file.close()
print("Changing filename to " + filename)
os.rename(file.name, os.path.dirname(file.name) + os.path.sep + filename)
def translateDir(inputDir, dict, process, backup):
"""Translates all files in the given directory, and all nested directories. Returns the number of files translated."""
print("Translating directory " + inputDir + "...")
# Translate all nested files in the directory
count = 0
for subdir, dirs, files in os.walk(inputDir):
for file in files:
filename = os.path.join(subdir, file)
if not filename.endswith(".BAK"):
translateFile(filename, dict, process, backup)
count += 1
return count
def translateLine(line, dict):
"""Translates and returns a given line of text."""
text = ""
lastChar = ""
lineStart = True
translated = False
for char in line:
# Use the matching value in the dictionary, otherwise output the existing character
try:
value = dict[char]
if not lineStart and not lastChar.isspace():
text += " "
text += value
translated = True
except KeyError:
if not lineStart and translated and (char.isalpha() or char.isdigit()):
text += " "
text += char
translated = False
lastChar = char
lineStart = False
return text
def getTime(start, digits=2):
"""Get and return the time elapsed since start to now (in seconds, to digits decimal places)."""
end = time.time()
f = "{:." + str(digits) + "f}"
return f.format(end-start)
if __name__ == "__main__":
start = time.time()
main(sys.argv[1:])
print("Done (" + getTime(start) +"s)")
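# Example invocations (hypothetical inputs), based on the usage string above:
#   python translate.py --string "你好世界" --tones true
#       should print something like "Ni3 Hao3 Shi4 Jie4" (capitalization is on by default)
#   python translate.py --dir ./music --process filename
#       renames every file under ./music, keeping a .BAK copy of each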
``` |
{
"source": "jparsai/cvejob",
"score": 3
} |
#### File: cvejob/cvejob/config.py
```python
import os
class DefaultConfig(object):
"""Default configuration holder."""
# ecosystem/language to work with
ecosystem = 'python'
# max age of a CVE, in days; older CVEs will be ignored.
# 0 = disable this option and process all CVEs
cve_age = 0
# location of the default NVD JSON feeds
feed_dir = 'nvd-data/'
# path to the default NVD JSON feed
feed_names = None
# range of CVES to process, all other CVEs will be ignored
date_range = None
# ID of a CVE to process, all other CVEs will be ignored
cve_id = None
# package name, requires also cve_id to be set
package_name = None
# location of the cpe2pkg Jar file
cpe2pkg_path = 'cpe2pkg.jar'
# directory where to find files containing package names
pkgfile_dir = 'data/'
# whether or not to use nvd-toolkit
use_nvdtoolkit = False
# directory where to find pretrained classifier for the nvd-toolkit
nvdtoolkit_export_dir = 'export/'
class RuntimeConfig(object):
"""Runtime configuration holder."""
def __init__(self):
"""Constructor."""
self._config = DefaultConfig()
ecosystem = os.environ.get('CVEJOB_ECOSYSTEM')
if ecosystem is not None:
self._config.ecosystem = ecosystem
cve_age = os.environ.get('CVEJOB_CVE_AGE')
if cve_age is not None:
self._config.cve_age = int(cve_age)
feed_dir = os.environ.get('CVEJOB_FEED_DIR')
if feed_dir is not None:
self._config.feed_dir = feed_dir
feed_names = os.environ.get('CVEJOB_FEED_NAMES')
if feed_names is not None:
self._config.feed_names = feed_names.split(',')
date_range = os.environ.get('CVEJOB_DATE_RANGE')
if date_range is not None:
self._config.date_range = date_range
cve_id = os.environ.get('CVEJOB_CVE_ID')
if cve_id is not None:
self._config.cve_id = cve_id
package_name = os.environ.get('CVEJOB_PACKAGE_NAME')
if package_name is not None:
self._config.package_name = package_name
cpe2pkg_path = os.environ.get('CVEJOB_CPE2PKG_PATH')
if cpe2pkg_path is not None:
self._config.cpe2pkg_path = cpe2pkg_path
pkgfile_dir = os.environ.get('CVEJOB_PKGFILE_DIR')
if pkgfile_dir is not None:
self._config.pkgfile_dir = pkgfile_dir
use_nvdtoolkit = os.environ.get('CVEJOB_USE_NVD_TOOLKIT')
if use_nvdtoolkit is not None:
self._config.use_nvdtoolkit = use_nvdtoolkit.lower() in ('true', '1', 'yes')
nvdtoolkit_export_dir = os.environ.get('CVEJOB_NVD_TOOLKIT_EXPORT_DIR')
if nvdtoolkit_export_dir is not None:
self._config.nvdtoolkit_export_dir = nvdtoolkit_export_dir
def __getattr__(self, item):
"""Get attribute."""
return getattr(self._config, item)
Config = RuntimeConfig()
```
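A minimal usage sketch (not part of the repository) for the configuration module above; the environment variable names come from `RuntimeConfig` and the CVE id is a placeholder. Overrides must be exported before `cvejob.config` is imported, because the module-level `Config` singleton reads the environment at import time.
```python
import os

# Hypothetical overrides; anything left unset falls back to DefaultConfig.
os.environ["CVEJOB_ECOSYSTEM"] = "java"
os.environ["CVEJOB_CVE_ID"] = "CVE-2017-0001"  # placeholder id

from cvejob.config import Config

print(Config.ecosystem)  # -> "java"
print(Config.cve_id)     # -> "CVE-2017-0001"
print(Config.cve_age)    # -> 0 (default, env var not set)
```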
#### File: cvejob/identifiers/naive.py
```python
import re
import nltk
from nltk.tokenize import sent_tokenize
from nltk.corpus import stopwords
from collections import OrderedDict
from cvejob.config import Config
from cvejob import utils
from cvejob.cpe2pkg import (
run_cpe2pkg, PackageNameCandidate, build_cpe2pkg_query
)
class NaivePackageNameIdentifier(object):
    """Naive package name identifier.
    All words from the first sentence of a CVE description that start with
    an uppercase letter are considered possible package names (minus stop words).
    """
def __init__(self, doc, ecosystem, pkgfile_path, cpe2pkg_path=Config.cpe2pkg_path):
"""Constructor."""
self.doc = doc
self.ecosystem = ecosystem
self.pkgfile_path = pkgfile_path
self.cpe2pkg_path = cpe2pkg_path
def _get_vendor_product_pairs(self):
"""Get (vendor, product) pairs from the CVE.
:return: a set containing (vendor, product) pairs
"""
result = set()
for cpe in utils.get_cpe(self.doc, cpe_type='application'):
vendor = cpe.get_vendor()[0]
product = cpe.get_product()[0]
result.add((vendor, product))
return result
def _get_candidates_from_description(self):
"""Try to identify possible package names from the description."""
pkg_name_candidates = set()
sentences = sent_tokenize(
utils.get_description_by_lang(self.doc)
)
first_sentence = sentences[0] if sentences else ''
names = self._guess_from_sentence(first_sentence)
pkg_name_candidates.update(set(names))
return pkg_name_candidates
# noinspection PyMethodMayBeStatic
    def _guess_from_sentence(self, sentence):
        """Guess possible package name(s) from a given description.
        Very naive approach: words starting with an uppercase letter
        are considered possible package names (minus stop words).
        Returns a list of possible package names, without duplicates.
        """
stop_words = set()
try:
# Fails when no downloaded stopwords are available.
stop_words.update(stopwords.words('english'))
except LookupError:
# Download stopwords since they are not available.
nltk.download('stopwords')
stop_words.update(stopwords.words('english'))
        # The pattern matches keywords that have a capital letter anywhere,
        # not necessarily as the first character, and also two words joined
        # by a hyphen even when they contain no capital letters.
regexp = re.compile('[A-Za-z0-9-:]*[A-Z][A-Za-z0-9-:]*|[A-Za-z0-9]+[-][A-Za-z0-9]+')
suspects = regexp.findall(sentence)
results = [x.lower() for x in suspects if x.lower() not in stop_words]
# get rid of duplicates, but keep order
results = list(OrderedDict.fromkeys(results))
return results
def identify(self):
"""Identify possible package name candidates."""
vp_pairs = self._get_vendor_product_pairs()
desc_candidates = self._get_candidates_from_description()
results = []
for vp_pair in vp_pairs:
if self.ecosystem == 'java':
# in java, vendor could help us to narrow down the groupId
vendor = [*vp_pair] + list(desc_candidates)
else:
vendor = [self.ecosystem]
product = [vp_pair[1]] + list(desc_candidates)
results.extend(self._run_cpe2pkg(vendor, product))
return results
def _run_cpe2pkg(self, vendor, product):
"""Run cpe2pkg tool.
:param vendor: list[str], a list of vendor strings
:param product: list[str], a list of product strings
:return: list[PackageNameCandidate], a list of package name candidates
"""
query_str = build_cpe2pkg_query(vendor, product)
output = run_cpe2pkg(query_str, self.pkgfile_path, self.cpe2pkg_path)
return [
PackageNameCandidate.from_cpe2pkg_output(x, self.ecosystem) for x in output if x
]
```
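To see what the naive heuristic actually extracts, the sketch below (not part of the repository) applies the same regular expression used in `_guess_from_sentence` to a made-up description sentence.
```python
import re

# Same pattern as in NaivePackageNameIdentifier._guess_from_sentence above.
regexp = re.compile('[A-Za-z0-9-:]*[A-Z][A-Za-z0-9-:]*|[A-Za-z0-9]+[-][A-Za-z0-9]+')

sentence = ("A flaw was found in Apache Struts that allows remote attackers "
            "to execute arbitrary code via the content-type header.")
print(regexp.findall(sentence))
# -> ['A', 'Apache', 'Struts', 'content-type']
# _guess_from_sentence then lowercases these and drops English stop words,
# leaving ['apache', 'struts', 'content-type'] as candidates.
```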
#### File: jparsai/cvejob/run.py
```python
import sys
from decimal import Decimal
import multiprocessing
import nvdlib
from nvdlib.manager import FeedManager
from nvdlib.query_selectors import in_range
from cvejob.filters.input import validate_cve
from cvejob.config import Config
from cvejob.identifiers import get_identifier_cls
from cvejob.cpe2pkg import get_pkgfile_path, PackageNameCandidate
from cvejob.selectors.basic import VersionSelector
from cvejob.outputs.victims import VictimsYamlOutput
from cvejob.versions import NVDVersions
from cvejob.utils import parse_date_range
import logging
# logging configuration
logging.basicConfig(level=logging.DEBUG,
handlers=[nvdlib.get_logging_handler()]) # use nvdlib's handler
logger = logging.getLogger('cvejob')
FEED_NAME_PATTERN = r"nvdcve-" \
r"(?P<version>[\d.]+)-" \
r"(?P<name>(?P<name_string>(([A-Za-z]+)))|(?P<name_year>([\d]+)))" \
r".json"
def _log_results(victims_output):
"""Log results."""
cve_id = victims_output.cve.id_
logger.info(
"[{cve_id}] picked `{winner}` out of `{candidates}`".format(
cve_id=cve_id,
winner=victims_output.winner,
candidates=victims_output.candidates
))
logger.info(
"[{cve_id}] Affected version range: {version_ranges}".format(
cve_id=cve_id,
version_ranges=victims_output.affected_versions
))
logger.info(
"[{cve_id}] Safe version range: {version_ranges}".format(
cve_id=cve_id,
version_ranges=victims_output.safe_versions
))
def _filter_collection(collection, date_range, cherry_pick):
"""Filter Document collection."""
if date_range:
collection_size_before = collection.count()
collection = collection.find(
{'published_date': in_range(*date_range)}
)
logger.debug(("Filtered out {} Documents that do not fall "
"in the given range.").format(
collection_size_before - collection.count()
))
if cherry_pick:
logger.debug("Cherry-picked CVE `{cve_id}`".format(
cve_id=cherry_pick
))
collection = collection.find(
{'cve.id_': cherry_pick}
)
return collection
def run():
"""Run CVEjob."""
feed_dir = Config.feed_dir
feed_names = Config.feed_names
date_range = Config.date_range
cherrypicked_cve_id = Config.cve_id
cherrypicked_year = None
if cherrypicked_cve_id:
cherrypicked_year = cherrypicked_cve_id.split(sep='-')[1]
if int(cherrypicked_year) < 2002:
# all CVEs prior to 2002 are stored in 2002 feed
cherrypicked_year = 2002
if date_range:
date_range = parse_date_range(Config.date_range)
feed_names = range(date_range[0].year, date_range[1].year + 1)
if cherrypicked_cve_id: # optimization check
if int(cherrypicked_year) not in feed_names:
logger.info(
"[{picked_cve_id}] does not belong to the given feed range:"
" {date_range}".format(
picked_cve_id=cherrypicked_cve_id,
date_range=date_range
))
return
# prune the feed names as it is not necessary to iterate over all of them
feed_names = [cherrypicked_year]
if not feed_names:
if cherrypicked_cve_id:
feed_names = [cherrypicked_year]
else:
feed_names = ['modified']
with FeedManager(n_workers=multiprocessing.cpu_count()) as feed_manager:
feeds = feed_manager.fetch_feeds(
feed_names=feed_names, data_dir=feed_dir, update=True
)
collection = feed_manager.collect(feeds)
collection = _filter_collection(collection,
date_range,
cherrypicked_cve_id)
if not collection: # collection is empty
        logger.info("Collection is empty.")
return
logger.debug("Number of CVE Documents in the collection: {}".format(
collection.count()
))
if Config.package_name and Config.cve_id:
# user knows the package name, so we don't have to guess ;)
doc = [x for x in collection][0] # Collection doesn't support indexing
affected, safe = NVDVersions(doc, Config.package_name, Config.ecosystem).run()
victims_output = VictimsYamlOutput(
ecosystem=Config.ecosystem,
cve_doc=doc,
winner=PackageNameCandidate(Config.package_name, Decimal('1.0')),
candidates=[],
affected=affected,
fixedin=safe
)
_log_results(victims_output)
victims_output.write()
sys.exit(0)
for doc in collection:
cve_id = doc.cve.id_
try:
if not validate_cve(doc):
logger.debug(
"[{cve_id}] was filtered out by input checks".format(
cve_id=cve_id
))
continue
pkgfile_path = get_pkgfile_path(Config.pkgfile_dir, Config.ecosystem)
identifier = get_identifier_cls()(doc, Config.ecosystem, pkgfile_path)
candidates = identifier.identify()
if not candidates:
logger.info(
"[{cve_id}] no package name candidates found".format(
cve_id=cve_id
))
continue
selector = VersionSelector(doc, candidates, Config.ecosystem)
winner = selector.pick_winner()
if not winner:
logger.info(
"[{cve_id}] no package name found".format(
cve_id=cve_id
))
continue
affected, safe = NVDVersions(doc, winner.package, Config.ecosystem).run()
victims_output = VictimsYamlOutput(
ecosystem=Config.ecosystem,
cve_doc=doc,
winner=winner,
candidates=candidates,
affected=affected,
fixedin=safe
)
_log_results(victims_output)
victims_output.write()
except Exception as exc:
logger.warning(
"[{cve_id}] Unexpected exception occurred: {exc}".format(
cve_id=cve_id,
exc=exc
), exc_info=True)
if __name__ == '__main__':
run()
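# Hedged example of driving this script through the environment variables read
# by cvejob.config (all values are placeholders):
#   CVEJOB_ECOSYSTEM=python \
#   CVEJOB_CVE_ID=CVE-2019-0001 \
#   CVEJOB_PACKAGE_NAME=example-package \
#   python run.py
# With both CVEJOB_CVE_ID and CVEJOB_PACKAGE_NAME set, the identifier and
# selector steps above are skipped and the victims YAML is written directly.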
```
#### File: cvejob/tests/test_config.py
```python
import os
from cvejob.config import DefaultConfig, RuntimeConfig
def test_default_config_constructor():
"""Basic test for the class DefaultConfig."""
config = DefaultConfig()
assert config is not None
def test_default_config_attributes():
"""Check the attributes existence for a class DefaultConfig."""
config = DefaultConfig()
# basic configuration check
attributes = ("ecosystem", "cve_age", "feed_dir", "feed_names", "date_range", "cve_id",
"package_name", "cpe2pkg_path", "pkgfile_dir", "use_nvdtoolkit",
"nvdtoolkit_export_dir")
for attribute in attributes:
assert hasattr(config, attribute)
def test_default_config_attribute_values_nil():
"""Check the attributes that needs to be set to nil (None)."""
config = DefaultConfig()
    # the following attributes need to be set to nil
    assert config.feed_names is None
    assert config.date_range is None
    assert config.cve_id is None
    assert config.package_name is None
def test_default_config_attribute_values_not_nil():
"""Check the attributes that needs not to be set to nil (None)."""
config = DefaultConfig()
# the following attributes need not be set to nil
assert config.ecosystem is not None
assert config.cve_age is not None
assert config.feed_dir is not None
assert config.cpe2pkg_path is not None
assert config.pkgfile_dir is not None
assert config.use_nvdtoolkit is not None
assert config.nvdtoolkit_export_dir is not None
def test_runtime_config():
"""Basic test for the class RuntimeConfig."""
config = RuntimeConfig()
assert config is not None
def test_runtime_config_attributes():
"""Check the attributes existence for a class RuntimeConfig."""
config = RuntimeConfig()
assert config is not None
assert hasattr(config, "_config")
def unset_environment_variable(name):
"""Reset specified environment variable."""
return os.environ.pop(name, None)
def test_runtime_config_attribute_ecosystem():
"""Check the attributes handling for a class RuntimeConfig."""
old_value = unset_environment_variable('CVEJOB_ECOSYSTEM')
config = RuntimeConfig()
assert config._config.ecosystem == 'python'
os.environ['CVEJOB_ECOSYSTEM'] = 'foobar'
config = RuntimeConfig()
assert config._config.ecosystem == 'foobar'
if old_value is not None:
os.environ['CVEJOB_ECOSYSTEM'] = old_value
def test_runtime_config_attribute_cve_age():
"""Check the attributes handling for a class RuntimeConfig."""
old_value = unset_environment_variable('CVEJOB_CVE_AGE')
config = RuntimeConfig()
assert config._config.cve_age == 0
os.environ['CVEJOB_CVE_AGE'] = '42'
config = RuntimeConfig()
assert config._config.cve_age == 42
os.environ['CVEJOB_CVE_AGE'] = '-42'
config = RuntimeConfig()
assert config._config.cve_age == -42
if old_value is not None:
os.environ['CVEJOB_CVE_AGE'] = old_value
def test_runtime_config_attribute_cvejob_feed_dir():
"""Check the attributes handling for a class RuntimeConfig."""
old_value = unset_environment_variable('CVEJOB_FEED_DIR')
config = RuntimeConfig()
assert config._config.feed_dir == 'nvd-data/'
os.environ['CVEJOB_FEED_DIR'] = 'directory1'
config = RuntimeConfig()
assert config._config.feed_dir == 'directory1'
if old_value is not None:
os.environ['CVEJOB_FEED_DIR'] = old_value
def test_runtime_config_attribute_cvejob_feed_names():
"""Check the attributes handling for a class RuntimeConfig."""
old_value = unset_environment_variable('CVEJOB_FEED_NAMES')
config = RuntimeConfig()
assert config._config.feed_names is None
# TODO: the following test needs to be enabled after the fix in master branch
# os.environ['CVEJOB_FEED_NAMES'] = 'name1'
# config = RuntimeConfig()
# assert config._config.feed_names == ['name1']
# os.environ['CVEJOB_FEED_NAMES'] = 'name1,name2'
# config = RuntimeConfig()
# assert config._config.feed_names == ['name1', 'name2']
if old_value is not None:
os.environ['CVEJOB_FEED_NAMES'] = old_value
def test_runtime_config_attribute_cvejob_date_range():
"""Check the attributes handling for a class RuntimeConfig."""
old_value = unset_environment_variable('CVEJOB_DATE_RANGE')
config = RuntimeConfig()
assert config._config.date_range is None
os.environ['CVEJOB_DATE_RANGE'] = '2017-01-01'
config = RuntimeConfig()
assert config._config.date_range == '2017-01-01'
if old_value is not None:
os.environ['CVEJOB_DATE_RANGE'] = old_value
def test_runtime_config_attribute_cvejob_cve_id():
"""Check the attributes handling for a class RuntimeConfig."""
old_value = unset_environment_variable('CVEJOB_CVE_ID')
config = RuntimeConfig()
assert config._config.cve_id is None
os.environ['CVEJOB_CVE_ID'] = 'CVE1234'
config = RuntimeConfig()
assert config._config.cve_id == 'CVE1234'
if old_value is not None:
os.environ['CVEJOB_CVE_ID'] = old_value
def test_runtime_config_attribute_cvejob_package_name():
"""Check the attributes handling for a class RuntimeConfig."""
old_value = unset_environment_variable('CVEJOB_PACKAGE_NAME')
config = RuntimeConfig()
assert config._config.package_name is None
os.environ['CVEJOB_PACKAGE_NAME'] = 'test_package'
config = RuntimeConfig()
assert config._config.package_name == 'test_package'
if old_value is not None:
os.environ['CVEJOB_PACKAGE_NAME'] = old_value
def test_runtime_config_attribute_cvejob_cpe2pkg_path():
"""Check the attributes handling for a class RuntimeConfig."""
old_value = unset_environment_variable('CVEJOB_CPE2PKG_PATH')
config = RuntimeConfig()
assert config._config.cpe2pkg_path == 'cpe2pkg.jar'
os.environ['CVEJOB_CPE2PKG_PATH'] = 'cpe2pkg10.jar'
config = RuntimeConfig()
assert config._config.cpe2pkg_path == 'cpe2pkg10.jar'
if old_value is not None:
os.environ['CVEJOB_CPE2PKG_PATH'] = old_value
def test_runtime_config_attribute_cvejob_pkgfile_dir():
"""Check the attributes handling for a class RuntimeConfig."""
old_value = unset_environment_variable('CVEJOB_PKGFILE_DIR')
config = RuntimeConfig()
assert config._config.pkgfile_dir == 'data/'
os.environ['CVEJOB_PKGFILE_DIR'] = 'cpe2pkg10.jar'
config = RuntimeConfig()
assert config._config.pkgfile_dir == 'cpe2pkg10.jar'
if old_value is not None:
os.environ['CVEJOB_PKGFILE_DIR'] = old_value
def test_runtime_config_attribute_cvejob_use_nvd_toolkit():
"""Check the attributes handling for a class RuntimeConfig."""
old_value = unset_environment_variable('CVEJOB_USE_NVD_TOOLKIT')
config = RuntimeConfig()
assert not config._config.use_nvdtoolkit
os.environ['CVEJOB_USE_NVD_TOOLKIT'] = 'true'
config = RuntimeConfig()
assert config._config.use_nvdtoolkit
os.environ['CVEJOB_USE_NVD_TOOLKIT'] = '1'
config = RuntimeConfig()
assert config._config.use_nvdtoolkit
os.environ['CVEJOB_USE_NVD_TOOLKIT'] = 'yes'
config = RuntimeConfig()
assert config._config.use_nvdtoolkit
os.environ['CVEJOB_USE_NVD_TOOLKIT'] = 'false'
config = RuntimeConfig()
assert not config._config.use_nvdtoolkit
os.environ['CVEJOB_USE_NVD_TOOLKIT'] = '0'
config = RuntimeConfig()
assert not config._config.use_nvdtoolkit
os.environ['CVEJOB_USE_NVD_TOOLKIT'] = 'no'
config = RuntimeConfig()
assert not config._config.use_nvdtoolkit
if old_value is not None:
os.environ['CVEJOB_USE_NVD_TOOLKIT'] = old_value
def test_runtime_config_attribute_cvejob_nvd_toolkit_export_dir():
"""Check the attributes handling for a class RuntimeConfig."""
old_value = unset_environment_variable('CVEJOB_NVD_TOOLKIT_EXPORT_DIR')
config = RuntimeConfig()
assert config._config.nvdtoolkit_export_dir == 'export/'
os.environ['CVEJOB_NVD_TOOLKIT_EXPORT_DIR'] = 'export2/'
config = RuntimeConfig()
assert config._config.nvdtoolkit_export_dir == 'export2/'
if old_value is not None:
os.environ['CVEJOB_NVD_TOOLKIT_EXPORT_DIR'] = old_value
```
#### File: cvejob/tests/test_version.py
```python
from cvejob.version import BenevolentVersion
def test_version_basic():
"""Test basic behavior."""
assert BenevolentVersion('1') == BenevolentVersion('1')
assert BenevolentVersion('1') != BenevolentVersion('2')
assert BenevolentVersion('1') < BenevolentVersion('2')
assert BenevolentVersion('1') <= BenevolentVersion('2')
assert BenevolentVersion('1') > BenevolentVersion('0')
assert BenevolentVersion('1') >= BenevolentVersion('0')
assert BenevolentVersion(None) != BenevolentVersion('')
assert BenevolentVersion(None) == BenevolentVersion(None)
assert BenevolentVersion('0') != BenevolentVersion('')
assert BenevolentVersion('') == BenevolentVersion('')
assert BenevolentVersion(1) == BenevolentVersion(1)
assert BenevolentVersion('Final.RELEASE') == BenevolentVersion('Final.RELEASE')
def test_version_trailing_zeros():
"""Test with trailing zeros."""
assert BenevolentVersion('1.0.0.0.0') == BenevolentVersion('1.0')
assert BenevolentVersion('1.0.1') != BenevolentVersion('1.0.0')
assert BenevolentVersion('1.1.0') < BenevolentVersion('1.2.0')
assert BenevolentVersion('1.1.0') <= BenevolentVersion('1.2.0')
assert BenevolentVersion('1.2.1.1') > BenevolentVersion('1.2.0')
assert BenevolentVersion('1.2.1.1') >= BenevolentVersion('1.2.1.0')
def test_version_complex():
"""More complex tests."""
assert BenevolentVersion('0.3m') == BenevolentVersion('0.3.0')
assert BenevolentVersion('0.3m1') == BenevolentVersion('0.3')
assert BenevolentVersion('0.3-SNAPSHOT-1') == BenevolentVersion('0.3')
assert BenevolentVersion('1.2.Final') == BenevolentVersion('1.2.0')
assert BenevolentVersion('1.2.Final.RELEASE') == BenevolentVersion('1.2.0')
def test_version_exact():
"""Test exact version."""
assert '1.5.0.RELEASE-1' == BenevolentVersion('1.5.0.RELEASE-1').exact
def test_version_loose():
"""Test loose version."""
assert '1.5' == BenevolentVersion('1.5.0.RELEASE-1').loose
def test_hash():
"""Test hashing."""
s = {
BenevolentVersion('1.0'),
BenevolentVersion('1'),
BenevolentVersion(None)
}
assert len(s) == 2
def test_repr():
"""Basic test for the __repr__ method."""
v = BenevolentVersion('1.0')
assert v.__repr__() == "BenevolentVersion('1.0')"
v = BenevolentVersion('1.2.3')
assert v.__repr__() == "BenevolentVersion('1.2.3')"
```
#### File: tests/versions/test_version_identifier.py
```python
from cvejob.versions import NVDVersions
def test_nvd_versions(python_cve):
"""Test NVDVersions().run()."""
affected, safe = NVDVersions(python_cve, 'numpy', 'python').run()
assert affected
assert len(affected) == 1
assert str(affected[0]) == '<=1.16.0'
assert safe
assert len(safe) == 1
assert str(safe[0]) == '>=1.16.1'
``` |
{
"source": "jparsai/f8a-3scale-connect-api",
"score": 2
} |
#### File: tools/configure-gateway/run.py
```python
from threescale import (Services, StateTracker, Config,
ApplicationPlans, Metrics, Limits, Mappings,
Proxies, Accounts, Applications, logger)
from validators import email, url, validator
import logging
import re
import click
import json
import sys
import yaml
COLORED_OUTPUT = '\033[32m{}\033[39m'
@validator
def is_valid_username(username):
"""Validate username."""
return re.search(r"^[a-zA-Z0-9]+([_-]?[a-zA-Z0-9])*$", username)
@validator
def is_valid_orgname(org):
"""Validate orgnization name."""
return re.search(r"^[a-zA-Z0-9]+([_-]?[a-zA-Z0-9])*$", org)
@click.command()
@click.option('--debug', is_flag=True, help="Enables the debuging mode.")
@click.option('-v', '--verbose', is_flag=True)
@click.option('-o', '--output', type=click.Path(), help="Output file path")
@click.argument('config-file', type=click.Path(exists=True))
def cli(**options):
"""Three Scale Command line tool."""
if options.get('debug'):
logger.setLevel(logging.DEBUG)
elif options.get('verbose'):
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARN)
with open(options.get('config_file')) as config_file:
        config = yaml.safe_load(config_file)
if not config:
click.echo(
"error: not enough information provided in the config file.")
sys.exit(2)
creds = config.get('credentials', {})
admin_token = creds.get('admin-token')
account_headers = creds.get('3scale-account-headers', {})
private_base_url = creds.get('private-base-url')
threescale_domain = creds.get('domain', '3scale.net')
threescale_id = creds.get('threescale-id')
account = config.get('account', {})
username = account.get('username')
user_email = account.get('email')
password = account.get('password')
org = account.get('organization')
endpoints = config.get('endpoints', {})
if not all([creds, admin_token, private_base_url, threescale_id, account_headers]):
click.echo("Error: Missing credentials in config file."
"""
credentials:
admin-token: <3scale_admin_token>
threescale-id: <3scale_id>
private-base-url: <private_base_url>
3scale-account-headers:
x-3scale-account-secret: <account_secret> """)
sys.exit(2)
if not all([account, username, user_email, password, org]):
click.echo("Error: Missing Developer Account information in config file"
"""
account:
username: <username>
email: <email>
password: <password>
organization: <org> """)
sys.exit(2)
if not endpoints:
click.echo("Error: Missing endpoints information in config file"
"""
endpoints:
- pattern: /my-endpoint/test
method: GET
limit:
value: <int_value>
period: <minute|hour|day|week|month|year|eternity> """)
sys.exit(2)
if not is_valid_username(username):
click.echo(
"error: use only letters, numbers, and hyphen(-), underscore(_) in username.")
sys.exit(2)
if not is_valid_orgname(org):
click.echo(
"error: use only letters, numbers, and hyphen(-), underscore(_) in organization.")
sys.exit(2)
if not email(user_email):
click.echo("error: email address is not valid.")
sys.exit(2)
if not url(private_base_url):
click.echo(
"error: private-base-url is not in the format protocol://domain:[port]")
sys.exit(2)
Config._3scale_domain = threescale_domain
Config._3scale_id = threescale_id
    Config._access_token = admin_token
private_base_url = private_base_url.strip('/')
try:
# Create 3scale API service.
service = Services()
service_name = org + '-3scale-service'
service_response = service.create(StateTracker, service_name)
service_id = service_response.get('service', {}).get('id')
# Create 3scale Application Plan.
application_plan = ApplicationPlans()
application_plan_name = org + '-3scale-application-plan'
application_plan_response = application_plan.create(
StateTracker, service_id, application_plan_name)
application_plan_id = application_plan_response.get(
'plan').get('id')
for endpoint in endpoints:
pattern = endpoint.get('pattern')
method = endpoint.get('method')
limit = endpoint.get('limit', {})
limit_value = limit.get('value')
limit_period = limit.get('period')
if not limit:
click.echo(
"please provide the rate limit for the api endpoint.")
StateTracker._rollback()
sys.exit(2)
if not method:
                click.echo(
                    "please define a method [GET|POST|DELETE] for the api endpoint.")
StateTracker._rollback()
sys.exit(2)
if not pattern:
click.echo(
"please provide the api endpoint pattern ex: /api/v1/my-endpoint.")
StateTracker._rollback()
sys.exit(2)
# Create 3scale API Metric.
metrics = Metrics()
metric_name = '-'.join([org] + pattern.strip('/').split('/') +
[method.lower(), 'metric'])
metric_response = metrics.create(
StateTracker, service_id, metric_name)
metric_id = metric_response.get('metric').get('id')
# Create 3scale limit.
limits = Limits()
limits.create(StateTracker, application_plan_id, metric_id,
value=limit_value, period=limit_period)
# Create mappings to the endpoints.
mappings = Mappings()
mappings.create(StateTracker, service_id,
method.upper(), pattern, metric_id, 1)
mappings.create(StateTracker, service_id,
'OPTIONS', pattern, metric_id, 1)
# Update 3scale proxies and proxy policies.
proxies = Proxies()
proxy_update_response = proxies.update(
StateTracker, service_id, private_base_url)
headers = [
{"op": "set", "header": key, "value": value}
for key, value in account_headers.items()
]
proxies.policy_update(StateTracker, headers=headers)
proxies.proxy_promote(StateTracker)
stage_route = proxy_update_response.get(
'proxy', {}).get("endpoint")
prod_route = proxy_update_response.get(
'proxy', {}).get("sandbox_endpoint")
# Create 3scale Developer account.
account = Accounts()
account_response = account.create(
                StateTracker, username, password, user_email, org)
account_id = account_response.get('account').get('id')
# Create 3scale Applicaiton.
application = Applications()
        application_name = org + '-3scale-application'
application_response = application.create(StateTracker, account_id=account_id,
application_plan_id=application_plan_id,
application_name=application_name)
user_key = application_response.get('application').get('user_key')
response = {
'stage_route': stage_route,
'prod_route': prod_route,
'user_key': user_key
}
output_file = options.get('output')
if not output_file:
print('-'*40)
print(COLORED_OUTPUT.format(json.dumps(response, indent=4)))
print('-'*40)
else:
with open(output_file, 'w') as f:
f.write(json.dumps(response, indent=4))
except Exception as exc:
StateTracker._rollback()
raise exc
if __name__ == "__main__":
cli()
```
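For reference, a config file of the shape the CLI above validates can be sketched as follows; every value is a placeholder reconstructed from the checks in `cli()`, not a real credential, and the snippet is not part of the original tool.
```python
import yaml

# Placeholder values; the field names mirror the checks in cli() above.
example_config = """
credentials:
  admin-token: REPLACE_ME
  threescale-id: REPLACE_ME
  private-base-url: https://backend.example.com:443
  3scale-account-headers:
    x-3scale-account-secret: REPLACE_ME
account:
  username: example-user
  email: user@example.com
  password: REPLACE_ME
  organization: example-org
endpoints:
  - pattern: /api/v1/my-endpoint
    method: GET
    limit:
      value: 30
      period: minute
"""

config = yaml.safe_load(example_config)
assert config["endpoints"][0]["limit"]["period"] == "minute"
# Then, hypothetically: python run.py --verbose -o output.json gateway-config.yaml
```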
#### File: configure-gateway/threescale/metrics.py
```python
from .base import ThreeScale
import logging
import requests
import xmltodict
import re
logger = logging.getLogger(__name__)
class Metrics(ThreeScale):
"""ThreeScale Metrics creation and deletion."""
response = None
def __init__(self):
"""Initialize object."""
super().__init__()
def create(self, tracker, service_id, metric_name,
unit='hit',
description=None,
system_name=None,
state_event=None):
"""Create a Metric."""
request_body = {
'access_token': self._access_token,
'service_id': service_id,
'friendly_name': metric_name,
'system_name': ''.join([re.sub('[^A-Za-z0-9]', '_', metric_name), '_system']),
'unit': unit,
'description': description
}
request_body = {k: v for k, v in request_body.items() if v}
_url = self._build_url(
self._endpoints.metric_create.format(service_id=service_id))
_resp = requests.post(_url, data=request_body)
logger.info("[POST] {} with STATUS CODE: {}".format(
_url, _resp.status_code))
if _resp.ok:
self.response = xmltodict.parse(
_resp.content, dict_constructor=dict)
logger.info(
"Successfully Created Metric: {}".format(metric_name))
tracker._save_current_state(self)
return self.response
else:
logger.error("Create Metric FAILED {} with STATUS CODE {}".format(
_url, _resp.status_code))
logger.error("FAILED RESPONSE: {}".format(_resp.content))
tracker._rollback()
def delete(self, service_id=None, metric_id=None):
"""Remove a Metric."""
if metric_id is None and self.response.get('metric', {}).get('id') is None:
raise ValueError(
"Metric ID is required to delete a Metric")
if service_id is None and self.response.get('metric', {}).get('service_id') is None:
raise ValueError(
"Service ID is required to delete an Metric")
metric_id = metric_id or self.response.get(
'metric', {}).get('id')
service_id = service_id or self.response.get(
'metric', {}).get('service_id')
request_body = {'access_token': self._access_token}
_url = self._build_url(
self._endpoints.metric_delete.format(
service_id=service_id, id=metric_id))
_resp = requests.delete(_url, data=request_body)
logger.info("[DELETE] {} with STATUS CODE: {}".format(
_url, _resp.status_code))
if _resp.ok:
logger.info(
"Successfully Deleted Metric ID {}".format(
metric_id))
else:
logger.error("Delete Metric FAILED {} with STATUS CODE {}".format(
_url, _resp.status_code))
logger.error("FAILED RESPONSE: {}".format(_resp.content))
def find(self):
"""Find a Metric."""
raise NotImplementedError("Method find Not Implemented.")
def __repr__(self):
"""Representation of class."""
metric_id = self.response.get('metric', {}).get('id')
return "Class Metric(id={})".format(metric_id)
class Limits(ThreeScale):
"""ThreeScale Limits creation and deletion."""
response = None
def __init__(self):
"""Initialize object."""
super().__init__()
def create(self, tracker, application_plan_id, metric_id, value=30, period='minute'):
"""Create an Limit."""
request_body = {
'access_token': self._access_token,
'period': period,
'value': value
}
request_body = {k: v for k, v in request_body.items() if v}
_url = self._build_url(
self._endpoints.limit_create.format(application_plan_id=application_plan_id,
metric_id=metric_id))
_resp = requests.post(_url, data=request_body)
logger.info("[POST] {} with STATUS CODE: {}".format(
_url, _resp.status_code))
if _resp.ok:
self.response = xmltodict.parse(
_resp.content, dict_constructor=dict)
logger.info("Successfully Created Limit")
tracker._save_current_state(self)
return self.response
else:
logger.error("Create Limit FAILED {} with STATUS CODE {}".format(
_url, _resp.status_code))
logger.error("FAILED RESPONSE: {}".format(_resp.content))
tracker._rollback()
def delete(self, limit_id=None, metric_id=None, application_plan_id=None):
"""Remove an Limit."""
if application_plan_id is None and self.response.get('limit', {}).get('plan_id') is None:
raise ValueError(
"Application plan ID is required to delete a Limit")
if metric_id is None and self.response.get('limit', {}).get('metric_id') is None:
raise ValueError("Metric ID is required to delete a Limit")
if limit_id is None and self.response.get('limit', {}).get('id') is None:
raise ValueError("Limit ID is required to delete a Limit")
application_plan_id = application_plan_id or self.response.get(
'limit', {}).get('plan_id')
metric_id = metric_id or self.response.get(
'limit', {}).get('metric_id')
limit_id = limit_id or self.response.get('limit', {}).get('id')
request_body = {'access_token': self._access_token}
_url = self._build_url(
self._endpoints.limit_delete.format(application_plan_id=application_plan_id,
id=limit_id, metric_id=metric_id))
_resp = requests.delete(_url, data=request_body)
logger.info("[DELETE] {} with STATUS CODE: {}".format(
_url, _resp.status_code))
if _resp.ok:
logger.info(
"Successfully Deleted Limit ID {}".format(
application_plan_id))
else:
logger.error("Delete Limit FAILED {} with STATUS CODE {}".format(
_url, _resp.status_code))
logger.error("FAILED RESPONSE: {}".format(_resp.content))
def find(self):
"""Find an Limit."""
raise NotImplementedError("Method find Not Implemented.")
def __repr__(self):
"""Representation of class."""
limit_id = self.response.get('limit', {}).get('id')
return "Class Limit(id={})".format(limit_id)
```
#### File: configure-gateway/threescale/proxies.py
```python
from .base import ThreeScale
import logging
import requests
import xmltodict
import json
logger = logging.getLogger(__name__)
class Proxies(ThreeScale):
"""ThreeScale Proxies create, update."""
response = None
def __init__(self):
"""Initialize object."""
super().__init__()
self.service_id = None
def update(self,
tracker,
service_id,
api_backend,
credentials_location='query',
auth_app_key='user_key',
endpoint=None,
auth_app_id=None,
auth_user_key=None,
error_auth_failed=None,
error_status_auth_failed=None,
error_headers_auth_failed=None,
error_auth_missing=None,
error_status_auth_missing=None,
error_headers_auth_missing=None,
error_no_match=None,
error_status_no_match=None,
error_headers_no_match=None,
oidc_issuer_endpoint=None,
sandbox_endpoint=None
):
"""Update policy."""
self.service_id = service_id
request_body = {
'access_token': self._access_token,
"api_backend": api_backend,
"credentials_location": credentials_location,
"auth_app_key": auth_app_key,
"endpoint": endpoint,
"auth_app_id": auth_app_id,
"auth_user_key": auth_user_key,
"error_auth_failed": error_auth_failed,
"error_status_auth_failed": error_status_auth_failed,
"error_headers_auth_failed": error_headers_auth_failed,
"error_auth_missing": error_auth_missing,
"error_status_auth_missing": error_status_auth_missing,
"error_headers_auth_missing": error_headers_auth_missing,
"error_no_match": error_no_match,
"error_status_no_match": error_status_no_match,
"error_headers_no_match": error_headers_no_match,
"oidc_issuer_endpoint": oidc_issuer_endpoint,
"sandbox_endpoint": sandbox_endpoint,
}
request_body = {k: v for k, v in request_body.items() if v}
_url = self._build_url(
self._endpoints.proxy_update.format(service_id=service_id))
_resp = requests.patch(_url, data=request_body)
logger.info("[PATCH] {} with STATUS CODE: {}".format(
_url, _resp.status_code))
if _resp.ok:
self.response = xmltodict.parse(
_resp.content, dict_constructor=dict)
logger.info(
"Successfully Updated Proxy: {}".format(api_backend))
return self.response
else:
logger.error("Update Proxy FAILED {} with STATUS CODE {}".format(
_url, _resp.status_code))
logger.error("FAILED RESPONSE: {}".format(_resp.content))
tracker._rollback()
def _get_highest_version(self, service_id=None, environment='sandbox'):
service_id = service_id or self.service_id
params = {
'access_token': self._access_token,
}
_url = self._build_url(
self._endpoints.proxy_config_list.format(service_id=service_id,
environment=environment))
_resp = requests.get(_url, params=params)
logger.info("[GET] {} with STATUS CODE: {}".format(
_url, _resp.status_code))
if _resp.ok:
output = _resp.json()
if output:
higest_version = max([conf.get('proxy_config', {}).get('version', 2)
for conf in output.get('proxy_configs', {})])
logger.info("HIGHEST Version: {}".format(higest_version))
return higest_version
else:
logger.error("Unable to fetch the latest version.")
return 2
def policy_update(self, tracker, headers, service_id=None):
"""Update the Proxy Policy Configuration."""
policies_config = [{
"name": "headers",
"configuration": {
"response": [],
"request":headers},
"version": "builtin",
"enabled": True
}]
service_id = service_id or self.service_id
request_body = {
'access_token': self._access_token,
'service_id': service_id,
'policies_config': json.dumps(policies_config)
}
_url = self._build_url(
self._endpoints.proxy_policy_update.format(service_id=service_id))
_resp = requests.put(_url, data=request_body)
logger.info("[PUT] {} with STATUS CODE: {}".format(
_url, _resp.status_code))
if _resp.ok:
self.response = _resp
logger.info("Successfully Updated Proxy Policy Config")
return self.response
else:
logger.error("Update Proxy Policy Config FAILED {} with STATUS CODE {}".format(
_url, _resp.status_code))
logger.error("FAILED RESPONSE: {}".format(_resp.content))
tracker._rollback()
def proxy_promote(self, tracker,
service_id=None,
environment='sandbox',
to='production'):
"""Promote Proxy to another environment."""
service_id = service_id or self.service_id
version = self._get_highest_version()
request_body = {
'access_token': self._access_token,
'to': to
}
_url = self._build_url(
self._endpoints.proxy_config_promote.format(service_id=service_id,
environment=environment,
version=version))
_resp = requests.post(_url, data=request_body)
logger.info("[POST] {} with STATUS CODE: {}".format(
_url, _resp.status_code))
if _resp.ok:
self.response = _resp
logger.info("Successfully Promoted Proxy to {}".format(to))
return self.response
else:
logger.error("Promote Proxy FAILED {} with STATUS CODE {}".format(
_url, _resp.status_code))
logger.error("FAILED RESPONSE: {}".format(_resp.content))
tracker._rollback()
def find(self):
"""Find the Mapping."""
raise NotImplementedError("Method find Not Implemented.")
def __repr__(self):
"""Representation of class."""
api_backend = self.response.get('proxy', {}).get('api_backend')
return "Class Mappings(id={})".format(api_backend)
```
#### File: configure-gateway/threescale/rollback.py
```python
import logging
logger = logging.getLogger(__name__)
class StateTracker:
"""Process State Tracker."""
__states = list()
@classmethod
def _save_current_state(cls, obj):
cls.__states.append(obj)
logger.info("SAVED State {}".format(obj))
@classmethod
def _pop_previous_state(cls):
if cls.__states:
logger.info("Current State {}".format(cls.__states))
obj = cls.__states.pop()
logger.info("POPPED State {}".format(obj))
return obj
logger.info("Saved states are empty")
@classmethod
def _rollback(cls):
"""Undo Everything."""
        logger.warning("Rolling Back Started")
        while cls.__states:
            obj = cls._pop_previous_state()
            logger.warning("Rolling back {}".format(obj))
            obj.delete()
            logger.warning("[DELETED] {}".format(obj))
logger.info("Rolling Back finished.")
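# Hedged illustration (not part of the original module): any object that
# exposes a delete() method can be tracked; on failure, _rollback() deletes the
# saved objects in reverse creation order (LIFO).
if __name__ == "__main__":
    class _DummyResource:
        """Stand-in resource used only for this illustration."""
        def __init__(self, name):
            self.name = name
        def delete(self):
            logger.warning("deleting %s", self.name)
        def __repr__(self):
            return "_DummyResource({})".format(self.name)
    StateTracker._save_current_state(_DummyResource("service"))
    StateTracker._save_current_state(_DummyResource("metric"))
    StateTracker._rollback()  # deletes "metric" first, then "service"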
``` |
{
"source": "jparsai/fabric8-analytics-auth",
"score": 2
} |
#### File: jparsai/fabric8-analytics-auth/setup.py
```python
from setuptools import setup
def get_requirements():
"""Parse all packages mentioned in the 'requirements.txt' file."""
with open('requirements.txt') as fd:
return fd.read().splitlines()
setup(
name='fabric8a_auth',
version='0.0.1',
description='a pip-installable package example',
license='Apache License 2.0',
packages=['fabric8a_auth'],
author='<NAME>',
author_email='<EMAIL>',
keywords=['fabric8-analytics'],
url='https://github.com/fabric8-analytics/fabric8-analytics-auth',
install_requires=get_requirements(),
)
```
#### File: fabric8-analytics-auth/tests/test_utility_functions.py
```python
from fabric8a_auth.auth import is_authentication_disabled, get_audiences
import os
def test_is_authentication_disabled_negative_test():
"""Test the function is_authentication_disabled()."""
os.environ['DISABLE_AUTHENTICATION'] = ''
assert not is_authentication_disabled()
os.environ['DISABLE_AUTHENTICATION'] = '0'
assert not is_authentication_disabled()
os.environ['DISABLE_AUTHENTICATION'] = 'false'
assert not is_authentication_disabled()
os.environ['DISABLE_AUTHENTICATION'] = 'False'
assert not is_authentication_disabled()
os.environ['DISABLE_AUTHENTICATION'] = 'FALSE'
assert not is_authentication_disabled()
def test_is_authentication_disabled_positive_test():
"""Test the function is_authentication_disabled()."""
os.environ['DISABLE_AUTHENTICATION'] = '1'
assert is_authentication_disabled()
os.environ['DISABLE_AUTHENTICATION'] = 'True'
assert is_authentication_disabled()
os.environ['DISABLE_AUTHENTICATION'] = 'true'
assert is_authentication_disabled()
os.environ['DISABLE_AUTHENTICATION'] = 'TRUE'
assert is_authentication_disabled()
def test_get_audiences():
"""Test the function get_audiences()."""
    os.environ.pop('FABRIC8_ANALYTICS_JWT_AUDIENCE', None)
assert get_audiences() == ['']
os.environ['FABRIC8_ANALYTICS_JWT_AUDIENCE'] = ''
assert get_audiences() == ['']
os.environ['FABRIC8_ANALYTICS_JWT_AUDIENCE'] = 'a'
assert get_audiences() == ['a']
os.environ['FABRIC8_ANALYTICS_JWT_AUDIENCE'] = 'a,b'
assert get_audiences() == ['a', 'b']
os.environ['FABRIC8_ANALYTICS_JWT_AUDIENCE'] = 'a,b,'
assert get_audiences() == ['a', 'b', '']
os.environ['FABRIC8_ANALYTICS_JWT_AUDIENCE'] = 'a,b,c'
assert get_audiences() == ['a', 'b', 'c']
if __name__ == '__main__':
test_is_authentication_disabled_negative_test()
test_is_authentication_disabled_positive_test()
test_get_audiences()
``` |
{
"source": "jparten/altimeter",
"score": 2
} |
#### File: resource/ec2/instance.py
```python
from typing import Type
from botocore.client import BaseClient
from altimeter.aws.resource.resource_spec import ListFromAWSResult
from altimeter.aws.resource.ec2 import EC2ResourceSpec
from altimeter.aws.resource.ec2.image import EC2ImageResourceSpec
from altimeter.aws.resource.ec2.security_group import SecurityGroupResourceSpec
from altimeter.aws.resource.ec2.subnet import SubnetResourceSpec
from altimeter.aws.resource.ec2.vpc import VPCResourceSpec
from altimeter.aws.resource.iam.instance_profile import InstanceProfileResourceSpec
from altimeter.core.graph.field.dict_field import AnonymousDictField, AnonymousEmbeddedDictField
from altimeter.core.graph.field.list_field import AnonymousListField
from altimeter.core.graph.field.resource_link_field import (
ResourceLinkField,
TransientResourceLinkField,
)
from altimeter.core.graph.field.scalar_field import ScalarField
from altimeter.core.graph.field.tags_field import TagsField
from altimeter.core.graph.schema import Schema
class EC2InstanceResourceSpec(EC2ResourceSpec):
"""Resource for EC2Instances"""
type_name = "instance"
schema = Schema(
TransientResourceLinkField("ImageId", EC2ImageResourceSpec),
ScalarField("InstanceType"),
ScalarField("LaunchTime"),
AnonymousDictField("State", ScalarField("Name", "state")),
ScalarField("Platform", optional=True),
ScalarField("PrivateIpAddress", optional=True),
ScalarField("PublicIpAddress", optional=True),
ResourceLinkField("VpcId", VPCResourceSpec, optional=True),
ResourceLinkField("SubnetId", SubnetResourceSpec, optional=True),
AnonymousListField(
"SecurityGroups",
AnonymousEmbeddedDictField(ResourceLinkField("GroupId", SecurityGroupResourceSpec)),
),
AnonymousDictField(
"IamInstanceProfile",
TransientResourceLinkField(
"Arn", InstanceProfileResourceSpec, alti_key="instance_profile", value_is_id=True
),
optional=True,
),
TagsField(),
)
@classmethod
def list_from_aws(
cls: Type["EC2InstanceResourceSpec"], client: BaseClient, account_id: str, region: str
) -> ListFromAWSResult:
"""Return a dict of dicts of the format:
{'instance_1_arn': {instance_1_dict},
'instance_2_arn': {instance_2_dict},
...}
Where the dicts represent results from describe_instances."""
paginator = client.get_paginator("describe_instances")
instances = {}
for resp in paginator.paginate():
for reservation in resp.get("Reservations", []):
for instance in reservation.get("Instances", []):
resource_arn = cls.generate_arn(account_id, region, instance["InstanceId"])
instances[resource_arn] = instance
return ListFromAWSResult(resources=instances)
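# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example of driving list_from_aws with a plain boto3 EC2
# client. The region and account id below are placeholders, and the 'resources'
# attribute is assumed to be the mapping passed to ListFromAWSResult above.
if __name__ == "__main__":
    import boto3

    _region = "us-east-1"
    _account_id = "123456789012"  # placeholder account id
    _client = boto3.Session(region_name=_region).client("ec2")
    _result = EC2InstanceResourceSpec.list_from_aws(_client, _account_id, _region)
    for _arn in _result.resources:
        print(_arn)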
```
#### File: resource/elasticloadbalancing/load_balancer.py
```python
from typing import Type
from botocore.client import BaseClient
from altimeter.aws.resource.resource_spec import ListFromAWSResult
from altimeter.aws.resource.elasticloadbalancing import ElasticLoadBalancingResourceSpec
from altimeter.aws.resource.ec2.security_group import SecurityGroupResourceSpec
from altimeter.aws.resource.ec2.vpc import VPCResourceSpec
from altimeter.aws.resource.ec2.subnet import SubnetResourceSpec
from altimeter.core.graph.field.dict_field import AnonymousDictField, EmbeddedDictField
from altimeter.core.graph.field.list_field import ListField
from altimeter.core.graph.field.resource_link_field import (
EmbeddedResourceLinkField,
ResourceLinkField,
)
from altimeter.core.graph.field.scalar_field import ScalarField
from altimeter.core.graph.schema import Schema
class LoadBalancerResourceSpec(ElasticLoadBalancingResourceSpec):
"""Resource for load balancer"""
type_name = "loadbalancer"
schema = Schema(
ScalarField("DNSName"),
ScalarField("CreatedTime"),
ScalarField("LoadBalancerName"),
ScalarField("Scheme"),
ResourceLinkField("VpcId", VPCResourceSpec, optional=True),
AnonymousDictField("State", ScalarField("Code", alti_key="load_balancer_state")),
ScalarField("Type"),
ListField(
"AvailabilityZones",
EmbeddedDictField(
ScalarField("ZoneName"),
ResourceLinkField("SubnetId", SubnetResourceSpec, optional=True),
ListField(
"LoadBalancerAddresses",
EmbeddedDictField(
ScalarField("IpAddress", optional=True),
ScalarField("AllocationId", optional=True),
),
optional=True,
),
),
),
ListField(
"SecurityGroups", EmbeddedResourceLinkField(SecurityGroupResourceSpec), optional=True
),
ScalarField("IpAddressType"),
)
@classmethod
def list_from_aws(
cls: Type["LoadBalancerResourceSpec"], client: BaseClient, account_id: str, region: str
) -> ListFromAWSResult:
"""Return a dict of dicts of the format:
{'lb_1_arn': {lb_1_dict},
'lb_2_arn': {lb_2_dict},
...}
Where the dicts represent results from describe_load_balancers."""
paginator = client.get_paginator("describe_load_balancers")
load_balancers = {}
for resp in paginator.paginate():
for lb in resp.get("LoadBalancers", []):
resource_arn = lb["LoadBalancerArn"]
load_balancers[resource_arn] = lb
return ListFromAWSResult(resources=load_balancers)
```
#### File: aws/scan/account_scanner.py
```python
from typing import List
from altimeter.core.artifact_io.writer import ArtifactWriter
from altimeter.aws.scan.settings import (
RESOURCE_SPEC_CLASSES,
INFRA_RESOURCE_SPEC_CLASSES,
ORG_RESOURCE_SPEC_CLASSES,
)
from altimeter.aws.settings import GRAPH_NAME, GRAPH_VERSION
from altimeter.aws.scan.base_scanner import BaseScanner, GetSessionType
class AccountScanner(BaseScanner): # pylint: disable=too-few-public-methods
"""An AccountScanner scans a single account using an AccountScanPlan to define scan parameters
and writes the output using an ArtifactWriter.
Args:
account_id: account id to scan
regions: regions to scan
get_session: callable that can get a session in this account_id
artifact_writer: ArtifactWriter for writing out artifacts
        scan_sub_accounts: if set to True and this account is an org master, any subaccounts
            of that org will also be scanned.
graph_name: name of graph
graph_version: version string for graph
max_svc_threads: max number of scan threads to run concurrently.
"""
def __init__(
self,
account_id: str,
regions: List[str],
get_session: GetSessionType,
artifact_writer: ArtifactWriter,
scan_sub_accounts: bool,
max_svc_threads: int,
graph_name: str = GRAPH_NAME,
graph_version: str = GRAPH_VERSION,
) -> None:
resource_spec_classes = RESOURCE_SPEC_CLASSES + INFRA_RESOURCE_SPEC_CLASSES
if scan_sub_accounts:
resource_spec_classes += ORG_RESOURCE_SPEC_CLASSES
super().__init__(
account_id=account_id,
regions=regions,
get_session=get_session,
artifact_writer=artifact_writer,
max_svc_threads=max_svc_threads,
graph_name=graph_name,
graph_version=graph_version,
resource_spec_classes=resource_spec_classes,
)
```
#### File: aws/scan/scan_manifest.py
```python
from dataclasses import dataclass
from typing import Any, Dict, List
@dataclass(frozen=True)
class ScanManifest:
"""A ScanManifest defines the output of a complete scan. It contains pointers to the
per-account scan result artifacts and summaries of what was scanned, errors which occurred,
scan datetime and api call statistics.
Args:
scanned_accounts: Dict of account_ids to account detail dicts for scanned accounts
master_artifact: artifact containing complete graph json
artifacts: list of artifacts, one per account
errors: Dict of account_ids to list of errors encountered during scan
unscanned_accounts: Dict of account_ids to account detail dicts for unscanned accounts
api_call_stats: api call stats for this scan
start_time: epoch timestamp of scan start time
end_time: epoch timestamp of scan end time
"""
scanned_accounts: List[Dict[str, str]]
master_artifact: str
artifacts: List[str]
errors: Dict[str, List[str]]
unscanned_accounts: List[Dict[str, str]]
api_call_stats: Dict[str, Any]
start_time: int
end_time: int
def to_dict(self) -> Dict[str, Any]:
"""Generate a dict representation of this ScanManifest.
Returns:
dict representation of this ScanManifest
"""
return {
"scanned_accounts": self.scanned_accounts,
"master_artifact": self.master_artifact,
"artifacts": self.artifacts,
"errors": self.errors,
"unscanned_accounts": self.unscanned_accounts,
"start_time": self.start_time,
"end_time": self.end_time,
"api_call_stats": self.api_call_stats,
}
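# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example showing how a ScanManifest might be built and
# serialized; every value below is a placeholder.
if __name__ == "__main__":
    manifest = ScanManifest(
        scanned_accounts=[{"account_id": "123456789012"}],
        master_artifact="s3://bucket/master.json",
        artifacts=["s3://bucket/123456789012.json"],
        errors={},
        unscanned_accounts=[],
        api_call_stats={"count": 0},
        start_time=1577836800,
        end_time=1577837100,
    )
    print(manifest.to_dict())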
```
#### File: core/awslambda/__init__.py
```python
import os
from typing import Any, Dict
from altimeter.core.awslambda.exceptions import (
RequiredEnvironmentVariableNotPresentException,
RequiredEventVariableNotPresentException,
)
def get_required_lambda_env_var(key: str) -> str:
"""Get a variable from os.environ.
Args:
key: Key to look up in the os environment.
Returns:
String value for the given key
Raises:
RequiredEnvironmentVariableNotPresentException if key is not present.
"""
value = os.environ.get(key)
if value is None:
raise RequiredEnvironmentVariableNotPresentException(f"Missing required env var {key}")
return value
def get_required_lambda_event_var(event: Dict[str, Any], key: str) -> Any:
"""Get a variable from a lambda event dict.
Args:
event: Lambda event dict
key: Key to look up in the event
Returns:
        Value for the given key
Raises:
RequiredEventVariableNotPresentException if key is not in event
"""
value = event.get(key)
if value is None:
raise RequiredEventVariableNotPresentException(
f"Missing required event var {key} in {event}"
)
return value
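# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example of both helpers; the variable name and event key
# below are placeholders.
if __name__ == "__main__":
    os.environ["NEPTUNE_HOST"] = "neptune.example.com"
    print(get_required_lambda_env_var("NEPTUNE_HOST"))
    print(get_required_lambda_event_var({"graph_names": ["alti"]}, "graph_names"))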
```
#### File: graph/field/base.py
```python
import abc
from typing import Dict, Any, List
from altimeter.core.graph.field.exceptions import (
ParentKeyMissingException,
InvalidParentKeyException,
)
from altimeter.core.graph.link.base import Link
class Field(abc.ABC):
"""Abstract base class for all fields"""
@abc.abstractmethod
def parse(self, data: Any, context: Dict[str, Any]) -> List[Link]:
"""Parse data into a list of Links using this field's definition."""
class SubField(Field):
"""SubFields are fields which must have a non-anonymous parent Field."""
def get_parent_alti_key(self, data: Any, context: Dict[str, Any]) -> str:
"""Get the alti_key of the parent of this SubField.
Args:
data: field data
context: contains auxiliary information which can be passed through the parse process.
Returns:
alti_key of parent of this SubField
"""
parent_alti_key = context.get("parent_alti_key")
if parent_alti_key is None:
raise ParentKeyMissingException(
(
f"Missing parent_alti_key in context for "
f"{self.__class__.__name__} , data: {data}"
)
)
if not isinstance(parent_alti_key, str):
raise InvalidParentKeyException(f"ParentKey {parent_alti_key} is not a str.")
return parent_alti_key
```
#### File: graph/field/list_field.py
```python
from copy import deepcopy
from typing import Dict, Any, List
from altimeter.core.graph.exceptions import (
ListFieldSourceKeyNotFoundException,
ListFieldValueNotAListException,
)
from altimeter.core.graph.field.base import Field, SubField
from altimeter.core.graph.field.scalar_field import SCALAR_TYPES
from altimeter.core.graph.field.util import camel_case_to_snake_case
from altimeter.core.graph.link.base import Link
class ListField(Field):
"""A ListField is a field where the input is a JSON object containing a key (source_key)
where the corresponding value is a list of homogeneous items.
Examples:
A list of strings:
>>> from altimeter.core.graph.field.scalar_field import EmbeddedScalarField
>>> input = {"Animals": ["cow", "pig", "human"]}
>>> field = ListField("Animals", EmbeddedScalarField())
>>> links = field.parse(data=input, context={})
>>> for link in links: print(link.to_dict())
{'pred': 'animals', 'obj': 'cow', 'type': 'simple'}
{'pred': 'animals', 'obj': 'pig', 'type': 'simple'}
{'pred': 'animals', 'obj': 'human', 'type': 'simple'}
A list of dicts:
>>> from altimeter.core.graph.field.dict_field import EmbeddedDictField
>>> from altimeter.core.graph.field.scalar_field import ScalarField
>>> input = {"People": [{"Name": "Bob", "Age": 49}, {"Name": "Sue", "Age": 42}]}
>>> field = ListField("People", EmbeddedDictField(ScalarField("Name"),\
ScalarField("Age")))
>>> links = field.parse(data=input, context={})
>>> for link in links:
... print(link.pred)
... for obj in link.obj:
... print(obj.to_dict())
...
people
{'pred': 'name', 'obj': 'Bob', 'type': 'simple'}
{'pred': 'age', 'obj': 49, 'type': 'simple'}
people
{'pred': 'name', 'obj': 'Sue', 'type': 'simple'}
{'pred': 'age', 'obj': 42, 'type': 'simple'}
Args:
source_key: Name of the key in the input JSON
sub_field: SubField object representing the type that is contained in this list.
alti_key: Optional key name to be used in the graph. By default
this is set to the source key converted to snake case.
optional: Whether this key is optional. Defaults to False.
allow_scalar: Whether this field can sometimes contain a scalar rather
than a list - if this is set to True the scalar will be treated as a
list of one. Defaults to False.
"""
def __init__(
self,
source_key: str,
sub_field: SubField,
alti_key: str = None,
optional: bool = False,
allow_scalar: bool = False,
):
self.source_key = source_key
self.sub_field = sub_field
self.alti_key = alti_key if alti_key else camel_case_to_snake_case(self.source_key)
self.optional = optional
self.allow_scalar = allow_scalar
def parse(self, data: Dict[str, Any], context: Dict[str, Any]) -> List[Link]:
"""Parse this field and return a list of Links.
Args:
data: dictionary of data to parse
context: context dict containing data from higher level parsing code.
Returns:
List of Link objects.
Raises:
ListFieldSourceKeyNotFoundException if self.source_key is not in data.
ListFieldValueNotAListException if the data does not appear to represent a list.
"""
if self.source_key not in data:
if self.optional:
return []
raise ListFieldSourceKeyNotFoundException(
f"Expected key '{self.source_key}' in data, present keys: {', '.join(data.keys())}"
)
sub_datas = data.get(self.source_key, [])
if not isinstance(sub_datas, list):
if self.allow_scalar and isinstance(sub_datas, SCALAR_TYPES):
sub_datas = [sub_datas]
else:
raise ListFieldValueNotAListException(
(
f"Key '{self.source_key}' value had unexpected type, value: {sub_datas} "
f"type: {type(sub_datas)}"
)
)
links: List[Link] = []
updated_context = deepcopy(context)
updated_context.update({"parent_alti_key": self.alti_key})
for sub_data in sub_datas:
sub_links = self.sub_field.parse(sub_data, updated_context)
links += sub_links
return links
class AnonymousListField(Field):
"""An AnonymousListField is a ListField where the source_key of the field is discarded
and not used as a name in the resulting graph. See Examples below for more clarity.
Args:
source_key: Name of the key in the input JSON
field: Field object representing the type that is contained in this list.
optional: Whether this key is optional. Defaults to False.
allow_scalar: Whether this field can sometimes contain a scalar rather than a list - if
this is set to True the scalar will be treated as a list of one. Defaults to False.
Examples:
A DictField containing an AnonymousListField:
>>> from altimeter.core.graph.field.dict_field import DictField
>>> from altimeter.core.graph.field.scalar_field import EmbeddedScalarField
>>> input = {"Biota": {"Animals": ["cow", "pig", "human"],\
"Plants": ["tree", "fern"]}}
>>> field = DictField("Biota", AnonymousListField("Animals", EmbeddedScalarField()))
>>> links = field.parse(data=input, context={})
>>> for link in links: print(link.to_dict())
{'pred': 'biota', 'obj': [{'pred': 'biota', 'obj': 'cow', 'type': 'simple'}, {'pred': 'biota', 'obj': 'pig', 'type': 'simple'}, {'pred': 'biota', 'obj': 'human', 'type': 'simple'}], 'type': 'multi'}
"""
def __init__(
self, source_key: str, field: Field, optional: bool = False, allow_scalar: bool = False
):
self.source_key = source_key
self.field = field
self.optional = optional
self.allow_scalar = allow_scalar
def parse(self, data: Dict[str, Any], context: Dict[str, Any]) -> List[Link]:
"""Parse this field and return a list of Links.
Args:
data: dictionary of data to parse
context: context dict containing data from higher level parsing code.
Returns:
List of Link objects.
Raises:
ListFieldSourceKeyNotFoundException if self.source_key is not in data.
ListFieldValueNotAListException if the data does not appear to represent a list.
"""
if self.source_key not in data:
if self.optional:
return []
raise ListFieldSourceKeyNotFoundException(
f"Expected key '{self.source_key}' in data, present keys: {', '.join(data.keys())}"
)
sub_datas = data.get(self.source_key, [])
if not isinstance(sub_datas, list):
if self.allow_scalar and isinstance(sub_datas, SCALAR_TYPES):
sub_datas = [sub_datas]
else:
raise ListFieldValueNotAListException(
(
f"Key '{self.source_key}' value had unexpected type, value: {sub_datas} "
f"type: {type(sub_datas)}"
)
)
links: List[Link] = []
for sub_data in sub_datas:
sub_links = self.field.parse(sub_data, context)
links += sub_links
return links
```
#### File: graph/field/resource_link_field.py
```python
from typing import Dict, Any, List, Type, Union
from altimeter.core.graph.field.exceptions import (
ResourceLinkFieldSourceKeyNotFoundException,
ResourceLinkFieldValueNotAStringException,
)
from altimeter.core.graph.field.base import Field, SubField
from altimeter.core.graph.link.links import ResourceLinkLink, TransientResourceLinkLink
from altimeter.core.graph.link.base import Link
from altimeter.core.resource.resource_spec import ResourceSpec
class ResourceLinkField(Field):
"""A ResourceLinkField represents a field containing ids of other top level resources in the
graph. For example, an EC2 instance has a ResourceLinkField with source_key 'VpcId' pointing to
a VPC.
Examples:
A link to a TestResourceSpec resource::
>>> from altimeter.core.resource.resource_spec import ResourceSpec
>>> class TestResourceSpec(ResourceSpec): type_name="thing"
>>> input = {"ThingId": "123"}
>>> field = ResourceLinkField("ThingId", TestResourceSpec)
>>> links = field.parse(data=input, context={})
>>> print([link.to_dict() for link in links])
[{'pred': 'thing', 'obj': 'thing:123', 'type': 'resource_link'}]
A link to a TestResourceSpec resource using value_is_id::
>>> from altimeter.core.resource.resource_spec import ResourceSpec
>>> class TestResourceSpec(ResourceSpec): type_name="thing"
>>> input = {"ThingId": "thing:123"}
>>> field = ResourceLinkField("ThingId", TestResourceSpec, value_is_id=True)
>>> links = field.parse(data=input, context={})
>>> print([link.to_dict() for link in links])
[{'pred': 'thing', 'obj': 'thing:123', 'type': 'resource_link'}]
Args:
source_key: Name of the key in the input JSON
resource_spec_class: The name of the ResourceSpec class or the ResourceSpec class which this
link represents.
alti_key: Optional key name to be used in the graph. By default this is set to the
resource_spec_class' type_name attribute.
optional: Whether this key is optional. Defaults to False.
value_is_id: Whether the value for this key contains the entire resource id. For AWS
resources set this to True if the value is a complete arn.
"""
def __init__(
self,
source_key: str,
resource_spec_class: Union[Type[ResourceSpec], str],
alti_key: str = None,
optional: bool = False,
value_is_id: bool = False,
):
self.source_key = source_key
self._resource_spec_class = resource_spec_class
self.alti_key = alti_key
self.optional = optional
self.value_is_id = value_is_id
def parse(self, data: Dict[str, Any], context: Dict[str, Any]) -> List[Link]:
"""Parse this field and return a list of Links.
Args:
data: data to parse
context: contains data from higher level parsing code.
Returns:
List of Link objects. At most one link will be returned.
"""
if isinstance(self._resource_spec_class, str):
resource_spec_class: Type[ResourceSpec] = ResourceSpec.get_by_class_name(
self._resource_spec_class
)
else:
resource_spec_class = self._resource_spec_class
if not self.alti_key:
self.alti_key = resource_spec_class.type_name
short_resource_id = data.get(self.source_key)
if not short_resource_id:
if self.optional:
return []
raise ResourceLinkFieldSourceKeyNotFoundException(
f"Expected key '{self.source_key}' with non-empty/zero value in {data}"
)
if not isinstance(short_resource_id, str):
raise ResourceLinkFieldValueNotAStringException(
(
f"ResourceLinkField for {self.source_key} expected a string but got a "
f"{type(short_resource_id)} : {short_resource_id}"
)
)
if self.value_is_id:
resource_id = short_resource_id
else:
resource_id = resource_spec_class.generate_id(short_resource_id, context)
return [ResourceLinkLink(pred=self.alti_key, obj=resource_id)]
class EmbeddedResourceLinkField(SubField):
"""An EmbeddedResourceLinkField is a ResourceLinkField where the input is the resource id
only, not a key/value where the value is a resource id.
Examples:
A link to a TestResourceSpec resource::
>>> from altimeter.core.graph.field.list_field import ListField
>>> from altimeter.core.resource.resource_spec import ResourceSpec
>>> class TestResourceSpec(ResourceSpec): type_name="thing"
>>> input = {"Thing": ["123", "456"]}
>>> field = ListField("Thing", EmbeddedResourceLinkField(TestResourceSpec))
>>> links = field.parse(data=input, context={})
>>> print([link.to_dict() for link in links])
[{'pred': 'thing', 'obj': 'thing:123', 'type': 'resource_link'}, {'pred': 'thing', 'obj': 'thing:456', 'type': 'resource_link'}]
Args:
resource_spec_class: The name of the ResourceSpec class or the ResourceSpec class which
this link represents.
optional: Whether this key is optional. Defaults to False.
value_is_id: Whether the value for this key contains the entire resource id. For AWS
resources set this to True if the value is a complete arn.
"""
def __init__(
self,
resource_spec_class: Union[Type[ResourceSpec], str],
alti_key: str = None,
optional: bool = False,
value_is_id: bool = False,
):
self._resource_spec_class = resource_spec_class
self.alti_key = alti_key
self.optional = optional
self.value_is_id = value_is_id
def parse(self, data: str, context: Dict[str, Any]) -> List[Link]:
"""Parse this field and return a list of Links.
Args:
data: data to parse
context: contains data from higher level parsing code.
Returns:
List of Link objects. At most one link will be returned.
"""
if isinstance(self._resource_spec_class, str):
resource_spec_class: Type[ResourceSpec] = ResourceSpec.get_by_class_name(
self._resource_spec_class
)
else:
resource_spec_class = self._resource_spec_class
if not self.alti_key:
self.alti_key = resource_spec_class.type_name
short_resource_id = data
if self.value_is_id:
resource_id = short_resource_id
else:
resource_id = resource_spec_class.generate_id(short_resource_id, context)
return [ResourceLinkLink(pred=self.alti_key, obj=resource_id)]
class TransientResourceLinkField(Field):
"""Transient Resource Link Fields represent field containing ids of other top level resources in
the graph which may not exist. For example, a Lambda can refer to a VPC however VPCs can be
deleted from lambdas.
Args:
source_key (str): Name of the key in the input JSON
resource_spec_class (str|Type[ResourceSpec]): The name of the ResourceSpec class or the
ResourceSpec class which this link represents.
alti_key (str): Optional key name to be used in the graph. By default
this is set to the resource_spec_class' type_name attribute.
optional (bool): Whether this key is optional. Defaults to False.
value_is_id(bool): Whether the value for this key contains the entire resource id.
For AWS resources set this to True if the value is a complete arn.
Examples:
A link to a TestResourceSpec resource::
>>> from altimeter.core.resource.resource_spec import ResourceSpec
>>> class TestResourceSpec(ResourceSpec): type_name="thing"
>>> input = {"ThingId": "123"}
>>> field = TransientResourceLinkField("ThingId", TestResourceSpec)
>>> links = field.parse(data=input, context={})
>>> print([link.to_dict() for link in links])
[{'pred': 'thing', 'obj': 'thing:123', 'type': 'transient_resource_link'}]
"""
def __init__(
self,
source_key: str,
resource_spec_class: Union[Type[ResourceSpec], str],
alti_key: str = None,
optional: bool = False,
value_is_id: bool = False,
):
self.source_key = source_key
self._resource_spec_class = resource_spec_class
self.alti_key = alti_key
self.optional = optional
self.value_is_id = value_is_id
def parse(self, data: Dict[str, Any], context: Dict[str, Any]) -> List[Link]:
"""Parse this field and return a list of Links.
Args:
data: data to parse
context: contains data from higher level parsing code.
Returns:
List of Link objects. At most one link will be returned.
"""
if isinstance(self._resource_spec_class, str):
resource_spec_class: Type[ResourceSpec] = ResourceSpec.get_by_class_name(
self._resource_spec_class
)
else:
resource_spec_class = self._resource_spec_class
if not self.alti_key:
self.alti_key = resource_spec_class.type_name
short_resource_id = data.get(self.source_key)
if not short_resource_id:
if self.optional:
return []
raise ResourceLinkFieldSourceKeyNotFoundException(
f"Expected key '{self.source_key}' with non-empty/zero value in {data}"
)
if self.value_is_id:
resource_id = short_resource_id
else:
resource_id = resource_spec_class.generate_id(short_resource_id, context)
return [TransientResourceLinkLink(pred=self.alti_key, obj=resource_id)]
```
#### File: core/graph/schema.py
```python
from typing import Any, Dict, List
from altimeter.core.graph.field.base import Field
from altimeter.core.graph.link.base import Link
class Schema:
"""A Schema consists of a list of Fields which define how to parse an arbitrary dictionary
into a list of :class:`altimeter.core.graph.links.Link`.
    The parse method performs the translation to :class:`altimeter.core.graph.links.Link`.
Args:
fields: fields for this Schema.
"""
def __init__(self, *fields: Field) -> None:
self.fields = fields
def parse(self, data: Dict[str, Any], context: Dict[str, Any]) -> List[Link]:
"""Parse this schema into a list of Links
Args:
data: raw data to parse
context: contains auxiliary information which can be passed through the parse process.
Returns:
A list of :class:`altimeter.core.graph.links.Link` .
"""
links: List[Any] = []
for field in self.fields:
links += field.parse(data, context)
return links
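# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example mirroring the field doctests elsewhere in the
# package: two ScalarFields parsed into simple links.
if __name__ == "__main__":
    from altimeter.core.graph.field.scalar_field import ScalarField

    schema = Schema(ScalarField("Name"), ScalarField("Age"))
    for link in schema.parse({"Name": "Bob", "Age": 49}, context={}):
        print(link.to_dict())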
```
#### File: core/neptune/results.py
```python
from collections import Counter
import csv
import io
from typing import Any, Dict, List, Type, Union
class QueryResultSet:
"""Represents the results of a SPARQL query.
Args:
fields: List of field names
values: list of value dicts as returned from neptune's query api.
"""
def __init__(self, fields: List[str], values: List[Dict[str, Any]]):
self.fields = fields
self.values = values
self.length = len(self.values)
@classmethod
def from_sparql_endpoint_json(
cls: Type["QueryResultSet"], resp: Dict[str, Any]
) -> "QueryResultSet":
"""Build a QueryResultSet object from the returned data
        of a sparql endpoint json query (has top level fields 'head' and
        'results').
Args:
resp: response dict from neptune's query api
Returns:
QueryResultSet object
"""
fields = resp.get("head", {}).get("vars", [])
values = resp.get("results", {}).get("bindings", [])
return cls(fields, values)
def to_list(self) -> List[Dict[str, Any]]:
"""Create a list of dicts representing these results, each dict
is an individual result row.
Returns:
List of dicts representing this QueryResultSet.
"""
result_list = []
for value in self.values:
result_list.append({k: v["value"] for k, v in value.items()})
return result_list
def to_csv(self) -> str:
"""Create a CSV representation of this QueryResultSet.
Returns:
csv as a str
"""
with io.StringIO() as csv_buf:
writer = csv.DictWriter(csv_buf, fieldnames=self.fields, lineterminator="\n")
writer.writeheader()
for result in self.values:
row = {key: value["value"] for key, value in result.items()}
writer.writerow(row)
csv_buf.seek(0)
return csv_buf.read()
def get_stats(self, field_keys: List[str]) -> Counter:
"""Return a Counter representing statistics about this result set keyed by a user
specified list of field keys (e.g. account_id and account_name)
Args:
field_keys: list of field names to use as stat keys
Returns:
Counter containing result stats.
"""
stats: Counter = Counter()
results = self.to_list()
for result in results:
stat_key_parts = []
for field_key in field_keys:
stat_key_parts.append(result[field_key])
stat_key = "/".join(stat_key_parts)
stats[stat_key] += 1
return stats
@classmethod
def from_dict(cls: Type["QueryResultSet"], data: Dict[str, Any]) -> "QueryResultSet":
fields = data.get("fields")
if fields is None:
raise ValueError(f"{cls.__name__} missing key 'fields': {data}")
values = data.get("values")
if values is None:
raise ValueError(f"{cls.__name__} missing key 'values': {data}")
return cls(fields=fields, values=values)
class QueryResult:
"""Represents the results of a SPARQL query and includes the
graph uris from which results were pulled.
Args:
graph_uris_load_times: Dict with keys which are the graph uris which were used in this
query and values which are the load end times for the graph.
query_result_set: QueryResultSet containing results
"""
def __init__(self, graph_uris_load_times: Dict[str, int], query_result_set: QueryResultSet):
self.graph_uris_load_times = graph_uris_load_times
self.query_result_set = query_result_set
def get_length(self) -> int:
"""Get the length of this result.
Returns:
int length
"""
return self.query_result_set.length
def to_dict(self) -> Dict[str, Union[List[Any], Dict[str, int]]]:
"""Generate a dict representing this QueryResult
Returns:
dict representation of this QueryResult
"""
return {
"graph-uris-load-times": self.graph_uris_load_times,
"results": self.query_result_set.to_list(),
}
def to_list(self) -> List[Dict[str, Any]]:
"""Generate a list representing this QueryResult
Returns:
List of dicts representing this QueryResult
"""
return self.query_result_set.to_list()
def to_csv(self) -> str:
"""Create a CSV representation of this QueryResult.
Returns:
csv as a str
"""
return self.query_result_set.to_csv()
def get_stats(self, field_keys: List[str]) -> Counter:
"""Return a Counter representing statistics about this result set keyed by a user
specified list of field keys (e.g. account_id and account_name)
Args:
field_keys: list of field names to use as stat keys
Returns:
Counter containing result stats.
"""
return self.query_result_set.get_stats(field_keys)
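# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example building a QueryResultSet from a fake SPARQL
# endpoint response and rendering it as CSV and as per-account stats.
if __name__ == "__main__":
    fake_resp = {
        "head": {"vars": ["account_id", "vpc"]},
        "results": {
            "bindings": [
                {"account_id": {"value": "123456789012"}, "vpc": {"value": "vpc-1"}},
                {"account_id": {"value": "123456789012"}, "vpc": {"value": "vpc-2"}},
            ]
        },
    }
    result_set = QueryResultSet.from_sparql_endpoint_json(fake_resp)
    print(result_set.to_csv())
    print(result_set.get_stats(["account_id"]))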
```
#### File: altimeter/bin/graphpruner.py
```python
from datetime import datetime
from altimeter.core.log import LogEvent, Logger
from altimeter.core.awslambda import get_required_lambda_env_var
from altimeter.core.neptune.client import AltimeterNeptuneClient, NeptuneEndpoint, META_GRAPH_NAME
def lambda_handler(event, context):
host = get_required_lambda_env_var("NEPTUNE_HOST")
port = get_required_lambda_env_var("NEPTUNE_PORT")
region = get_required_lambda_env_var("NEPTUNE_REGION")
max_age_min = get_required_lambda_env_var("MAX_AGE_MIN")
graph_name = get_required_lambda_env_var("GRAPH_NAME")
try:
max_age_min = int(max_age_min)
except ValueError as ve:
raise Exception(f"env var MAX_AGE_MIN must be an int: {ve}")
now = int(datetime.now().timestamp())
oldest_acceptable_graph_epoch = now - max_age_min * 60
endpoint = NeptuneEndpoint(host=host, port=port, region=region)
client = AltimeterNeptuneClient(max_age_min=max_age_min, neptune_endpoint=endpoint)
logger = Logger()
uncleared = []
# first prune metadata - if clears below are partial we want to make sure no clients
# consider this a valid graph still.
logger.info(event=LogEvent.PruneNeptuneMetadataGraphStart)
client.clear_old_graph_metadata(name=graph_name, max_age_min=max_age_min)
logger.info(event=LogEvent.PruneNeptuneMetadataGraphEnd)
# now clear actual graphs
with logger.bind(neptune_endpoint=endpoint):
logger.info(event=LogEvent.PruneNeptuneGraphsStart)
for graph_metadata in client.get_graph_metadatas(name=graph_name):
assert graph_metadata.name == graph_name
graph_epoch = graph_metadata.end_time
with logger.bind(graph_uri=graph_metadata.uri, graph_epoch=graph_epoch):
if graph_epoch < oldest_acceptable_graph_epoch:
logger.info(event=LogEvent.PruneNeptuneGraphStart)
try:
client.clear_graph(graph_uri=graph_metadata.uri)
logger.info(event=LogEvent.PruneNeptuneGraphEnd)
except Exception as ex:
logger.error(
event=LogEvent.PruneNeptuneGraphError,
msg=f"Error pruning graph {graph_metadata.uri}: {ex}",
)
uncleared.append(graph_metadata.uri)
continue
else:
logger.info(event=LogEvent.PruneNeptuneGraphSkip)
logger.info(event=LogEvent.PruneNeptuneGraphsEnd)
if uncleared:
msg = f"Errors were found pruning {uncleared}."
logger.error(event=LogEvent.PruneNeptuneGraphsError, msg=msg)
raise Exception(msg)
```
#### File: altimeter/bin/runquery.py
```python
import hashlib
import json
import sys
import time
import boto3
from altimeter.core.awslambda import get_required_lambda_env_var, get_required_lambda_event_var
from altimeter.core.neptune.client import AltimeterNeptuneClient, NeptuneEndpoint
def lambda_handler(event, context):
graph_names_list = get_required_lambda_event_var(event, "graph_names")
if not isinstance(graph_names_list, list):
raise ValueError(f"Value for graph_names should be a list. Is {type(graph_names_list)}")
graph_names = set(graph_names_list)
query = get_required_lambda_event_var(event, "query")
if not isinstance(query, str):
raise ValueError(f"Value for query should be a str. Is {type(query)}")
max_age_min = get_required_lambda_event_var(event, "max_age_min")
if not isinstance(max_age_min, int):
raise ValueError(f"Value for max_age_min should be an int. Is {type(max_age_min)}")
host = get_required_lambda_env_var("NEPTUNE_HOST")
port = get_required_lambda_env_var("NEPTUNE_PORT")
region = get_required_lambda_env_var("NEPTUNE_REGION")
results_bucket = get_required_lambda_env_var("RESULTS_BUCKET")
endpoint = NeptuneEndpoint(host=host, port=port, region=region)
client = AltimeterNeptuneClient(max_age_min=max_age_min, neptune_endpoint=endpoint)
query_result = client.run_query(graph_names=graph_names, query=query)
csv_results = query_result.to_csv()
query_hash = hashlib.sha256(query.encode()).hexdigest()
now_str = str(int(time.time()))
results_key = "/".join(("-".join(graph_names), query_hash, f"{now_str}.csv"))
s3_client = boto3.Session().client("s3")
s3_client.put_object(Bucket=results_bucket, Key=results_key, Body=csv_results)
return {
"results_bucket": results_bucket,
"results_key": results_key,
"num_results": query_result.get_length(),
}
def get_runquery_lambda_name():
runquery_lambda_name_prefix = "ITCloudGraph-RunQuery-"
lambda_client = boto3.client("lambda")
paginator = lambda_client.get_paginator("list_functions")
for resp in paginator.paginate():
for func in resp["Functions"]:
if func["FunctionName"].startswith(runquery_lambda_name_prefix):
return func["FunctionName"]
raise ValueError(
(
f"Unable to find a runquery lambda with name starting with "
f"{runquery_lambda_name_prefix}"
)
)
def main(argv=None):
import argparse
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument("query_file", type=str)
parser.add_argument("--graph_names", type=str, default=["alti"], nargs="+")
parser.add_argument("--max_age_min", type=int, default=1440)
args_ns = parser.parse_args(argv)
with open(args_ns.query_file, "r") as query_fp:
query = query_fp.read()
runquery_lambda_name = get_runquery_lambda_name()
payload = {
"graph_names": args_ns.graph_names,
"max_age_min": args_ns.max_age_min,
"query": query,
}
payload_bytes = json.dumps(payload).encode("utf-8")
lambda_client = boto3.client("lambda")
invoke_lambda_resp = lambda_client.invoke(
FunctionName=runquery_lambda_name, Payload=payload_bytes
)
lambda_resp_bytes = invoke_lambda_resp["Payload"].read()
lambda_resp_str = lambda_resp_bytes.decode("utf-8")
lambda_resp = json.loads(lambda_resp_str)
if "errorMessage" in lambda_resp:
print("Error running query:")
print(lambda_resp["errorMessage"])
sys.exit(1)
results_bucket = lambda_resp["results_bucket"]
results_key = lambda_resp["results_key"]
s3_client = boto3.client("s3")
s3_resp = s3_client.get_object(Bucket=results_bucket, Key=results_key)
results_bytes = s3_resp["Body"].read()
results_str = results_bytes.decode("utf-8")
print(results_str)
if __name__ == "__main__":
sys.exit(main())
```
#### File: altimeter/bin/scan_resource.py
```python
import json
import sys
from typing import Type
import boto3
from altimeter.aws.resource.resource_spec import AWSResourceSpec
from altimeter.core.json_encoder import json_encoder
from altimeter.aws.scan.aws_accessor import AWSAccessor
from altimeter.aws.scan.settings import RESOURCE_SPEC_CLASSES
def main(argv=None):
import argparse
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument(
"resource_spec_class",
type=str,
help="Name of class in altimeter.aws.scan.settings.RESOURCE_SPEC_CLASSES to scan",
)
parser.add_argument("region", type=str, help="AWS region name to scan")
args_ns = parser.parse_args(argv)
resource_spec_class_name = args_ns.resource_spec_class
region = args_ns.region
resource_spec_class: Type[AWSResourceSpec] = None
for cls in RESOURCE_SPEC_CLASSES:
if cls.__name__ == resource_spec_class_name:
resource_spec_class = cls
if resource_spec_class is None:
print(
(
f"Unable to find a class named {resource_spec_class_name} in "
f"altimeter.aws.scan.settings.RESOURCE_SPEC_CLASSES: {RESOURCE_SPEC_CLASSES}."
)
)
sys.exit(1)
session = boto3.Session(region_name=region)
sts_client = session.client("sts")
account_id = sts_client.get_caller_identity()["Account"]
aws_accessor = AWSAccessor(session=session, account_id=account_id, region_name=region)
resource_scan_result = resource_spec_class.scan(aws_accessor)
resource_scan_result_dict = resource_scan_result.to_dict()
resource_scan_result_json = json.dumps(
resource_scan_result_dict, indent=2, default=json_encoder
)
print(resource_scan_result_json)
if __name__ == "__main__":
sys.exit(main())
```
#### File: resource/awslambda/test_function.py
```python
from unittest import TestCase
import boto3
from moto import mock_ec2, mock_lambda
from altimeter.aws.resource.awslambda.function import LambdaFunctionResourceSpec
from altimeter.aws.scan.aws_accessor import AWSAccessor
class TestLambdaFunctionResourceSpec(TestCase):
@mock_lambda
@mock_ec2
def test_scan(self):
account_id = "123456789012"
region_name = "us-east-1"
session = boto3.Session()
lambda_client = session.client("lambda", region_name=region_name)
lambda_client.create_function(
FunctionName="func_name",
Runtime="python3.7",
Role="testrole",
Handler="testhandler",
Description="testdescr",
Timeout=90,
MemorySize=128,
Code={"ZipFile": b"1234"},
Publish=False,
VpcConfig={"SubnetIds": ["subnet-123"], "SecurityGroupIds": ["sg-123"]},
DeadLetterConfig={"TargetArn": "test_dl_config"},
Environment={"Variables": {"TEST_VAR": "test_val"}},
KMSKeyArn="test_kms_arn",
TracingConfig={"Mode": "Active"},
Tags={"tagkey1": "tagval1", "tagkey2": "tagval2"},
Layers=["test_layer1"],
)
scan_accessor = AWSAccessor(session=session, account_id=account_id, region_name=region_name)
scan_result = LambdaFunctionResourceSpec.scan(scan_accessor=scan_accessor)
scan_result_dict = scan_result.to_dict()
        self.maxDiff = None
expected_scan_result_dict = {
"resources": [
{
"type": "aws:lambda:function",
"links": [
{"pred": "function_name", "obj": "func_name", "type": "simple"},
{
"pred": "runtime",
"obj": "python3.7",
"type": "simple",
},
{
"pred": "vpc",
"obj": f"arn:aws:ec2:{region_name}:{account_id}:vpc/vpc-123abc",
"type": "transient_resource_link",
},
{
"pred": "account",
"obj": f"arn:aws::::account/{account_id}",
"type": "resource_link",
},
{
"pred": "region",
"obj": f"arn:aws:::{account_id}:region/{region_name}",
"type": "resource_link",
},
],
}
],
"stats": {
"count": 1,
account_id: {
"count": 1,
region_name: {
"count": 1,
"lambda": {"count": 1, "ListFunctions": {"count": 1}},
},
},
},
"errors": [],
}
self.assertDictEqual(scan_result_dict, expected_scan_result_dict)
```
#### File: resource/ec2/test_ec2_route_table.py
```python
import unittest
from altimeter.core.resource.resource import Resource
from altimeter.aws.resource.ec2.route_table import EC2RouteTableResourceSpec
class TestRouteTableSchema(unittest.TestCase):
def test_schema_parse(self):
resource_arn = "arn:aws:ec2:us-east-2:111122223333:route-table/rtb-099c7b032f2bbddda"
aws_resource_dict = {
"Associations": [
{
"Main": False,
"RouteTableAssociationId": "rtbassoc-069d59127bf10a728",
"RouteTableId": "rtb-099c7b032f2bbddda",
"SubnetId": "subnet-00f9fe55b9d7ca4fb",
},
{
"Main": False,
"RouteTableAssociationId": "rtbassoc-07bfd170c4ece33c8",
"RouteTableId": "rtb-099c7b032f2bbddda",
"SubnetId": "subnet-0b98092b454c882cf",
},
],
"PropagatingVgws": [],
"RouteTableId": "rtb-099c7b032f2bbddda",
"Routes": [
{
"DestinationCidrBlock": "172.31.0.0/16",
"GatewayId": "local",
"Origin": "CreateRouteTable",
"State": "active",
},
{
"DestinationCidrBlock": "0.0.0.0/0",
"GatewayId": "igw-092e5ec1685fd0c0b",
"Origin": "CreateRoute",
"State": "active",
},
{
"DestinationPrefixListId": "pl-68a54001",
"GatewayId": "vpce-0678bce2b63b8ad0f",
"Origin": "CreateRoute",
"State": "active",
},
],
"VpcId": "vpc-03c33051f57d21ff0",
"OwnerId": "210554966933",
}
links = EC2RouteTableResourceSpec.schema.parse(
data=aws_resource_dict, context={"account_id": "111122223333", "region": "us-west-2"}
)
resource = Resource(
resource_id=resource_arn, type_name=EC2RouteTableResourceSpec.type_name, links=links
)
alti_resource_dict = resource.to_dict()
expected_alti_resource_dict = {
"type": "route-table",
"links": [
{"pred": "route_table_id", "obj": "rtb-099c7b032f2bbddda", "type": "simple"},
{
"pred": "vpc",
"obj": "arn:aws:ec2:us-west-2:111122223333:vpc/vpc-03c33051f57d21ff0",
"type": "resource_link",
},
{"pred": "owner_id", "obj": "210554966933", "type": "simple"},
{
"pred": "route",
"obj": [
{
"pred": "destination_cidr_block",
"obj": "172.31.0.0/16",
"type": "simple",
},
{"pred": "gateway_id", "obj": "local", "type": "simple"},
{"pred": "origin", "obj": "CreateRouteTable", "type": "simple"},
{"pred": "state", "obj": "active", "type": "simple"},
],
"type": "multi",
},
{
"pred": "route",
"obj": [
{"pred": "destination_cidr_block", "obj": "0.0.0.0/0", "type": "simple"},
{"pred": "gateway_id", "obj": "igw-092e5ec1685fd0c0b", "type": "simple"},
{"pred": "origin", "obj": "CreateRoute", "type": "simple"},
{"pred": "state", "obj": "active", "type": "simple"},
],
"type": "multi",
},
{
"pred": "route",
"obj": [
{
"pred": "destination_prefix_list_id",
"obj": "pl-68a54001",
"type": "simple",
},
{"pred": "gateway_id", "obj": "vpce-0678bce2b63b8ad0f", "type": "simple"},
{"pred": "origin", "obj": "CreateRoute", "type": "simple"},
{"pred": "state", "obj": "active", "type": "simple"},
],
"type": "multi",
},
{
"pred": "association",
"obj": [
{"pred": "main", "obj": False, "type": "simple"},
{
"pred": "route_table_association_id",
"obj": "rtbassoc-069d59127bf10a728",
"type": "simple",
},
{
"pred": "route_table_id",
"obj": "rtb-099c7b032f2bbddda",
"type": "simple",
},
{"pred": "subnet_id", "obj": "subnet-00f9fe55b9d7ca4fb", "type": "simple"},
],
"type": "multi",
},
{
"pred": "association",
"obj": [
{"pred": "main", "obj": False, "type": "simple"},
{
"pred": "route_table_association_id",
"obj": "rtbassoc-07bfd170c4ece33c8",
"type": "simple",
},
{
"pred": "route_table_id",
"obj": "rtb-099c7b032f2bbddda",
"type": "simple",
},
{"pred": "subnet_id", "obj": "subnet-0b98092b454c882cf", "type": "simple"},
],
"type": "multi",
},
],
}
self.assertDictEqual(alti_resource_dict, expected_alti_resource_dict)
```
#### File: core/neptune/test_sparql.py
```python
from unittest import TestCase
from altimeter.core.neptune.sparql import finalize_query, InvalidQueryException
class TestFinalizeQuerySingleGraph(TestCase):
def test_empty_query(self):
query = ""
graph_uris = ["http://graph/1"]
with self.assertRaises(InvalidQueryException):
finalize_query(query=query, graph_uris=graph_uris)
def test_one_line_query(self):
query = "select ?s ?p ?o where {?s ?p ?o}"
graph_uris = ["http://graph/1"]
expected_finalized_query = "select ?s ?p ?o FROM <http://graph/1> where {?s ?p ?o}"
finalized_query = finalize_query(query=query, graph_uris=graph_uris)
self.assertEqual(finalized_query, expected_finalized_query)
def test_one_line_query_with_trailing_comment(self):
query = "select ?s ?p ?o where {?s ?p ?o} # hi"
graph_uris = ["http://graph/1"]
expected_finalized_query = "select ?s ?p ?o FROM <http://graph/1> where {?s ?p ?o} # hi"
finalized_query = finalize_query(query=query, graph_uris=graph_uris)
self.assertEqual(finalized_query, expected_finalized_query)
def test_one_line_query_with_limit(self):
query = "select ?s ?p ?o where {?s ?p ?o} limit 100"
graph_uris = ["http://graph/1"]
expected_finalized_query = "select ?s ?p ?o FROM <http://graph/1> where {?s ?p ?o} limit 100"
finalized_query = finalize_query(query=query, graph_uris=graph_uris)
self.assertEqual(finalized_query, expected_finalized_query)
def test_one_line_query_with_trailing(self):
query = "select ?s ?p ?o where {?s ?p ?o} # ignore me please select where {"
graph_uris = ["http://graph/1"]
expected_finalized_query = "select ?s ?p ?o FROM <http://graph/1> where {?s ?p ?o} # ignore me please select where {"
finalized_query = finalize_query(query=query, graph_uris=graph_uris)
self.assertEqual(finalized_query, expected_finalized_query)
def test_one_line_query_missing_where(self):
query = "select ?s ?p ?o {?s ?p ?o}"
graph_uris = ["http://graph/1"]
with self.assertRaises(InvalidQueryException):
finalize_query(query=query, graph_uris=graph_uris)
def test_one_line_query_missing_where_due_to_comment(self):
query = "select ?s ?p ?o # where {?s ?p ?o}"
graph_uris = ["http://graph/1"]
with self.assertRaises(InvalidQueryException):
finalize_query(query=query, graph_uris=graph_uris)
def test_one_line_query_missing_where_due_to_leading_comment(self):
query = "#select ?s ?p ?o where {?s ?p ?o}"
graph_uris = ["http://graph/1"]
with self.assertRaises(InvalidQueryException):
finalize_query(query=query, graph_uris=graph_uris)
def test_one_line_query_full_rdf_type_syntax(self):
query = "select ?s ?p ?o where { ?s ?p ?o; <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <my:type> }"
graph_uris = ["http://graph/1"]
expected_finalized_query = "select ?s ?p ?o FROM <http://graph/1> where { ?s ?p ?o; <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <my:type> }"
finalized_query = finalize_query(query=query, graph_uris=graph_uris)
self.assertEqual(finalized_query, expected_finalized_query)
def test_multi_line_query(self):
query = "select ?s ?p ?o\nwhere {?s ?p ?o}"
graph_uris = ["http://graph/1"]
expected_finalized_query = "select ?s ?p ?o\nFROM <http://graph/1> where {?s ?p ?o}"
finalized_query = finalize_query(query=query, graph_uris=graph_uris)
self.assertEqual(finalized_query, expected_finalized_query)
def test_multi_line_query_with_trailing_comments(self):
query = "select ?s ?p ?o # this is the select\nwhere {?s ?p ?o} # this is the where clause"
graph_uris = ["http://graph/1"]
expected_finalized_query = "select ?s ?p ?o # this is the select\nFROM <http://graph/1> where {?s ?p ?o} # this is the where clause"
finalized_query = finalize_query(query=query, graph_uris=graph_uris)
self.assertEqual(finalized_query, expected_finalized_query)
def test_multi_line_query_missing_where(self):
query = "select ?s ?p ?o\n{?s ?p ?o}"
graph_uris = ["http://graph/1"]
with self.assertRaises(InvalidQueryException):
finalize_query(query=query, graph_uris=graph_uris)
def test_multi_line_query_with_comments(self):
query = "# select ?s ?p ?o where { ?s ?p ?o }\n# that was an old version\nselect ?s ?p ?o # where { not this where\n# or this where {\nwhere # this where\n{ ?s # sub\n?p # pred\n?o\nobj\n} # bye\n# test\n# select ?s ?p ?o where { ?s ?p ?o } # limit 100\n limit 1000 #real limit"
graph_uris = ["http://graph/1", "http://graph/2"]
expected_finalized_query = "# select ?s ?p ?o where { ?s ?p ?o }\n# that was an old version\nselect ?s ?p ?o # where { not this where\n# or this where {\nFROM <http://graph/1> FROM <http://graph/2> where # this where\n{ ?s # sub\n?p # pred\n?o\nobj\n} # bye\n# test\n# select ?s ?p ?o where { ?s ?p ?o } # limit 100\n limit 1000 #real limit"
finalized_query = finalize_query(query=query, graph_uris=graph_uris)
self.assertEqual(finalized_query, expected_finalized_query)
```
#### File: altimeter/core/test_multilevel_counter.py
```python
from unittest import TestCase
from altimeter.core.multilevel_counter import MultilevelCounter
class TestMultiLevelCounter(TestCase):
def test_increment(self):
ml_counter = MultilevelCounter()
ml_counter.increment("foo", "boo", "goo")
expected_data = {"count": 1, "foo": {"count": 1, "boo": {"count": 1, "goo": {"count": 1}}}}
self.assertDictEqual(expected_data, ml_counter.to_dict())
def test_merge_updates_self(self):
ml_counter_self = MultilevelCounter()
ml_counter_self.increment("foo", "boo", "goo")
ml_counter_other = MultilevelCounter()
ml_counter_other.increment("boo", "goo", "moo")
ml_counter_self.merge(ml_counter_other)
expected_data = {
"count": 2,
"foo": {"count": 1, "boo": {"count": 1, "goo": {"count": 1}}},
"boo": {"count": 1, "goo": {"count": 1, "moo": {"count": 1}}},
}
self.assertDictEqual(expected_data, ml_counter_self.to_dict())
def test_merge_does_not_update_other(self):
ml_counter_self = MultilevelCounter()
ml_counter_self.increment("foo", "boo", "goo")
ml_counter_other = MultilevelCounter()
ml_counter_other.increment("boo", "goo", "moo")
ml_counter_self.merge(ml_counter_other)
expected_data = {"count": 1, "boo": {"count": 1, "goo": {"count": 1, "moo": {"count": 1}}}}
self.assertDictEqual(expected_data, ml_counter_other.to_dict())
def test_from_dict(self):
data = {
"count": 2,
"foo": {"count": 1, "boo": {"count": 1, "goo": {"count": 1}}},
"boo": {"count": 1, "goo": {"count": 1, "moo": {"count": 1}}},
}
ml_counter = MultilevelCounter.from_dict(data)
self.assertDictEqual(ml_counter.to_dict(), data)
``` |
{
"source": "jpartogi/django-job-board",
"score": 2
} |
#### File: job_board/templatetags/tag_list.py
```python
from django.template import Library, Node, Variable, VariableDoesNotExist
from django.core.urlresolvers import reverse
from job_board.views import job_list_by_tag
register = Library()
def do_populate_tags(parser,token):
"""
    Render a tag, with its link, for use in a tag list.
    The token is the tag.
Arguments:
- `parser`:
- `token`:
"""
bits = token.split_contents()
    print(bits)
return PopulateTagsNode(bits[1])
class PopulateTagsNode(Node):
def __init__(self,tag):
self.tag_tag = Variable(tag)
def render(self,context):
try:
_tag = self.tag_tag.resolve(context)
_font_size = _tag.font_size + 10
_font_weight = min(900,(300 + (_tag.font_size*100)))
_url = reverse(job_list_by_tag, kwargs = {'tag_name' : _tag.name } )
return "<span style='font-size:%spx;font-weight:%s'><a href='%s'>%s</a></span>" % (_font_size,_font_weight,_url,_tag.name)
except VariableDoesNotExist:
return ''
register.tag('populate_tag', do_populate_tags)
```
#### File: django-job-board/job_board/views.py
```python
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.template.loader import select_template
from django.contrib.formtools.preview import FormPreview
from django.views.generic import list_detail
from commons.search import get_query
from job_board.models import *
from job_board.forms import *
from job_board.signals import view_job
queryset = Job.objects.filter_date()
template_object_name = 'job'
paginate_by = 10
job_list_template = (
"job_board/list.html",
"job_board/job_list.html",
)
def job_list(request):
template = select_template(job_list_template) # returns Template object
template_name = template.name
return list_detail.object_list(request, queryset,
paginate_by = paginate_by,
template_name = template_name,
template_object_name = template_object_name)
def job_search(request):
query_string = ''
if ('q' in request.GET) and request.GET['q'].strip():
query_string = request.GET['q']
query = get_query(query_string, ['title', 'location', 'description', 'company_name', 'website'])
queryset = Job.objects.filter_date().filter(query).order_by('-posted')
template = select_template(job_list_template) # returns Template object
template_name = template.name
return list_detail.object_list(request, queryset,
paginate_by = paginate_by,
template_name = template_name,
template_object_name = template_object_name)
def job_detail(request, slug=None, object_id=None):
job_detail_template = (
"job_board/view.html",
"job_board/detail.html",
"job_board/job_detail.html",
)
template = select_template(job_detail_template)
template_name = template.name
job = Job.objects.get(pk=object_id)
view_job.send(sender=job_detail, job=job)
return list_detail.object_detail(request, queryset,
object_id = object_id,
slug = slug,
template_name = template_name,
template_object_name = template_object_name)
class JobFormPreview(FormPreview):
preview_template = 'job_board/preview.html'
form_template = 'job_board/form.html'
def done(self, request, cleaned_data):
form = JobForm(request.POST)
job = form.save()
message = """Your job posting has been saved successfully. Thank you very much.
"""
request.notifications.create(message, 'success')
params = {'slug': job.slug, 'object_id': job.id}
return HttpResponseRedirect(reverse('job-detail', kwargs=params))
``` |
{
"source": "jparyani/mediagoblin",
"score": 2
} |
#### File: plugins/sandstorm/views.py
```python
from mediagoblin import mg_globals, messages
from mediagoblin.auth.tools import register_user, create_basic_user
from mediagoblin.db.models import User, Privilege
from mediagoblin.decorators import allow_registration, auth_enabled
from mediagoblin.tools.translate import pass_to_ugettext as _
from mediagoblin.tools.response import redirect, render_to_response
from mediagoblin.plugins.sandstorm.models import SandstormUser
from random import getrandbits
import urllib
@auth_enabled
def login(request):
login_failed = False
username = request.headers.get('X-Sandstorm-Username', None)
user_id = request.headers.get('X-Sandstorm-User-Id', None)
permissions = request.headers.get('X-Sandstorm-Permissions', None)
    if username is not None:
username = urllib.unquote(username)
    if permissions is not None:
permissions = urllib.unquote(permissions)
default_privileges = None
if username and user_id:
suser = SandstormUser.query.filter_by(sandstorm_user_id=user_id).first()
if not suser:
if not mg_globals.app.auth:
messages.add_message(
request,
messages.WARNING,
_('Sorry, authentication is disabled on this '
'instance.'))
return redirect(request, 'index')
while User.query.filter_by(username=username).count() > 0:
username += '2'
user = User()
user.username = username
user.email = ''
user.pw_hash = unicode(getrandbits(192))
default_privileges = [
Privilege.query.filter(Privilege.privilege_name==u'commenter').first(),
Privilege.query.filter(Privilege.privilege_name==u'reporter').first(),
Privilege.query.filter(Privilege.privilege_name==u'active').first()]
else:
user = suser.user
if 'admin' in permissions.split(','):
default_privileges = [
Privilege.query.filter(Privilege.privilege_name==u'commenter').first(),
Privilege.query.filter(Privilege.privilege_name==u'reporter').first(),
Privilege.query.filter(Privilege.privilege_name==u'active').first(),
Privilege.query.filter(Privilege.privilege_name==u'admin').first(),
Privilege.query.filter(Privilege.privilege_name==u'moderator').first(),
Privilege.query.filter(Privilege.privilege_name==u'uploader').first()]
if default_privileges:
user.all_privileges += default_privileges
user.save()
if not suser:
suser = SandstormUser()
suser.user_id = user.id
suser.sandstorm_user_id = user_id
suser.save()
request.session['user_id'] = unicode(user.id)
request.session.save()
if request.form.get('next'):
return redirect(request, location=request.form['next'])
else:
return redirect(request, "index")
@allow_registration
@auth_enabled
def register(request):
return redirect(
request,
'mediagoblin.plugins.sandstorm.login')
``` |
{
"source": "JParzival/mediafier",
"score": 4
} |
#### File: mediafier/image/cropping.py
```python
import cv2
from ..utils.utils import intdetector, str2bool
from ..image.size import addBorders
def crop(img, x, y, w, h, fill=False):
"""
This function retrieves the image sent by the user cropped with the coordinates that the user inputs.
Args:
img (:obj: array, mandatory):
Image to crop
x (:obj: int, mandatory):
            X coordinate of the crop's top-left corner (the point closest to the origin)
y (:obj: int, mandatory):
            Y coordinate of the crop's top-left corner (the point closest to the origin)
w (:obj: int, mandatory):
Width of the crop in pixels
h (:obj: int, mandatory):
Height of the crop in pixels
fill (:obj: bool, optional):
            Whether the original image dimensions should be maintained by filling the borders
Returns:
:obj: array:
The resulting object is the image, in the same format as inputted, but with the transformation applied.
Raises:
        ValueError: Raised if any of the coordinate or size values is negative.
        ArgumentTypeError: Raised if any of the coordinate or size values is not an int or fill is not a bool.
"""
for n in [x, y, w, h]:
intdetector(n)
if n < 0:
raise ValueError("All values must be positive")
str2bool(fill)
if fill == False:
return img[y:y+h, x:x+w]
else:
        org_h, org_w = img.shape[0], img.shape[1]  # shape is (height, width[, channels])
img = img[y:y+h, x:x+w]
return addBorders(img, y, org_h-(y+h), x, org_w-(x+w), 'constant', 'black')
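# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example of crop(); "photo.jpg" is a placeholder path and the
# helper below is only defined, never called, by this module.
def _crop_usage_example():
    img = cv2.imread("photo.jpg")
    patch = crop(img, x=50, y=30, w=200, h=100)              # plain 200x100 crop
    padded = crop(img, x=50, y=30, w=200, h=100, fill=True)  # original size, filled borders
    return patch, padded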
def slice(img, rows=2, cols=2):
"""
    This function retrieves the image sent by the user sliced into several images, depending on the rows and cols that the user inputs.
Args:
img (:obj: array, mandatory):
Image to slice.
        rows (:obj: int, optional):
Number of rows that will slice the image.
Default is 2.
        cols (:obj: int, optional):
Number of columns that will slice the image.
Default is 2.
Returns:
:obj: array[array]:
            The resulting object is an array with the images, ordered from the top-left one to the
            bottom-right one, row by row.
Raises:
        ValueError: Raised if rows or cols is not a positive value over zero.
        ArgumentTypeError: Raised if rows or cols is not an int.
"""
for n in [rows, cols]:
intdetector(n)
if n <= 0:
raise ValueError("All values must be positive and over zero")
if rows == 1 and cols == 1:
return [img]
height, width = img.shape[0], img.shape[1]
height_chunk = height/rows
width_chunk = width/cols
sliced = []
w, h = 0, 0
while h < height:
while w < width:
sliced.append(crop(img, int(w), int(h), int(width_chunk), int(height_chunk), False))
w += width_chunk
w = 0
h += height_chunk
return sliced
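# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example of slice(); "photo.jpg" is a placeholder path and the
# helper below is only defined, never called, by this module.
def _slice_usage_example():
    img = cv2.imread("photo.jpg")
    tiles = slice(img, rows=2, cols=3)  # 6 tiles, ordered row by row from the top-left
    return tiles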
```
#### File: mediafier/image/draw.py
```python
import cv2
from ..utils.utils import intdetector, str2bool, stringdetector
def drawBBox(img, x, y, w, h, color='black', thickness=2):
"""
    This function retrieves the image with the bounding box that the user inputs drawn on it.
Args:
img (:obj: array, mandatory):
            Image to draw the bounding box on.
x (:obj: int, mandatory):
            X coordinate of the top-left corner of the bounding box (uppermost and leftmost point)
y (:obj: int, mandatory):
            Y coordinate of the top-left corner of the bounding box (uppermost and leftmost point)
w (:obj: int, mandatory):
Width of the bounding box
h (:obj: int, mandatory):
Height of the bounding box
color (:obj: str, optional):
Color that the bounding box will have.
Options are:
- black
- white
- red
- blue
Defaults to black
thickness (:obj: int, optional):
Thickness that the bounding box will have.
Defaults to 2.
Returns:
:obj: array:
The resulting object is the image, in the same format as inputted, but with the transformation applied.
Raises:
ValueError: Raised if any of the values is not inputted properly.
ArgumentTypeError: Raised if any of the values does not have the proper type.
"""
def _checks(x, y, w, h, color, thickness):
stringdetector(color)
color = color.lower()
if color not in ['black', 'white', 'red', 'blue']:
raise ValueError("Supported colors are 'black', 'white', 'red' and 'blue'")
intdetector(x)
intdetector(y)
intdetector(w)
intdetector(h)
if x < 0 or y < 0 or w < 0 or h < 0:
raise ValueError("Values must be positive!")
intdetector(thickness)
if thickness <= 0:
raise ValueError("Thickness must be over zero!")
return color
def _colorChoice(color):
if color == 'black':
return (0, 0, 0)
elif color == 'white':
return (255, 255, 255)
elif color == 'red':
return (0, 0, 255)
elif color == 'blue':
return (255, 0, 0)
color = _checks(x, y, w, h, color, thickness)
color = _colorChoice(color)
return cv2.rectangle(img, (x, y), (x+w, y+h), color, thickness)
```
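A minimal sketch of `drawBBox` under the same assumptions (import path and image file are hypothetical):
```python
import cv2
from mediafier.image.draw import drawBBox  # assumed import path

img = cv2.imread("example.jpg")  # hypothetical input image

# Red box, 4 px thick, top-left corner at (40, 60), 120 px wide and 80 px tall
boxed = drawBBox(img, x=40, y=60, w=120, h=80, color='red', thickness=4)
cv2.imwrite("boxed.png", boxed)
```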
#### File: mediafier/image/modifications.py
```python
import cv2
from ..utils.utils import intdetector, stringdetector, str2bool
from .size import resize
def blur(img, method='default', value='medium'):
"""
This function retrieves an image with the amount of blur that the user inputs.
Args:
img (:obj: array, mandatory):
Image to modify.
method (:obj: str, optional):
Method that will be applied to make the transformation.
Possible values are:
- default
- gaussian
Defaults to 'default'
value (:obj: str, optional):
Amount of blurriness that will be applied to the image.
Possible values are:
- extreme
- high
- medium
- low
Defaults to 'medium'
Returns:
:obj: array:
The resulting object is the image, in the same format as inputted, but with the transformation applied.
Raises:
ValueError: Raised if any of the values is not inputted properly.
ArgumentTypeError: Raised if any of the values does not have the proper type.
"""
def _checks(img, method, value):
stringdetector(method)
method = method.lower()
if method not in ['default', 'gaussian']:
raise ValueError("Method parameter must be 'default' or 'gaussian'")
stringdetector(value)
value = value.lower()
if value not in ['extreme', 'high', 'medium', 'low']:
raise ValueError("Value must be 'extreme', 'high', 'medium' or 'low'")
return method, value
def _valuechoice(method, value):
if method == 'default':
if value == 'extreme':
return (75,75)
elif value == 'high':
return (50, 50)
elif value == 'medium':
return (35, 35)
else:
return (10, 10)
else: # In gaussian, return odd values
if value == 'extreme':
return (149, 149)
elif value == 'high':
return (99, 99)
elif value == 'medium':
return (49, 49)
else:
return (25, 25)
method, value = _checks(img, method, value)
if method == 'default':
return cv2.blur(img, _valuechoice(method, value))
else:
return cv2.GaussianBlur(img, _valuechoice(method, value), 0, 0)
def pixelate(img, value='medium'):
"""
This function retrieves an image pixelated.
Args:
img (:obj: array, mandatory):
Image to modify.
value (:obj: str, optional):
Amount of pixelation that will be applied to the image.
Possible values are:
- extreme
- high
- medium
- low
Defaults to 'medium'
Returns:
:obj: array:
The resulting object is the image, in the same format as inputted, but with the transformation applied.
Raises:
ValueError: Raised if any of the values is not inputted properly.
ArgumentTypeError: Raised if any of the values does not have the proper type.
"""
def _valuechoice(value, w, h):
if value == 'extreme':
return (int(w/30), int(h/30))
elif value == 'high':
return (int(w/20), int(h/20))
elif value == 'medium':
return (int(w/15), int(h/15))
else:
return (int(w/10), int(h/10))
def _checks(value):
stringdetector(value)
value = value.lower()
if value not in ['extreme', 'high', 'medium', 'low']:
raise ValueError("Value must be 'extreme', 'high', 'medium' or 'low'")
return value
value = _checks(value)
    w, h = img.shape[0], img.shape[1]
value = _valuechoice(value, w, h)
tmp_img = resize(img, size=value, interpolation='linear')
return resize(tmp_img, size=(w, h), interpolation='nearest')
```
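A quick sketch of `blur` and `pixelate` (again with a hypothetical input file and the assumed package import path):
```python
import cv2
from mediafier.image.modifications import blur, pixelate  # assumed import path

img = cv2.imread("example.jpg")  # hypothetical input image

soft = blur(img, method='default', value='low')      # box blur with a (10, 10) kernel
smooth = blur(img, method='gaussian', value='high')  # Gaussian blur with a (99, 99) kernel
blocky = pixelate(img, value='extreme')              # downscale, then nearest-neighbour upscale
cv2.imwrite("pixelated.png", blocky)
```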
#### File: mediafier/tests/test_video_common.py
```python
import cv2
import os
from mediafier.video.common import extractFrames, modifyFps, crop
SRC_IMG_DIR = os.path.join('test_media', 'video_src_test')
SAVE_IMG_DIR = os.path.join('test_media', 'video_result_test', 'common')
if not os.path.exists(SAVE_IMG_DIR):
os.makedirs(SAVE_IMG_DIR)
def test_video_common_extractFrames():
"""
    Unlike the other tests, this one is not written in the usual parametrised fashion, because doing so would be much more work for no benefit.
"""
video = os.path.join(SRC_IMG_DIR, 'test2.mp4')
"""Extract in memory one of every 20"""
frames = extractFrames(video, every=20)
i = 0
savepath = os.path.join(SAVE_IMG_DIR, 'test1')
if not os.path.exists(savepath):
os.makedirs(savepath)
for frame in frames:
cv2.imwrite(os.path.join(savepath, f'frame_{i}.png'), frame)
i += 1
"""Extract in disk in png"""
savepath = os.path.join(SAVE_IMG_DIR, 'test2')
if not os.path.exists(savepath):
os.makedirs(savepath)
ok = extractFrames(video, save='disk', savePath=savepath)
"""Extract in disk in jpg"""
savepath = os.path.join(SAVE_IMG_DIR, 'test3')
if not os.path.exists(savepath):
os.makedirs(savepath)
ok = extractFrames(video, save='disk', savePath=savepath, format='jpg')
"""Extract in disk in jpg every 30"""
savepath = os.path.join(SAVE_IMG_DIR, 'test4')
if not os.path.exists(savepath):
os.makedirs(savepath)
ok = extractFrames(video, save='disk', savePath=savepath, format='jpg', every=30)
"""Extract in disk every 30 resizing the jpg"""
savepath = os.path.join(SAVE_IMG_DIR, 'test5')
if not os.path.exists(savepath):
os.makedirs(savepath)
ok = extractFrames(video, save='disk', savePath=savepath, format='jpg', every=30, resizeImg=True, newWidth=200, newHeight=200)
"""Failure example"""
#video = os.path.join(SRC_IMG_DIR, 'test.mp4')
#extractFrames(video, 'nowhere')
#extractFrames(video, 'disk')
#extractFrames(video, every=0)
#extractFrames(video, format='mine')
def test_video_common_modifyFps():
video = os.path.join(SRC_IMG_DIR, 'test2.mp4')
params = [
{
'videoPath': video,
'newFps': 10,
'output': os.path.join(SAVE_IMG_DIR, "modifyFps_10.avi")
},
{
'videoPath': video,
'newFps': 60,
'output': os.path.join(SAVE_IMG_DIR, "modifyFps_60.avi")
},
{
'videoPath': video,
'newFps': 120,
'output': os.path.join(SAVE_IMG_DIR, "modifyFps_120.avi")
},
{
'videoPath': video,
'newFps': 120,
'output': os.path.join(SAVE_IMG_DIR, "modifyFps_120.mp4")
}
]
for param in params:
modifyFps(param['videoPath'], param['newFps'], param['output'])
"""Failure example"""
#modifyFps(video, "a")
#modifyFps(video, -1)
#modifyFps(video, 120, "modifyFps_60.mov")
#modifyFps(video, 120, "modifyFps_60")
def test_video_common_crop():
video = os.path.join(SRC_IMG_DIR, 'test2.mp4')
params = [
{
'videoPath': video,
'start': 0,
'end': 5,
'output': os.path.join(SAVE_IMG_DIR, "crop_0_5.avi")
},
{
'videoPath': video,
'start': 5,
'end': 15,
'output': os.path.join(SAVE_IMG_DIR, "crop_5_15.avi")
},
{
'videoPath': video,
'start': 0,
'end': 5,
'output': os.path.join(SAVE_IMG_DIR, "crop_0_5.mp4")
},
{
'videoPath': video,
'start': 5,
'end': 15,
'output': os.path.join(SAVE_IMG_DIR, "crop_5_15.mp4")
}
]
for param in params:
crop(param['videoPath'], param['start'], param['end'], param['output'])
``` |
{
"source": "jpasini/autounits",
"score": 4
} |
#### File: autounits/src/dimension.py
```python
from __future__ import division
class DimensionError(Exception): pass
class IncompatibleDimensionsError(DimensionError): pass
class Dimension(object):
"""Class describing dimensions: length, time, etc. and derivative dimensions."""
def __init__(self, *args, **kwargs):
"""Can be initialized like this:
Dimension(L = 1, T = -2) using named arguments,
Dimension(d) using another dimension
Dimension("L/T") using a string <== Not yet."""
self._dimensions_considered = ['M', 'L', 'T', 'Q', 'Theta']
# If args contains something, it should be a dimension
if len(args) > 1:
raise DimensionError
elif len(args) == 1:
d = args[0]
if type(d) != type(self): # it's not a dimension
raise DimensionError
if len(kwargs) > 0: # shouldn't have included more inputs
raise DimensionError
for k in self._dimensions_considered:
self.__dict__[k] = d.__dict__[k]
else: # len(args) == 0, so I should only have named arguments
if len(set(kwargs.keys()) - set(self._dimensions_considered)) > 0:
raise DimensionError
for k in self._dimensions_considered:
self.__dict__[k] = 0 if k not in kwargs else kwargs[k]
def __repr__(self):
args = ", ".join(["%s=%s" % (k, repr(self.__dict__[k])) for k in self._dimensions_considered])
return "Dimension(%s)" % args
def __str__(self):
return self.str(use_braces = False)
def is_primitive(self):
"""The dimension is primitive if it's either dimensionless or only one."""
number_of_ones = 0
number_of_nonzeros_and_nonones = 0
for k in self._dimensions_considered:
p = self.__dict__[k] # power of this dimension
if p == 1:
number_of_ones += 1
elif p != 0:
number_of_nonzeros_and_nonones += 1
return number_of_nonzeros_and_nonones == 0 and number_of_ones in [0, 1]
def str(self, use_braces = False):
if use_braces:
lbrace, rbrace = "{}"
else:
lbrace = rbrace = ""
numerator = ""
denominator = ""
for k in self._dimensions_considered:
v = self.__dict__[k]
if v == 0:
continue
if v == 1:
numerator += "%s%s%s" % (lbrace, k, rbrace)
elif v == -1:
denominator += "%s%s%s" % (lbrace, k, rbrace)
elif v > 0:
numerator += "%s%s%s^%s" % (lbrace, k, rbrace, v)
else:
denominator += "%s%s%s^%s" % (lbrace, k, rbrace, -v)
if numerator == "":
numerator = "1"
if denominator != "":
denominator = "/" + denominator
return numerator + denominator
def __eq__(self, other):
"""Check for equality."""
return self.M == other.M and self.L == other.L \
and self.T == other.T and self.Q == other.Q and self.Theta == other.Theta
def __ne__(self, other):
"""Check for difference."""
return self.M != other.M or self.L != other.L \
or self.T != other.T or self.Q != other.Q or self.Theta != other.Theta
def __add__(self, other):
"""Addition: checks for compatibility."""
if self != other:
raise IncompatibleDimensionsError
else:
return Dimension(self) # create a copy of self.
def __sub__(self, other):
"""Subtraction: checks for compatibility."""
if self != other:
raise IncompatibleDimensionsError
else:
return Dimension(self) # create a copy of self.
def __mul__(self, other):
"""Multiplication."""
return Dimension(M = self.M + other.M,
L = self.L + other.L,
T = self.T + other.T,
Q = self.Q + other.Q,
Theta = self.Theta + other.Theta)
def __div__(self, other):
"""Division (when __future__.division is not defined)."""
return Dimension(M = self.M - other.M,
L = self.L - other.L,
T = self.T - other.T,
Q = self.Q - other.Q,
Theta = self.Theta - other.Theta)
def __truediv__(self, other):
"""Division (when __future__.division is defined).."""
return Dimension(M = self.M - other.M,
L = self.L - other.L,
T = self.T - other.T,
Q = self.Q - other.Q,
Theta = self.Theta - other.Theta)
def __pow__(self, other):
"""Raise to integer or fractional powers"""
return Dimension(M = self.M*other,
L = self.L*other,
T = self.T*other,
Q = self.Q*other,
Theta = self.Theta*other)
def get_number():
from pyparsing import Word, nums, ParseException
def validate_and_convert_number(tokens):
try:
return float(tokens[0])
except ValueError:
raise ParseException("Invalid number (%s)" % tokens[0])
number = Word(nums + '-' + '+' + '.').setResultsName("value") # do not allow scientific notation
number.setParseAction(validate_and_convert_number)
return number
def get_units_literals(units_value_dictionary):
from pyparsing import Literal, replaceWith, Or
def make_literal(unit_string, val):
return Literal(unit_string).setParseAction(replaceWith(val))
units_value_dictionary["1"] = 1 # add one more term for dimensionless quantities
return Or([make_literal(s, v) for (s, v) in units_value_dictionary.iteritems()])
def get_term(units_value_dictionary):
from pyparsing import Optional
n = get_number()
unit = get_units_literals(units_value_dictionary)
term = unit + Optional("^" + n)
def exponentiate_if_needed(tokens):
if len(tokens) < 2:
return tokens[0]
else:
return tokens[0]**tokens[2]
term.setParseAction(exponentiate_if_needed)
#term.setParseAction(lambda t: float(t[0]))
return term
def get_numerator(units_value_dictionary):
from pyparsing import OneOrMore
term = get_term(units_value_dictionary)
numerator = OneOrMore(term)
def multiply_tokens(tokens):
return reduce(lambda x, y: x*y, tokens)
numerator.setParseAction(multiply_tokens)
return numerator
def get_expression(units_value_dictionary):
from pyparsing import Optional, stringEnd
numerator = get_numerator(units_value_dictionary)
expression = numerator + Optional("/" + numerator) + stringEnd
def calculate_final_value(tokens):
l = len(tokens)
if l == 1:
return tokens[0]
else:
return tokens[0]/tokens[2]
expression.setParseAction(calculate_final_value)
return expression
cached_unit_string_parsers = {}
def parse_unit_string(unit_string, units_value_dictionary):
"""Parse a string containing units.
For example:
LT/M^3Q
and return the corresponding value, after replacing with values
from units_value_dictionary, which contains, for example
{'L': 1, 'M': 1, 'T': 60, 'Q': 1, 'Theta': 1}
"""
combo = unit_string, tuple((k, v) for k,v in units_value_dictionary.iteritems())
if combo not in cached_unit_string_parsers:
cached_unit_string_parsers[combo] = get_expression(units_value_dictionary)
return cached_unit_string_parsers[combo].parseString(unit_string)[0]
```
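A brief sketch of how the `Dimension` algebra composes; the expected outputs follow directly from the operators defined above (the only assumption is that `dimension.py` is importable):
```python
from dimension import Dimension

L = Dimension(L=1)
T = Dimension(T=1)

speed = L / T
print(speed)                 # L/T
print(speed.is_primitive())  # False

energy = Dimension(M=1) * L**2 / T**2
print(energy)                               # ML^2/T^2
print(energy == Dimension(M=1, L=2, T=-2))  # True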
#### File: autounits/test/test_physical_quantities.py
```python
from __future__ import division
import unittest
import sys
sys.path.append('../src')
from physical_quantities import PhysicalQuantity, Dimensionless, Mass, Distance, Time, Charge, Temperature
from physical_quantities import Speed, Energy
from physical_quantities import PhysicalQuantityFactory
from physical_quantities import BadInputError, BadUnitDictionaryError, IncompatibleUnitsError
from dimension import Dimension
class TestAuxiliaryFunctions(unittest.TestCase):
def test_flatten_good_dictionary(self):
"""Test flattening a units dictionary."""
from physical_quantities import flatten_dictionary
units_dictionary = {('kg', 'kilogram'): 1, ('g', 'gr', 'gram'): 0.001}
flat_dictionary = flatten_dictionary(units_dictionary)
self.assertEqual(flat_dictionary, {'kg': 1, 'kilogram': 1, 'g': 0.001, 'gr': 0.001, 'gram': 0.001})
def test_flatten_bad_dictionary(self):
"""Flattening a units dictionary with repeats should fail."""
from physical_quantities import flatten_dictionary
units_dictionary = {('kg', 'kilogram'): 1, ('g', 'gr', 'kg'): 0.001}
self.assertRaises(BadUnitDictionaryError, flatten_dictionary, units_dictionary)
def test_parser(self):
from physical_quantities import PhysicalQuantityStringParser
primitive_units_dictionaries = {
'M': {('kg', 'kilogram'): 1, ('g', 'gr', 'gram'): 0.001},
'L': {('m', 'meter'): 1, 'km': 1000},
'T': {'s': 1, ('min', 'minute'): 60},
'Q': {'C': 1 },
'Theta': {'K': 1} }
d1 = Dimension(M = 1, L = -2, T = 4, Theta = -1)
p1 = PhysicalQuantityStringParser(d1, primitive_units_dictionaries)
self.assertEqual(len(p1.flat_units_dictionary), 5*3*3*1)
# repeat the same case (should use caching)
p2 = PhysicalQuantityStringParser(d1, primitive_units_dictionaries)
self.assertEqual(p1.flat_units_dictionary, p2.flat_units_dictionary)
d2 = Dimension(T = -1, Q = 1, Theta = -2)
p3 = PhysicalQuantityStringParser(d2, primitive_units_dictionaries)
self.assertEqual(p3.flat_units_dictionary, {"C/sK^2": 1, "C/minK^2": 1/60, "C/minuteK^2": 1/60})
# Check a simple case
self.assertEqual(p3("60 C/minK^2"), 1)
self.assertEqual(p3("2.4e-2 C/minK^2"), 4e-4)
# Bad input should raise an exception
self.assertRaises(BadInputError, p3, "60 C/min K^2") # spaces in units
self.assertRaises(BadInputError, p3, "60.3.2 C/minK^2") # bad number
class TestPhysicalQuantity(unittest.TestCase):
"""Test the PhysicalQuantity class."""
def test_dimensionless_quantity(self):
"""Test dimensionless quantities."""
d = Dimension()
# creating
p = PhysicalQuantity(d)
# assigning & getting the value. The "unit" is "1": need a better interface.
p["1"] = 4
self.assertEqual(p["1"], 4)
# creating from a string
p = PhysicalQuantity(d, "7")
self.assertEqual(p["1"], 7)
# creating from a string: trying to use "1" as unit in the string fails
self.assertRaises(BadInputError, PhysicalQuantity, d, "7 1")
def test_bad_creation(self):
"""Creation with bad inputs should raise exceptions."""
from physical_quantities import PhysicalQuantityError
d = 3
self.assertRaises(PhysicalQuantityError, PhysicalQuantity, d, "3m") # not a dimension
def test_create_simply_physical_quantity(self):
"""Simple physical quantities."""
d = Dimension(L = 1)
p = PhysicalQuantity(d, "3m")
self.assertEqual(p['m'], 3)
self.assertEqual(p['meters'], 3)
self.assertEqual(p['km'], 0.003)
self.assertEqual(p['kilometers'], 0.003)
p['km'] = 2
self.assertEqual(p['m'], 2000)
self.assertEqual(p['meters'], 2000)
self.assertEqual(p['km'], 2)
self.assertEqual(p['kilometers'], 2)
def test_get_available_units(self):
"""Test that I can get the available units."""
self.assertEqual(set(PhysicalQuantity(Dimension()).get_available_units()), set(["1"]))
# test only whether it's a subset, so it doesn't fail as I add more units
self.assertTrue(set(["kg", "kilogram", "g", "gram"])
<= set(PhysicalQuantity(Dimension(M = 1)).get_available_units()))
self.assertTrue(set(["m/s", "meters/second", "miles/hour", "mi/hr"])
<= set(Speed().get_available_units()))
def test_comparisons(self):
"""All comparisons should be available between quantities of the same type."""
p1 = PhysicalQuantity(Dimension(L = 1), "2m")
p2 = PhysicalQuantity(Dimension(L = 1), "2m")
self.assertTrue(p1 == p2)
self.assertTrue(p1 >= p2)
self.assertTrue(p1 <= p2)
self.assertFalse(p1 != p2)
self.assertFalse(p1 < p2)
self.assertFalse(p1 > p2)
p2['km'] = 1
self.assertFalse(p1 == p2)
self.assertFalse(p1 >= p2)
self.assertTrue(p1 <= p2)
self.assertTrue(p1 != p2)
self.assertFalse(p1 > p2)
self.assertTrue(p1 < p2)
def test_repr(self):
"""repr() should give something that can be used to recreate the object."""
p1 = PhysicalQuantity(Dimension(L = 1), "2m")
p2 = eval(repr(p1))
self.assertEqual(p1, p2)
# special case: dimensionless quantities
p1 = PhysicalQuantity(Dimension(), "2")
p2 = eval(repr(p1))
self.assertEqual(p1, p2)
# derived quantities should also work
t1 = Time("3 min")
t2 = eval(repr(t1))
self.assertEqual(t1, t2)
# a more complicated case
p1 = Speed("30m/s")/Time("2s")/PhysicalQuantity(Dimension(M = 1), "3kg")
p2 = eval(repr(p1))
self.assertEqual(p1, p2)
def test_str(self):
"""str() prints a reasonable form for the quantity."""
# dimensionless case
p = PhysicalQuantity(Dimension(), "2.1e2")
self.assertEqual(str(p), "210")
# For quantities that are NOT dimensionless we use the "basic unit" (whatever has a unit conversion
# factor, so it's SI in our case) with the shortest string representation.
# Also, in both the numerator and denominator the order followed is M L T Q Theta
p = Speed("60 km/min")
self.assertEqual(str(p), "1000 m/s")
p = PhysicalQuantity(Dimension(Q = 1), "4 coulomb")
self.assertEqual(str(p), "4 C")
p = Temperature("4 kelvin")
self.assertEqual(str(p), "4 K")
p = Speed("30m/s")/Time("2s")/PhysicalQuantity(Dimension(M = 1), "3kg")
self.assertEqual(str(p), "5 m/kgs^2")
# Test primitive quantities
class TestMass(unittest.TestCase):
"""Tests for the Mass class."""
kilograms_in = {'kg' : 1, 'kilograms': 1, 'g': 0.001, 'grams': 0.001 }
def test_create_simple_masses(self):
"""Simple masses."""
# Check consistency
for unit,kilograms in self.kilograms_in.iteritems():
m = Mass('1' + unit) # create "1x" where x is the unit
self.assertEqual(m['kg'], kilograms) # the kilograms should be correct
# Check creating from other distances
m1 = Mass("1 kg")
m2 = Mass(m1)
self.assertEqual(m1['kg'], m2['kg'])
# Check creating from another quantity with same dimensions
m1 = PhysicalQuantity(Dimension(M = 1), "1 kg")
m2 = Mass(m1)
self.assertEqual(m1['kg'], m2['kg'])
# Check creating from another quantity with different dimensions
t = PhysicalQuantity(Dimension(T = 1), "1 s")
self.assertRaises(IncompatibleUnitsError, Mass, t)
def test_consistency(self):
"""In its own units, the value should be 1."""
for unit in self.kilograms_in.keys():
m = Mass('1' + unit) # create "1x" where x is the unit
self.assertEqual(m[unit], 1)
def test_mass_adding(self):
"""Test adding masses."""
m1 = Mass("10 kg")
m2 = Mass("300 g")
m3 = m1 + m2
self.assertEqual(m1.dimension, m3.dimension) # type is the same
self.assertEqual(m3['kg'], 10.3)
def test_mass_subtracting(self):
"""Test subtracting masses."""
m1 = Mass("10 kg")
m2 = Mass("300 g")
m3 = m1 - m2
self.assertEqual(m1.dimension, m3.dimension) # type is the same
self.assertAlmostEqual(m3['g'], 9700)
def test_for_mass_equality(self):
"""Test that masses are only compared by length."""
m1 = Mass("1g")
m2 = Mass("0.001kg")
self.assertEqual(m1['kg'], m2['kg']) # sanity check before the real test
self.assertEqual(m1, m2)
def test_creating_from_other_mass(self):
"""I can create a mass from another."""
m1 = Mass("10 kg")
m2 = Mass(m1)
self.assertEqual(m1, m2)
m2['kg'] = 2
self.assertEqual(m2['kg'], 2)
self.assertEqual(m1['kg'], 10) # check that we didn't modify the original one
class TestDistance(unittest.TestCase):
"""Tests for the Distance class."""
meters_in = {'m' : 1, 'meters': 1, 'mi': 1609.344, 'miles': 1609.344, 'km': 1000, 'kilometers': 1000, 'marathon': 42194.988 }
def test_create_simple_distances(self):
"""Simple distances."""
# Check consistency
for unit,meters in self.meters_in.iteritems():
d = Distance('1' + unit) # create "1x" where x is the unit
self.assertEqual(d['m'], meters) # the meters should be correct
# Check creating from other distances
d1 = Distance("1 m")
d2 = Distance(d1)
self.assertEqual(d1['m'], d2['m'])
# Check creating from another quantity with same dimensions
d1 = PhysicalQuantity(Dimension(L = 1), "1 m")
d2 = Distance(d1)
self.assertEqual(d1['m'], d2['m'])
# Check creating from another quantity with different dimensions
d1 = PhysicalQuantity(Dimension(T = 1), "1 s")
self.assertRaises(IncompatibleUnitsError, Distance, d1)
def test_consistency(self):
"""In its own units, the value should be 1."""
for unit in self.meters_in.keys():
d = Distance('1' + unit) # create "1x" where x is the unit
self.assertEqual(d[unit], 1)
def test_distance_adding(self):
"""Test adding distances."""
d1 = Distance("10 m")
d2 = Distance("3 km")
d3 = d1 + d2
self.assertEqual(d1.dimension, d3.dimension) # type is the same
self.assertEqual(d3['m'], 3010)
def test_distance_subtracting(self):
"""Test subtracting distances."""
d1 = Distance("10 m")
d2 = Distance("3 km")
d3 = d2 - d1
self.assertEqual(d1.dimension, d3.dimension) # type is the same
self.assertEqual(d3['m'], 2990)
def test_for_distance_equality(self):
"""Test that distances are only compared by length."""
d1 = Distance("1m")
d2 = Distance("0.001km")
self.assertEqual(d1['m'], d2['m']) # sanity check before the real test
self.assertEqual(d1, d2)
def test_creating_from_other_distance(self):
"""I can create a distance from another."""
d1 = Distance("10 m")
d2 = Distance(d1)
self.assertEqual(d1, d2)
d2['m'] = 2
self.assertEqual(d2['m'], 2)
self.assertEqual(d1['m'], 10)
class TestTime(unittest.TestCase):
"""Tests for the Time class."""
seconds_in = {'s': 1, 'seconds': 1, 'min': 60, 'minutes': 60, 'hr': 3600, 'hours': 3600 }
def test_create_simple_times(self):
"""Simple times."""
for unit,seconds in self.seconds_in.iteritems():
t = Time('1' + unit) # create "1x" where x is the unit
self.assertEqual(t['s'], seconds) # the seconds should be correct
def test_consistency(self):
"""In its own units, the value should be 1."""
for unit in self.seconds_in.keys():
t = Time('1' + unit) # create "1x" where x is the unit
self.assertEqual(t[unit], 1)
def test_string_output(self):
"""Test time output in string format."""
t = Time("1 min")
self.assertEqual(t.str, "01:00")
t = Time("60 s")
self.assertEqual(t.str, "01:00")
t = Time("3661 s")
self.assertEqual(t.str, "1:01:01")
t = Time("0.1 s")
self.assertEqual(t.str, "00:00")
class TestCharge(unittest.TestCase):
"""Tests for the Charge class."""
def test_create_simple_charges(self):
"""Simple charges."""
q = Charge("3 coulomb")
self.assertEqual(q['C'], 3)
class TestTemperature(unittest.TestCase):
"""Tests for the Temperature class."""
# kelvin, rankine, celsius, fahrenheit
known_values = [
[273.15, 491.67, 0, 32],
[373.15, 671.67, 100, 212]]
kelvins_in = {'K': 1, 'R': 5/9 }
def test_create_simple_temperatures(self):
"""Simple temperatures."""
for unit,kelvins in self.kelvins_in.iteritems():
t = Temperature('1' + unit) # create "1x" where x is the unit
self.assertEqual(t['K'], kelvins)
def test_consistency(self):
"""In its own units, the value should be 1."""
for unit in self.kelvins_in.keys():
t = Temperature('1' + unit) # create "1x" where x is the unit
self.assertEqual(t[unit], 1)
def test_known_values(self):
t1 = Temperature()
t2 = Temperature()
t3 = Temperature()
t4 = Temperature()
for K, R, C, F in self.known_values:
t1['K'] = K
self.assertAlmostEqual(t1['K'], K)
self.assertAlmostEqual(t1['R'], R)
self.assertAlmostEqual(t1['C'], C)
self.assertAlmostEqual(t1['F'], F)
t2['R'] = R
self.assertAlmostEqual(t2['K'], K)
self.assertAlmostEqual(t2['R'], R)
self.assertAlmostEqual(t2['C'], C)
self.assertAlmostEqual(t2['F'], F)
t3['C'] = C
self.assertAlmostEqual(t3['K'], K)
self.assertAlmostEqual(t3['R'], R)
self.assertAlmostEqual(t3['C'], C)
self.assertAlmostEqual(t3['F'], F)
t4['F'] = F
self.assertAlmostEqual(t4['K'], K)
self.assertAlmostEqual(t4['R'], R)
self.assertAlmostEqual(t4['C'], C)
self.assertAlmostEqual(t4['F'], F)
# Test derived quantities
class TestSpeed(unittest.TestCase):
"""Tests for the Speed class."""
def test_simple_speeds(self):
"""Create a few speeds and check the value."""
s = Speed('1 mi/hr')
self.assertEqual(s['mi/hr'], 1)
s['miles/hr'] = 2.5
self.assertEqual(s['mi/hr'], 2.5)
self.assertEqual(s['m/s'], 2.5*Distance('1mi')['m']/Time('1hr')['s'])
def test_check_known_pace(self):
"""Check pace for some speeds."""
# speed, distance for pace, pace
known_values = [
['1 m/s', '1 km', '1000s'],
['1 meters/s', '1 km', '1000s'],
['1 mi/hr', '1 mi', '1 hr']
]
for speed, distance, pace in known_values:
s, d, t = Speed(speed), Distance(distance), Time(pace)
self.assertEqual(s.pace(d)['s'], t['s']) # the seconds should be correct
class TestEnergy(unittest.TestCase):
"""Tests for the Energy class."""
def test_simple_energies(self):
"""Create a few energies and check the value."""
E = Energy('1 kgm^2/s^2')
self.assertEqual(E['J'], 1)
E['Btu'] = 2.5
self.assertEqual(E['Btu'], 2.5)
self.assertEqual(E['J'], 2.5*1055.05585)
class TestCombinedDimensions(unittest.TestCase):
"""Test combinations of units."""
def test_comparison_of_combined_units(self):
d = Distance("10m")
t = Time("5s")
self.assertFalse(d.dimension == t.dimension)
self.assertRaises(IncompatibleUnitsError, d.__lt__, t)
self.assertRaises(IncompatibleUnitsError, d.__gt__, t)
self.assertRaises(IncompatibleUnitsError, d.__le__, t)
self.assertRaises(IncompatibleUnitsError, d.__ge__, t)
def test_addition_and_subtraction_of_combined_units(self):
d = Distance("10m")
t = Time("5s")
self.assertRaises(IncompatibleUnitsError, d.__add__, t)
self.assertRaises(IncompatibleUnitsError, d.__sub__, t)
def test_multiplication_and_division_of_combined_units(self):
d = Distance("10m")
t = Time("5s")
s1 = d/t # division
s2 = Speed("2m/s")
self.assertEqual(s1.dimension, s2.dimension)
self.assertEqual(s1, s2)
d2 = s2*t # multiplication
self.assertEqual(d2, d)
def test_multiplication_and_division_involving_scalars(self):
d1 = Distance("10m")
d2 = d1/2
self.assertEqual(type(d2), Distance)
self.assertEqual(d2['m'], 5)
d3 = d1*2 # multiply on the right
self.assertEqual(type(d3), Distance)
self.assertEqual(d3['m'], 20)
d4 = 2*d1 # multiply on the left
self.assertEqual(type(d4), Distance)
self.assertEqual(d4['m'], 20)
t1 = Time("4hr")
rate = 8/t1
self.assertEqual(rate["1/hr"], 2)
t2 = 8/rate
self.assertEqual(type(t2), Time)
self.assertEqual(t2, t1)
def test_addition_and_subtraction_involving_scalars(self):
v1 = Dimensionless("1")
v2 = v1 + 2
self.assertEqual(type(v2), type(v1))
self.assertEqual(v2['1'], 3)
v3 = 2 + v1
self.assertEqual(type(v3), type(v1))
self.assertEqual(v3['1'], 3)
v4 = v1 - 3
self.assertEqual(type(v4), type(v1))
self.assertEqual(v4['1'], -2)
v5 = 3 - v1
self.assertEqual(type(v5), type(v1))
self.assertEqual(v5['1'], 2)
# this won't work with other dimensions
d = Distance("3m")
self.assertRaises(IncompatibleUnitsError, d.__add__, 4)
self.assertRaises(IncompatibleUnitsError, d.__radd__, 4)
self.assertRaises(IncompatibleUnitsError, d.__sub__, 4)
self.assertRaises(IncompatibleUnitsError, d.__rsub__, 4)
def test_power(self):
"""I can raise quantities to integer or fractional powers."""
L = Distance("3m")
A = L**2
self.assertEqual(A, L*L)
V = L**3
self.assertEqual(V, L*L*L)
L2 = A**0.5
self.assertEqual(L2, L)
# type guessing works
m = Mass("7 kg")
v = Speed("11 m/s")
E = 1/2*m*v**2
self.assertEqual(E['J'], 1/2*7*11*11)
def test_power_to_dimensionless_quantities(self):
"""I can raise quantities to dimensionless quantities."""
L1 = Distance("3m")
L2 = Distance("6m")
A = L1*L2
L3 = A**(L1/L2)
self.assertEqual(L3['m'], (3*6)**(1/2))
def test_type_coercion_on_addition_and_subtraction(self):
"""A PhysicalQuantity, when added/subtracted to/from a Time becomes a Time."""
t1 = Time("5s")
t2 = PhysicalQuantity(Dimension(T = 1), "1 min")
self.assertTrue(type(t1) != type(t2)) # sanity check before the real check
# coercion on the left & right
self.assertEqual(type(t1 + t2), type(t1))
self.assertEqual(type(t2 + t1), type(t1))
self.assertEqual(type(t1 - t2), type(t1))
self.assertEqual(type(t2 - t1), type(t1))
# A more complex example
s = Speed("3 m/s")
d = Distance("4 m")
t = Time("4 s")
self.assertEqual(type(s + d/t), Speed)
self.assertEqual(type(d/t + s), Speed)
def test_type_guessing_in_general(self):
"""The library should find the proper type depending on dimensions."""
d = Distance("10m")
t = Time("5s")
self.assertEqual(type(d/t), Speed)
v = Speed("10mi/hr")
self.assertEqual(type(v*t), Distance)
# charge density
rho = PhysicalQuantity(Dimension(L = -3, Q = 1), "4C/m^3")
q = rho*d*d*d
self.assertEqual(type(q), Charge)
self.assertEqual(q['C'], 4000)
# Note: this doesn't work for a quantity explicitly defined as a PhysicalQuantity
T1 = Temperature("3 K")
T2 = PhysicalQuantity(Dimension(Theta = 1), "3 K")
self.assertEqual(T1, T2)
self.assertEqual(type(T1), Temperature)
self.assertEqual(type(T2), PhysicalQuantity)
# But a multiplication or division by a dimensionless quantity should fix that
T3 = T2/PhysicalQuantity(Dimension(), "1")
self.assertEqual(type(T3), Temperature)
class TestFactory(unittest.TestCase):
"""Creating physical quantities through a factory."""
def test_basic_quantities(self):
factory = PhysicalQuantityFactory()
known_types = [
[Dimension(M = 1), Mass],
[Dimension(L = 1), Distance],
[Dimension(T = 1), Time],
[Dimension(Q = 1), Charge],
[Dimension(Theta = 1), Temperature],
[Dimension(L = 1, T = -1), Speed]
]
for (d,t) in known_types:
quantity = factory.new(d)
self.assertEqual(type(quantity), t)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jpasquet/Photoz",
"score": 3
} |
#### File: jpasquet/Photoz/network.py
```python
import numpy as np
import tensorflow as tf
def prelu(x):
with tf.name_scope('PRELU'):
_alpha = tf.get_variable("prelu", shape=x.get_shape()[-1],
dtype = x.dtype, initializer=tf.constant_initializer(0.0))
return tf.maximum(0.0, x) + _alpha * tf.minimum(0.0, x)
def conv2d(input, num_output_channels, kernel_size, name):
with tf.variable_scope(name):
num_in_channels = input.get_shape()[-1].value
kernel_shape = [kernel_size,
kernel_size,
num_in_channels,
num_output_channels]
biases = tf.get_variable('biases',
shape=[num_output_channels],
initializer=tf.constant_initializer(0.1))
kernel = tf.get_variable('weights',
shape=kernel_shape,
initializer=tf.contrib.layers.xavier_initializer())
outputs = tf.nn.conv2d(input,
kernel,
strides=[1,1,1,1],
padding="SAME")
outputs = tf.nn.bias_add(outputs, biases)
outputs = prelu(outputs)
return outputs
def pool2d(input,kernel_size,stride,name):
    print(input, [1, kernel_size, kernel_size, 1], [1, stride, stride, 1])
with tf.variable_scope(name):
return tf.nn.avg_pool(input,
ksize=[1, kernel_size, kernel_size, 1],
strides=[1, stride, stride, 1],
padding="SAME",
name=name)
def fully_connected(input, num_outputs, name, withrelu=True):
with tf.variable_scope(name):
num_input_units = input.get_shape()[-1].value
kernel_shape = [num_input_units, num_outputs]
kernel = tf.get_variable('weights',
shape=kernel_shape,
initializer=tf.contrib.layers.xavier_initializer())
outputs = tf.matmul(input, kernel)
biases = tf.get_variable('biases',
shape=[num_outputs],
initializer=tf.constant_initializer(0.1))
outputs = tf.nn.bias_add(outputs, biases)
if withrelu:
outputs = tf.nn.relu(outputs)
return outputs
def inception(input, nbS1, nbS2, name, output_name, without_kernel_5=False):
with tf.variable_scope(name):
s1_0 = conv2d(input=input,
num_output_channels=nbS1,
kernel_size=1,
name=name + "S1_0")
s2_0 = conv2d(input=s1_0,
num_output_channels=nbS2,
kernel_size=3,
name=name + "S2_0")
s1_2 = conv2d(input=input,
num_output_channels=nbS1,
kernel_size=1,
name=name + "S1_2")
pool0 = pool2d(input=s1_2,
kernel_size=2,
stride=1,
name=name + "pool0")
if not(without_kernel_5):
s1_1 = conv2d(input=input,
num_output_channels=nbS1,
kernel_size=1,
name=name + "S1_1")
s2_1 = conv2d(input=s1_1,
num_output_channels=nbS2,
kernel_size=5,
name=name + "S2_1")
s2_2 = conv2d(input=input,
num_output_channels=nbS2,
kernel_size=1,
name=name + "S2_2")
if not(without_kernel_5):
output = tf.concat(values=[s2_2, s2_1, s2_0, pool0],
name=output_name,
axis=3)
else:
output = tf.concat(values=[s2_2, s2_0, pool0],
name=output_name,
axis=3)
return output
def model():
reddening = tf.placeholder(tf.float32, shape=[None, 1], name="reddening")
x = tf.placeholder(tf.float32, shape=[None, 64, 64, 5], name="x")
conv0 = conv2d(input=x, num_output_channels=64, kernel_size=5, name="conv0")
conv0p = pool2d(input=conv0, kernel_size=2, stride=2, name="conv0p")
i0 = inception(conv0p, 48, 64, name="I0_", output_name="INCEPTION0")
i1 = inception(i0, 64, 92, name="I1_", output_name="INCEPTION1")
i1p = pool2d(input=i1, kernel_size=2, name="INCEPTION1p", stride=2)
i2 = inception(i1p, 92, 128, name="I2_", output_name="INCEPTION2")
i3 = inception(i2, 92, 128, name="I3_", output_name="INCEPTION3")
i3p = pool2d(input=i3, kernel_size=2, name="INCEPTION3p", stride=2)
i4 = inception(i3p, 92,128, name="I4_", output_name="INCEPTION4",
without_kernel_5=True)
flat = tf.layers.Flatten()(i4)
concat = tf.concat(values=[flat,reddening], axis=1)
fc0 = fully_connected(input=concat, num_outputs=1096, name="fc0")
fc1 = fully_connected(input=fc0, num_outputs=1096, name="fc0b")
fc2 = fully_connected(input=fc1, num_outputs=180, name="fc1",
withrelu=False)
output = tf.nn.softmax(fc2)
params = {"output": output, "x": x, "reddening": reddening}
return params
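# Minimal usage sketch (assumption: TensorFlow 1.x graph/session API; the dummy
# arrays below are hypothetical inputs with the shapes the placeholders expect).
if __name__ == "__main__":
    params = model()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        images = np.zeros((2, 64, 64, 5), dtype=np.float32)  # hypothetical image cutouts
        ebv = np.zeros((2, 1), dtype=np.float32)             # hypothetical reddening values
        pdf = sess.run(params["output"],
                       feed_dict={params["x"]: images, params["reddening"]: ebv})
        print(pdf.shape)  # (2, 180): per-object probability over 180 redshift bins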
``` |
{
"source": "jpastorino/Data-Blind-ML",
"score": 3
} |
#### File: src/data_analysis/ds_analysis.py
```python
import sys
import os
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
import numpy as np
from scipy import stats
from sklearn import preprocessing
from datetime import date
def autolabel(ax, rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{:.2f}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom', fontsize=8)
def addlabel(ax, rects, labels):
i = 0
for rect in rects:
height = rect.get_height()
ax.annotate(labels[i],
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
i += 1
def plot_metric(labels, metric_data, dataset_name, metric, save_to, out_file, line_value=0.5):
fig, ax = plt.subplots()
x = np.arange(len(labels)) # the label locations
width = 0.35 # the width of the bars
rects1 = ax.bar(x - width / 2, metric_data, width, label='Features')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('{}'.format(metric))
ax.set_title('{} dataset {} analysis'.format(dataset_name, metric))
plt.hlines(line_value, 0, len(labels), linestyles='dotted')
plt.hlines(-line_value, 0, len(labels), linestyles='dotted')
if len(labels) < 25:
ax.set_xticks(x)
ax.set_xticklabels(labels)
autolabel(ax, rects1)
plt.xticks(rotation=40)
ax.set_xlabel("Features")
# else:
# ax.set_xticklabels(["F_" + str(a) for a in x])
fig.tight_layout()
plt.savefig(save_to)
out_file.write("Plot was generated and stored at >>>{}\n".format(save_to))
plt.show()
# --------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
""" RUNs the Dataset Analysis for the given csv file"""
DATA_PATH = "./data/source"
OUTPUT_PATH = "./output"
today = date.today()
### DISPLAY FILES
files = []
for file in os.listdir(DATA_PATH):
if file.endswith(".csv"):
files.append(file[:-4])
files.sort()
for i, file in enumerate(files):
print("{} - {}".format(i + 1, file))
print("-" * 30)
selection = int(input("Choose file to process [1-{}]:".format(len(files))))
    if not (1 <= selection <= len(files)):
print("Invalid Selection. Program Terminated.")
exit(1)
## FILE SELECTED - OPEN
filename = files[selection - 1]
csv_file = f"{DATA_PATH}/{filename}.csv"
image_out_file_placeholder = f"{OUTPUT_PATH}/{filename}.analysis.{'{}'}.png"
out_file = open(f"{OUTPUT_PATH}/{filename}.analysis.report.txt", "w")
print("Processing {}".format(filename))
print()
out_file.write("Data Analysis Report for {}\n".format(filename))
out_file.write("{}\n".format(today))
    has_header = input("Does the file have a header? [Y/n]")
if has_header.lower() == "n":
df = pd.read_csv(csv_file, header=None, prefix="x")
else:
df = pd.read_csv(csv_file)
# ######### SNS PAIR PLOT
user_option = input("Do you want to generate PairPlot ? [y/N]")
if user_option.lower() == "y":
print("-" * 40)
print("working....")
img_out_file = image_out_file_placeholder.format("pair-plot")
sns_plot = sns.pairplot(df)
sns_plot.savefig(img_out_file)
out_file.write("Pair plot was generated and saved at {}\n".format(img_out_file))
# ######### SKEWNESS
user_option = input("Compute Skewness? [Y/n]")
if not user_option.lower() == "n":
out_file.write("Computing Skewness\n")
out_file.write("-" * 40 + "\n")
print("-" * 40)
labels = []
skewness = []
dataTypeDict = dict(df.dtypes)
print("{:^40} | {:^15}".format("Feature", "Skewness"))
print("-" * 60)
out_file.write("{:^40} | {:^15}\n".format("Feature", "Skewness"))
out_file.write("-" * 60 + "\n")
for col in df.columns:
data = df[col].dropna()
notes = ""
if not np.issubdtype(dataTypeDict[col], np.number):
notes = "Encoding {} dType: {}".format(col, dataTypeDict[col])
le = preprocessing.LabelEncoder()
le.fit(data)
data = le.transform(data)
labels.append(col)
skewness.append(stats.skew(data))
            print("{:^40} | {:10.5f} {}".format(col, skewness[-1], notes))
            out_file.write("{:^40} | {:10.5f} {}\n".format(col, skewness[-1], notes))
plot_metric(labels, skewness, filename, "Skewness", image_out_file_placeholder.format("skewness"), out_file)
# ######### KURTOSIS
user_option = input("Compute Kurtosis? [Y/n]")
if not user_option.lower() == "n":
out_file.write("\n\nComputing Kurtosis\n")
out_file.write("-" * 40 + "\n")
print("-" * 40)
labels = []
kurtosis = []
dataTypeDict = dict(df.dtypes)
print("{:^40} | {:^15}".format("Feature", "Kurtosis"))
print("-" * 60)
out_file.write("{:^40} | {:^15}\n".format("Feature", "Kurtosis"))
out_file.write("-" * 60 + "\n")
for col in df.columns:
data = df[col].dropna()
notes = ""
if not np.issubdtype(dataTypeDict[col], np.number):
notes = "Encoding {} dType: {}".format(col, dataTypeDict[col])
le = preprocessing.LabelEncoder()
le.fit(data)
data = le.transform(data)
labels.append(col)
kurtosis.append(stats.kurtosis(data))
            print("{:^40} | {:10.5f} {}".format(col, kurtosis[-1], notes))
            out_file.write("{:^40} | {:10.5f} {}\n".format(col, kurtosis[-1], notes))
plot_metric(labels, kurtosis, filename, "Excess Kurtosis", image_out_file_placeholder.format("kurtosis"),
out_file, line_value=0)
# ##### Shapiro-Wilk Test (Data normality)
user_option = input("Test Data is Normal Distributed? [Y/n]")
if not user_option.lower() == "n":
print("-" * 40)
out_file.write("\n\nTesting If Data Follows Normal Distribution\n")
out_file.write("-" * 40 + "\n")
labels = []
shapiro_p_value = []
dataTypeDict = dict(df.dtypes)
print("{:^40} | {:15} | {:^20}".format("Feature", "Shapiro P-Value", "Normally Dist"))
print("-" * 81)
out_file.write("{:^40} | {:15} | {:^20}\n".format("Feature", "Shapiro P-Value", "Normally Dist"))
out_file.write("-" * 81 + "\n")
for col in df.columns:
data = df[col].dropna()
notes = ""
if not np.issubdtype(dataTypeDict[col], np.number):
notes = "Encoding {} dType: {}".format(col, dataTypeDict[col])
le = preprocessing.LabelEncoder()
le.fit(data)
data = le.transform(data)
labels.append(col)
shapiro_p_value.append(stats.shapiro(data)[1])
if shapiro_p_value[-1] < 0.05:
is_normal = "NO"
else:
is_normal = "YES"
print("{:40} | {:3.9E} | {:^20} {}".format(col, shapiro_p_value[-1], is_normal, notes))
out_file.write("{:40} | {:3.9E} | {:^20} {}\n".format(col, shapiro_p_value[-1], is_normal, notes))
```
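The per-column statistics above reduce to a label-encode-then-scipy pattern; a standalone sketch of that core step on a toy dataframe (the column names and values are made up):
```python
import numpy as np
import pandas as pd
from scipy import stats
from sklearn import preprocessing

df = pd.DataFrame({"age": [21, 34, 29, 55, 41],
                   "city": ["NY", "LA", "NY", "SF", "LA"]})

for col in df.columns:
    data = df[col].dropna()
    if not np.issubdtype(df.dtypes[col], np.number):
        # Non-numeric columns are label-encoded before computing the statistics
        data = preprocessing.LabelEncoder().fit_transform(data)
    print(col,
          stats.skew(data),        # skewness
          stats.kurtosis(data),    # excess kurtosis
          stats.shapiro(data)[1])  # Shapiro-Wilk p-value (normality test)
```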
#### File: src/data_server/data_server_app.py
```python
import sys
import pandas as pd
from time import time
from os import system, name
from sdv.tabular import CTGAN
from Dataset import MetadataDB, Dataset
from sklearn.utils.random import sample_without_replacement
from Exceptions import NoMetadataError, NoDatasetFoundError
from Server import Server
CTGAN_TRAIN_THRESHOLD_SIZE = 2000 # Max size to train CTGAN without asking the user.
CTGAN_EPOCHS = 30
CTGAN_BATCH_SIZE = 100
def cls():
    if name == 'nt':  # for Windows
        _ = system('cls')
    else:  # for Mac and Linux (here, os.name is 'posix')
        _ = system('clear')
def display_menu() -> int:
cls()
opt = -1
opt_range = [0, 5, 99]
while not opt_range[0] <= opt <= opt_range[1] and opt != opt_range[2]:
print(f"""{"=" * 40}""")
print(f"""{"Main Menu":^40}""")
print(f"""{"=" * 40}""")
print(f"""{" " * 5}{"0) Create New Metadata"}""")
print(f"""{" " * 5}{"1) Load Current Metadata"}""")
print(f"""{" " * 5}{"2) Add Dataset"}""")
print(f"""{" " * 5}{"3) Print Current Metadata"}""")
print(f"""{" " * 5}{"4) Learn Synthetic Model"}""")
print(f"""{" " * 5}{"5) Start Server"}""")
print(f"""{"99) Exit":>40}""")
print(f"""{"-" * 40}""")
opt = int(input(f"""{" " * 5}{"Select Option -> "}"""))
return opt
def create_metadata():
cont = input("Warning! this will delete all pre-existing metadata. Continue [yes/NO]?:")
if cont.upper() == "YES":
dataset_source = input("Metadata filename (do not include path) [metadata.csv]:")
if dataset_source == "":
dataset_source = "metadata.csv"
m = MetadataDB(dataset_source)
return m
else:
print("Aborted by user.")
input("Press any key to continue...")
return None
def load_metadata():
dataset_source = input("Metadata filename (do not include path) [metadata.csv]:")
if dataset_source == "":
dataset_source = "metadata.csv"
start_time = time()
m = MetadataDB(dataset_source)
m.load()
end_time = time()
print(f"""Loaded in {end_time-start_time:.2f} sec.""")
return m
def print_metadata(meta):
if meta is None:
raise NoMetadataError("Invalid metadata. Load metadata first.")
else:
print(meta)
input("Press any key to continue...")
def add_dataset(meta: MetadataDB):
if meta is None:
raise NoMetadataError("Invalid metadata. Load metadata first.")
else:
dataset_source = input("Dataset Source filename (do not include path):")
start_time = time()
meta.add_dataset(dataset_source)
print("Dataset added successfully.")
end_time = time()
print(f"""Loaded in {end_time - start_time:.2f} sec.""")
input("Press any key to continue...")
def learn_ctgan_model(meta: MetadataDB):
if meta is None:
raise NoMetadataError("Invalid metadata. Load metadata first.")
cls()
print("=" * 80)
print(f"={'Select Dataset to learn Model':^78}=")
print("=" * 80)
ds: Dataset
print(f"""{"ID":^15}|{"Source":^50}|{"Has SynMod?":^15}""")
print("-" * 80)
for ds in meta.datasets:
if ds.has_synthetic_model:
print(f"""{ds.ds_id:^15}|{ds.source_filename:^50}|{"Yes":^15}""")
else:
print(f"""{ds.ds_id:^15}|{ds.source_filename:^50}|{"NO":^15}""")
print("-" * 80)
input_ds_id = input(f"""{" " * 5}Input Dataset ID [empty to abort]>""")
if input_ds_id.strip() == "":
print("Aborted by User...")
input("Press any key to continue...")
else:
selected_ds: Dataset = None
for ds in meta.datasets:
if ds.ds_id == input_ds_id.upper():
selected_ds = ds
if selected_ds is None:
raise NoDatasetFoundError(f"""Can't find dataset with that id ({input_ds_id.upper()}). Try again.""")
print("-" * 80)
print(f"""={f"Learning Synthetic Model for {selected_ds.ds_id}":^78}=""")
print("-" * 80)
train, _ = selected_ds.load_split_files()
if train.shape[0] > CTGAN_TRAIN_THRESHOLD_SIZE:
print(f"""{" " * 5}Input Dataset Size is large ({train.shape[0]}).""")
input_train_size = input(f"""{" " * 10}Select Max size for learning [{CTGAN_TRAIN_THRESHOLD_SIZE}]>""")
if input_train_size == "":
input_train_size = CTGAN_TRAIN_THRESHOLD_SIZE
else:
input_train_size = min(int(input_train_size),train.shape[0])
print(f"""{" " * 5}Setting learning data size to {input_train_size}.""")
if input_train_size > CTGAN_TRAIN_THRESHOLD_SIZE:
print(f"""{" " * 5}NOTICE: This process may take some time.""")
ids = sample_without_replacement(n_population=train.shape[0], n_samples=input_train_size)
train = train[ids]
print(f"""{" " * 5}Learning Synthetic model...""")
start_time = time()
df_train = pd.DataFrame(train, columns=["col" + str(i) for i in range(train.shape[1])])
model_filename = "ctgan_" + selected_ds.ds_id + ".pkl"
gen_model = CTGAN(batch_size=CTGAN_BATCH_SIZE, epochs=CTGAN_EPOCHS)
gen_model.fit(df_train)
gen_model.save(Dataset.config["MODELS_PATH"] + "/" + model_filename)
meta.associate_synthetic_model(selected_ds.ds_id, model_filename)
print("-" * 80)
print(f"""={f"Model for {selected_ds.ds_id} Learned and Saved.":^78}=""")
print("-" * 80)
end_time = time()
print(f"""Learnt in {end_time - start_time:.2f} sec.""")
input("Press any key to continue...")
def run_server(meta: MetadataDB):
if meta is None:
raise NoMetadataError("Invalid metadata. Load metadata first.")
else:
print(meta)
# server = Server("127.0.0.1", 32000, 10, metadata)
server = Server("0.0.0.0", 32000, 10, metadata)
server.run()
pass
# #####################################################################################################################
# MAIN PROGRAM
# #####################################################################################################################
if __name__ == "__main__":
if not sys.warnoptions:
import warnings
warnings.simplefilter(action='ignore') # , category=FutureWarning) #hide warnings.
option: int = -1
metadata: MetadataDB = None
while True:
try:
option = display_menu()
if option == 0: #Creates new metadata
metadata = create_metadata()
elif option == 1: # Load Metadata
metadata = load_metadata()
elif option == 2: # Add Dataset
add_dataset(metadata)
elif option == 3: # Print Metadata
print_metadata(metadata)
elif option == 4: # Learn CTGAN Model
learn_ctgan_model(metadata)
elif option == 5: # Start Server
run_server(metadata)
pass
elif option == 99: # Terminate server and exit
print("Terminating Server and Exiting...")
exit(0)
except Exception as e:
print(f"""Error --> {e}""")
input("Press any key to continue...")
```
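Stripped of the menu plumbing, the synthetic-model step in `learn_ctgan_model` is a plain fit/save on an SDV tabular model; a rough sketch, assuming the pre-1.0 `sdv.tabular` API used above and hypothetical file paths:
```python
import pandas as pd
from sdv.tabular import CTGAN

train_df = pd.read_csv("data/source/iris.csv")  # hypothetical training table

gen_model = CTGAN(batch_size=100, epochs=30)
gen_model.fit(train_df)
gen_model.save("models/ctgan_example.pkl")      # hypothetical model path

# Later, the server can reload the model and draw synthetic rows for practitioners
restored = CTGAN.load("models/ctgan_example.pkl")
synthetic_df = restored.sample(500)
```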
#### File: src/devel_framework/evaluation.py
```python
import os
import time
import socket
import pickle
import inspect
import numpy as np
from preprocessing import Preprocessing
from tensorflow.keras.models import Sequential
# ======================================================================================================================
# ======================================================================================================================
class EvaluationClient:
def __init__(self, ip: str, port: int):
"""
Connects to the evaluation server to evaluate the performance of the model.
:param ip: evaluation server ip
:param port: evaluation server port.
"""
self.__ip = ip
self.__port = port
self.__client_socket = None
def __connect(self):
self.__client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__client_socket.connect((self.__ip, self.__port))
#
# **************************************************************************************************************
def _receive_message(self, max_length: int = 16_384) -> str:
"""
        Receives a message from the server and decodes it.
        :return: the decoded message as a UTF-8 string
"""
msg = self.__client_socket.recvmsg(max_length)[0]
print(f'<<<<<[{msg}] len:{len(msg)}')
msg = msg.decode("UTF-8")
return msg
#
# **************************************************************************************************************
def _send_message(self, msg: str):
print(f">>>>[{msg}]")
self.__client_socket.send(msg.encode("UTF-8"))
# self.__client_socket.sendall(msg.encode("UTF-8"))
time.sleep(1)
#
# **************************************************************************************************************
def _receive_bytes(self, len_msg:int, max_length:int = 16_384) -> bytes:
chunks = []
received_bytes = 0
while received_bytes < len_msg:
buff = self.__client_socket.recvmsg(max_length)[0]
if not buff == b"":
chunks.append(buff)
received_bytes += len(buff)
msg = b"".join(chunks)
print(f'<<<<<<<<<< Received {len(msg)} bytes')
return msg
#
# **************************************************************************************************************
def _send_bytes(self, msg: bytes, sleep_time:float = 1):
# self._display_message(f"""SEND>> {msg}""")
# self.__client_socket.send(msg.encode("UTF-8"))
print(f'sending {len(msg)} bytes')
sent = self.__client_socket.send(msg)
# sent = self.__client_socket.sendall(msg)
print(f'Sent {sent} bytes')
# self.__client_socket.
time.sleep(sleep_time)
#
# **************************************************************************************************************
def __disconnect(self):
self.__client_socket.close()
#
# **************************************************************************************************************
def evaluate_model(self, ds_id: str, model_id: str, model: Sequential, preprocessing: Preprocessing):
"""
Evaluates a model remotely using the real data.
:param ds_id: dataset id. should match the ds_id provided by the data owner.
:param model_id: model id/name, for reference.
:param model: model object
:param preprocessing: preprocessing object. will be used to transform real test data to evaluate the model.
"""
print(f"""{"=" * 80}""")
print(f"""{"MODEL EVALUATION with REAL DATA.":^80}""")
print(f"""{"=" * 80}""")
# ### Connection
print(f"""{" " * 5}Connecting to Evaluation server [{self.__ip}:{self.__port}]...""")
self.__connect()
response = self._receive_message()
print(f"""{" " * 5}{response}""")
# ### Evaluation
print(f"""{" " * 5}Evaluating model [{model_id}] with real data on dataset [{ds_id}]...""")
msg = f"""E|{ds_id}|{model_id}|"""
self._send_message(msg)
# Reading Response. May take a while
response = self._receive_message()
arguments = response.split("|")
if arguments[0] == "OK":
# Send Payload:
# 1st preprocessing
# # Send Class name
# # Send Class code
preproc_class_code_b = inspect.getsource(type(preprocessing)).encode("UTF-8")
print("Sending Preproc Class name.")
self._send_message(f'''{preprocessing.__class__.__name__}|{len(preproc_class_code_b)}''')
print("Sending Preproc Class Code.")
self._send_bytes(preproc_class_code_b)
# 2nd Model
print("Sending model.")
model.save("./temp.h5")
file_size = os.stat("./temp.h5").st_size
self._send_message(f'''MODEL_SIZE|{file_size}''')
sent_so_far = 0
with open("./temp.h5",mode="rb") as model_file:
buffer = model_file.read(16_384)
while (buffer):
print(f'{sent_so_far}...',end="")
self._send_bytes(buffer,sleep_time=0.1)
sent_so_far += len(buffer)
buffer = model_file.read(16_384)
print()
# Reading Response. May take a while
response = self._receive_message()
arguments = response.split("|")
if arguments[0] == "OK":
print()
print(f"""{" " * 5}Evaluation completed successfully.""")
print(f"""{" " * 15}Loss:{float(arguments[1]):.8f} Accuracy:{float(arguments[2]):.8f}""")
else:
print(f"""{" " * 5}An Error received from while evaluating server: {arguments[1]}""")
else:
print(f"""{" " * 5}An Error received from server: {arguments[1]}""")
print()
print(f"""{" " * 5}Disconnecting...""")
msg = f"""D|"""
self._send_message(msg)
response = self._receive_message()
print(f"""{" " * 5}Disconnected from server.""")
print()
print(f"""{" " * 5}Evaluation concluded.""")
print(f"""{"-" * 80}""")
try:
if os.path.exists("./temp.h5"):
os.remove("./temp.h5")
except:
pass
```
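Because `evaluate_model` ships the preprocessing class source over the socket via `inspect.getsource`, the `Preprocessing` subclass has to be self-contained. A minimal client-side sketch, assuming the base class only requires a `prepare(data)` method (as the practitioner example later in this entry suggests); the class name, host, port and ids here are placeholders:
```python
import numpy as np
from preprocessing import Preprocessing
from evaluation import EvaluationClient
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

class MyPreprocess(Preprocessing):
    def prepare(self, data):
        # hypothetical layout: all columns but the last are features, the last is the label
        x = np.asarray(data[:, :-1]).astype("float32")
        y = np.asarray(data[:, -1]).astype("float32")
        return x, y

model = Sequential([Dense(1, activation="sigmoid", input_shape=[4])])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

client = EvaluationClient("127.0.0.1", 35000)  # hypothetical host/port
client.evaluate_model("79e3b", "demo_model", model, MyPreprocess())
```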
#### File: src/practitioner/classify_iris.py
```python
import numpy as np
import sklearn as scikit
import tensorflow as tf
from preprocessing import Preprocessing
from evaluation import EvaluationClient
from sklearn.model_selection import train_test_split
# #####################################################################################################################
# Implementation of Pre-Processing
# #####################################################################################################################
class MyPreprocess(Preprocessing):
def prepare(self, data):
x = data[: , 0:4]
x = np.asarray(x).astype('float32')
x = scikit.preprocessing.normalize(x)
# labels encoding.
y = data[: , 4]
le = scikit.preprocessing.LabelEncoder()
le.fit(y)
y = le.transform(y)
return x, y
if __name__ == "__main__":
print (f"""Using Tensorflow version {tf.__version__}""")
# ################################################################################
# LOADING DATA
iris_synthetic = np.load("../../data/generated/iris_synt.npz", allow_pickle=True)
iris_data = iris_synthetic["data"]
print(f"""Iris Synthetic data shape:{iris_data.shape}""")
# ################################################################################
# Preprocessing
pre_proc = MyPreprocess()
x, y = pre_proc.prepare(iris_data)
print(f"""Preprocessed data: x:{x.shape}, y:{y.shape}""")
x_train, x_test, y_train, y_test = train_test_split(x, y)
print(f"""Train: x:{x_train.shape}, y:{y_train.shape}. Test: x:{x_test.shape}, y:{y_test.shape}""")
# ################################################################################
# DEFINING THE MODEL AND TRAINING
model = tf.keras.models.Sequential(name="Iris_Synthetic")
# model.add( tf.keras.layers.LayerNormalization( input_shape=[4],
# axis=-1, center=True, scale=True,
# trainable=True, name='input_normalized'))
model.add(tf.keras.layers.Dense(units=150, name="dense1", input_shape=[4]))
model.add(tf.keras.layers.Dropout(0.8, name="dropout_1"))
model.add(tf.keras.layers.Dense(units=150, name="dense2"))
model.add(tf.keras.layers.Dropout(0.8, name="dropout_2"))
model.add(tf.keras.layers.Dense(3, activation=tf.nn.softmax, name="dense3_softmax"))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=["accuracy"])
# ################################################################################
# Training
model.fit(x_train, y_train, batch_size=8, epochs=15)
# ################################################################################
# Local Evaluation
print()
print(f"={'Evaluating using synthetic data':^78}=")
print(model.evaluate(x_test, y_test))
# ################################################################################
# Remote Evaluation
eval = EvaluationClient("goliath.ucdenver.pvt", 35000)
eval.evaluate_model("79e3b","Iris_Synthetic", model, pre_proc)
```
#### File: src/real_data_evaluation/classify_miocardial.py
```python
import numpy as np
import pandas as pd
import sklearn as scikit
import tensorflow as tf
from preprocessing import Preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
# #####################################################################################################################
# Implementation of Pre-Processing
# #####################################################################################################################
class MyPreprocess(Preprocessing):
def prepare(self, in_data):
        # Data has a shape of (-1, 124), where the last 12 columns correspond to different events that can be predicted.
        # In this script we evaluate the performance of predicting atrial fibrillation (the first target class).
        # Discarding the ID column and the other targets.
        x = in_data[:, 1:112]  # The first 112 columns except the very first one, which is the record id.
        y = in_data[:, 112].astype('float32')  # Atrial fibrillation target with labels 0/1
return x, y
if __name__ == "__main__":
print(f"""Using Tensorflow version {tf.__version__}""")
print("*" * 80)
print("""---- THIS IS THE EVALUATION OF THE MODEL TRAINED DIRECTLY WITH REAL DATA""")
print("*" * 80)
# ------------------------------------------------------------------------------------------------------------------
# LOADING DATA
data_df = pd.read_csv("../../data/source/miocardial.csv")
data = data_df.values
print(f"""MIOCARDIAL Real DS shape:{data.shape}""")
# ------------------------------------------------------------------------------------------------------------------
# Preprocessing
#
pre_proc = MyPreprocess()
x, y = pre_proc.prepare(data)
print(f"""Preprocessed data: x:{x.shape}, y:{y.shape}""")
x_train, x_test, y_train, y_test = train_test_split(x, y)
print(f"""Train: x:{x_train.shape}, y:{y_train.shape}. Test: x:{x_test.shape}, y:{y_test.shape}""")
# ------------------------------------------------------------------------------------------------------------------
# DEFINING THE MODEL AND TRAINING
model = tf.keras.models.Sequential(name="Miocardial_Real")
model.add(tf.keras.layers.Dense(units=150, name="dense1", input_shape=[111]))
model.add(tf.keras.layers.Dropout(0.8, name="dropout_1"))
model.add(tf.keras.layers.Dense(units=150, name="dense2"))
model.add(tf.keras.layers.Dropout(0.8, name="dropout_2"))
model.add(tf.keras.layers.Dense(3, activation=tf.nn.softmax, name="dense3_softmax"))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=["accuracy"])
# ------------------------------------------------------------------------------------------------------------------
# Training
model.fit(x_train, y_train, batch_size=8, epochs=15)
# ------------------------------------------------------------------------------------------------------------------
# Local Evaluation
print()
print(f"={'Evaluating using Real data':^78}=")
print(model.evaluate(x_test, y_test))
``` |
{
"source": "jpasyeva/python_for_QA",
"score": 3
} |
#### File: python_for_QA/bdd/contact_steps.py
```python
from pytest_bdd import given, when, then
from models.contact import Contact
import random
import pytest
@given('a contact list')
def contact_list(db):
return db.get_contact_list()
@given('a contact with <lastname>, <firstname>, <address>, <homephone>, <mobilephone>, <workphone>, '
'<secondaryphone>, <email>, <email2> and <email3>')
def new_contact(lastname, firstname, address, homephone, mobilephone, workphone, secondaryphone, email, email2, email3):
return Contact(lastname=lastname, firstname=firstname, address=address,
home_phone=homephone, mobile_phone=mobilephone, work_phone=workphone, phone2=secondaryphone,
email=email, email2=email2, email3=email3)
# Add contact
@when('I add the contact to the list')
def add_new_contact(app, new_contact):
app.contact.add_new(new_contact)
@then('the new contact list is equal to the old list with the added contact')
def verify_contact_added(db, contact_list, new_contact, app, check_ui):
old_contacts = contact_list
new_contacts = db.get_contact_list()
old_contacts.append(new_contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
@given('a non-empty contact list')
def non_empty_contact_list(db, app):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname="New contact"))
return db.get_contact_list()
@given('a random contact from the list')
def random_contact(non_empty_contact_list):
return random.choice(non_empty_contact_list)
# Delete contact
@when('I delete the contact from the list')
def delete_contact(app, random_contact):
app.contact.delete_contact_by_id(random_contact.id_contact)
@then('the new contact list is equal to the old list without the deleted contact')
def verify_contact_deleted(db, non_empty_contact_list, random_contact, app, check_ui):
old_contacts = non_empty_contact_list
new_contacts = db.get_contact_list()
assert len(old_contacts) - 1 == len(new_contacts)
old_contacts.remove(random_contact)
assert old_contacts == new_contacts
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
# Modify contact
@given('a random contact from the list')
def index_random_contact(non_empty_contact_list):
index_random_contact = random.randrange(len(non_empty_contact_list))
return index_random_contact
@given('a contact with <lastname>, <firstname>, <address>, <homephone>, <mobilephone>, <workphone>, '
'<secondaryphone>, <email>, <email2> and <email3>')
def contact_modify(lastname, firstname, address, homephone, mobilephone, workphone, secondaryphone, email, email2, email3):
return Contact(lastname=lastname, firstname=firstname, address=address,
home_phone=homephone, mobile_phone=mobilephone, work_phone=workphone, phone2=secondaryphone,
email=email, email2=email2, email3=email3)
@when('I modify the contact in the list')
def modify_contact(app, non_empty_contact_list, index_random_contact, contact_modify):
contact_modify.id_contact = non_empty_contact_list[index_random_contact].id_contact
app.contact.edit_contact_by_id(contact_modify.id_contact, contact_modify)
@then('the new contact list is equal to the old list with the modified contact')
def verify_contact_modified(app, db, non_empty_contact_list, index_random_contact, contact_modify, check_ui):
old_contacts = non_empty_contact_list
new_contacts = db.get_contact_list()
assert len(old_contacts) == len(new_contacts)
old_contacts[index_random_contact] = contact_modify
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
        assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
```
#### File: python_for_QA/tests/test_phones_and_emails.py
```python
import re
def test_phones_on_homepage(app, db):
contacts_from_homepage = app.contact.get_contact_list()
contacts_from_db = db.get_contact_list_from_db()
for contact_from_homepage in contacts_from_homepage:
id_contact = contact_from_homepage.id_contact
for contact_from_db in contacts_from_db:
id_contact2 = contact_from_db.id_contact
if id_contact == id_contact2:
assert contact_from_homepage.lastname == contact_from_db.lastname
assert contact_from_homepage.firstname == contact_from_db.firstname
assert contact_from_homepage.address == contact_from_db.address
assert contact_from_homepage.all_emails_from_home_page == merge_email_like_on_homepage(contact_from_db)
assert contact_from_homepage.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_db)
def clear(s):
return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x), filter(lambda x: x is not None,
[contact.home_phone, contact.mobile_phone,
contact.work_phone, contact.phone2]))))
def merge_email_like_on_homepage(contact):
return "\n".join(filter(lambda x: x != "",
filter(lambda x: x is not None,
[contact.email, contact.email2, contact.email3])))
```
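As a quick sanity check of the `clear()` helper used above, the regex simply strips parentheses, spaces and dashes (the phone string below is made up):
```python
import re

def clear(s):
    return re.sub("[() -]", "", s)

assert clear("+7 (912) 345-67-89") == "+79123456789"
```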
#### File: python_for_QA/tests/test_phones.py
```python
from models.contact import Contact
from random import randrange
import re
def test_contact_on_home_page(app):
contact = app.contact.get_contact_list()
index = randrange(len(contact))
contact_from_home_page = app.contact.get_contact_list()[index]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(index)
assert contact_from_home_page.lastname == contact_from_edit_page.lastname
assert contact_from_home_page.firstname == contact_from_edit_page.firstname
assert contact_from_home_page.address == contact_from_edit_page.address
assert contact_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page(contact_from_edit_page)
assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
def test_contact_like_db_and_home(app, db):
contacts_db = sorted(db.get_contact_list(), key=Contact.id_or_max)
contacts_on_home_page = sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
for i in range(len(contacts_on_home_page)):
contact_from_home_page = contacts_on_home_page[i]
contact_from_db = contacts_db[i]
assert contact_from_home_page.id_contact == contact_from_db.id_contact
assert contact_from_home_page.firstname == contact_from_db.firstname
assert contact_from_home_page.lastname == contact_from_db.lastname
def test_phones_on_contact_view_page(app):
contact = app.contact.get_contact_list()
index = randrange(len(contact))
contact_from_view_page = app.contact.get_contact_from_view_page(index)
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(index)
assert contact_from_view_page.home_phone == contact_from_edit_page.home_phone
assert contact_from_view_page.mobile_phone == contact_from_edit_page.mobile_phone
assert contact_from_view_page.work_phone == contact_from_edit_page.work_phone
assert contact_from_view_page.phone2 == contact_from_edit_page.phone2
def clear(s):
return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x), filter(lambda x: x is not None,
[contact.home_phone, contact.mobile_phone,
contact.work_phone, contact.phone2]))))
def merge_emails_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "", filter(lambda x: x is not None,
[contact.email, contact.email2, contact.email3])))
``` |
{
"source": "jpat82792/Sequence_Finder",
"score": 2
} |
#### File: jpat82792/Sequence_Finder/ModelQueryResults.py
```python
class ModelQueryResults:
units_before_target = None
target = None
units_after_target = None
start_of_captured_units = None
end_of_captured_units = None
def __init__(self, units_before_target, target, units_after_target, start_of_captured_units, end_of_captured_units):
self.units_before_target = units_before_target
self.units_after_target = units_after_target
self.target = target
self.start_of_captured_units = start_of_captured_units
self.end_of_captured_units = end_of_captured_units
```
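A small construction sketch for the container class above (all values are hypothetical):
```python
result = ModelQueryResults(
    units_before_target="ATG",
    target="GATTACA",
    units_after_target="TTG",
    start_of_captured_units=12,
    end_of_captured_units=24,
)
print(result.target, result.start_of_captured_units, result.end_of_captured_units)
```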
#### File: jpat82792/Sequence_Finder/ViewReview.py
```python
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.relativelayout import RelativeLayout
from kivy.clock import Clock
from kivy.uix.button import Button
import UiConstants
class ViewReview(Screen):
def __init__(self, next_screen, previous_screen, secretary, screen_manager, **kwargs):
super().__init__(**kwargs)
self.main_layout = RelativeLayout()
self.label_screen = Label(text="Review", size_hint=(0.5, 0.15), pos_hint={'x': 0.25, 'y': 0.85},
font_name="fonts/RobotoMono-Bold.ttf",
font_size=UiConstants.UiConstants.labe_main_screen, color=[0,0,0,1])
self.main_layout.add_widget(self.label_screen)
self.label_sequence_type = Label(text="Sequence Type:", size_hint=(0.25, 0.1), pos_hint={'x': 0.25, 'y': 0.73},
color=[0, 0, 0, 1])
self.text_input_sequence_type = TextInput(text="secretary",
size_hint=(0.25, 0.1),
pos_hint={'x': 0.5, 'y': 0.73}
, background_normal="backgrounds/input-background.jpg")
self.label_units_before = Label(text="Units before", size_hint=(0.25, 0.1), pos_hint={'x': 0.25, 'y': 0.61}
, color=[0, 0, 0, 1])
self.label_units_target = Label(text="Target sequence", size_hint=(0.25, 0.1), pos_hint={'x': 0.25, 'y': 0.49}
, color=[0, 0, 0, 1])
self.label_units_after = Label(text="Units after", size_hint=(0.25, 0.1), pos_hint={'x': 0.25, 'y': 0.37}
, color=[0, 0, 0, 1])
self.label_units_path = Label(text="Path", size_hint=(0.25, 0.1), pos_hint={'x': 0.25, 'y': 0.25}
, color=[0, 0, 0, 1])
self.label_units_file_name = Label(text="File name", size_hint=(0.25, 0.1), pos_hint={'x': 0.25, 'y': 0.13}
, color=[0, 0, 0, 1])
self.text_input_units_before = TextInput(text=secretary.before_target_sequence, size_hint=(0.25, 0.1),
pos_hint={'x': 0.5, 'y': 0.61}, background_normal="backgrounds/input-background.jpg")
self.text_input_target = TextInput(text=secretary.target_sequence, size_hint=(0.25, 0.1),
pos_hint={'x': 0.5, 'y': 0.49},background_normal="backgrounds/input-background.jpg")
self.text_input_units_after = TextInput(text=secretary.after_target_sequence, size_hint=(0.25, 0.1),
pos_hint={'x': 0.5, 'y': 0.37},background_normal="backgrounds/input-background.jpg")
self.text_input_units_path = TextInput(text=secretary.output_file_path, size_hint=(0.25, 0.1),
pos_hint={'x': 0.5, 'y': 0.25},background_normal="backgrounds/input-background.jpg")
self.text_input_units_file_name = TextInput(text=secretary.output_file_name,
size_hint=(0.25, 0.1), pos_hint={'x': 0.5, 'y': 0.13}
, background_normal="backgrounds/input-background.jpg")
self.button_run_analysis = Button(text="Run analysis", size_hint=(0.25, 0.1), pos_hint={'x': 0.5, 'y': 0},
on_release=lambda btn: self.run_analysis(secretary=secretary,
screen_manager=screen_manager,
next_screen=next_screen),
font_size=UiConstants.UiConstants.label_font_small_size,
background_normal="backgrounds/next-button.jpg",
background_down="backgrounds/next-button-pressed.jpg",
font_name="fonts/RobotoMono-Bold.ttf",)
self.button_previous_screen = Button(text="Previous", size_hint=(0.25, 0.1), pos_hint={'x': 0.25, 'y': 0},
font_size=UiConstants.UiConstants.label_font_small_size,
on_release=lambda btn: self.go_back(previous_screen=previous_screen,
screen_manager=screen_manager),
background_normal="backgrounds/back-button.jpg",
background_down="backgrounds/back-button-down.jpg",
font_name="fonts/RobotoMono-Bold.ttf")
self.main_layout.add_widget(self.label_sequence_type)
self.main_layout.add_widget(self.text_input_sequence_type)
self.main_layout.add_widget(self.label_units_before)
self.main_layout.add_widget(self.text_input_units_before)
self.main_layout.add_widget(self.label_units_target)
self.main_layout.add_widget(self.text_input_target)
self.main_layout.add_widget(self.label_units_after)
self.main_layout.add_widget(self.text_input_units_after)
self.main_layout.add_widget(self.label_units_path)
self.main_layout.add_widget(self.text_input_units_path)
self.main_layout.add_widget(self.label_units_file_name)
self.main_layout.add_widget(self.text_input_units_file_name)
self.main_layout.add_widget(self.button_run_analysis)
self.main_layout.add_widget(self.button_previous_screen)
Clock.schedule_once(self.custom_init, 1)
def custom_init(self, *args):
self.add_widget(self.main_layout)
def reload_ui(self, secretary):
self.text_input_units_before.text = secretary.before_target_sequence
self.text_input_target.text = secretary.target_sequence
self.text_input_units_after.text = secretary.after_target_sequence
self.text_input_sequence_type.text = secretary.sequence_type
self.text_input_units_file_name.text = secretary.output_file_name
self.text_input_units_path.text = secretary.output_file_path
def go_back(self, previous_screen, screen_manager):
screen_manager.current = previous_screen
def run_analysis(self, secretary, screen_manager, next_screen):
print("run_analysis()")
secretary.get_target_sequences()
screen_manager.current = next_screen
``` |
{
"source": "jpatel33/Fall-2021-SE-Group-37",
"score": 3
} |
#### File: Fall-2021-SE-Group-37/test/test_cube.py
```python
from code import cubeme, squareme, inc
def testcube():
assert cubeme(-2) == -8
assert cubeme(2) == 8
def testsquare():
assert squareme(2) == 4
assert squareme(-2) == 4
def test_inc():
assert inc(3) == 4
``` |
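The `code` module under test is not included in this snippet; a minimal implementation consistent with these assertions could look like the sketch below (not necessarily the project's actual file):
```python
# code.py (hypothetical reconstruction from the asserts above)
def cubeme(x):
    return x ** 3

def squareme(x):
    return x ** 2

def inc(x):
    return x + 1
```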
{
"source": "jpatel3/django-inapp-survey",
"score": 2
} |
#### File: django-inapp-survey/inapp_survey/serializers.py
```python
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from .models import Campaign, CampaignCustomParam, \
CampaignQuestion, UserCampaign, UserCampaignResponse
class CampaignCustomParamSerializer(serializers.ModelSerializer):
class Meta:
model = CampaignCustomParam
fields = (
"param_key",
"param_value",
)
class CampaignListSerializer(serializers.ModelSerializer):
class Meta:
model = Campaign
fields = (
'id',
'title',
'slug',
'description',
'is_authenticated',
'expiry_date',
'campaign_type',
)
class CampaignQuestionSerializer(serializers.ModelSerializer):
class Meta:
model = CampaignQuestion
fields = (
"id",
"question",
"order",
)
class CampaignSerializer(serializers.ModelSerializer):
custom_param = CampaignCustomParamSerializer(
many=True, read_only=True)
steps = CampaignQuestionSerializer(
source='questions',
many=True)
class Meta:
model = Campaign
fields = (
'id',
'title',
'slug',
'description',
'is_authenticated',
'expiry_date',
'campaign_type',
'steps',
'custom_param'
)
# User Campaign Responses
class UserCampaignResponseSerializer(serializers.ModelSerializer):
class Meta:
model = UserCampaignResponse
        exclude = ('user_campaign',)
# To handle the unique entry per user_campaing and question
# validators = [
# UniqueTogetherValidator(
# queryset=UserCampaignResponse.objects.all(),
# fields=('user_campaign', 'question')
# )
# ]
class UserCampaignSerializer(serializers.ModelSerializer):
answers = UserCampaignResponseSerializer(
source='usercampaignresponse_set',
many=True,
required=False)
    # The default save is overridden because user_campaign is not yet
    # available when the nested responses serializer tries to save
def create(self, validated_data):
answer_data = validated_data.pop('usercampaignresponse_set')
usercampaign_post = UserCampaign.objects.create(**validated_data)
for post in answer_data:
e = UserCampaignResponse.objects.create(
user_campaign=usercampaign_post, **post)
return usercampaign_post
class Meta:
model = UserCampaign
fields = (
'id',
'user',
'campaign',
'is_completed',
'is_canceled',
'answers',
)
validators = [
UniqueTogetherValidator(
queryset=UserCampaign.objects.all(),
fields=('user', 'campaign')
)
]
``` |
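A usage sketch of the nested write handled by `UserCampaignSerializer.create`; the primary keys and the `UserCampaignResponse` field names (`question`, `answer`) are assumptions, since those models are not shown here:
```python
payload = {
    "user": 1,          # existing User pk (hypothetical)
    "campaign": 2,      # existing Campaign pk (hypothetical)
    "is_completed": True,
    "is_canceled": False,
    "answers": [
        {"question": 5, "answer": "Very satisfied"},  # field names assumed
    ],
}
serializer = UserCampaignSerializer(data=payload)
serializer.is_valid(raise_exception=True)
user_campaign = serializer.save()  # creates the UserCampaign plus one UserCampaignResponse
```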
{
"source": "jpatel888/deep-screens",
"score": 3
} |
#### File: deep-screens/figures/image.py
```python
from utils.image_utils import concatenate_images_by_width
import numpy as np
import cv2
class Image:
def __init__(self, config, input_imgs, label=None, logit=None):
self.config = config
self.baseline_img = input_imgs[:, :, :3]
self.current_img = input_imgs[:, :, 3:]
self.label = label
self.logit = logit
def apply_box(self, image, bounding_box, color):
"""
:param image:
:param bounding_box: (left_x, top_y, right_x, bottom_y)
:param color:
:return:
"""
line_width = self.config.figure.line_width
left_x, top_y, right_x, bottom_y = bounding_box
try:
mid_x = int((left_x + right_x) / 2)
mid_y = int((top_y + bottom_y) / 2)
            image[mid_y - line_width:mid_y + line_width, mid_x - line_width:mid_x + line_width] = color
image[top_y - line_width:top_y, left_x:right_x] = color
image[top_y:bottom_y, left_x:left_x + line_width] = color
image[bottom_y - line_width:bottom_y, left_x:right_x] = color
image[top_y:bottom_y, right_x:right_x + line_width] = color
except Exception as exception:
pass
return image
def get_bounding_box(self, y_idx, x_idx, yxhw):
num_grid_cells_width = self.config.model.model_output_size[1]
num_grid_cells_height = self.config.model.model_output_size[0]
image_width = self.config.model.input_shape[1]
image_height = self.config.model.input_shape[0]
grid_cell_width = image_width / num_grid_cells_width
grid_cell_height = image_height / num_grid_cells_height
my, mx, h, w = yxhw[0], yxhw[1], yxhw[2], yxhw[3]
# print(yxhw)
mx = (mx * grid_cell_width) + (x_idx * grid_cell_width)
my = (my * grid_cell_height) + (y_idx * grid_cell_height)
h = h * grid_cell_height
w = w * grid_cell_width
# print(mx, my, h, w)
lx = mx - (w / 2)
rx = mx + (w / 2)
ty = my - (h / 2)
by = my + (h / 2)
# print(lx, rx, ty, by)
ret = int(lx), int(ty), int(rx), int(by)
return ret
def get_color(self, categories):
category = np.argmax(categories)
return self.config.figure.color_map[category]
def apply_label(self, image, label):
for _y in range(label.shape[0]):
for _x in range(label.shape[1]):
has_defect = label[_y, _x, 0]
if has_defect > 0.5:
bounding_box = self.get_bounding_box(_y, _x, label[_y, _x, 5:])
color = self.get_color(label[_y, _x, 1:5])
image = self.apply_box(image, bounding_box, color)
return image
def get_has_defect_graph(self, grid):
white_space = np.transpose([Image.sigmoid(grid[:, :, 0]) * 255] * 3, [1, 2, 0])
new_size = (self.current_img.shape[1], self.current_img.shape[0])
return cv2.resize(white_space, new_size, interpolation=cv2.INTER_NEAREST)
def get_log_image(self):
first_image = self.baseline_img
second_image = self.apply_label(np.copy(self.current_img), self.label) if self.label is not None else None
third_image = self.apply_label(np.copy(self.current_img), self.logit) if self.logit is not None else None
fourth_image = self.get_has_defect_graph(self.label) if self.label is not None else None
fifth_image = self.get_has_defect_graph(self.logit) if self.logit is not None else None
images = [first_image, second_image, third_image, fourth_image, fifth_image]
all_images = filter(lambda el: el is not None, images)
return concatenate_images_by_width(list(all_images))
@staticmethod
def sigmoid(x):
return 1 / (1 + np.exp(-x))
```
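To make the grid-to-pixel conversion in `get_bounding_box` concrete, here is a worked example with assumed config values (a 12x12 output grid over a 384x384 input; these numbers are illustrative only):
```python
# grid cell size: 384 / 12 = 32 px in each direction
# y_idx=3, x_idx=5, yxhw=(0.5, 0.5, 1.0, 1.0)  -> offsets/sizes are relative to one cell
mx = 0.5 * 32 + 5 * 32   # 176
my = 0.5 * 32 + 3 * 32   # 112
w = 1.0 * 32             # 32
h = 1.0 * 32             # 32
box = (int(mx - w / 2), int(my - h / 2), int(mx + w / 2), int(my + h / 2))
assert box == (160, 96, 192, 128)  # (left_x, top_y, right_x, bottom_y)
```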
#### File: deep-screens/utils/config.py
```python
import json
from bunch import bunchify
import os
from utils.utils import get_args
from utils.utils import get_dict_from_json
def get_config_from_json(config_file_path):
config_dict = get_dict_from_json("values/" + config_file_path)
config = bunchify(config_dict)
return config, config_dict
def process_configs(json_file_names):
configs = []
for json_file_name in json_file_names:
config, _ = get_config_from_json(json_file_name)
config.summary_dir = os.path.join("./experiments", "summary/", config.exp_name + "/")
config.checkpoint_dir = os.path.join("./experiments", "checkpoint/", config.exp_name + "/")
config.figure_dir = os.path.join("./experiments", "generated_figures/", config.exp_name + "/")
config.tflite_dir = os.path.join("./experiments", "tflite/", config.exp_name + "/")
configs.append(config)
return configs
def get_all_available_configs():
all_config_paths = [path for path in os.listdir("values") if path.endswith("config.json")]
all_config_paths.sort()
return all_config_paths
def get_default_configs():
print("Couldn't get config params or none provided, running all configs in configs/ sequentially")
all_config_paths = get_all_available_configs()
try:
return process_configs(all_config_paths)
except json.decoder.JSONDecodeError:
print("Error in JSON")
exit(0)
except Exception as exception:
print("Error in fetching config:", exception)
exit(0)
def get_configs():
try:
args = get_args()
configs = process_configs(args.config.split(","))
except:
configs = get_default_configs()
print("Successfully loaded", len(configs), "configs")
return configs
``` |
{
"source": "jpatelbappa/youtube_search_engine_project",
"score": 3
} |
#### File: jpatelbappa/youtube_search_engine_project/query_on_whoosh.py
```python
from whoosh.qparser import QueryParser
from whoosh import scoring
from whoosh.index import open_dir
import sys
import json
ix = open_dir("indexdir")
def query(query_str, items_per_page=10, current_page=1):
with ix.searcher(weighting=scoring.Frequency) as searcher:
query = QueryParser("description", ix.schema).parse(query_str)
results = searcher.search(query, limit=None)
num_query_results = len(results)
query_results = []
start_index = (current_page - 1) * items_per_page
end_index = start_index + items_per_page
for i in range(start_index, min(len(results), end_index)):
d={}
d['url'] = "https://www.youtube.com/watch?v=%s" % results[i]['id']
d['title'] = results[i]['title']
d['description'] = results[i].highlights('description')
d['score'] = results[i].score
query_results.append(d)
return query_results, num_query_results
if __name__ == "__main__":
query_str = sys.argv[1]
items_per_page = int(sys.argv[2])
current_page = int(sys.argv[3])
query_results, num_query_results = query(query_str, items_per_page=items_per_page, current_page=current_page)
print(json.dumps(query_results))
``` |
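The paging arithmetic in `query()` maps `(items_per_page, current_page)` onto a slice of the hit list, e.g.:
```python
# items_per_page=10, current_page=3  (hypothetical values)
start_index = (3 - 1) * 10    # 20
end_index = start_index + 10  # 30
# results[20:30] are returned, or fewer if the hit list ends earlier
```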
{
"source": "jpatnayk/i3wm-themer",
"score": 3
} |
#### File: i3wm-themer/src/fileutils.py
```python
import os.path
def locate_folder( path ):
return os.path.isdir( path )
def locate_file( path ):
return os.path.isfile( path )
```
#### File: i3wm-themer/src/replace_xresources.py
```python
import json
import replace_line as rl
import msgfunc as prnt
import fileutils as fileu
def replace_xresources( configuration, json_file):
prnt.prnt( '-n', 'Replacing the colors in .Xresources')
if( fileu.locate_file(configuration['xresources'])):
prnt.prnt( '-s', 'Located your .Xresources file')
if 'xresources' in json_file:
xresources = json_file['xresources']
prnt.prnt( '-s', 'Found the Xresources info in the JSON file')
rl.replace_line( configuration['xresources'], '*background:', '*background: '+xresources['background'])
rl.replace_line( configuration['xresources'], '*foreground:', '*foreground: '+xresources['foreground'])
rl.replace_line( configuration['xresources'], '*cursorColor:', '*cursorColor: '+xresources['cursorcolor'])
            for i in range(16):  # color0 through color15
rl.replace_line( configuration['xresources'], '*color'+str(i)+':', '*color'+str(i)+': '+xresources['color'+str(i)])
rl.replace_line( configuration['xresources'], 'rofi.color-window:', 'rofi.color-window: '+xresources['rofi.color-window'])
rl.replace_line( configuration['xresources'], 'rofi.color-normal:', 'rofi.color-normal: '+xresources['rofi.color-normal'])
rl.replace_line( configuration['xresources'], 'rofi.color-active:', 'rofi.color-active: '+xresources['rofi.color-active'])
rl.replace_line( configuration['xresources'], 'rofi.color-urgent:', 'rofi.color-urgent: '+xresources['rofi.color-urgent'])
else:
prnt.prnt( '-f', 'Failed to locate the Xresources info in the JSON file')
else:
prnt.prnt( '-f', 'Failed to locate your .Xresources file')
``` |
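Inferred from the keys accessed above, the theme JSON is expected to contain an `xresources` block shaped roughly like this (the colour values are placeholders):
```python
json_file = {
    "xresources": {
        "background": "#1d2021",
        "foreground": "#ebdbb2",
        "cursorcolor": "#ebdbb2",
        "color0": "#282828",
        # ... color1 through color15 ...
        "rofi.color-window": "#282828, #ebdbb2, #282828",
        "rofi.color-normal": "#282828, #ebdbb2, #282828, #ebdbb2, #282828",
        "rofi.color-active": "#282828, #ebdbb2, #282828, #ebdbb2, #282828",
        "rofi.color-urgent": "#282828, #cc241d, #282828, #cc241d, #ebdbb2",
    }
}
```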
{
"source": "JPatricio/jls",
"score": 3
} |
#### File: jls/jls/contents.py
```python
import glob
import itertools
import math
import os
from argparse import Namespace
from jls.disk_object import DiskObject
class DirectoryContents(object):
def __init__(self, args: Namespace):
"""
Will gather and store contents of a certain directory
:param path: Path must be absolute
"""
self.islink = os.path.islink(args.path)
self.isdir = os.path.isdir(args.path) and not args.dir_as_files
self.follow_link = self.islink and self.isdir and not args.detailed and not args.clarify
self.path = os.readlink(args.path) if self.follow_link else args.path
self.args = args
self.contents = list()
self.max_width_filename = 0
objects_in_path = list()
if self.isdir and (not self.islink or self.islink and self.follow_link):
objects_in_path = glob.iglob(f'{self.path}/*', recursive=False)
# if -a specified, need to include hidden objects.
# Glob does not support one look up so separate look up needed
if args.all:
# lol.. cheat
objects_in_path = itertools.chain(
['.', '..'], objects_in_path, glob.iglob(f'{self.path}/.*', recursive=False)
)
if args.almost_all:
objects_in_path = itertools.chain(objects_in_path, glob.iglob(f'{self.path}/.*', recursive=False))
else:
objects_in_path = [os.path.abspath(self.path)]
for object_full_path in objects_in_path:
# Add to contents
object = DiskObject(object_full_path)
self.contents.append(object)
# Track max width
self.max_width_filename = max(self.max_width_filename, len(object.name))
# Add 1 for space after name
self.max_width_filename += 1
def print_contents(self):
"""
:return:
"""
# We'll print the files in table like fashion, using the full width of the terminal (number of characters)
terminal_width = os.get_terminal_size().columns
# How many objects can be print per row
obj_per_row = math.floor(terminal_width / self.max_width_filename)
if not self.args.no_sort:
self.contents.sort(key=lambda disk_object: disk_object.name)
if not self.args.detailed:
# By default, sort contents by name property
# TODO: ls prints order vertically, we print horizontally. Crap. Check out how to fix it later.
printed_obj = 0
for file in self.contents:
print(
file.__str__(length=self.max_width_filename, args=self.args),
end="\n" if printed_obj >= obj_per_row or self.args.one else ""
)
                printed_obj = 0 if printed_obj >= obj_per_row else printed_obj + 1
else:
# This will be a detailed view.
for file in self.contents:
print(file.__str__(length=None, args=self.args))
# This fixes some weird left out buffer on print
print("", end="")
``` |
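A usage sketch of `DirectoryContents`; the flags on the `Namespace` mirror the `args` attributes the class reads (the values themselves are hypothetical):
```python
from argparse import Namespace

args = Namespace(
    path=".", dir_as_files=False, detailed=False, clarify=False,
    all=False, almost_all=False, no_sort=False, one=False,
)
DirectoryContents(args).print_contents()
```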
{
"source": "JPatrick9793/chemical_vae",
"score": 3
} |
#### File: chemical_vae/chemvae/tgru_k2_gpu.py
```python
from keras.layers.recurrent import GRU
from keras import backend as K
from keras.engine import InputSpec
import numpy as np
if K.backend() == 'tensorflow':
from .sampled_rnn_tf import sampled_rnn
else:
    raise NotImplementedError("Backend not implemented")
class TerminalGRU(GRU):
# Heavily adapted from GRU in recurrent.py
# Implements professor forcing
def __init__(self, units,
temperature=1., rnd_seed=None, recurrent_dropout=0.0,
**kwargs):
# @param: temperature - sampling temperature
# Annealing will be handled in the callbacks
super(TerminalGRU, self).__init__(units, **kwargs)
self.units = units
self.temperature = temperature
self.rnd_seed = rnd_seed
self.uses_learning_phase = True
self.supports_masking = False
self.units = units
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.input_spec = [InputSpec(ndim=3),
InputSpec(ndim=3)]
def build(self, input_shape):
# all of this is copied from GRU, except for one part commented below
if isinstance(input_shape, list):
input_shape = input_shape[0]
batch_size = input_shape[0] if self.stateful else None
self.input_dim = input_shape[2]
self.input_spec = [InputSpec(shape=(batch_size, None, self.input_dim)),
InputSpec(shape=(batch_size, None, self.units))]
self.state_spec = InputSpec(shape=(batch_size, self.units))
self.states = [None]
if self.stateful:
self.reset_states()
self.kernel = self.add_weight((self.input_dim, self.units * 3),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
# adding an extra recurrent weight here, change from GRU layer:
# this last recurrent weight applied to true sequence input from prev. timestep,
# or sampled output from prev. time step.
self.recurrent_kernel = self.add_weight(
(self.units, self.units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight((self.units * 4,),
name='bias',
initializer='zero',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.kernel_z = self.kernel[:, :self.units]
self.recurrent_kernel_z = self.recurrent_kernel[:, :self.units]
self.kernel_r = self.kernel[:, self.units: self.units * 2]
self.recurrent_kernel_r = self.recurrent_kernel[:,
self.units:
self.units * 2]
self.kernel_h = self.kernel[:, self.units * 2:]
self.recurrent_kernel_h = self.recurrent_kernel[:, self.units * 2:self.units * 3]
self.recurrent_kernel_y = self.recurrent_kernel[:, self.units * 3:]
if self.use_bias:
self.bias_z = self.bias[:self.units]
self.bias_r = self.bias[self.units: self.units * 2]
self.bias_h = self.bias[self.units * 2: self.units * 3]
self.bias_h = self.bias[self.units * 3:]
else:
self.bias_z = None
self.bias_r = None
self.bias_h = None
self.built = True
def get_initial_states(self, x):
# build an all-zero tensor of shape [(samples, output_dim), (samples, output_dim)]
initial_state = K.zeros_like(x) # (samples, timesteps, input_dim)
initial_state = K.sum(initial_state, axis=1) # (samples, input_dim)
reducer = K.random_uniform((self.input_dim, self.units))
reducer = reducer / K.exp(reducer)
initial_state = K.dot(initial_state, reducer) # (samples, output_dim)
initial_states = [K.stack([initial_state, initial_state]) for _ in range(len(self.states))]
return initial_states
def compute_mask(self, input, mask):
# Forced to be single dimension, following behavior of Merge layer
# not implemented
return None
def get_constants(self, inputs, training=None):
constants = []
if 0. < self.recurrent_dropout < 1.:
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.units))
def dropped_inputs():
return K.dropout(ones, self.recurrent_dropout)
rec_dp_mask = [K.in_train_phase(dropped_inputs,
ones,
training=training) for _ in range(3)]
constants.append(rec_dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
return constants
def call(self, inputs, mask=None):
if type(inputs) is not list or len(inputs) != 2:
raise Exception('terminal gru runs on list of length 2')
X = inputs[0]
true_seq = inputs[1]
if self.stateful:
initial_states = self.states
else:
initial_states = self.get_initial_states(X)
# preprocessing makes input into right form for gpu/cpu settings
# from original GRU code
recurrent_dropout_constants = self.get_constants(X)[0]
preprocessed_input = self.preprocess_input(X)
#################
## Section for index matching of true inputs
#################
# Basically, we need to add an extra timestep of just 0s for predicting the first timestep output
axes = [1, 0] + list(range(2, K.ndim(true_seq)))
true_seq = K.permute_dimensions(true_seq, axes)
zeros = K.zeros_like(true_seq[:1, :, :])
# add a column of zeros, remove last element
true_seq = K.concatenate([zeros, true_seq[:K.int_shape(true_seq)[0] - 1, :, :]], axis=0)
shifted_raw_inputs = K.permute_dimensions(true_seq, axes)
## concatenate to have same dimension as preprocessed inputs 3xoutput_dim
# only for self.implementation = 0?
shifted_raw_inputs = K.concatenate([shifted_raw_inputs,
shifted_raw_inputs,
shifted_raw_inputs], axis=2)
all_inputs = K.stack([preprocessed_input, shifted_raw_inputs])
num_dim = K.ndim(all_inputs)
axes = [1, 2, 0] + list(range(3, num_dim))
all_inputs = K.permute_dimensions(all_inputs, axes)
# If not using true sequence, want to feed in a tensor of zeros instead.
zeros_input_seq = K.zeros_like(preprocessed_input)
test_phase_all_inputs = K.stack([preprocessed_input, zeros_input_seq])
test_phase_all_inputs = K.permute_dimensions(test_phase_all_inputs, axes)
all_inputs = K.in_train_phase(all_inputs, test_phase_all_inputs)
last_output, outputs, states = sampled_rnn(self.step,
all_inputs,
initial_states,
self.units,
self.rnd_seed,
go_backwards=self.go_backwards,
rec_dp_constants=recurrent_dropout_constants,
mask=None)
if self.return_sequences:
return outputs
else:
return last_output
def compute_output_shape(self, input_shape):
# expect input_shape is a list:
assert type(input_shape) is list
input_shapes = input_shape
# from original recurrent unit, can probably delete entire if this works
if self.return_sequences:
return input_shapes[1]
else:
return (input_shapes[1][0], input_shapes[1][1])
def get_config(self):
config = {'units': self.units,
'temperature': self.temperature,
'rnd_seed': self.rnd_seed}
base_config = super(TerminalGRU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def output_sampling(self, output, rand_matrix):
# Generates a sampled selection based on raw output state vector
# Creates a cdf vector and compares against a randomly generated vector
# Requires a pre-generated rand_matrix (i.e. generated outside step function)
sampled_output = output / K.sum(output, axis=-1, keepdims=True) # (batch_size, self.units)
mod_sampled_output = sampled_output / K.exp(self.temperature)
norm_exp_sampled_output = mod_sampled_output / K.sum(mod_sampled_output, axis=-1, keepdims=True)
cdf_vector = K.cumsum(norm_exp_sampled_output, axis=-1)
cdf_minus_vector = cdf_vector - norm_exp_sampled_output
rand_matrix = K.stack([rand_matrix], axis=0)
rand_matrix = K.stack([rand_matrix], axis=2)
compared_greater_output = K.cast(K.greater(cdf_vector, rand_matrix), dtype='float32')
compared_lesser_output = K.cast(K.less(cdf_minus_vector, rand_matrix), dtype='float32')
final_output = compared_greater_output * compared_lesser_output
return final_output
def step(self, h, states):
'''
receives inputs for a time step
@inp : h - [previous_layer_input, true_input_for_previous_timestep] at train time
or [previous_layer_input, zeros] at test time
@inp : states - a dictionary, contains the following
- 'initial_states' - state vector
- At train time, this includes the true input sequence for the given time step, in addition to the state for the previous time step.
            - At test time, the previously sampled output from the last step is carried in the state instead.
- 'random_cutoff_prob' - random cutoff matrix used for sampling at test time
- 'rec_dp_mask' - for use with dropout (not tested - may break)
@return: output - raw output, unsampled
@return: final_output - output that has been sampled in test case
'''
################
# Parsing the states vector
################
initial_states = states['initial_states']
random_cutoff_vec = states['random_cutoff_prob']
if self.recurrent_dropout > 0:
rec_dp_mask = states['rec_dp_mask']
else:
rec_dp_mask = np.array([1., 1., 1., 1.], dtype='float32')
h_tm1 = initial_states[0][:1, :, :]
def teacher_forced(h, states):
# switching from (batch_size, previous_layer_input|true_input, output_dim)
# to ( previous_layer_input|true_input, batch_size, output_dim)
axes = [1, 0] + list(range(2, K.ndim(h)))
h = K.permute_dimensions(h, axes)
prev_layer_input = h[0:1, :, :]
true_input = h[1:, :, :self.units]
# this should correspond to true input
prev_sampled_output = true_input
if self.implementation == 0:
x_z = prev_layer_input[0, :, :self.units]
x_r = prev_layer_input[0, :, self.units: 2 * self.units]
x_h = prev_layer_input[0, :, 2 * self.units:]
else:
                raise ValueError('Implementation type ' + str(self.implementation) + ' is invalid')
z = self.recurrent_activation(x_z + K.dot(h_tm1 * rec_dp_mask[0],
self.recurrent_kernel_z))
r = self.recurrent_activation(x_r + K.dot(h_tm1 * rec_dp_mask[1],
self.recurrent_kernel_r))
hh = self.activation(x_h +
K.dot(r * h_tm1 * rec_dp_mask[2],
self.recurrent_kernel_h) +
K.dot(r * prev_sampled_output, self.recurrent_kernel_y))
output = z * h_tm1 + (1. - z) * hh
return K.stack([output, output])
def free_running(h, states):
prev_generated_output = initial_states[0][1:, :, :]
prev_sampled_output = prev_generated_output
# switching from (batch_size, previous_layer_input|true_input, output_dim)
# to ( previous_layer_input|true_input, batch_size, output_dim)
axes = [1, 0] + list(range(2, K.ndim(h)))
h = K.permute_dimensions(h, axes)
prev_layer_input = h[0:1, :, :]
if self.implementation == 0:
x_z = prev_layer_input[0, :, :self.units]
x_r = prev_layer_input[0, :, self.units: 2 * self.units]
x_h = prev_layer_input[0, :, 2 * self.units:]
z = self.recurrent_activation(x_z + K.dot(h_tm1 * rec_dp_mask[0],
self.recurrent_kernel_z))
r = self.recurrent_activation(x_r + K.dot(h_tm1 * rec_dp_mask[1],
self.recurrent_kernel_r))
hh = self.activation(x_h +
K.dot(r * h_tm1 * rec_dp_mask[2],
self.recurrent_kernel_h) +
K.dot(r * prev_sampled_output, self.recurrent_kernel_y))
output = z * h_tm1 + (1. - z) * hh
final_output = self.output_sampling(output, random_cutoff_vec)
return K.stack([output, final_output])
output_2d_tensor = K.in_train_phase(teacher_forced(h, states),
free_running(h, states))
output_2d_tensor = K.squeeze(output_2d_tensor, 1)
return output_2d_tensor, [output_2d_tensor]
``` |
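A wiring sketch for `TerminalGRU` in a Keras functional model; the sequence length and alphabet size are made-up numbers, and the layer is fed the previous decoder layer's sequence plus the ground-truth one-hot sequence, as required by `call()`:
```python
from keras.layers import Input

seq_len, alphabet = 120, 35  # hypothetical sizes
x_in = Input(shape=(seq_len, alphabet), name='decoder_features')
true_seq = Input(shape=(seq_len, alphabet), name='true_output_seq')

tgru = TerminalGRU(alphabet, temperature=1.0, rnd_seed=42, return_sequences=True)
x_out = tgru([x_in, true_seq])  # list of length 2, checked in call()
```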
{
"source": "JPatrick9793/colorization-pytorch",
"score": 2
} |
#### File: JPatrick9793/colorization-pytorch/train_linear_classifiers.py
```python
from functools import reduce
from pathlib import Path
from typing import List, Dict
import numpy as np
import torch
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from models import create_model
from options.train_options import TrainOptions
from util import util
from sklearn.metrics import accuracy_score, f1_score
n_classes: int = 100
kernel_sizes = {
'model1': 32,
'model2': 16,
'model3': 16,
'model4': 8,
'model5': 8,
}
interpolate_size = {
'model1': (12, 12),
'model2': (9, 8),
'model3': (6, 6),
'model4': (4, 4),
'model5': (4, 4),
}
get_params = lambda tensor: reduce(lambda x, y: x * y, tensor.shape)
def reshape_activation_outputs(activation):
# global kernel_sizes
# global interpolate_size
outputs = {}
for key, items in activation.items():
# Acquire dimensions
batch, depth, width, height = items.shape
# Get kernel size
kernel_size = kernel_sizes[key]
# Pool the tensor
output = F.avg_pool2d(items, kernel_size=kernel_size, stride=kernel_size)
interp_size = interpolate_size[key]
if interp_size is not None:
output = F.interpolate(input=output, size=interp_size, scale_factor=None,
mode='bilinear', align_corners=True)
outputs[key] = output.view(batch, -1)
return outputs
def get_validation_feature_tensors(activation, device, model, opt, validation_dataset_loader, validation_dataset_size):
validation_batches = []
with torch.no_grad():
for e, data_raw in tqdm(enumerate(validation_dataset_loader), total=validation_dataset_size // opt.batch_size):
data_raw[0] = data_raw[0].to(device)
data = util.get_colorization_data(data_raw, opt, p=opt.sample_p)
if data is None: continue
model.set_input(data)
model.test(compute_losses=False)
outputs = reshape_activation_outputs(activation)
validation_batches.append(outputs)
            # Break the inner loop once there's enough data
if ((e + 1) * opt.batch_size) >= opt.max_dataset_size:
break
return validation_batches
def get_dataloader(dataset, opt, shuffle: bool = True):
dataset_loader = torch.utils.data.DataLoader(
dataset, batch_size=opt.batch_size, shuffle=shuffle, num_workers=int(opt.num_threads))
return dataset_loader
def get_dataset(opt, dataroot):
dataset = torchvision.datasets.ImageFolder(dataroot,
transform=transforms.Compose([
transforms.RandomChoice([
transforms.Resize(opt.loadSize, interpolation=1),
transforms.Resize(opt.loadSize, interpolation=2),
transforms.Resize(opt.loadSize, interpolation=3),
transforms.Resize((opt.loadSize, opt.loadSize), interpolation=1),
transforms.Resize((opt.loadSize, opt.loadSize), interpolation=2),
transforms.Resize((opt.loadSize, opt.loadSize), interpolation=3)
]),
transforms.RandomChoice([
transforms.RandomResizedCrop(opt.fineSize, interpolation=1),
transforms.RandomResizedCrop(opt.fineSize, interpolation=2),
transforms.RandomResizedCrop(opt.fineSize, interpolation=3)
]),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()]))
return dataset
def main(opt):
# Create checkpoints
# linear_classifier_ckpts: Path = Path("classifier_ckpts_pretrained")
assert opt.linear_checkpoints is not None, "Please specify output directory for checkpoints"
linear_classifier_ckpts: Path = Path(opt.linear_checkpoints)
linear_classifier_ckpts.mkdir(exist_ok=True, parents=True)
# Always force load model
opt.load_model = True
# Specify CUDA device if passed into args
device = torch.device("cpu" if len(opt.gpu_ids) <= 0 else f"cuda:{opt.gpu_ids[0]}")
# Load training data
print(f"Creating Training Dataset Loader")
dataset = get_dataset(opt=opt, dataroot=opt.dataroot)
dataset_loader = get_dataloader(dataset, opt)
dataset_size = min(len(dataset), opt.max_dataset_size)
# Load validation Data
print(f"Creating Validation Dataset Loader")
validation_dataset = get_dataset(opt=opt, dataroot=opt.dataroot_validation)
validation_dataset_loader = get_dataloader(dataset=validation_dataset, opt=opt, shuffle=False)
validation_dataset_size = min(len(validation_dataset), opt.max_dataset_size_validation)
# Load siggraph model for feature extraction
print('#training images = %d' % dataset_size)
model = create_model(opt)
model.setup(opt)
model.eval()
# TODO does passing in "activation" work by reference, and does it help?
# Wrapper function to create "hooks" for extracting layer activations
activation = {}
def get_activation(name, activation):
def hook(model, input, output):
activation[name] = output.detach()
return hook
# Place hooks in original model to extract the features at each layer
model1_hook = model.netG.module.model1.register_forward_hook(get_activation('model1', activation=activation))
model2_hook = model.netG.module.model2.register_forward_hook(get_activation('model2', activation=activation))
model3_hook = model.netG.module.model3.register_forward_hook(get_activation('model3', activation=activation))
model4_hook = model.netG.module.model4.register_forward_hook(get_activation('model4', activation=activation))
model5_hook = model.netG.module.model5.register_forward_hook(get_activation('model5', activation=activation))
# Create separate linear classifiers, one for each layer, independently
linear_models = {
'model1': nn.Sequential(nn.Linear(9216, n_classes), nn.Softmax(-1)).to(device),
'model2': nn.Sequential(nn.Linear(9216, n_classes), nn.Softmax(-1)).to(device),
'model3': nn.Sequential(nn.Linear(9216, n_classes), nn.Softmax(-1)).to(device),
'model4': nn.Sequential(nn.Linear(8192, n_classes), nn.Softmax(-1)).to(device),
'model5': nn.Sequential(nn.Linear(8192, n_classes), nn.Softmax(-1)).to(device)
}
# Create optimizers for each linear classifier, independently
linear_models_optimizers = {
'model1': torch.optim.Adam(linear_models['model1'].parameters(), lr=1e-3),
'model2': torch.optim.Adam(linear_models['model2'].parameters(), lr=1e-3),
'model3': torch.optim.Adam(linear_models['model3'].parameters(), lr=1e-3),
'model4': torch.optim.Adam(linear_models['model4'].parameters(), lr=1e-3),
'model5': torch.optim.Adam(linear_models['model5'].parameters(), lr=1e-3),
}
# Keep track of validation losses for all linear classifiers, independently
linear_models_validation_losses = {
'model1': np.inf,
'model2': np.inf,
'model3': np.inf,
'model4': np.inf,
'model5': np.inf,
}
loss_fn = nn.CrossEntropyLoss()
# Writer will output to ./runs/ directory by default
writer = SummaryWriter()
# Loop over epochs
for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay):
# Place all models in "train" mode
for _, linear_model in linear_models.items():
linear_model.train()
# Variables to keep track of metrics
training_losses: Dict[str, List[float]] = {model_name: [0.0] for model_name in linear_models}
training_accuracies: Dict[str, List[float]] = {model_name: [0.0] for model_name in linear_models}
training_f1_scores: Dict[str, List[float]] = {model_name: [0.0] for model_name in linear_models}
# for i, data in enumerate(dataset)
for i, data_raw in tqdm(enumerate(dataset_loader), total=dataset_size // opt.batch_size):
# Place data on GPU (or CPU)
data_raw[0] = data_raw[0].to(device)
data_raw[1] = data_raw[1].to(device)
# Run input batch through model to get feature activations
with torch.no_grad():
data = util.get_colorization_data(data_raw, opt, p=opt.sample_p)
if data is None:
continue
model.set_input(data)
model.test(compute_losses=False)
outputs = reshape_activation_outputs(activation)
# Sometimes batch sizes don't line up, so skip this batch if that happens...
if outputs['model1'].shape[0] != data_raw[1].shape[0]:
# print('xv.shape[0] != yv[1].shape[0]')
continue
# Loop through individual linear classifiers per batch
for model_name, linear_classifier in linear_models.items():
# Run feature vector through linear classifier
feature_input = outputs[model_name]
preds = linear_classifier(feature_input)
# Optimize and Backpropagation
loss = loss_fn(preds, data_raw[1])
optimizer = linear_models_optimizers[model_name]
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Find batch metrics, and add to list
train_accuracy, train_f1 = get_numpy_metrics(targets=data_raw[1], preds=preds)
training_accuracies[model_name].append(train_accuracy)
training_losses[model_name].append(loss.detach())
training_f1_scores[model_name].append(train_f1)
            # Break the inner loop once there's enough data
if ((i + 1) * opt.batch_size) >= opt.max_dataset_size:
break
# Iterate over linear models at the end of the epoch to add training metrics to tensorboard
for model_name, linear_model in linear_models.items():
writer.add_scalar(f'TrainAccuracy/{model_name}',
sum(training_accuracies[model_name]) / len(training_accuracies[model_name]), epoch)
writer.add_scalar(f'TrainLoss/{model_name}',
sum(training_losses[model_name]) / len(training_losses[model_name]), epoch)
writer.add_scalar(f'TrainF1/{model_name}',
sum(training_f1_scores[model_name]) / len(training_f1_scores[model_name]), epoch)
# Evaluate the classifier performance on validation data
with torch.no_grad():
# Convert all linear classifiers to eval mode
for model_name, linear_model in linear_models.items():
                linear_model.eval()
validation_losses: Dict[str, List[float]] = {model_name: [0.0] for model_name in linear_models}
validation_accuracies: Dict[str, List[float]] = {model_name: [0.0] for model_name in linear_models}
validation_f1_scores: Dict[str, List[float]] = {model_name: [0.0] for model_name in linear_models}
# iterate over validation data
for e, data_raw in tqdm(enumerate(validation_dataset_loader),
total=validation_dataset_size // opt.batch_size):
# Run image through model for feature vectors
data_raw[0] = data_raw[0].to(device)
data_raw[1] = data_raw[1].to(device)
data = util.get_colorization_data(data_raw, opt, p=opt.sample_p)
if data is None: continue
model.set_input(data)
model.test(compute_losses=False)
outputs = reshape_activation_outputs(activation)
# Skip this validation batch if there was an error during processing
if outputs[model_name].shape[0] != data_raw[1].shape[0]:
# print('xv.shape[0] != yv[1].shape[0]')
continue
# Iterate over linear classifiers to evaluate validation batch
for model_name, linear_model in linear_models.items():
val_feature_vector = outputs[model_name]
val_preds = linear_model(val_feature_vector)
loss = loss_fn(val_preds, data_raw[1])
# Add accuracy and loss to dictionary to keep track
accuracy, f1 = get_numpy_metrics(targets=data_raw[1], preds=val_preds)
validation_losses[model_name].append(loss)
validation_accuracies[model_name].append(accuracy)
validation_f1_scores[model_name].append(f1)
                # Break the inner loop once there's enough data
if ((e + 1) * opt.batch_size) >= opt.max_dataset_size:
break
# Iterate over models one final time to evaluate overall validation performance for this epoch
for model_name, linear_model in linear_models.items():
print(f"Evaluating {model_name}")
# Current lowest validation loss for this linear classifier
curr_validation_loss = linear_models_validation_losses[model_name]
# Add validation metrics to graph
validation_loss = sum(validation_losses[model_name]) / len(validation_losses[model_name])
validation_accuracy = sum(validation_accuracies[model_name]) / len(validation_accuracies[model_name])
validation_f1 = sum(validation_f1_scores[model_name]) / len(validation_f1_scores[model_name])
writer.add_scalar(f'ValLoss/{model_name}', validation_loss, epoch)
writer.add_scalar(f'ValAccuracy/{model_name}', validation_accuracy, epoch)
writer.add_scalar(f'ValF1/{model_name}', validation_f1, epoch)
# Check if best model or not
if validation_loss <= curr_validation_loss:
# This is best validation loss so far, save model as "best" and update dictionary
print(f"Model {model_name} best validation loss so far: {validation_loss} < {curr_validation_loss}")
torch.save(linear_model.state_dict(), f"{linear_classifier_ckpts}/{model_name}_best.pth")
linear_models_validation_losses[model_name] = validation_loss
else:
print(f"Model {model_name} validation did not improve: {validation_loss} > {curr_validation_loss}")
# Always save checkpoints at every epoch
torch.save(linear_model.state_dict(), f"{linear_classifier_ckpts}/{model_name}_e{epoch}.pth")
# Close tensorboard writer
writer.close()
def get_numpy_metrics(targets, preds):
preds_numpy = preds.argmax(-1).cpu().numpy()
targt_numpy = targets.cpu().numpy()
accuracy = accuracy_score(y_pred=preds_numpy, y_true=targt_numpy)
f1 = f1_score(y_pred=preds_numpy, y_true=targt_numpy, average='micro')
return accuracy, f1
if __name__ == '__main__':
opt = TrainOptions().parse()
opt.dataroot = opt.dataroot if opt.dataroot is not None else './dataset/ilsvrc2012/%s/' % opt.phase
assert opt.dataroot_validation is not None, "When training linear classifiers, please specify a validation " \
"dataset via --dataroot_validation"
main(opt=opt)
``` |
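For reference, the pooled/interpolated feature sizes line up with the linear heads as follows; the per-layer channel counts are inferred from the `nn.Linear` input sizes and `interpolate_size`, not stated explicitly in the script:
```python
# model1:  64 ch -> 12x12 ->  64 * 12 * 12 = 9216
# model2: 128 ch ->  9x8  -> 128 *  9 *  8 = 9216
# model3: 256 ch ->  6x6  -> 256 *  6 *  6 = 9216
# model4: 512 ch ->  4x4  -> 512 *  4 *  4 = 8192
# model5: 512 ch ->  4x4  -> 512 *  4 *  4 = 8192
assert 64 * 12 * 12 == 128 * 9 * 8 == 256 * 6 * 6 == 9216
assert 512 * 4 * 4 == 8192
```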
{
"source": "jpatrickdill/figa",
"score": 2
} |
#### File: figa/figa/__init__.py
```python
import platform
from functools import wraps
from figa.loaders import detect_and_parse
from figa.loaders.parser import DictValueReader
from os import environ as env
# ease of use
system = platform.system().lower()
version = platform.python_version()
no_warnings = False
def config(cls):
# add __required__ if not included
cls.__required__ = getattr(cls, "__required__", {})
@wraps(cls)
def get_config(environment: str = None, **kwargs) -> DictValueReader:
no_warn = kwargs.get("no_warnings", no_warnings)
# detect env
if environment is None:
if not hasattr(cls, "get_env"):
raise NotImplementedError("No get_env() method defined, can't detect environment")
environment = cls.get_env(cls)
if environment is None:
raise ValueError("No environment was provided or could be detected")
# find config arguments
args = getattr(cls, environment, None)
if args is None:
raise ValueError("No environment named {!r}".format(environment))
if not isinstance(args, tuple):
args = (args,)
# get default values if not currently default
default = get_config("default")._values if environment != "default" and hasattr(cls, "default") else None
# get reader
if isinstance(args[0], dict): # dict object
return DictValueReader(args[0], default=default, required=cls.__required__)
elif isinstance(args[0], str): # string parser name
parsed = detect_and_parse(args, default=default, required=cls.__required__, no_warnings=no_warn)
else:
# parser specified explicitly
parsed = args[0].__handler__(*args[1:], default=default, required=cls.__required__, no_warnings=no_warn)
return parsed
return get_config
```
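A usage sketch of the `config` decorator above; the environment names, config sources and the environment variable are all hypothetical:
```python
import os
import figa

@figa.config
class MyConfig:
    __required__ = {"port": int}

    default = {"host": "localhost", "port": 8080}   # dict source
    production = "/etc/myapp/config.yml"            # file source, handed to detect_and_parse

    def get_env(self):
        return "production" if os.environ.get("APP_ENV") == "prod" else "default"

cfg = MyConfig()               # detects the environment via get_env() and returns a value reader
prod = MyConfig("production")  # or pick an environment explicitly
```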
#### File: figa/loaders/hocon.py
```python
from figa.loaders.parser import Parser
from pyhocon import ConfigFactory, HOCONConverter
import json
class HoconParser(Parser):
def parse_string(self, s):
conf = ConfigFactory.parse_string(s)
return json.loads(HOCONConverter.to_json(conf))
def parse_fp(self, fp):
conf = ConfigFactory.parse_file(fp)
return json.loads(HOCONConverter.to_json(conf))
```
#### File: figa/loaders/ini.py
```python
from figa.loaders.parser import Parser
from configobj import ConfigObj
from io import StringIO
class IniParser(Parser):
def parse_string(self, s):
config = ConfigObj(s.splitlines())
return config.dict()
```
#### File: figa/figa/typechecking.py
```python
converters = [str, int, float, bool]
def check(types, values, trace=None):
    # checks whether the values match the types given in the `types` argument, or can be converted to them
trace = trace or []
for item, type_ in types.items():
# check if item exists
if item not in values:
trace.append(item)
raise ValueError("Missing required item {!r}".format(".".join(trace)))
val = values[item]
if isinstance(type_, dict): # is sub-dict
check(type_, val, trace + [item])
else:
if not isinstance(val, type_):
# try converting value if reasonable (str, int, float)
if type_ in converters and type(val) in converters:
try:
values[item] = type_(val) # try converting to expected type
except ValueError:
trace.append(item)
raise ValueError("item {!r} doesn't match type {!r}".format(".".join(trace), type_.__name__))
else:
trace.append(item)
raise ValueError("item {!r} doesn't match type {!r}".format(".".join(trace), type_.__name__))
``` |
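A usage sketch of `check()` with a toy schema; note the in-place conversion of convertible values:
```python
types = {"server": {"host": str, "port": int}, "debug": bool}
values = {"server": {"host": "localhost", "port": "8080"}, "debug": True}

check(types, values)
assert values["server"]["port"] == 8080  # "8080" was converted to int in place
```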
{
"source": "jpatrickdill/roblox.py",
"score": 3
} |
#### File: roblox.py/roblox/abc.py
```python
from __future__ import annotations
from abc import ABCMeta, abstractmethod
from datetime import datetime
from typing import AsyncGenerator, Optional, List, Union
from roblox.enums import AssetType
class User(metaclass=ABCMeta):
"""An ABC that details common operations on a Roblox user."""
# @classmethod
# def __subclasshook__(cls, C):
# if cls is User:
# mro = C.__mro__
# for attr in ("username", "id", "description", "status", "created_at", "banned"):
# for base in mro:
# if attr in base.__dict__:
# break
# else:
# return NotImplemented
# return True
# return NotImplemented
@property
@abstractmethod
async def id(self) -> int:
"""
Async property that returns the User's ID.
"""
raise NotImplemented
@property
@abstractmethod
async def username(self) -> str:
"""
Async property that returns the User's username.
"""
raise NotImplemented
@property
@abstractmethod
async def url(self) -> str:
"""
Async property that returns the User's profile URL.
"""
raise NotImplemented
@property
@abstractmethod
async def created_at(self) -> datetime:
"""
Async property that returns the datetime at which the user was created.
"""
raise NotImplemented
@property
@abstractmethod
async def description(self) -> str:
"""
Async property that returns the User's profile description.
"""
raise NotImplemented
@abstractmethod
async def status(self) -> str:
"""
Returns the User's current status.
"""
raise NotImplemented
@property
@abstractmethod
async def is_banned(self) -> bool:
"""
Async property that returns whether the user is banned.
"""
raise NotImplemented
@property
@abstractmethod
async def is_premium(self) -> bool:
"""
Async property that returns whether the user has a premium subscription.
"""
raise NotImplemented
@abstractmethod
    async def friends(self) -> AsyncGenerator[User, None]:
"""
Async Generator yielding the user's friends.
"""
raise NotImplemented
@abstractmethod
async def is_friends(self, other: Optional[User] = None) -> bool:
"""
Checks whether this user is friends with another user or the client user.
"""
raise NotImplemented
@property
@abstractmethod
def followers(self):
"""
Property that returns FollowerList for this user.
"""
raise NotImplemented
@property
@abstractmethod
def followings(self):
"""
Property that returns FollowingsList for this user.
"""
raise NotImplemented
@property
@abstractmethod
def inventory(self):
"""
Property that returns Inventory for this user.
"""
raise NotImplemented
@abstractmethod
def games(self) -> AsyncGenerator[Universe, None]:
"""
Async Generator that yields the user's games.
"""
raise NotImplemented
class ClientUser(metaclass=ABCMeta):
"""An ABC that details operations on the client user."""
@abstractmethod
async def set_status(self, status: str) -> str:
"""
Sets the client user's status.
:param status: New status.
:return: Moderated status.
"""
raise NotImplemented
@property
@abstractmethod
async def robux(self) -> int:
"""
Returns the client user's amount of currency.
"""
class OtherUser(metaclass=ABCMeta):
"""An ABC that details operations on non-client users."""
async def follow(self):
"""Follows this user from the client user."""
raise NotImplemented
async def unfollow(self):
"""Unfollows this user from the client user."""
raise NotImplemented
async def request_friendship(self):
"""Sends a friend request to this user."""
raise NotImplemented
async def unfriend(self):
"""Unfriends this user.."""
raise NotImplemented
class DisplayPage(metaclass=ABCMeta):
"""An ABC that details an object with a display page, such as an asset, place, or universe."""
@property
@abstractmethod
async def id(self) -> int:
"""
Async property that returns the object's ID.
"""
raise NotImplemented
@property
@abstractmethod
async def name(self) -> str:
"""
Async property that returns the object's name.
"""
raise NotImplemented
@property
@abstractmethod
async def description(self) -> str:
"""
Async property that returns the object's description.
"""
raise NotImplemented
@property
@abstractmethod
async def url(self) -> str:
"""
Async property that returns the object's URL.
"""
raise NotImplemented
@property
@abstractmethod
async def created_at(self) -> datetime:
"""
Async property that returns when the object was created.
"""
raise NotImplemented
@property
@abstractmethod
async def updated_at(self) -> datetime:
"""
Async property that returns when the object was last updated.
"""
raise NotImplemented
class Votable(metaclass=ABCMeta):
"""ABC that represents on object that can be voted on, e.g., favorites, thumbs-up, thumbs-down"""
@property
@abstractmethod
async def favorites(self) -> int:
"""
Async property that returns the asset's current number of favorites.
"""
raise NotImplemented
@property
@abstractmethod
async def is_favorited(self) -> bool:
"""
Async property that returns whether the asset is favorited by the client.
"""
raise NotImplemented
@abstractmethod
async def favorite(self):
"""
Favorites the asset for the client user.
"""
raise NotImplemented
@abstractmethod
async def unfavorite(self):
"""
Unfavorites the asset for the client user.
"""
raise NotImplemented
class Asset(DisplayPage, Votable, metaclass=ABCMeta):
"""An ABC that details common operations on a Roblox asset."""
@property
@abstractmethod
async def type(self) -> AssetType:
"""
Async property that returns the Asset's type.
"""
raise NotImplemented
@property
@abstractmethod
async def price(self) -> int:
"""
Async property that returns the asset's current price in Robux.
"""
raise NotImplemented
@property
@abstractmethod
async def for_sale(self) -> bool:
"""
Async property that returns whether the asset can be purchased.
"""
raise NotImplemented
@property
@abstractmethod
async def creator(self) -> User:
"""
Async property that returns the creator of the asset.
"""
raise NotImplemented
@abstractmethod
async def purchase(self, expected_price: Optional[int] = None):
"""
Purchases the asset for the client user. If `expected_price` is specified, the asset will not be
purchased unless the `expected_price` matches the current price.
"""
raise NotImplemented
@abstractmethod
async def delete(self):
"""
Deletes asset from the client user's inventory.
"""
class Place(Asset, metaclass=ABCMeta):
"""An ABC that details operations on a Roblox Place asset."""
@property
@abstractmethod
async def universe(self) -> Universe:
"""Async property that returns the Universe the place belongs to."""
raise NotImplemented
class Universe(DisplayPage, Votable, metaclass=ABCMeta):
"""An ABC that details common operations on a Roblox Universe (Game)."""
@property
@abstractmethod
async def visits(self) -> int:
"""Async property that returns the number of visits to this game."""
raise NotImplemented
@property
@abstractmethod
async def playing(self) -> int:
"""Async property that returns the number of players in this game."""
raise NotImplemented
@property
@abstractmethod
async def max_players(self) -> int:
"""Async property that returns the max players per server in this game."""
raise NotImplemented
@property
@abstractmethod
async def root_place(self) -> Place:
"""Async property that returns the universe's root place."""
raise NotImplemented
class Group(DisplayPage, metaclass=ABCMeta):
"""ABC detailing operations on a Roblox Group."""
@property
@abstractmethod
async def owner(self) -> Optional[User]:
"""Async property that returns the group's current owner, if it has one."""
raise NotImplemented
@property
@abstractmethod
async def shout(self) -> Optional[Shout]:
"""Async property that returns the group's current shout."""
raise NotImplemented
@property
@abstractmethod
async def members(self) -> AsyncGenerator[GroupMember, None]:
"""Async generator that yields the group's members."""
raise NotImplemented
@abstractmethod
async def get_member(self, user: Union[User, str, int]) -> GroupMember:
"""Tries to find a group member given a username, user ID, or User object."""
raise NotImplemented
@property
@abstractmethod
async def is_public(self) -> bool:
"""Async property that returns whether the group allows public entry."""
raise NotImplemented
@property
@abstractmethod
async def roles(self) -> List[Role]:
"""Async property that returns a list of the group's roles"""
raise NotImplemented
class GroupMember(User, metaclass=ABCMeta):
"""ABC describing operations on a Group Member."""
@property
@abstractmethod
async def role(self) -> Role:
"""Async property that eturns the member's group role."""
raise NotImplemented
@property
@abstractmethod
async def rank(self) -> int:
"""Shortcut for the numerical rank of the member's role."""
raise NotImplemented
class Role(metaclass=ABCMeta):
"""ABC describing a group roleset."""
@property
@abstractmethod
async def id(self) -> int:
"""Async property that returns the role's ID."""
raise NotImplemented
@property
@abstractmethod
async def name(self) -> str:
"""Async property that returns the role's name."""
raise NotImplemented
@property
@abstractmethod
async def description(self) -> str:
"""Async property that returns the role's description."""
raise NotImplemented
@property
@abstractmethod
async def rank(self) -> int:
"""Async property that returns the role's numerical rank."""
raise NotImplemented
@property
@abstractmethod
async def member_count(self) -> int:
"""Async property that returns the number of members with this role."""
raise NotImplemented
class Shout(metaclass=ABCMeta):
"""ABC describing a group shout."""
@property
@abstractmethod
def body(self) -> str:
"""Returns the shout's body."""
raise NotImplemented
@property
@abstractmethod
def created_at(self) -> datetime:
"""Returns the time the shout was created at."""
raise NotImplemented
@property
@abstractmethod
async def poster(self) -> User:
"""Returns the user who posted the shout."""
raise NotImplemented
```
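A note on the pattern used throughout this module: `@property` is stacked over `@abstractmethod` on `async def` bodies, so concrete classes expose awaitable properties and incomplete subclasses fail at instantiation. A self-contained sketch of that pattern, with illustrative `Thing`/`Widget` names that are not part of roblox.py:
```python
import asyncio
from abc import ABCMeta, abstractmethod

class Thing(metaclass=ABCMeta):
    @property
    @abstractmethod
    async def name(self) -> str:
        raise NotImplementedError

class Widget(Thing):
    @property
    async def name(self) -> str:
        return "widget"

async def main():
    widget = Widget()
    print(await widget.name)  # the property getter returns a coroutine, so it is awaited
    # Thing() would raise TypeError: can't instantiate abstract class Thing

asyncio.run(main())
```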
#### File: roblox.py/roblox/asset.py
```python
import logging
import maya
from CaseInsensitiveDict import CaseInsensitiveDict
from async_property import async_property, async_cached_property
from roblox.enums import AssetType
from roblox.abc import Asset as _BaseAsset
from roblox.errors import *
from functools import wraps
from roblox.http import Session
from roblox.util import urlify
log = logging.getLogger(__name__)
# util decorator
def p_info(name, nocache=False):
"""This decorator will check if the property is in the asset's _data, and if it isn't send a request to the
ProductInfo API endpoint"""
def decorator(fn):
@wraps(fn)
async def new_fn(self):
if nocache or self._data[name] is None:
await self._get_product_info()
return self._data[name]
return new_fn
return decorator
class Asset(_BaseAsset):
"""
Represents a Roblox Asset.
"""
__slots__ = ("_data", "_state")
def __init__(self, *, state: Session, data):
self._state = state
self._data = CaseInsensitiveDict({
"name": None,
"description": None,
"id": None,
"productid": None,
"created": None,
"updated": None,
"price": None,
"assettypeid": None,
"sales": None,
"isforsale": None,
"ispublicdomain": None,
"islimited": None,
"islimitedunique": None,
"remaining": None,
"serialnumber": None,
"creator": None
})
self._update(data)
def __repr__(self):
return "Asset({!r})".format(self._data["name"] or self._data["id"])
def __hash__(self):
return self._data["id"] or -2
def __eq__(self, other):
if not isinstance(other, Asset):
return False
return self._data["id"] == other._data["id"]
def _update(self, data: dict):
for k in list(data.keys()):
data[k.lower()] = data[k]
data.setdefault("price", data.get("priceinrobux"))
data.setdefault("id", data.get("assetid"))
data.setdefault("name", data.get("assetname"))
self._data.update(data)
async def _get_product_info(self):
data = await self._state.product_info(self._data["id"])
self._update(data)
@async_property
@p_info("name")
async def name(self):
"""|asyncprop|
The asset's name.
:rtype: str
"""
pass
@async_property
@p_info("description")
async def description(self):
"""|asyncprop|
The asset's description.
:rtype: str
"""
pass
@async_property
@p_info("id")
async def id(self):
"""|asyncprop|
The asset's ID.
:rtype: int
"""
pass
@async_property
async def type(self):
"""|asyncprop|
The asset's type.
:rtype: :class:`.AssetType`
"""
if self._data["assettypeid"] is None:
await self._get_product_info()
return AssetType(self._data["assettypeid"])
@async_property
async def url(self):
"""|asyncprop|
URL to the asset's page.
:rtype: str
"""
safe_name = urlify(await self.name)
return "https://roblox.com/library/{}/{}".format(await self.id, safe_name)
@async_property
@p_info("productid")
async def product_id(self):
"""|asyncprop|
The asset's product ID.
:rtype: int
"""
pass
@async_property
async def created_at(self):
"""|asyncprop|
Time at which the asset was created.
:rtype: :class:`datetime.datetime`
"""
if self._data["created"] is None:
await self._get_product_info()
try:
return maya.parse(self._data["created"]).datetime()
except OSError:
return None
@async_cached_property
async def updated_at(self):
"""|asyncprop|
Time at which the asset was last updated.
:rtype: :class:`datetime.datetime`
"""
if self._data["updated"] is None:
await self._get_product_info()
try:
return maya.parse(self._data["updated"]).datetime()
except OSError:
return None
@async_property
@p_info("price", nocache=True)
async def price(self):
"""|asyncprop|
The asset's purchase price.
:rtype: int
"""
pass
@async_property
@p_info("sales", nocache=True)
async def sales(self):
"""|asyncprop|
Number of sales the asset has.
:rtype: int
"""
pass
@async_property
async def for_sale(self):
"""|asyncprop|
Whether the asset can be purchased/taken.
:rtype: bool
"""
if self._data["isforsale"] is None and self._data["ispublicdomain"] is None:
await self._get_product_info()
return self._data["isforsale"] or self._data["ispublicdomain"]
@async_property
async def creator(self):
"""|asyncprop|
The asset's creator.
:rtype: :class:`.User`
"""
if self._data["creator"] is None:
await self._get_product_info()
creator = self._data["creator"]
if creator.get("CreatorType") == "User":
return await self._state.client.get_user(username=creator["Name"])
@async_property
async def favorites(self):
"""|asyncprop|
Number of favorites the asset has.
:rtype: int
"""
return await self._state.favorites_count(await self.id)
@async_property
async def is_favorited(self):
"""|asyncprop|
Whether the client has favorited the asset.
:rtype: bool
"""
model = await self._state.favorite_model(await self._state.client.user.id, await self.id)
return False if model is None else True
async def favorite(self):
"""
Favorites the asset.
"""
return await self._state.create_favorite(await self._state.client.user.id, await self.id)
async def unfavorite(self):
"""
Unfavorites the asset.
"""
return await self._state.delete_favorite(await self._state.client.user.id, await self.id)
async def toggle_favorite(self):
"""
Toggles the asset's favorite.
"""
if await self.is_favorited:
return await self.unfavorite()
else:
return await self.favorite()
async def purchase(self, expected_price: int = None):
"""
        Purchases the asset for the client user.
Args:
expected_price: Price to check the asset against before purchasing. Asset will not be purchased if the
current price doesn't match the expected price.
"""
expected_price = expected_price or await self.price
expected_seller = await (await self.creator).id
return await self._state.purchase_product(await self.product_id, expected_price, expected_seller)
async def delete(self):
"""
Deletes the asset from the client user's inventory.
"""
return await self._state.delete_from_inventory(await self.id)
async def download(self, path):
file = open(path, "wb")
await self._state.download_asset(await self.id, file)
file.close()
```
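The `p_info` decorator above is a lazy-fetch cache: the first access triggers a single ProductInfo request that fills several fields at once, while `nocache=True` forces a refresh for volatile fields such as `price` and `sales`. A self-contained sketch of the same idea (the `FakeAsset` class and its fields are illustrative stand-ins; the real code additionally wraps these methods with `async_property`):
```python
import asyncio
from functools import wraps

def lazy(name, nocache=False):
    def decorator(fn):
        @wraps(fn)
        async def new_fn(self):
            if nocache or self._data[name] is None:
                await self._fetch()  # one request populates several fields
            return self._data[name]
        return new_fn
    return decorator

class FakeAsset:
    def __init__(self):
        self._data = {"name": None, "price": None}

    async def _fetch(self):
        # stands in for Session.product_info()
        self._data.update({"name": "Example Hat", "price": 100})

    @lazy("name")
    async def name(self):
        pass

    @lazy("price", nocache=True)
    async def price(self):
        pass

async def main():
    asset = FakeAsset()
    print(await asset.name())   # triggers _fetch() once, then serves from _data
    print(await asset.price())  # nocache=True re-fetches every time

asyncio.run(main())
```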
#### File: roblox.py/roblox/game.py
```python
import logging
from abc import ABC
import maya
from CaseInsensitiveDict import CaseInsensitiveDict
from async_property import async_property, async_cached_property
from roblox.asset import Asset
from roblox.abc import Universe as _BaseUniverse
from roblox.http import Session
log = logging.getLogger(__name__)
class Place(Asset):
__slots__ = ("_data", "_state")
def __init__(self, *, state, data):
super().__init__(state=state, data=data)
self._data.update({
"isplayable": None,
"universeid": None,
"reasonprohibited": None,
"imageToken": None,
"universerootplaceid": None,
"url": None
})
self._update(data)
def __repr__(self):
return "Place({!r})".format(self._data["name"] or self._data["id"])
async def _get_place_details(self):
details = (await self._state.get_place_details(await self.id))[0]
self._update(details)
@async_property
async def universe(self):
if self._data["universeid"] is None:
await self._get_place_details()
return Universe(state=self._state, data={"id": self._data["universeid"]})
game = universe
@async_property
async def url(self):
if self._data["url"] is None:
await self._get_place_details()
return self._data["url"]
# util decorator
def g_info(name, nocache=False):
"""This decorator will check if the property is in the game's _data, and if it isn't send a request to the
Games API endpoint"""
def decorator(fn):
async def new_fn(self):
if nocache or self._data[name] is None:
await self._get_game_details()
return self._data[name]
return new_fn
return decorator
class Universe(_BaseUniverse):
__slots__ = ("_data", "_state")
def __init__(self, *, state: Session, data):
self._state = state
self._data = CaseInsensitiveDict({
"name": None,
"description": None,
"id": None,
"rootplaceid": None,
"created": None,
"updated": None,
"price": None,
"sales": None,
"creator": None,
"allowedgearcategories": None,
"playing": None,
"visits": None,
"maxplayers": None,
"studioaccesstoapisallowed": None,
"createvipserversallowed": None,
"universeavatartype": None,
"genre": None
})
self._update(data)
def __repr__(self):
return "Universe({!r})".format(self._data["name"] or self._data["id"])
def __hash__(self):
return hash(self._data["id"] or -1)
def _update(self, data):
for k in list(data.keys()):
data[k.lower()] = data[k]
self._data.update(data)
async def _get_game_details(self):
details = (await self._state.get_game_details(await self.id))["data"][0]
self._update(details)
@async_property
async def id(self):
return self._data["id"]
@async_property
@g_info("name")
async def name(self):
pass
@async_property
@g_info("description")
async def description(self):
pass
@async_property
async def created_at(self):
if self._data["created"] is None:
await self._get_game_details()
try:
return maya.parse(self._data["created"]).datetime()
except OSError:
return None
@async_cached_property
async def updated_at(self):
if self._data["updated"] is None:
await self._get_game_details()
try:
return maya.parse(self._data["updated"]).datetime()
except OSError:
return None
@async_property
async def creator(self):
if self._data["creator"] is None:
await self._get_game_details()
creator = self._data["creator"]
if creator.get("type") == "User":
return await self._state.client.get_user(username=creator["name"])
@async_property
async def root_place(self):
if self._data["rootplaceid"] is None:
await self._get_game_details()
return await self._state.client.get_asset(self._data["rootplaceid"])
@async_property
async def url(self):
return await (await self.root_place).url
@async_property
@g_info("visits", nocache=True)
async def visits(self):
pass
@async_property
@g_info("playing", nocache=True)
async def playing(self):
pass
@async_property
@g_info("maxplayers", nocache=True)
async def max_players(self):
pass
@property
async def is_favorited(self) -> bool:
data = await self._state.universe_favorited(await self.id)
return data.get("isFavorited")
async def favorite(self):
await self._state.favorite_universe(await self.id, True)
return True
async def unfavorite(self):
await self._state.favorite_universe(await self.id, False)
return True
@property
async def favorites(self) -> int:
data = await self._state.universe_favorites(await self.id)
return data.get("favoritesCount")
```
#### File: roblox.py/roblox/group.py
```python
from __future__ import annotations
import logging
from functools import wraps
import maya
from CaseInsensitiveDict import CaseInsensitiveDict
from async_property import async_property
from roblox.abc import Group as _Group
from roblox.abc import GroupMember as _GroupMember
from roblox.abc import Role as _Role
from roblox.abc import Shout as _Shout
from roblox.errors import *
from roblox.http import Session
from roblox.iterables import AsyncIterator
from roblox.user import User, BaseUser
from roblox.util import urlify
from typing import Union
log = logging.getLogger(__name__)
# util decorator
def g_info(name, nocache=False):
"""This decorator will check if the property is in the group"s _data, and if it isn"t send a request to the
groups API endpoint"""
def decorator(fn):
@wraps(fn)
async def new_fn(self):
if nocache or self._data[name] is None:
await self._get_group_details()
return self._data[name]
return new_fn
return decorator
class Group(_Group):
__slots__ = ("_data", "_state")
def __init__(self, *, state: Session, data):
self._state = state
self._data = CaseInsensitiveDict({
"id": None,
"name": None,
"description": None,
"owner": None,
"shout": None,
"membercount": None,
"isbuildersclubonly": None,
"publicentryallowed": None,
"islocked": None
})
self._update(data)
def __repr__(self):
return "Group({!r})".format(self._data["name"] or self._data["id"])
def __hash__(self):
return self._data["id"] or -2
def __eq__(self, other):
if not isinstance(other, Group):
return False
return self._data["id"] == other._data["id"]
def _update(self, data):
self._data.update(data)
async def _get_group_details(self):
data = await self._state.get_group_details(await self.id)
self._update(data)
@async_property
@g_info("id")
async def id(self):
"""|asyncprop|
The group's ID.
:rtype: int
"""
pass
@async_property
@g_info("name")
async def name(self):
"""|asyncprop|
The group's name.
:rtype: str
"""
pass
@async_property
@g_info("description")
async def description(self):
"""|asyncprop|
The group's description.
:rtype: str
"""
pass
@async_property
@g_info("publicentryallowed", nocache=True)
async def is_public(self):
"""|asyncprop|
        Whether the group allows public entry (i.e. users can join without approval).
:rtype: bool
"""
pass
@async_property
async def owner(self):
"""|asyncprop|
The group's owner.
:rtype: :class:`.User`
"""
await self._get_group_details()
if self._data["owner"] is None:
return None
data = {
"user": self._data["owner"],
"role": (await self.roles)[-1]._data
}
return GroupMember(state=self._state, data=data, group=self)
@async_property
async def created_at(self):
if self._data["shout"] is None:
await self._get_group_details()
raise NotImplemented
updated_at = created_at
@async_property
async def url(self):
"""|asyncprop|
The group's URL.
:rtype: str
"""
if self._data["id"] is None or self._data["name"] is None:
await self._get_group_details()
return "https://roblox.com/groups/{}/{}#!/about".format(self._data["id"], urlify(self._data["name"]))
@async_property
async def shout(self):
"""|asyncprop|
The group's current shout.
:rtype: :class:`.Shout`
"""
await self._get_group_details()
try:
return Shout(state=self._state, data=self._data.get("shout"), group=self)
except TypeError:
return None
@async_property
async def roles(self, reverse=False):
"""|asyncprop|
List of the group's roles.
:rtype: List[:class:`.Role`]
"""
data = await self._state.get_group_roles(await self.id)
roles = []
for role in data["roles"]:
roles.append(
Role(state=self._state, data=role, group=self)
)
roles.sort(key=lambda r: r._data["rank"], reverse=reverse)
return roles
async def get_role(self, role: Union[str, int]):
"""
Attempts to find a role within a group given a name or ID.
Args:
role: Role's name or ID.
:rtype: Optional[:class:`.Role`]
"""
if isinstance(role, str) or isinstance(role, int):
role = str(role).lower()
for obj in await self.roles:
if obj == role or str(await obj.id) == role or (await obj.name).lower() == role:
return obj
@property
def members(self) -> _MembersIterator:
"""
:class:`.AsyncIterator` for this group's members.
Yields:
:class:`.GroupMember`
"""
return _MembersIterator(state=self._state, opts={"group": self})
async def get_member(self, user):
"""
Tries to find a group member given a username, ID, or :class:`.User`.
Args:
user: User to try and find within the group.
:rtype: :class:`.GroupMember`
"""
user_data = {}
if isinstance(user, BaseUser):
user_id = await user.id
user_data = user._data
elif isinstance(user, str):
user = await self._state.client.get_user(username=user)
user_id = await user.id
user_data = user._data
else:
user_id = int(user)
user_data["id"] = user_id
all_roles = await self._state.get_user_roles(user_id)
match = None
for data in all_roles["data"]:
if data["group"]["id"] == await self.id:
match = data
break
if match is None:
raise UserNotInGroup
data = {
"user": user_data,
"role": match["role"]
}
return GroupMember(state=self._state, data=data, group=self)
async def upload_asset(self, file, name, asset_type):
if isinstance(file, str):
file = open(file, "rb")
r = await self._state.upload_asset(file, name, int(asset_type), group_id=await self.id)
file.close()
return r
class _MembersIterator(AsyncIterator):
async def __aiter__(self):
async for data in self._state.get_group_members(await self._opts["group"].id):
            yield GroupMember(state=self._state, data=data, group=self._opts["group"])
async def count(self):
await self._opts["group"]._get_group_details()
return self._opts["group"]._data.get("membercount")
class Shout(_Shout):
__slots__ = ("_data", "_state", "group")
def __init__(self, *, state, data, group: Group):
self._state = state
self._data = CaseInsensitiveDict({
"body": None,
"poster": None,
"created": None,
"updated": None
})
self.group = group
self._update(data)
def __repr__(self):
return "Shout({!r}, {!r}, {!r})".format(self.group,
self._data["poster"]["username"],
self._data["body"])
def _update(self, data):
self._data.update(data)
@property
def body(self):
"""
Shout's body.
:type: str
"""
return self._data["body"]
@property
def created_at(self):
"""
Date/time when the shout was last updated.
:type: :class:`datetime.datetime`
"""
try:
return maya.parse(self._data["updated"]).datetime()
except OSError:
return None
@async_property
async def poster(self) -> Union[GroupMember, User]:
"""|asyncprop|
User who posted the shout.
Return:
:class:`.GroupMember` if poster is still in the group, :class:`.User` otherwise.
:rtype: Union[:class:`.GroupMember`, :class:`.User`]
"""
poster_id = self._data["poster"]["userid"]
poster_user = self._data["poster"]["username"]
user = await self._state.client.get_user(id=poster_id, username=poster_user)
try: # try to get a GroupMember instead of User
return await self.group.get_member(user)
except UserNotInGroup:
return user
class GroupMember(User, _GroupMember):
"""
Represents a group member.
This class inherits all functionality from :class:`.User`.
**Operations**
**x == y**
Checks that two users are equal.
**x != y**
Checks that two users are not equal.
**x > y**
Checks that member X has a greater rank than member Y.
**x >= y**
        Checks that member X's rank is greater than or equal to member Y's.
**x < y**
Checks that member X has a lesser rank than member Y.
**x <= y**
        Checks that member X's rank is less than or equal to member Y's.
Attributes:
group (:class:`.Group`): Group the member belongs to.
"""
__slots__ = ("_state", "_data", "group")
def __init__(self, *, state, data, group):
super().__init__(state=state, data=data.get("user", data.get("User")))
self._data.update({
"role": None
})
self._data.update(data)
self.group = group
def __repr__(self):
return "GroupMember({!r}, group={!r}, rank={!r})".format(self._data["username"] or self._data["id"],
self.group._data["name"] or self.group._data["id"],
(self._data["role"] or {}).get("rank"))
def _comp(self, other):
if self._data["role"] is None:
my_rank = 0
else:
my_rank = self._data["role"].get("rank", 0)
if isinstance(other, GroupMember):
if other._data["role"] is None:
other_rank = 0
else:
other_rank = other._data["role"].get("rank", 0)
elif isinstance(other, Role):
other_rank = other._data.get("rank", 0)
else:
raise UserError("Can't compare GroupMember with {}".format(other.__class__.__name__))
if my_rank > other_rank:
return 1
elif my_rank == other_rank:
return 0
else:
return -1
def __gt__(self, other):
c = self._comp(other)
return c == 1
def __ge__(self, other):
c = self._comp(other)
        return c >= 0
def __lt__(self, other):
c = self._comp(other)
return c == -1
def __le__(self, other):
c = self._comp(other)
        return c <= 0
@property
def role(self):
"""
Member's role within the group.
:type: :class:`.Role`
"""
if self._data["role"] is None:
raise RoleNotFound
return Role(state=self._state, data=self._data["role"], group=self.group)
@async_property
async def rank(self):
"""|asyncprop|
        Member's rank within the group. Shortcut for ``await member.role.rank``.
:rtype: int
"""
return await self.role.rank
class Role(_Role):
"""
Represents a roleset within a group.
**Operations**
**x == y**
Checks that two roles are equal.
**x != y**
Checks that two roles are not equal.
**x > y**
Checks that role X has a greater rank than role Y.
**x >= y**
        Checks that role X's rank is greater than or equal to role Y's.
**x < y**
Checks that role X has a lesser rank than role Y.
**x <= y**
        Checks that role X's rank is less than or equal to role Y's.
"""
__slots__ = ("_state", "_data", "group")
def __init__(self, *, state: Session, data, group):
self._state = state
self._data = CaseInsensitiveDict({
"id": None,
"name": None,
"description": None,
"rank": None,
"membercount": None,
"permissions": None,
})
self._update(data)
self.group = group
def __repr__(self):
return "Role({!r}, {!r}, rank={!r})".format(self.group, self._data["name"], self._data["rank"])
def __hash__(self):
return hash(self._data["id"] << 22 | self.group._data["id"])
def __eq__(self, other):
if not isinstance(other, Role):
return False
if self.group != other.group:
return False
return self._data["id"] == other._data["id"] and self._data["id"] is not None
def _comp(self, other):
if isinstance(other, Role):
other = other._data["rank"]
elif isinstance(other, GroupMember):
if other._data["role"] is None:
return False
other = other._data["role"]["rank"]
if self._data["rank"] > other:
return 1
elif self._data["rank"] < other:
return -1
else:
return 0
def __gt__(self, other):
c = self._comp(other)
return c == 1
def __ge__(self, other):
c = self._comp(other)
return c >= 0
def __lt__(self, other):
c = self._comp(other)
return c == -1
def __le__(self, other):
c = self._comp(other)
return c <= 0
def _update(self, data):
self._data.update(data)
async def _get_role_details(self):
data = await self._state.get_role_details(await self.id)
data = data["data"][0]
self._update(data)
@async_property
async def id(self):
"""|asyncprop|
The role's ID.
:rtype: int
"""
return self._data["id"]
@async_property
async def name(self):
"""|asyncprop|
The role's name.
:rtype: str
"""
if self._data["name"] is None:
await self._get_role_details()
return self._data["name"]
@async_property
async def description(self):
"""|asyncprop|
The role's description.
:rtype: str
"""
if self._data["description"] is None:
await self._get_role_details()
return self._data["description"]
@async_property
async def rank(self):
"""|asyncprop|
The role's integer rank.
:rtype: int
"""
if self._data["rank"] is None:
await self._get_role_details()
return self._data["rank"]
@async_property
async def member_count(self):
"""|asyncprop|
Number of members belonging to this role.
:rtype: int
"""
await self._get_role_details()
return self._data["membercount"]
```
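The rich comparisons on `GroupMember` and `Role` above all funnel through a `_comp()` helper that returns -1, 0, or 1 based on rank. A stripped-down sketch of that scheme, with an illustrative `Rank` class:
```python
class Rank:
    def __init__(self, rank):
        self.rank = rank

    def _comp(self, other):
        if self.rank > other.rank:
            return 1
        if self.rank < other.rank:
            return -1
        return 0

    def __gt__(self, other): return self._comp(other) == 1
    def __ge__(self, other): return self._comp(other) >= 0
    def __lt__(self, other): return self._comp(other) == -1
    def __le__(self, other): return self._comp(other) <= 0

owner, guest = Rank(255), Rank(1)
print(owner > guest, guest <= owner)  # True True
```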
#### File: roblox.py/roblox/util.py
```python
import re
def urlify(s):
# Remove all non-word characters (everything except numbers and letters)
s = re.sub(r"[^\w\s]", '', s)
# Replace all runs of whitespace with a single dash
s = re.sub(r"\s+", '-', s)
return s
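# Illustrative usage (not part of the original module):
#   urlify("Hello, World!")          -> "Hello-World"
#   urlify("roblox.py:  async API")  -> "robloxpy-async-API"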
``` |
{
"source": "JPatryk13/Portfolio",
"score": 3
} |
#### File: projects/tests/test_models.py
```python
from django.test import TestCase
from projects.models import Project
class ProjectModelTest(TestCase):
@classmethod
def setUpTestData(cls):
Project.objects.create(title="Title of the test project.")
def test_label_title(self):
project = Project.objects.get(id=1)
field_label = project._meta.get_field('title').verbose_name
self.assertEquals(field_label, 'Project title.')
def test_label_prev_description(self):
project = Project.objects.get(id=1)
field_label = project._meta.get_field('prev_description').verbose_name
self.assertEquals(field_label, 'Short description.')
def test_label_description(self):
project = Project.objects.get(id=1)
field_label = project._meta.get_field('description').verbose_name
self.assertEquals(field_label, 'Description.')
def test_max_length_title(self):
project = Project.objects.get(id=1)
max_length = project._meta.get_field('title').max_length
self.assertEquals(max_length, 100)
def test_max_length_prev_description(self):
project = Project.objects.get(id=1)
max_length = project._meta.get_field('prev_description').max_length
self.assertEquals(max_length, 300)
def test_max_length_description(self):
project = Project.objects.get(id=1)
max_length = project._meta.get_field('description').max_length
self.assertEquals(max_length, 2500)
def test_max_length_phase(self):
project = Project.objects.get(id=1)
max_length = project._meta.get_field('phase').max_length
self.assertEquals(max_length, 1)
def test_object_name(self):
project = Project.objects.get(id=1)
expected_name = project.title
self.assertEquals(expected_name, str(project))
def test_get_absolute_url(self):
project = Project.objects.get(id=1)
self.assertEquals(project.get_absolute_url(), '/project/1')
```
#### File: Portfolio/projects/views_old.py
```python
from django.shortcuts import render, redirect
from django.views import generic
from django.template.loader import get_template
from django.core.mail import EmailMessage
from .models import Project
from .forms import ContactForm
def index(request):
return render(request, 'index.html')
def about(request):
return render(request, 'about.html')
def contact(request):
# Get contact form (name, email, message)
form_class = ContactForm
if request.method == 'POST': # If the submit button was pressed...
form = form_class(data=request.POST) # ...extract the data from the request into 'form'
#
# HttpRequest.POST
# A dictionary-like object containing all given HTTP POST parameters,
# providing that the request contains form data.
#
# dict.get(key, value)
# The get() method returns the value for the specified key if key is
# in dictionary.
#
# email = EmailMessage(
# subject= 'Hello',
# body= 'Body goes here',
# from_email= '<EMAIL>',
# to= ['<EMAIL>', '<EMAIL>'],
# bcc= ['<EMAIL>'],
# reply_to= ['<EMAIL>'],
# headers= {'Message-ID': 'foo'},
# )
#
if form.is_valid(): # If the content of the form (extracted data) is valid...
name = request.POST.get('name', '') # ...extract the name
email = request.POST.get('email', '') # ...extract the email
message = request.POST.get('message', '') # ...extract the message
# Email the profile with the contact information
template = get_template('contact_template.txt') # Grab template for message output
context = {
'name': name,
'email': email,
'message': message,
} # Organise user input into context dictionary
content = template.render(context) # Feed context into contact_template
email_message = EmailMessage(
subject='New message.',
body=content,
from_email=email,
to=['<EMAIL>'],
headers={'Reply-To': email}
) # Build a message
email_message.send() # And send it
return redirect('contact') # Get the user back to the contact page
# Return (render) contact page with form_class as form (context)
return render(request, 'contact.html', {'form': form_class})
class ProjectListView(generic.ListView):
model = Project
class ProjectDetailView(generic.DetailView):
model = Project
``` |
{
"source": "jpatton-USGS/earthquake-processing-formats",
"score": 3
} |
#### File: python/test/testSite.py
```python
import processingformats.site
# stdlib imports
import unittest
class TestSite(unittest.TestCase):
STATION = 'BOZ'
CHANNEL = 'BHZ'
NETWORK = 'US'
LOCATION = '00'
LATITUDE = 45.596970
LONGITUDE = -111.629670
ELEVATION = 1589.000000
JSONSTRING = '{"Station": "BOZ", "Network": "US", "Latitude": 45.59697, "Longitude": -111.62967, "Elevation": 1589.0, "Channel": "BHZ", "Location": "00"}'
DICT = {'Station': 'BOZ', 'Channel': 'BHZ', 'Network': 'US', 'Location': '00', 'Latitude': 45.596970, 'Longitude': -111.629670, 'Elevation': 1589.000000}
def test_init(self):
site = processingformats.site.Site()
self.assertFalse(hasattr(site, 'station'))
self.assertFalse(hasattr(site, 'channel'))
self.assertFalse(hasattr(site, 'network'))
self.assertFalse(hasattr(site, 'location'))
self.assertFalse(hasattr(site, 'latitude'))
self.assertFalse(hasattr(site, 'longitude'))
self.assertFalse(hasattr(site, 'elevation'))
site = processingformats.site.Site(self.STATION, self.CHANNEL, self.NETWORK, self.LOCATION, self.LATITUDE, self.LONGITUDE, self.ELEVATION)
self.assertTrue(hasattr(site, 'station'))
self.assertTrue(hasattr(site, 'channel'))
self.assertTrue(hasattr(site, 'network'))
self.assertTrue(hasattr(site, 'location'))
self.assertTrue(hasattr(site, 'latitude'))
self.assertTrue(hasattr(site, 'longitude'))
self.assertTrue(hasattr(site, 'elevation'))
self.assertEqual(site.station, self.STATION)
self.assertEqual(site.channel, self.CHANNEL)
self.assertEqual(site.network, self.NETWORK)
self.assertEqual(site.location, self.LOCATION)
self.assertEqual(site.latitude, self.LATITUDE)
self.assertEqual(site.longitude, self.LONGITUDE)
self.assertEqual(site.elevation, self.ELEVATION)
def test_toJSON(self):
site = processingformats.site.Site(self.STATION, self.CHANNEL, self.NETWORK, self.LOCATION, self.LATITUDE, self.LONGITUDE, self.ELEVATION)
self.assertEqual(site.toJSONString(), self.JSONSTRING)
def test_fromJSON(self):
site = processingformats.site.Site()
site.fromJSONString(self.JSONSTRING)
self.assertEqual(site.station, self.STATION)
self.assertEqual(site.channel, self.CHANNEL)
self.assertEqual(site.network, self.NETWORK)
self.assertEqual(site.location, self.LOCATION)
self.assertEqual(site.latitude, self.LATITUDE)
self.assertEqual(site.longitude, self.LONGITUDE)
self.assertEqual(site.elevation, self.ELEVATION)
def test_toDict(self):
site = processingformats.site.Site(self.STATION, self.CHANNEL, self.NETWORK, self.LOCATION, self.LATITUDE, self.LONGITUDE, self.ELEVATION)
self.assertEqual(site.toDict(), self.DICT)
def test_fromDict(self):
site = processingformats.site.Site()
site.fromDict(self.DICT)
self.assertEqual(site.station, self.STATION)
self.assertEqual(site.channel, self.CHANNEL)
self.assertEqual(site.network, self.NETWORK)
self.assertEqual(site.location, self.LOCATION)
self.assertEqual(site.latitude, self.LATITUDE)
self.assertEqual(site.longitude, self.LONGITUDE)
self.assertEqual(site.elevation, self.ELEVATION)
def test_isValid(self):
site = processingformats.site.Site(self.STATION, self.CHANNEL, self.NETWORK, self.LOCATION, self.LATITUDE, self.LONGITUDE, self.ELEVATION)
self.assertTrue(site.isValid())
badSite = processingformats.site.Site()
self.assertFalse(badSite.isValid())
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jpaul121/amzn-review-sentiment",
"score": 3
} |
#### File: jpaul121/amzn-review-sentiment/model.py
```python
import string
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
import settings as S
from torch.utils.data import Dataset
class ReviewDataset(Dataset):
def __init__(self, review_df, vectorizer):
self.review_df = review_df
self.vectorizer_ = vectorizer
self.train_df = self.review_df[self.review_df["split"] == "train"]
self.train_size = len(self.train_df)
self.val_df = self.review_df[self.review_df["split"] == "val"]
self.val_size = len(self.val_df)
self.test_df = self.review_df[self.review_df["split"] == "test"]
self.test_size = len(self.test_df)
self.lookup_dict_ = {
"train": (self.train_df, self.train_size),
"val": (self.val_df, self.val_size),
"test": (self.test_df, self.test_size)
}
self.set_split("train")
def set_split(self, split="train"):
self.target_split_ = split
self.target_df_, self.target_size_ = self.lookup_dict_[split]
def __len__(self):
return self.target_size_
def __getitem__(self, index):
row = self.target_df_.iloc[index]
review_vector = \
self.vectorizer_.vectorize(row["review_text"])
rating_index = \
self.vectorizer_.rating_vocab.lookup_token(row["binary_score"])
return {
"x_data": review_vector,
"y_target": rating_index
}
def get_n_batches(self, batch_size):
return len(self) // batch_size
class Vocabulary(object):
def __init__(self, token_to_idx=None, add_unk=True, unk_token="<UNK>"):
if token_to_idx is None:
token_to_idx = {}
self.token_to_idx_ = token_to_idx
self.idx_to_token_ = {
idx: token
for token, idx in self.token_to_idx_.items()
}
self.add_unk_ = add_unk
self.unk_token_ = unk_token
self.unk_index = -1
if self.add_unk_:
self.unk_index_ = self.add_token(unk_token)
def add_token(self, token):
if token in self.token_to_idx_:
index = self.token_to_idx_[token]
else:
index = len(self.token_to_idx_)
self.token_to_idx_[token] = index
self.idx_to_token_[index] = token
return index
def lookup_token(self, token):
if self.add_unk_:
return self.token_to_idx_.get(token, self.unk_index)
else:
return self.token_to_idx_[token]
def lookup_index(self, index):
if index not in self.idx_to_token_:
raise KeyError(f"index ({index}) is not in the Vocabulary")
return self.idx_to_token_[index]
def __str__(self):
return f"<Vocabulary(size={len(self)})>"
def __len__(self):
return len(self.token_to_idx_)
class Classifier(nn.Module):
def __init__(self, num_features, hidden_dim=S.HIDDEN_DIM):
super(Classifier, self).__init__()
self.fc1 = nn.Linear(in_features=num_features, out_features=hidden_dim)
self.fc2 = nn.Linear(in_features=hidden_dim, out_features=1)
def forward(self, x_in, dropout=False, dropout_p=S.DROPOUT_P, apply_sigmoid=False):
intermediate = F.relu(self.fc1(x_in))
if dropout == True:
y_out = self.fc2(F.dropout(intermediate, p=dropout_p)).squeeze()
else:
y_out = self.fc2(intermediate).squeeze()
if apply_sigmoid:
y_out = F.sigmoid(y_out)
return y_out
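# --- Illustrative smoke test (not part of the original module; the tokens and shapes are hypothetical,
# and settings.py is assumed to define HIDDEN_DIM and DROPOUT_P) ---
if __name__ == "__main__":
    import torch

    vocab = Vocabulary()
    for token in "great product would buy again".split():
        vocab.add_token(token)
    print(len(vocab), vocab.lookup_token("missing"))  # 6 -1 (includes <UNK>; unknown tokens fall back to -1)

    clf = Classifier(num_features=len(vocab), hidden_dim=32)
    x = torch.rand(4, len(vocab))                     # fake count/one-hot review vectors
    print(clf(x, apply_sigmoid=True).shape)           # torch.Size([4])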
``` |
{
"source": "jpaul121/Banter",
"score": 2
} |
#### File: Banter/authentication/serializers.py
```python
from django.conf import settings
from django.contrib.auth.models import User
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from rest_framework import serializers
class AppUserSerializer(serializers.ModelSerializer):
password = serializers.CharField(min_length=8, write_only=True)
class Meta:
model = User
extra_kwargs = { 'password': { 'write_only': True } }
fields = [ 'username', 'password' ]
def create(self, validated_data):
password = validated_data['password']
instance = self.Meta.model(**validated_data)
if password is not None:
instance.set_password(password)
instance.save()
return instance
class AppTokenObtainPairSerializer(TokenObtainPairSerializer):
@classmethod
def get_token(cls, user):
token = super().get_token(user)
# Just in case I need to modify user tokens later on
return token
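# Illustrative usage of the user serializer (hypothetical data; standard DRF flow):
#   serializer = AppUserSerializer(data={"username": "alice", "password": "correct-horse-battery"})
#   serializer.is_valid(raise_exception=True)
#   user = serializer.save()  # create() hashes the password via set_password()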
``` |
{
"source": "jpaulhart/BC-Covid-19",
"score": 3
} |
#### File: src/pages/recents.py
```python
import datetime
from datetime import timedelta
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
import streamlit as st
import awesome_streamlit as ast
import constants as cn
# pylint: disable=line-too-long
def write():
"""Used to write the page in the streamlit_app.py file"""
st.title("Countries Covid Cases")
cn.DATE_SPANS()
st.markdown('#### ')
country_lists = [
['Italy'],
['Spain'],
['Portugal'],
['France'],
['Canada'],
['US'],
['Oman','Jordan','Morocco','Tunisia', 'Algeria'],
['Thailand','Cambodia','Vietnam'],
['Argentina','Chile','Uruguay'],
]
# Detail trend report for select countries
for country_list in country_lists:
print(f"Countries: {len(country_list)}, Country List: {country_list}")
if len(country_list) != 1:
return
country_display = ', '.join(country_list)
st.markdown(cn.HORIZONTAL_RULE, unsafe_allow_html=True)
#st.markdown(f'**Country:** {country_display}')
st.markdown('#### ')
fig2 = plt.figure(1, figsize=(8, 5))
plt.xlabel="Date"
plt.ylabel="Number"
#plt.xticks(rotation=45)
ax = plt.gca()
ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
for cty in country_list:
plt.title(f'{cty} New Confirmed Cases - Last 20 Days', fontsize='large')
file_name = cty + '.csv'
file_url = f'{cn.CASES_BASE_URL}{file_name.replace(" ", "%20")}'
df = pd.read_csv(file_url)
df = df.tail(20)
plt.plot(df['Date'], df['ConfirmedNewMean'], label=df['Country_Region'], linewidth=2, color = 'darkslategrey')
plt.bar(df['Date'], df['ConfirmedNew'], label=df['Country_Region'], color = 'lightseagreen')
# Add a legend
plt.legend(country_list)
plt.grid(b=True, which='major')
st.pyplot(fig2)
plt.close()
``` |
{
"source": "JPaulMora/grin-pool",
"score": 3
} |
#### File: grin-py/utils/MWGP_earningsEstimate.py
```python
import os
import sys
import argparse
from datetime import datetime, timedelta
try:
import requests
except Exception as e:
print("Error: This script requires the 'requests' module, please run `pip3 install requests`")
Graph = True
try:
import plotly
import plotly.graph_objs as go
except Exception as e:
Graph = False
mwURL = "https://api.mwgrinpool.com"
NanoGrin = 1.0/1000000000.0
SecondsInDay = float(60*60*24)
PPLNGSeconds = float(60*60*4)
def print_header():
print(" ")
print("############# MWGrinPool Average Daily Earnings #############")
print("## ")
if Graph == False:
print(" WARNING: ")
print(" This script requires the 'plotly' module to produce a graph")
print(" Please run: `pip3 install plotly`")
print(" (running in text mode)")
print(" ")
def print_footer(rewardTotal, c29gps, c31gps, numDays, startTS, endTS):
print(" ")
print(" ")
print(" Report for {} days - from: {} to: {}".format(numDays, startTS.strftime("%m-%d-%y %H:%M"), endTS.strftime("%m-%d-%y %H:%M")))
print(" Mining C29 at {}gps, C31 at {}gps".format(c29gps, c31gps))
print(" ")
print(" Total Rewards: {} Grin".format(rewardTotal))
print(" Avg Daily Reward = {} Grin".format(rewardTotal/NumDays))
print(" ")
def epoch_to_dt(epoch):
return datetime.fromtimestamp(epoch)
parser = argparse.ArgumentParser()
parser.add_argument("--days", help="Number of days to average over")
parser.add_argument("--c29gps", help="Miners C29 Graphs/second")
parser.add_argument("--c31gps", help="Miners C31 Graphs/second")
parser.add_argument("--debug", help="Print lots of debug info")
args = parser.parse_args()
print_header()
if args.days is None:
NumDays = float(input(" Number of days to average over: "))
else:
NumDays = float(args.days)
if NumDays > 62:
print(" ")
print(" -- Error: Please limit your query to 60 days to prevent excess load on our pool API")
print(" ")
sys.exit(1)
if args.c29gps is None:
C29Gps = float(input(" Miners C29 Graphs/second: "))
else:
C29Gps = float(args.c29gps)
if args.c31gps is None:
C31Gps = float(input(" Miners C31 Graphs/second: "))
else:
C31Gps = float(args.c31gps)
if args.debug is None:
    debug = False
else:
    debug = True
EndTS = datetime.now()
startTS = EndTS - timedelta(days=NumDays)
# Get a list of the pool-found-blocks within the range
poolblocksURL = mwURL + "/pool/blocks/0,1440/timestamp,height"
poolblocksJSON = requests.get(url = poolblocksURL).json()
poolblocks = [block['height'] for block in poolblocksJSON if(block['timestamp'] >= startTS.timestamp() and block['timestamp'] <= EndTS.timestamp())]
poolblocks.sort()
debug and print("Pool Blocks found in range: {}".format(poolblocks))
print(" ")
print(" Getting Mining Data: ")
rewardTotal = 0
x = [startTS]
y = [0]
debug and print("Start Time: {} - {}".format(startTS, startTS.timestamp()))
debug and print("End Time: {} - {}".format(EndTS, EndTS.timestamp()))
debug or sys.stdout.write(" ")
sys.stdout.flush()
for blockHeight in poolblocks:
# For each pool block, get some information:
# Secondary Scale Value
# Any TX fees included in the block reward
grinBlockURL = mwURL + "/grin/block/{}/timestamp,height,secondary_scaling,fee".format(blockHeight)
grinblockJSON = requests.get(url = grinBlockURL).json()
# Pool GPS at that block height
poolGpsURL = mwURL + "/pool/stat/{}/gps".format(blockHeight)
poolGpsJSON = requests.get(url = poolGpsURL).json()
# Calculate theoretical miners reward
scale = (2**(1+31-24)*31)/float(max(29, grinblockJSON['secondary_scaling']))
minerValue = C29Gps + C31Gps*scale
poolValue = 0
for gps in poolGpsJSON['gps']:
if gps['edge_bits'] == 29:
poolValue += gps['gps']
else:
poolValue += gps['gps']*scale
debug and print("Miner value: {}, pool value: {}".format(minerValue, poolValue))
fullMinersReward = (minerValue/poolValue)*(60+grinblockJSON['fee']*NanoGrin)
tsNow = datetime.fromtimestamp(grinblockJSON['timestamp'])
timedelta = tsNow - startTS
# Check if we get the full reward or not
if(timedelta.total_seconds() < PPLNGSeconds):
minersReward = fullMinersReward * (timedelta.total_seconds()/PPLNGSeconds)
else:
minersReward = fullMinersReward
debug and print(" + Miners reward for {} block {}: {}".format(datetime.fromtimestamp(grinblockJSON['timestamp']).strftime('%c'), blockHeight, minersReward))
rewardTotal += minersReward
# Graph
x.append(tsNow)
timedelta = tsNow - startTS
debug and print("timedelta = {}".format(timedelta))
daysSinceStartTS = float(timedelta.total_seconds())/float(SecondsInDay)
debug and print("daysSinceStartTS = {}".format(daysSinceStartTS))
y.append(rewardTotal/daysSinceStartTS)
debug and print(" ")
debug or sys.stdout.write(".")
sys.stdout.flush()
x.append(EndTS)
y.append(rewardTotal/NumDays)
print_footer(rewardTotal, C29Gps, C31Gps, NumDays, startTS, EndTS)
if Graph == True:
print("Generating graph...")
graphName = "Avg Daily Reward: {} Grin".format(round(rewardTotal/NumDays, 2))
graphData = [go.Scatter(x=x, y=y, name=graphName)]
graphLayout = go.Layout(
title=go.layout.Title(text=graphName),
xaxis=go.layout.XAxis(
title=go.layout.xaxis.Title(
text='Time',
font=dict(
family='Courier New, monospace',
size=18,
color='#008000'
)
)
),
yaxis=go.layout.YAxis(
title=go.layout.yaxis.Title(
text='Grin',
font=dict(
family='Courier New, monospace',
size=18,
color='#008000'
)
)
),
)
graphFigure = go.Figure(data=graphData, layout=graphLayout)
graph_name = "estimate-{}days.html".format(NumDays)
plotly.offline.plot(graphFigure, filename=graph_name)
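# Worked example of the C31 scale factor used above (the secondary_scaling value is hypothetical):
#   scale = (2**(1+31-24) * 31) / max(29, 1856) = 7936 / 1856 ≈ 4.28
#   so one C31 graph/s counts for roughly 4.28 C29-equivalent graphs/s in the reward split.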
``` |
{
"source": "jpaulofb/cfc_search_engine_tri",
"score": 3
} |
#### File: jpaulofb/cfc_search_engine_tri/main.py
```python
from __future__ import division
from SearchEngine import SearchEngine
from time import time as getTime
#from time import clock as getTime
from util import Query
import Evaluator
import argparse
import sys
CREATE_INDEX_CMD = "createindex"
INTERACTIVE_QUERY_CMD = "iquery"
PROCESS_QUERY_FILE_CMD = "queryfile"
RANKING_SIZE = 20
INDEX_PATH = "cfcIndex.txt"
def createParser():
description = """
Search engine implementation for the CFC collection. For the
'Tópicos em Recuperação de Informação' course at UFAM 2015/2"""
parser = argparse.ArgumentParser(description=description)
functionHelp = """
function can be either:
<{}> for creating the index;
<{}> for an interactive query mode;
<{}> for parsing a cfc query file.
""".format(CREATE_INDEX_CMD, INTERACTIVE_QUERY_CMD,
PROCESS_QUERY_FILE_CMD)
rsHelp = """
optional argument for specifying the amont of documents that
should be returned by a query, defaults to {}
""".format(RANKING_SIZE)
inHelp = """
argument for passing input path to the program, needed by the
{}, and {} functionalities.
""".format(CREATE_INDEX_CMD, PROCESS_QUERY_FILE_CMD)
parser.add_argument("function", help=functionHelp)
parser.add_argument("-in", "--input", help=inHelp, dest="path")
parser.add_argument("-rs", "--rankingsize", help=rsHelp,
type=int, default=RANKING_SIZE, dest="rSize")
return parser
def loadIndexWrapper(eng):
try:
start = getTime()
eng.loadIndex(INDEX_PATH)
print("It took {:.5f} s to load the index."
.format(getTime() - start))
except IOError:
print("Could not read the index at path: {}".format(INDEX_PATH))
print("Please create the index first with argument '{}'."
.format(CREATE_INDEX_CMD))
sys.exit(-1)
return eng
def menuCreateIndex(eng, cfcFolder):
if not cfcFolder:
print("Please enter the path to the cfc collection files using the -in argument")
sys.exit(-1)
try:
eng.createIndex(cfcFolder)
except IOError as e:
print("There was an error while parsing the files in the folder: {}"
              .format(cfcFolder))
print("Please make sure there are cfc files in the folder path.")
print(e.message)
try:
eng.saveIndex(INDEX_PATH)
except IOError as e:
print("Could not save the index file at path: {}".format(INDEX_PATH))
print(e.message)
def menuInteractiveQuery(eng, rankingSize):
eng = loadIndexWrapper(eng)
qId = 1
while True:
try:
queryString = raw_input(">> ")
except EOFError:
print('')
break
except KeyboardInterrupt:
print('')
break
query = Query(qId, queryString, [])
start = getTime()
results, evalResults = eng.processQuery(query, rankingSize)
print("It took {} s to process the query."
.format(getTime() - start))
for result in results:
similarity, doc = result
print("similarity: {}. id: {}"
.format(similarity, doc.id))
print("\ttitle: {}".format(doc.title))
print("\tauthors: {}, year: {}\n"
.format(doc.authors, doc.year))
def menuQueryFile(eng, queryFile, rankingSize):
eng = loadIndexWrapper(eng)
if not queryFile:
print("Please enter the path to the cfc query file using the -in argument")
sys.exit(-1)
print("ranking size: {}".format(rankingSize))
MAPs = []
recallPointsLst = []
pAtTens = []
times = []
try:
print("query id ; P@10 ; interpolated MAP ; time (s)")
for query in eng.parser.parseQueryFile(queryFile):
start = getTime()
results, evalResults = eng.processQuery(query, rankingSize,
evaluate=True)
end = getTime() - start
MAPs.append(evalResults["MAP"])
recallPointsLst.append(evalResults["recallPoints"])
pAtTens.append(evalResults["P@10"])
times.append(end)
print("{:03d} ; {:.5f} ; {:.5f} ; {:.5f} "
.format(query.id, pAtTens[-1], MAPs[-1], times[-1]))
except IOError as e:
print("Could not open the cfc query file at: {}.".format(queryFile))
print(e.message)
sys.exit(-1)
avgRecallPoints = Evaluator.getAverageRecallPoints(recallPointsLst)
avgMAP = sum(MAPs) / len(MAPs)
avgPAtTen = sum(pAtTens) / len(pAtTens)
avgTime = sum(times) / len(times)
print("\nAverages:")
print("\tP@10: {:.5f}".format(avgPAtTen))
print("\tinterpolated MAP: {:.5f}".format(avgMAP))
print("\ttime: {:.5f} s".format(avgTime))
print("\tinterpolated recall points (precision, recall):")
for pair in avgRecallPoints:
p, r = pair
print("\t({:.5f}, {:.5f}),".format (p, r))
if __name__ == '__main__':
parser = createParser()
args = parser.parse_args()
eng = SearchEngine()
if args.function == CREATE_INDEX_CMD:
collectionFolder = args.path
start = getTime()
menuCreateIndex(eng, collectionFolder)
print("It took {} s to create and save the index."
.format(getTime() - start))
elif args.function == INTERACTIVE_QUERY_CMD:
rankingSize = args.rSize
menuInteractiveQuery(eng, rankingSize)
elif args.function == PROCESS_QUERY_FILE_CMD:
queryFile = args.path
rankingSize = args.rSize
start = getTime()
menuQueryFile(eng, queryFile, rankingSize)
print("It took {} s to load the index and process all the queries."
.format(getTime() - start))
else:
parser.print_help()
#parser.print_usage()
```
#### File: jpaulofb/cfc_search_engine_tri/Parser.py
```python
from collections import Counter
from util import Document
from util import Query
import re
class Parser:
def __init__(self, stopWordsPath="sw.txt"):
self.stopWords = self.readStopWords(stopWordsPath)
self.cfcCollectionAttrs = [
"PN", # paper number
"RN", # doc id in the collection
"AN", # super collection id i guess
"AU", # authors
"TI", # title
"SO", # source
"MJ", # major subjects
"MN", # minor subjects
"AB", # abstract when present, or excerpt otherwise
"EX", # abstract when present, or excerpt otherwise
"RF", # list of references used in the doc
"CT", # citation list to the doc
]
self.cfcQueryAttrs = [
"QN", # query number
"QU", # proper query
"NR", # number of relevant docs
"RD", # relevant documents
]
def initializeLastItem(self, attrList, lastItem) :
"""
Helper method, to reinitialize a dictionary containing the data from a
CFC collection document or query. Used by the file parsers
param doc: a dict.
return: the reinitialized dict.
"""
for attr in attrList:
lastItem[attr] = ''
lastItem["lastAttr"] = ''
return lastItem
def parseCFCFile(self, path, regex, lastItemAttrs, treatLastItemFunction):
"""
CFC Collection specific file parser. It's a internal generic file
parser, users should use the parseFile or parseQueryFile methods
instead of this one.
If it fails to open the file, does not attempt to treat the exception.
param path: string containig the path to the file to parse.
param regex: a string containing a regex to separate attributes and
content. The regex must contain a named attribute called "attr" and
another called "content".
param lastItemAttrs: a list containing the attributes of the items
present in the file for usel of self.initializeLastItem method.
param treatLastItemFunction: a function to be called when we finish
parsing an item from the path. The function should receive a dict
containing the data of the file, and return a result to be yielded by
this method.
yield: results of treatLastItemFunction for each item in the file on
the param path.
"""
fin = open(path)
# helper funcion to reset the dict used for parsing the fin. Last doc
# holds the temporary data of the current document being parsed
lastItem = self.initializeLastItem(lastItemAttrs, {})
for line in fin:
line = line.strip()
# if there's content in the line we haven't finished parsing a doc
if line and fin:
# add the content of the line to the correct attr in the
# lastItem dict
lastItem = self.parseLine(line, lastItem, regex)
# else we finished reading a doc
else:
if self.isEmptyItem(lastItem): continue
result = treatLastItemFunction(lastItem)
lastItem = self.initializeLastItem(lastItemAttrs, lastItem)
yield result
fin.close()
def parseFile(self, path):
"""
Wrapper method for the self.parseCFCFile method, for parsing the proper
file containng the documents from the CFC collection.
Does not treat the exception that may be raised when opening the file
in the path.
param path: string containing the path to the file.
        yield: each document found in the file, the returned objects are tuples of
the kind (util.Document, collections.Counter). The counter is a dict
with word keys and frequency values.
"""
print("Processing file: {}".format(path))
# regex for separating the attributes of the document from content
regex = r"^((?P<attr>(PN|RN|AN|AU|TI|SO|MJ|MN|AB|EX|RF|CT))\s+)?(?P<content>(.*\w+.*)*)"
# attrs present in the cfc collection documents
attrs = self.cfcCollectionAttrs
# helper function to deal with the parsed data. Transforms the data
# parsed into a tuple of a util.Document object and a Counter with the
# frequency of the words in the document
function = self.treatLastDoc
for result in self.parseCFCFile(path, regex, attrs, function):
yield result
def parseLine(self, line, lastItem, regex):
"""
Parse a single line of a CFC file, adding the content of the line to
the last seen attribute.
The regex should have a named field called "attr" and another
"content". If an attr is found in the line, updates a "lastItem" entry
in the lastItem dict, with the attr found.
param line: a string containing the line to be parsed.
param lastItem: a dict that will contain the temporary data of the item
being parsed.
param regex: a string containing a regex to parse the line. Must have a
named fields "attr" and "content".
return: the param lastItem dict, updated with the param line.
"""
assert type(lastItem) == dict
sep = re.compile(regex)
# separate a possible attribute from content, with a regex
match = sep.match(line)
assert match
# groups named in the sep regex
attr = match.group("attr")
content = match.group("content")
# in the case there's an attribute in the line, we know we have
# finished the last attribute we have seen, otherwise we append to
# the last attribute seen
if attr:
lastItem["lastAttr"] = attr
lastAttr = lastItem["lastAttr"]
# assert lastAttr # buggy because of strange ^Z lines in the end of some files
# add the content of the line to the lastAttr seen
if lastAttr:
lastItem[lastAttr] = (' '.join([lastItem[lastAttr], content.strip()])).strip()
return lastItem
def parseQueryFile(self, path):
"""
Wrapper method for the self.parseCFCFile method, for parsing the query
file from the CFC collection.
Does not treat the exception that may be raised when opening the file
in the path.
param path: string containing the path to the file.
yield: each query found in the file, the returned objects are
util.Query objects.
"""
# regex for separating the attributes from the content
regex = r"^\s*(?P<attr>QN|QU|NR|RD)?\s*(?P<content>(.*\w+.*)*)"
# list of attributes present in the cfc query file
attrs = self.cfcQueryAttrs
# helper function that deals with the data parsed and transforms it on
# util.Query objects
function = self.treatLastQuery
for result in self.parseCFCFile(path, regex, attrs, function):
yield result
def tokenize(self, string, regex=r"[a-zA-Z']+"):
"""
Get a list with the words in the string, while also removing the stop
words defined in the creation of the class.
param string: string with the content to be tokenized.
param regex: string containing a regex of what is considered a word.
return: a list of strings containing the non-stop words from the
string param.
"""
# regex for separating the words in the content
tokenizer = re.compile(regex)
# get the words that match the regex and set them to lower case
words = [word.lower() for word in tokenizer.findall(string)]
# removal of the stop words defined in the __init__ method from the
# list of words
for sw in self.stopWords.intersection(words):
while sw in words:
words.remove(sw)
return words
def readStopWords(self, path):
"""
Used in the __init__ method to load the stop words from a file.
If the file is empty returns an empty set. The file must contain words
separated by whitespace characters; they are lower-cased when read. If it
fails to open the file, the exception is not handled.
param path: string containing the path to the file containing the stop
words.
return: a set containing the stop words from the file.
"""
fin = open(path)
# place the stop words in a set for faster access
sws = set()
for line in fin:
line = line.strip()
for word in line.split():
sws.add(word.lower())
fin.close()
return sws
def treatLastDoc(self, lastDoc):
"""
Helper method that transforms the data in the lastDoc dict into a tuple
of util.Document object and a Counter containing the frequencies of the
words in the document
param lastDoc: a dict containing the data parsed.
return: a tuple(util.Document, collections.Counter). The counter is a
dict with word keys and frequency values.
"""
total = Counter()
# the list of relevant attributes to tokenize. Tokenize also
# removes stop words defined in the init method
relevant = ["TI", "AB", "EX", "MJ", "MN"]
for attr in relevant:
content = lastDoc[attr]
assert type(content) == str
words = self.tokenize(content)
counter = Counter(words)
total += counter
# form the Document object return
docId = int(lastDoc["RN"])
# get the year of publication
regex = r"(?P<year>\d{2})(?P<idInYear>\d{3})"
sep = re.compile(regex)
match = sep.match(lastDoc["PN"])
year = int(match.group("year"))
title = lastDoc["TI"]
authors = lastDoc["AU"]
tempNorm = 0 # irrelevant norm to be updated in the future
doc = Document(docId, year, title, authors, tempNorm)
result = doc, total
return result
def isEmptyItem(self, lastItem):
"""
Helper method to know if the last item parsed is empty or not. Needed
because some files have double empty lines between documents.
param lastItem: a dict with the data of the last item parsed.
return: True if none of the values in the lastItem dict evaluates to
True (i.e. the item is empty), False otherwise.
"""
for key in lastItem: # iterkeys() is Python 2 only; plain iteration works in both versions
if lastItem[key]:
return False
return True
def treatLastQuery(self, lastQuery):
"""
Helper method that transforms the data in the lastQuery dict into an
util.Query object.
param lastQuery: a dict containing the data parsed.
return: an util.Query object.
"""
queryId = int(lastQuery["QN"])
queryString = lastQuery["QU"]
sep = re.compile(r"(?P<docId>\d+)\s*(?P<grades>\d+)")
relevants = []
for pair in sep.findall(lastQuery["RD"]):
docId, grades = pair
docId = int(docId)
relevants.append(docId)
return Query(queryId, queryString, relevants)
if __name__ == '__main__':
p = Parser()
for r in p.parseQueryFile("cfquery"):
print(r, '\n')
#s = ' Salivary amylase levels were determined in normal subjects from birth until adult life and in children with conditions sometimes associated with low pancreatic amylase such as malnutrition, coeliac disease and cystic fibrosis. Mixed saliva was collected under carefully standardised conditions and amylase was measured by the method of Dahlqvist. There was a wide scatter of values in the 84 normal subjects, but concentrations rose from very low levels at birth to reach adult levels by the age of 6 months to 1 year. Salivary amylase activity rose normally over ten weeks in one premature infant fed milk by gastrostomy. Thirteen children with coeliac disease and 9 children with cystic fibrosis mostly had normal salivary amylase concentrations. Six out of 12 malnourished children with jejunal villous atrophy of uncertain aetiology had low levels which rose to normal as recovery began.'
#print e.tokenize(s)
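# A minimal usage sketch, not part of the original file: the same parser also
# reads the document file of the collection; "cfc" is an assumed local path.
# for doc, term_counts in p.parseFile("cfc"):
#     print(doc, sum(term_counts.values()))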
``` |
{
"source": "jpaulos/opt_control",
"score": 2
} |
#### File: python/tests/eval_randomized.py
```python
import numpy as np
import matplotlib.pyplot as plt
import time
import pprint
from py_opt_control import min_time_bvp
decimal_places = 1
def compute_many_mp(p0, v0, a0, p1, v1, a1, params):
"""
Compute and verify many motion primitives.
"""
verbose = False
start_time = time.time()
n_mp = p0.shape[0]
mp = []
for i in range(n_mp):
if verbose:
print('Preparing to test problem data:')
print(f"(p0, v0, a0) = {(p0[i], v0[i], a0[i])}")
print(f"(p1, v1, a1) = {(p1[i], v1[i], a1[i])}")
(t, j) = min_time_bvp.min_time_bvp(
# (t, j) = min_time_bvp.min_time_bvp_paranoia(
p0[i], v0[i], a0[i],
p1[i], v1[i], a1[i],
params['v_min'], params['v_max'], params['a_min'], params['a_max'], params['j_min'], params['j_max'],
params['sync_v'], params['sync_a'], params['sync_w'])
a, v, p = min_time_bvp.switch_states(p0[i], v0[i], a0[i], t, j)
st, sj, sa, sv, sp = min_time_bvp.uniformly_sample(p0[i], v0[i], a0[i], t, j, dt=0.01)
is_valid = np.allclose(p1[i], sp[:,-1]) and np.allclose(v1[i], sv[:,-1]) and np.allclose(a1[i], sa[:,-1])
if not is_valid:
print()
print('Test failed. The end position is wrong. Problem data:')
print(f"(p0, v0, a0) = {(p0[i], v0[i], a0[i])}")
print(f"(p1, v1, a1) = {(p1[i], v1[i], a1[i])}")
print(f"Actual final state:")
print(f"(p, v, a) = {(sp[:,-1], sv[:,-1], sa[:,-1])}")
print(f"Final time for each axis (should be identical):")
print(f"{t[:,-1]}")
print()
mp.append({'p0':p0[i], 'v0':v0[i], 'a0':a0[i], 't':t, 'j':j, 'is_valid':is_valid})
sec = (time.time() - start_time)/n_mp
return mp, sec
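# A minimal sketch, not part of the original benchmark: what a single call to
# compute_many_mp looks like for one 2-D primitive. The limits below are
# illustrative assumptions, not values taken from the original test suite.
def _example_single_mp():
    demo_params = {
        'v_min': -10, 'v_max': 10, 'a_min': -5, 'a_max': 5,
        'j_min': -100, 'j_max': 100,
        'sync_v': True, 'sync_a': True, 'sync_w': False,
    }
    p0 = np.zeros((1, 2))
    v0 = np.zeros((1, 2))
    a0 = np.zeros((1, 2))
    p1 = np.ones((1, 2))
    v1 = np.zeros((1, 2))
    a1 = np.zeros((1, 2))
    # returns a list with one motion-primitive dict and the seconds per test
    return compute_many_mp(p0, v0, a0, p1, v1, a1, demo_params)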
def plot_2d_projection_many_mp(ax, mp):
"""
Plot many motion primitives projected onto the x-y axis, independent of
original dimension.
"""
n_dim = mp[0]['p0'].shape[0]
for m in mp:
if m['is_valid']:
st, sj, sa, sv, sp = min_time_bvp.uniformly_sample(
m['p0'], m['v0'], m['a0'], m['t'], m['j'], dt=0.001)
if n_dim > 1:
ax.plot(sp[0,:], sp[1,:])
else:
ax.plot(sp[0,:], np.zeros_like(sp[0,:]))
ax.axis('equal')
def test_to_zero(n_dim, params, n_tests, ax):
p0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
v0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
a0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
p1 = np.zeros((n_tests,n_dim))
v1 = np.zeros((n_tests,n_dim))
a1 = np.zeros((n_tests,n_dim))
mp, sec = compute_many_mp(p0, v0, a0, p1, v1, a1, params)
print('\ntest_to_zero')
n_failed = sum(1 for m in mp if not m['is_valid'])
print(f' failed: {n_failed/n_tests:5.1%} ({n_failed}/{n_tests})')
print(f' speed: {sec*1000:.2f} ms/test')
plot_2d_projection_many_mp(ax, mp)
return n_failed
def test_to_nonzero_p(n_dim, params, n_tests, ax):
p0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
v0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
a0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
p1 = np.ones((n_tests,n_dim))
v1 = np.zeros((n_tests,n_dim))
a1 = np.zeros((n_tests,n_dim))
mp, sec = compute_many_mp(p0, v0, a0, p1, v1, a1, params)
print('\ntest_to_nonzero_p')
n_failed = sum(1 for m in mp if not m['is_valid'])
print(f' failed: {n_failed/n_tests:5.1%} ({n_failed}/{n_tests})')
print(f' speed: {sec*1000:.2f} ms/test')
plot_2d_projection_many_mp(ax, mp)
return n_failed
def test_to_nonzero_pv(n_dim, params, n_tests, ax):
p0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
v0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
a0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
p1 = np.ones((n_tests,n_dim))
v1 = np.ones((n_tests,n_dim))
v1[:,1:] = -1
a1 = np.zeros((n_tests,n_dim))
mp, sec = compute_many_mp(p0, v0, a0, p1, v1, a1, params)
print('\ntest_to_nonzero_pv')
n_failed = sum(1 for m in mp if not m['is_valid'])
print(f' failed: {n_failed/n_tests:5.1%} ({n_failed}/{n_tests})')
print(f' speed: {sec*1000:.2f} ms/test')
plot_2d_projection_many_mp(ax, mp)
return n_failed
def test_to_nonzero_a(n_dim, params, n_tests, ax):
p0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
v0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
a0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
p1 = np.zeros((n_tests,n_dim))
v1 = np.zeros((n_tests,n_dim))
a1 = np.ones((n_tests,n_dim))
a1[:,1:] = -1
mp, sec = compute_many_mp(p0, v0, a0, p1, v1, a1, params)
print('\ntest_to_nonzero_a')
n_failed = sum(1 for m in mp if not m['is_valid'])
print(f' failed: {n_failed/n_tests:5.1%} ({n_failed}/{n_tests})')
print(f' speed: {sec*1000:.2f} ms/test')
plot_2d_projection_many_mp(ax, mp)
return n_failed
def test_to_nonzero_pva(n_dim, params, n_tests, ax):
p0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
v0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
a0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
p1 = np.ones((n_tests,n_dim))
v1 = np.ones((n_tests,n_dim))
v1[:,1:] = -1
a1 = -np.ones((n_tests,n_dim))
a1[:,1:] = 1
mp, sec = compute_many_mp(p0, v0, a0, p1, v1, a1, params)
print('\ntest_to_nonzero_pva')
n_failed = sum(1 for m in mp if not m['is_valid'])
print(f' failed: {n_failed/n_tests:5.1%} ({n_failed}/{n_tests})')
print(f' speed: {sec*1000:.2f} ms/test')
plot_2d_projection_many_mp(ax, mp)
return n_failed
def test_zero_a(n_dim, params, n_tests, ax):
p0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
v0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
a0 = np.zeros((n_tests,n_dim))
p1 = np.ones((n_tests,n_dim))
v1 = np.ones((n_tests,n_dim))
v1[:,1:] = -1
a1 = np.zeros((n_tests,n_dim))
mp, sec = compute_many_mp(p0, v0, a0, p1, v1, a1, params)
print('\ntest_zero_a')
n_failed = sum(1 for m in mp if not m['is_valid'])
print(f' failed: {n_failed/n_tests:5.1%} ({n_failed}/{n_tests})')
print(f' speed: {sec*1000:.2f} ms/test')
plot_2d_projection_many_mp(ax, mp)
return n_failed
def test_zero_va(n_dim, params, n_tests, ax):
p0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
v0 = np.zeros((n_tests,n_dim))
a0 = np.zeros((n_tests,n_dim))
p1 = np.ones((n_tests,n_dim))
v1 = np.zeros((n_tests,n_dim))
a1 = np.zeros((n_tests,n_dim))
mp, sec = compute_many_mp(p0, v0, a0, p1, v1, a1, params)
print('\ntest_zero_va')
n_failed = sum(1 for m in mp if not m['is_valid'])
print(f' failed: {n_failed/n_tests:5.1%} ({n_failed}/{n_tests})')
print(f' speed: {sec*1000:.2f} ms/test')
plot_2d_projection_many_mp(ax, mp)
return n_failed
def test_state_to_state(n_dim, params, n_tests, ax):
p0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
v0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
a0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
p1 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
v1 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
a1 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
mp, sec = compute_many_mp(p0, v0, a0, p1, v1, a1, params)
print('\ntest_state_to_state')
n_failed = sum(1 for m in mp if not m['is_valid'])
print(f' failed: {n_failed/n_tests:5.1%} ({n_failed}/{n_tests})')
print(f' speed: {sec*1000:.2f} ms/test')
plot_2d_projection_many_mp(ax, mp)
return n_failed
def test_point_to_point(n_dim, params, n_tests, ax):
p0 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
v0 = np.zeros((n_tests,n_dim))
a0 = np.zeros((n_tests,n_dim))
p1 = np.round(np.random.uniform(-1, 1, (n_tests,n_dim)), decimal_places)
v1 = np.zeros((n_tests,n_dim))
a1 = np.zeros((n_tests,n_dim))
mp, sec = compute_many_mp(p0, v0, a0, p1, v1, a1, params)
print('\ntest_point_to_point')
n_failed = sum(1 for m in mp if not m['is_valid'])
print(f' failed: {n_failed/n_tests:5.1%} ({n_failed}/{n_tests})')
print(f' speed: {sec*1000:.2f} ms/test')
plot_2d_projection_many_mp(ax, mp)
return n_failed
if __name__ == '__main__':
n_tests = 1000
n_dim = 2
params = {
'v_min': -10,
'v_max': 10,
'a_min': -5,
'a_max': 5,
'j_min': -100,
'j_max': 100,
'sync_v': True,
'sync_a': True,
'sync_w': False,
}
fig, axes = plt.subplots(3, 2)
axes = axes.flatten()
results = {}
n_failed = test_to_zero(n_dim, params, n_tests, axes[0])
axes[0].set_title(f'To Zero State, Failed {n_failed}/{n_tests}')
results['test_to_zero'] = n_failed
n_failed = test_to_nonzero_p(n_dim, params, n_tests, axes[1])
axes[1].set_title(f'To Nonzero P, Failed {n_failed}/{n_tests}')
results['test_to_nonzero_p'] = n_failed
n_failed = test_to_nonzero_a(n_dim, params, n_tests, axes[2])
axes[2].set_title(f'To Nonzero A, Failed {n_failed}/{n_tests}')
results['test_to_nonzero_a'] = n_failed
n_failed = test_to_nonzero_pv(n_dim, params, n_tests, axes[3])
axes[3].set_title(f'To Nonzero P-V, Failed {n_failed}/{n_tests}')
results['test_to_nonzero_pv'] = n_failed
n_failed = test_to_nonzero_pva(n_dim, params, n_tests, axes[4])
axes[4].set_title(f'To Nonzero P-V-A, Failed {n_failed}/{n_tests}')
results['test_to_nonzero_pva'] = n_failed
fig, axes = plt.subplots(2, 1)
axes = axes.flatten()
n_failed = test_zero_va(n_dim, params, n_tests, axes[1])
axes[1].set_title(f'Zero V-A Boundaries, Failed {n_failed}/{n_tests}')
results['test_zero_va'] = n_failed
n_failed = test_zero_a(n_dim, params, n_tests, axes[0])
axes[0].set_title(f'Zero A Boundaries, Failed {n_failed}/{n_tests}')
results['test_zero_a'] = n_failed
fig, axes = plt.subplots(1, 2)
n_failed = test_point_to_point(n_dim, params, n_tests, axes[0])
axes[0].set_title(f'Point to Point, Failed {n_failed}/{n_tests}')
results['test_point_to_point'] = n_failed
n_failed = test_state_to_state(n_dim, params, n_tests, axes[1])
axes[1].set_title(f'State to State, Failed {n_failed}/{n_tests}')
results['test_state_to_state'] = n_failed
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(results)
# Show plots.
plt.show()
``` |
{
"source": "JPaulsen/Security-Typing",
"score": 3
} |
#### File: 3 STLC + Dynamic/src/TypeParser.py
```python
from ParserUtils import *
from Types import *
def parseFunctionType(expr):
return _parseFunctionType(expr[1], map(_getFirst, expr[2].value()))
def parseNativeType(expr):
if expr == "str":
return str
elif expr == "bool":
return bool
elif expr == "int":
return int
elif expr == "float":
return float
elif expr == "dynamic":
return DynamicType()
else:
raise ValueError(expr + ' is not a valid type.')
def parseType(expr):
if (expr[0].value() == "function"):
checkLengthExpected("Function type", expr, 3)
return _parseFunctionTypeFromTypeExpression(expr)
else:
checkLengthExpected("Native type", expr, 1)
return parseNativeType(expr[0].value())
def _parseFunctionType(returnTypeExpression, parameterTypesExpression):
return FunctionType(parseType(returnTypeExpression), map(parseType, parameterTypesExpression))
def _parseFunctionTypeFromTypeExpression(expr):
return _parseFunctionType(expr[1], expr[2].value())
def _getFirst(touple):
return touple[0]
```
#### File: 3 STLC + Dynamic/test/util.py
```python
import sys
sys.path.insert(0, '../src/')
from Parser import *
from TypeChecker import *
from Interpreter import *
def safeTypeCheck(code):
try:
program = loads(code, true='True', false='False')
except:
return "Syntax Error"
try:
ast = parse(program)
except:
return "Parsing Error"
try:
return typeCheck(ast).type
except:
return "Type Error"
def safeInterp(code):
try:
program = loads(code, true='True', false='False')
except:
return "Syntax Error"
try:
ast = parse(program)
except:
return "Parsing Error"
try:
typeCheckerResult = typeCheck(ast)
ast = typeCheckerResult.astNode
except:
return "Type Error"
try:
return interp(ast)
except:
return "Runtime Error"
```
#### File: 4 STLC + Security/src/Interpreter.py
```python
from AST import *
def interp(ast):
return ast.accept(Interpreter())
class Interpreter:
def __init__(self):
self.env = Environment()
def visitBoolLiteral(self, boolLiteral):
return boolLiteral.value
def visitIntLiteral(self, intLiteral):
return intLiteral.value
def visitFloatLiteral(self, floatLiteral):
return 1.0 * floatLiteral.value
def visitStringLiteral(self, stringLiteral):
return stringLiteral.value
def visitUnaryExpression(self, unaryExpression):
if (unaryExpression.command == "not"):
return not unaryExpression.expression.accept(self)
raise ValueError(
"UnaryExpression with command " + unaryExpression.command + " not yet implemented at interpreter level.")
def visitBinaryExpression(self, binaryExpression):
if binaryExpression.command == "and":
return binaryExpression.firstExpression.accept(self) and binaryExpression.secondExpression.accept(self)
elif binaryExpression.command == "or":
return binaryExpression.firstExpression.accept(self) or binaryExpression.secondExpression.accept(self)
elif binaryExpression.command == "+":
return binaryExpression.firstExpression.accept(self) + binaryExpression.secondExpression.accept(self)
elif binaryExpression.command == "-":
return binaryExpression.firstExpression.accept(self) - binaryExpression.secondExpression.accept(self)
elif binaryExpression.command == "*":
return binaryExpression.firstExpression.accept(self) * binaryExpression.secondExpression.accept(self)
elif binaryExpression.command == "/":
return binaryExpression.firstExpression.accept(self) / binaryExpression.secondExpression.accept(self)
raise ValueError("BinaryExpression with command " + binaryExpression.command + " not yet implemented.")
def visitIfExpression(self, ifExpression):
if ifExpression.conditionExpression.accept(self):
return ifExpression.thenExpression.accept(self)
else:
return ifExpression.elseExpression.accept(self)
def visitLetExpression(self, letExpression):
oldEnv = self.env
self.env = self.env.clone()
self.env.put(letExpression.symbol.value(), letExpression.valueExpression.accept(self))
ans = letExpression.thenExpression.accept(self)
self.env = oldEnv
return ans
def visitGetExpression(self, getExpression):
return self.env.get(getExpression.symbol.value())
def visitFunctionExpression(self, functionExpression):
return functionExpression
def visitApplyExpression(self, applyExpression):
functionExpression = applyExpression.functionExpression.accept(self)
arguments = []
for argument in applyExpression.argumentExpressions:
arguments.append(argument.accept(self))
oldEnv = self.env
self.env = Environment()
argumentsLength = len(arguments)
for i in range(argumentsLength):
self.env.put(functionExpression.parameterSymbols[i].value(), arguments[i])
ans = functionExpression.bodyExpression.accept(self)
self.env = oldEnv
return ans
```
#### File: 4 STLC + Security/src/TypeChecker.py
```python
from AST import *
from Types import *
from sexpdata import *
def _checkExpectedTypesOfValue(value, types):
for type in types:
if _isConsistentTypeOfValue(value, type):
return
raise ValueError(' or '.join(map(str, types)) + ' was expected.')
def _isConsistentTypeOfValue(value, type):
if isinstance(type, FunctionType):
if not isinstance(value, FunctionExpression):
return False
return _areConsistentTypes(value.securityType.type, type)
if isinstance(type, SecurityType):
return False
return isinstance(value, type)
def _areConsistentTypes(type1, type2):
if isinstance(type1, FunctionType) and isinstance(type2, FunctionType):
return _areConsistenFunctionTypes(type1, type2)
if isinstance(type1, SecurityType) and isinstance(type2, SecurityType):
return type1.securityLabel <= type2.securityLabel and _areConsistentTypes(type1.type, type2.type)
if isinstance(type1, FunctionType) or isinstance(type2, FunctionType) or isinstance(type1, SecurityType) or isinstance(type2, SecurityType):
return False
return type1 == type2
def _areConsistenFunctionTypes(functionType1, functionType2):
parameterLength1 = len(functionType1.parameterTypes)
parameterLength2 = len(functionType2.parameterTypes)
if parameterLength1 != parameterLength2 or not _areConsistentTypes(functionType2.returnType,
functionType1.returnType):
return False
for i in range(parameterLength1):
if not _areConsistentTypes(functionType1.parameterTypes[i], functionType2.parameterTypes[i]):
return False
return True
def _checkExpectedTypes(type, types):
for t in types:
if _areConsistentTypes(type, t):
return
raise ValueError(' or '.join(map(str, types)) + ' was expected.')
def typeCheck(ast):
return ast.accept(TypeChecker())
class TypeChecker:
def __init__(self):
self.env = Environment()
def visitBoolLiteral(self, boolLiteral):
_checkExpectedTypesOfValue(boolLiteral.value, [bool])
return SecurityType(bool, boolLiteral.securityLabel)
def visitIntLiteral(self, intLiteral):
_checkExpectedTypesOfValue(intLiteral.value, [int])
return SecurityType(int, intLiteral.securityLabel)
def visitFloatLiteral(self, floatLiteral):
_checkExpectedTypesOfValue(floatLiteral.value, [int, float])
return SecurityType(float, floatLiteral.securityLabel)
def visitStringLiteral(self, stringLiteral):
_checkExpectedTypesOfValue(stringLiteral.value, [str])
return SecurityType(str, stringLiteral.securityLabel)
def visitUnaryExpression(self, unaryExpression):
if unaryExpression.command == "not":
expressionType = unaryExpression.expression.accept(self)
_checkExpectedTypes(expressionType.type, [bool])
return expressionType
raise ValueError(
"UnaryExpression with command " + unaryExpression.command + " not yet implemented at typeChecker level.")
def visitBinaryExpression(self, binaryExpression):
firstExpressionType = binaryExpression.firstExpression.accept(self)
secondExpressionType = binaryExpression.secondExpression.accept(self)
securityLabel = SecurityLabel.join(firstExpressionType.securityLabel, secondExpressionType.securityLabel)
if binaryExpression.command == "and" or binaryExpression.command == "or":
_checkExpectedTypes(firstExpressionType.type, [bool])
_checkExpectedTypes(secondExpressionType.type, [bool])
return SecurityType(bool, securityLabel)
elif binaryExpression.command == "+" or binaryExpression.command == "-" or binaryExpression.command == "*" or binaryExpression.command == "/":
_checkExpectedTypes(firstExpressionType.type, [int, float])
_checkExpectedTypes(secondExpressionType.type, [int, float])
type = float if firstExpressionType.type == float or secondExpressionType.type == float else int
return SecurityType(type, securityLabel)
raise ValueError("BinaryExpression with command " + binaryExpression.command + " not yet implemented.")
def visitIfExpression(self, ifExpression):
condExpressionType = ifExpression.conditionExpression.accept(self)
_checkExpectedTypes(condExpressionType.type, [bool])
thenExpressionType = ifExpression.thenExpression.accept(self)
elseExpressionType = ifExpression.elseExpression.accept(self)
_checkExpectedTypes(elseExpressionType.type, [thenExpressionType.type])
return SecurityType(thenExpressionType.type, SecurityLabel.joinMultiple(
[condExpressionType.securityLabel, thenExpressionType.securityLabel, elseExpressionType.securityLabel]))
def visitLetExpression(self, letExpression):
oldEnv = self.env
self.env = self.env.clone()
self.env.put(letExpression.symbol.value(), letExpression.valueExpression.accept(self))
ans = letExpression.thenExpression.accept(self)
self.env = oldEnv
return ans
def visitGetExpression(self, getExpression):
return self.env.get(getExpression.symbol.value())
def visitFunctionExpression(self, functionExpression):
securityType = functionExpression.securityType
parametersLength = len(securityType.type.parameterTypes)
oldEnv = self.env
self.env = Environment()
for i in range(parametersLength):
symbol = functionExpression.parameterSymbols[i]
if not isinstance(symbol, Symbol):
raise ValueError('Each function parameter must be a symbol.')
self.env.put(functionExpression.parameterSymbols[i].value(), securityType.type.parameterTypes[i])
bodyExpressionType = functionExpression.bodyExpression.accept(self)
_checkExpectedTypes(bodyExpressionType, [securityType.type.returnType])
self.env = oldEnv
return securityType
def visitApplyExpression(self, applyExpression):
securityType = applyExpression.functionExpression.accept(self)
argumentTypes = []
for argument in applyExpression.argumentExpressions:
argumentTypes.append(argument.accept(self))
argumentsLength = len(argumentTypes)
if len(securityType.type.parameterTypes) != argumentsLength:
raise ValueError('Function length of parameters and arguments in apply do not match.')
for i in range(argumentsLength):
parameterType = securityType.type.parameterTypes[i]
argumentType = argumentTypes[i]
_checkExpectedTypes(argumentType, [parameterType])
return securityType.type.returnType
```
#### File: 6 STLC + Security + Dynamic Label/src/Types.py
```python
class FunctionType:
def __init__(self, returnType, parameterTypes):
self.returnType = returnType
self.parameterTypes = parameterTypes
def __str__(self):
return 'function ' + str(self.returnType) + ' [' + ', '.join(map(str, self.parameterTypes)) + ']'
class SecurityType:
def __init__(self, type, securityLabel):
self.type = type
self.securityLabel = securityLabel
def __str__(self):
return '(' + str(self.type) + ', ' + str(self.securityLabel) + ')'
class SecurityLabel:
lattice = {
'b': 0,
'l': 1,
'h': 2,
't': 3,
'?': -1,
}
def __init__(self, type):
self.type = type
self.value = SecurityLabel.lattice[type]
def __lt__(self, other):
return self.isDynamicLabel() or other.isDynamicLabel() or self.value < other.value
def __le__(self, other):
return self.isDynamicLabel() or other.isDynamicLabel() or self.value <= other.value
def __str__(self):
return self.type
def isDynamicLabel(self):
return self.value == SecurityLabel.lattice['?']
@staticmethod
def join(securityLabel1, securityLabel2):
if securityLabel1.isDynamicLabel():
return securityLabel1
if securityLabel2.isDynamicLabel():
return securityLabel2
return securityLabel1 if securityLabel1 >= securityLabel2 else securityLabel2
@staticmethod
def joinMultiple(securityLabels):
return reduce(SecurityLabel.join, securityLabels, SecurityLabel('b'))
@staticmethod
def meet(securityLabel1, securityLabel2):
if securityLabel1.isDynamicLabel():
return securityLabel1
if securityLabel2.isDynamicLabel():
return securityLabel2
return securityLabel1 if securityLabel1 <= securityLabel2 else securityLabel2
@staticmethod
def meetMultiple(securityLabels):
return reduce(SecurityLabel.meet, securityLabels, SecurityLabel('t'))
class SecurityValue:
def __init__(self, value, securityLabel):
self.value = value
self.securityLabel = securityLabel
def __str__(self):
return '(' + str(self.value) + ', ' + str(self.securityLabel) + ')'
def __add__(self, other):
return SecurityValue(self.value + other.value, SecurityLabel.join(self.securityLabel, other.securityLabel))
def __mul__(self, other):
return SecurityValue(self.value * other.value, SecurityLabel.join(self.securityLabel, other.securityLabel))
def __sub__(self, other):
return SecurityValue(self.value - other.value, SecurityLabel.join(self.securityLabel, other.securityLabel))
def __mod__(self, other):
return SecurityValue(self.value % other.value, SecurityLabel.join(self.securityLabel, other.securityLabel))
def __div__(self, other):
return SecurityValue(self.value / other.value, SecurityLabel.join(self.securityLabel, other.securityLabel))
def __lt__(self, other):
return SecurityValue(self.value < other.value, SecurityLabel.join(self.securityLabel, other.securityLabel))
def __le__(self, other):
return SecurityValue(self.value <= other.value, SecurityLabel.join(self.securityLabel, other.securityLabel))
def __eq__(self, other):
return SecurityValue(self.value == other.value, SecurityLabel.join(self.securityLabel, other.securityLabel))
def __ne__(self, other):
return SecurityValue(self.value != other.value, SecurityLabel.join(self.securityLabel, other.securityLabel))
def __gt__(self, other):
return SecurityValue(self.value > other.value, SecurityLabel.join(self.securityLabel, other.securityLabel))
def __ge__(self, other):
return SecurityValue(self.value >= other.value, SecurityLabel.join(self.securityLabel, other.securityLabel))
def boolAnd(self, other):
return SecurityValue(self.value and other.value, SecurityLabel.join(self.securityLabel, other.securityLabel))
def boolOr(self, other):
return SecurityValue(self.value or other.value, SecurityLabel.join(self.securityLabel, other.securityLabel))
def boolNot(self):
return SecurityValue(not self.value, self.securityLabel)
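# A minimal sketch, not in the original file: the label lattice in action,
# with b < l < h < t and '?' acting as the unknown/dynamic label.
def _example_lattice():
    low, high, dyn = SecurityLabel('l'), SecurityLabel('h'), SecurityLabel('?')
    print(SecurityLabel.join(low, high)) # h
    print(SecurityLabel.meet(low, high)) # l
    print(SecurityLabel.join(low, dyn)) # ?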
``` |
{
"source": "j-paulus/grrrmin_heatmap",
"score": 2
} |
#### File: j-paulus/grrrmin_heatmap/grrrmin_heatmap.py
```python
import sys
import sqlite3 # interface GarminDB
import datetime
import dateutil # tcx time zone parsing
from contextlib import closing # context manager with automatic closing for the db
from pathlib import Path # home directory
import os # file name handling
import glob # listing files
import argparse # command line handling
from typing import Optional, Tuple, List, Any
import numpy as np
import matplotlib # colormap
# basemap
import contextily as ctx
import xyzservices
import xyzservices.providers as xyz
# coordinate transforms
import pyproj
from pyproj.transformer import Transformer
from tqdm import tqdm # progress bar
# plotting routines
from PIL import Image, ImageDraw
import fitparse # .fit file support
import gpxpy # .gpx file support
import gpxpy.gpx
import tcxparser # .tcx file support
__version__ = '0.3.7'
geod_conv = pyproj.Geod(ellps='WGS84')
def get_activities_from_db(sport_name: str='steps',
target_year: Optional[List[int]]=None,
garmin_db: Optional[str]=None,
verbosity: int=1) \
-> Tuple[List[List[Tuple[float, float]]], float]:
"""
Load requested activities from GarminDB SQLite database.
Parameters
----------
sport_name : string in {'cycling', 'all', 'running', 'hiking', 'walking', 'steps'}
select a specific activity type
target_year : list of int, None, optional
specify the years from which the data should be plotted
garmin_db : string, None, optional
alternative path to the SQLite DB
verbosity : int, optional
printout verbosity
Returns
-------
list of lists of points : activities
float : total distance in km
"""
if garmin_db is None:
# default path to GarminDB database file
garmin_db = '{}/HealthData/DBs/garmin_activities.db'.format(Path.home())
steps_template = r'''SELECT activities.activity_id, activities.name, activities.description, activities.start_time,
activities.stop_time, activities.elapsed_time, ROUND(activities.distance, 1)
FROM steps_activities
JOIN activities ON activities.activity_id = steps_activities.activity_id {act_filter}
ORDER BY activities.start_time ASC'''
cycle_query = r'''SELECT activities.activity_id, activities.name, activities.description, activities.start_time,
activities.stop_time, activities.elapsed_time, ROUND(activities.distance, 1)
FROM activities
WHERE activities.sport == "cycling"
OR activities.sport == "Biking"
ORDER BY activities.start_time ASC'''
all_activities_query = r'''SELECT activities.activity_id, activities.name, activities.description, activities.start_time,
activities.stop_time, activities.elapsed_time, ROUND(activities.distance, 1)
FROM activities
ORDER BY activities.start_time ASC'''
if sport_name == 'cycling':
act_query = cycle_query
elif sport_name == 'all':
act_query = all_activities_query
elif sport_name == 'running':
act_filter = 'WHERE Activities.sport == "running"'
act_query = steps_template.format(act_filter=act_filter)
elif sport_name == 'hiking':
act_filter = 'WHERE Activities.sport == "hiking"'
act_query = steps_template.format(act_filter=act_filter)
elif sport_name == 'walking':
act_filter = 'WHERE Activities.sport == "walking"'
act_query = steps_template.format(act_filter=act_filter)
else: # sport_name == 'steps':
act_filter = ''
pic_tag = 'steps'
act_query = steps_template.format(act_filter=act_filter)
with closing(sqlite3.connect(garmin_db)) as db_conn: # this closes the connection after finishing
c = db_conn.cursor()
# get all activities
act_id_list = [] # list to store activity_id keys
act_time_list = []
act_dist_list = []
c.execute(act_query)
for one_row in c:
act_id = one_row[0]
act_date = one_row[3]
act_dist = one_row[6]
if (act_date is None) or (act_dist is None) or (act_id is None):
# skip
continue
act_id_list.append(act_id)
act_time_list.append(datetime.datetime.strptime(act_date, '%Y-%m-%d %H:%M:%S.%f'))
act_dist_list.append(act_dist)
# for each activity in the list, fetch the points
act_ite = tqdm(zip(act_id_list, act_time_list, act_dist_list), total=len(act_id_list), disable=(verbosity == 0))
act_ite.set_description('Activities...')
all_paths = []
total_dist = 0.0
for act_id, act_time, act_dist in act_ite:
# are we within the given time range
if (target_year is None) or (len(target_year) == 0) or (act_time.year in target_year):
total_dist += act_dist
c.execute('SELECT activity_records.activity_id, activity_records.timestamp, activity_records.position_lat, activity_records.position_long, activity_records.altitude FROM activity_records WHERE activity_records.activity_id = (?) ORDER BY activity_records.timestamp ASC', (act_id,))
# collect all points of this activity into a list
this_points = []
for one_point in c:
this_lat = one_point[2]
this_lon = one_point[3]
if (this_lat is not None) and (this_lon is not None):
this_points.append((this_lat, this_lon))
all_paths.append(this_points)
return all_paths, total_dist
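# A minimal usage sketch, not in the original file: typical use of the DB reader.
# It assumes the default GarminDB SQLite file under ~/HealthData/DBs/ exists.
# paths, dist_km = get_activities_from_db(sport_name='running', target_year=[2022])
# print(len(paths), 'activities,', round(dist_km, 1), 'km')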
def get_activities_from_dir(path_str: str,
target_year: Optional[List[int]]=None,
verbosity: int=1) \
-> Tuple[List[List[Tuple[float, float, float]]], float]:
"""
Check recursively all files in the given directory and if they are
.fit/.gpx/.tcx, load the activities from them. This may be somewhat slow.
Parameters
----------
path_str : string
path from which all the files are checked. run recursively into all subdirectories
target_year : list of int, None, optional
list of target years for filtering the plotted activities
verbosity : int, optional
printout verbosity
Returns
-------
list of lists of points : activities (lat, lon, alt): lat/lon in decimal WSG84 degrees, alt in meters
float : total distance in km
"""
def semi2deg(x: float) -> float:
"""
Convert "semicircle" units to decimal degrees.
"""
return x * 180.0 / (2.0**31)
# list all files
all_files = glob.glob(os.path.join(path_str, '**/*.*'), recursive=True)
# collected info
all_activities = []
total_dist = 0.0
for one_name in tqdm(all_files,
total=len(all_files),
desc='Checking files',
unit=' files',
disable=(verbosity == 0)):
full_name = os.path.join(path_str, one_name)
# check for supported file extensions
base_str, ext_str = os.path.splitext(full_name)
if os.path.isfile(full_name) and (ext_str.lower() == '.fit'):
# try to parse the .fit file
try:
fitfile = fitparse.FitFile(full_name)
fitfile.parse()
# retrieve the activity type, even though this is not used right now
this_activity_type = None
for sports in fitfile.get_messages('sport'):
this_activity_type = sports.get_value('sport')
this_act = []
act_dist = 0.0
act_time = None
# get all data messages that are of type record
for one_rec in fitfile.get_messages('record'):
this_lat = one_rec.get_value('position_lat')
this_lon = one_rec.get_value('position_long')
this_dist = one_rec.get_value('distance')
this_altitude = one_rec.get_value('altitude')
# convert the coordinates from the semicircle to decimal degrees
if (this_lat is not None) and (this_lon is not None):
this_act.append((semi2deg(this_lat), semi2deg(this_lon), this_altitude))
if this_dist is not None:
act_dist = this_dist
if act_time is None:
this_time = one_rec.get_value('timestamp')
if this_time is not None:
act_time = this_time
# activity date -based filtering
if (act_time is None) or (target_year is None) or (len(target_year) == 0) or (act_time.year in target_year):
all_activities.append(this_act)
total_dist += act_dist
except fitparse.FitParseError as e:
if verbosity > 0:
print('ERROR: Could not parse file "{}". Error: {}'.format(full_name, e))
elif os.path.isfile(full_name) and (ext_str.lower() == '.gpx'):
# try to parse the .gpx file
with open(full_name, 'r') as gpx_file:
gpx = gpxpy.parse(gpx_file)
act_dist = 0.0
act_time = None
this_act = []
for one_track in gpx.tracks:
act_dist = one_track.length_3d() # 3D length in meters
act_time = one_track.get_time_bounds()[0] # starting time
for tmp_data in one_track.walk():
one_point = tmp_data[0]
this_act.append((one_point.latitude, one_point.longitude, one_point.elevation))
# activity date -based filtering
if (act_time is None) or (target_year is None) or (len(target_year) == 0) or (act_time.year in target_year):
all_activities.append(this_act)
total_dist += act_dist
act_dist = 0.0
act_time = None
this_act = []
for one_route in gpx.routes:
act_dist = one_route.length_3d()
act_time = one_track.get_time_bounds()[0] # starting time
for tmp_data in one_route.walk():
one_point = tmp_data[0]
this_act.append((one_point.latitude, one_point.longitude, one_point.elevation))
# activity date -based filtering
if (act_time is None) or (target_year is None) or (len(target_year) == 0) or (act_time.year in target_year):
all_activities.append(this_act)
total_dist += act_dist
elif os.path.isfile(full_name) and (ext_str.lower() == '.tcx'):
# try to parse the .tcx file
tcx_data = tcxparser.TCXParser(full_name)
this_activity_type = tcx_data.activity_type # could be used for filtering activity type, but not done now
act_dist = tcx_data.distance
act_time = dateutil.parser.isoparse(tcx_data.started_at)
this_act = tcx_data.position_values() # list of (lat, lon) tuples
this_altitudes = tcx_data.altitude_points() # list of floats
if len(this_act) == len(this_altitudes):
this_act_w_alt = [one_act + (one_alt,) for one_act, one_alt in zip(this_act, this_altitudes)]
else:
# if the two lists have different lengths, rely on the location data only
this_act_w_alt = [one_act + (None,) for one_act in this_act]
# activity date -based filtering
if (act_time is None) or (target_year is None) or (len(target_year) == 0) or (act_time.year in target_year):
all_activities.append(this_act_w_alt)
total_dist += act_dist
return all_activities, total_dist / 1000.0
def get_year_range(year_list: List[int]) -> str:
"""
Transform a year list into a textual representation shortening consecutive values.
E.g., get_year_range([1999, 2000, 2001, 2004]) => '1999-2001_2004'
Adapted from <https://stackoverflow.com/a/48106843>
Parameters
----------
year_list : list of int
Returns
-------
string
"""
if (year_list is None) or (len(year_list) == 0):
return 'all'
elif len(year_list) == 1:
return '{}'.format(year_list[0])
else:
nums = sorted(set(year_list))
gaps = [[s, e] for s, e in zip(nums, nums[1:]) if s+1 < e]
edges = iter(nums[:1] + sum(gaps, []) + nums[-1:])
range_list = [(s, e) for s, e in zip(edges, edges)]
out_str = ''
for r_idx, one_range in enumerate(range_list):
if r_idx == 0:
out_str = '{}'.format(one_range[0])
else:
out_str = '{}_{}'.format(out_str, one_range[0])
if one_range[0] != one_range[1]:
out_str = '{}-{}'.format(out_str, one_range[1])
return out_str
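# Hand-worked examples of the range compression above (added for clarity):
# get_year_range([]) -> 'all'
# get_year_range([2020]) -> '2020'
# get_year_range([2018, 2019, 2020, 2022]) -> '2018-2020_2022'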
def run_plotting(args: argparse.Namespace) -> None:
"""
Main plotting function
Parameters
----------
args : Namespace with fields:
bounding_box : None, 4-list/tuple of floats
define image bounding box in decimal WGS84: n, e, s, w. None for automatic
bb_percentile : float
in range 0..1, when determining the bounding box from the data, use bb_percentile
and 1-bb_percentile quantiles as the limits to filter outliers. 0 for min/max
zoom_level: None, int
None for automatic zoom level, otherwise the given int
sport : string in 'cycling, 'running', 'hiking', 'walking', 'steps'
activity type to plot
basemap_provider : string, None
Contextily basemap provider name string
img_width : int
if basemap_provider == None, width of the blank image
track_colormap : string
matplotlib colormap name
max_point_dist : float, None
if not None and consecutive track points are further than
this, the track is split into two
year : list of ints
list of years to plot, e.g., [2019, 2020]
do_gif : bool
if True, create a frame-per-activity animation
fps : float
FPS of the created animation
start_center : None, 2-tuple of float
only tracks starting near ("start_max_dist") this point (lat, lon, in decimal WGS84) are plotted
start_max_dist : float
only track starting within this radius (in meters) from "start_center" are plotted
verbosity : int
progress printout verbosity level
"""
do_start_filter = (args.start_center is not None) and (args.start_max_dist is not None)
if args.bounding_box is not None: # n, e, s, w
max_lon, max_lat, min_lon, min_lat = args.bounding_box
else:
# determine from data
all_lat = []
all_lon = []
if args.zoom_level is None:
zoom_level = 'auto'
else:
zoom_level = args.zoom_level
if (args.sport is None) or (args.sport.lower() not in {'cycling', 'all', 'running', 'walking'}):
pic_tag = 'steps'
else:
pic_tag = args.sport
if args.input_dir is not None:
all_activities, total_dist = get_activities_from_dir(args.input_dir, target_year=args.year, verbosity=args.verbosity)
else:
all_activities, total_dist = get_activities_from_db(sport_name=args.sport, target_year=args.year, garmin_db=None, verbosity=args.verbosity)
all_paths = []
all_lat = []
all_lon = []
for act_idx, this_points in tqdm(enumerate(all_activities),
desc='Filtering points',
unit=' activities',
disable=(args.verbosity == 0)):
# create a path from the points
if len(this_points) > 1:
path_points = []
# starting point -based filtering active and first point in activity
if do_start_filter:
one_point = this_points[0]
start_az1, start_az2, start_dist = geod_conv.inv(args.start_center[1], args.start_center[0],
one_point[1], one_point[0])
if start_dist > args.start_max_dist:
# too far from the target starting location => skip the entire activity
if args.verbosity > 1:
print('WARNING: Activity starting location {:.1f} m (>{:.1f} m) from the defined start location, skipping.'.format(start_dist,
args.start_max_dist))
continue # skip to next activity
# distance-based filtering
if args.max_point_dist is not None:
prev_point = (None, None)
for point_idx, one_point in enumerate(this_points):
if args.bounding_box is None:
all_lat.append(one_point[0])
all_lon.append(one_point[1])
if prev_point[0] is None:
path_points.append(one_point)
else:
# long/lat pairs to azimuths and distance in meters
az1, az2, dist = geod_conv.inv(prev_point[1], prev_point[0], one_point[1], one_point[0])
if dist < args.max_point_dist:
path_points.append(one_point)
else:
# too large distance between two points => discard
if args.verbosity > 1:
print('WARNING: Track segment detached due to distance {:.1f}m exceeding the threshold of {:.1f}m.'.format(dist, args.max_point_dist))
# start a new path
all_paths.append(path_points)
path_points = [one_point]
prev_point = one_point
else:
# no distance filtering => use as-is
path_points = this_points.copy()
if args.bounding_box is None:
for one_point in this_points:
all_lat.append(one_point[0])
all_lon.append(one_point[1])
all_paths.append(path_points)
if len(all_paths) == 0:
if args.verbosity > 0:
print('WARNING: No matching activities found.')
sys.exit()
if args.bounding_box is None:
lat_array = np.array(all_lat)
lon_array = np.array(all_lon)
lat_quants = np.quantile(lat_array, (args.bb_percentile, 1.0-args.bb_percentile))
lon_quants = np.quantile(lon_array, (args.bb_percentile, 1.0-args.bb_percentile))
min_lat = lat_quants[0]
max_lat = lat_quants[1]
min_lon = lon_quants[0]
max_lon = lon_quants[1]
if args.verbosity > 0:
print('INFO: Total activity distance: {:.2f}km'.format(total_dist))
print('INFO: Using lat range: {:.3f} - {:.3f}, and lon range: {:.3f} - {:.3f}.'.format(min_lat, max_lat, min_lon, max_lon))
if zoom_level == 'auto':
# the default zoom level
zoom_level = ctx.tile._calculate_zoom(w=min_lon, s=min_lat, e=max_lon, n=max_lat)
if args.verbosity > 1:
print('INFO: Using zoom level {}.'.format(zoom_level))
# from WGS84 to Spherical Mercator used by contextily
crs_trans = Transformer.from_crs('EPSG:4326', 'EPSG:3857', always_xy=True)
# fetch the basemap including the specified bounding box region
if args.verbosity > 0:
print('INFO: Fetching basemap...')
if args.basemap_provider is None:
# no map, but blank background
# transformer input: (x,y) -> (lon, lat)
min_point = crs_trans.transform(min_lon, min_lat)
max_point = crs_trans.transform(max_lon, max_lat)
imshow_extent = [min_point[0], max_point[0], min_point[1], max_point[1]] # [minX, maxX, minY, maxY]
range_lon = max_point[0] - min_point[0]
range_lat = max_point[1] - min_point[1]
img_height = args.img_width
img_width = int(img_height / float(range_lat) * range_lon)
basemap_img = np.zeros((img_height, img_width, 3), dtype=np.uint8)
basemap_attr = None
else:
basemap_src = get_basemap_provider(args.basemap_provider)
basemap_img, imshow_extent = ctx.bounds2img(w=min_lon, s=min_lat, e=max_lon, n=max_lat,
zoom=zoom_level, ll=True, source=basemap_src)
basemap_attr = basemap_src['attribution']
if args.track_colormap is None:
# two default colormaps
if (args.basemap_provider is None) or (args.basemap_provider == 'CartoDB.DarkMatter'):
track_cmap = 'plasma' # ok with CartoDB.DarkMatter
else:
track_cmap = 'autumn' # works ok with Esri.WorldImagery, use also for others
else:
# user-defined colormap
track_cmap = args.track_colormap
# from RGB to RGBA
zero_alpha = 255 * np.ones((basemap_img.shape[0], basemap_img.shape[1], 1), dtype=np.uint8)
if basemap_img.shape[2] == 3:
# add alpha channel
basemap_image = Image.fromarray(np.concatenate((basemap_img, zero_alpha), axis=-1))
else:
# replace alpha channel
basemap_img[:, :, -1] = zero_alpha[:, :, 0]
basemap_image = Image.fromarray(basemap_img)
# add attribution
basemap_draw = ImageDraw.Draw(basemap_image)
basemap_draw.text((5, 5), 'Created with grrrmin_heatmap.py' + (basemap_attr is not None)*'\nUsing Contextily basemap:\n{}'.format(basemap_attr))
# a function to transform geographical coordinates to PIL coordinates: (0,0) upper left corner. (x, y)
def coord_to_pixel(lat, lon):
new_lat = (1.0 - (lat - imshow_extent[2]) / (imshow_extent[3] - imshow_extent[2])) * basemap_image.height
new_lon = (lon - imshow_extent[0]) / (imshow_extent[1] - imshow_extent[0]) * basemap_image.width
return new_lat, new_lon
# transform point from WGS84 to pixels
for path_idx, one_path in enumerate(all_paths):
for point_idx, one_point in enumerate(one_path):
# transform the points into coordinate system used by the basemap
# transformer input: (x,y) -> (lon, lat)
one_point = crs_trans.transform(one_point[1], one_point[0])
one_path[point_idx] = coord_to_pixel(one_point[1], one_point[0])[::-1]
all_paths[path_idx] = one_path
## plotting
# loop through paths one-by-one
path_sum = None
n_paths = len(all_paths)
if args.do_gif:
all_frames = []
# the paths are plotted on this dummy image
path_image = Image.new('RGBA', (basemap_image.width, basemap_image.height), color=(0, 0, 0, 0)) # (width, height)
path_canvas = ImageDraw.Draw(path_image)
h = path_image.height
w = path_image.width
# plot each path
plot_ite = tqdm(enumerate(all_paths), total=n_paths, unit=' activities', disable=(args.verbosity == 0))
plot_ite.set_description('Plotting...')
year_str = get_year_range(args.year)
out_name_base = 'grrrmin_heatmap_{}_{}'.format(pic_tag, year_str)
for path_idx, one_path in plot_ite:
# PIL.Image approach
path_canvas.line(xy=one_path, fill='black', width=args.line_width, joint='curve')
img = np.array(path_image)
if path_sum is None:
path_sum = np.zeros((h, w), dtype=np.float32)
# binary mapping from alpha channel
path_sum[img[:, :, 3] > 128] += 1.0
# erase the path
draw = ImageDraw.Draw(path_image)
draw.rectangle([(0,0), path_image.size], fill=(0, 0, 0, 0))
if args.do_gif or (path_idx == n_paths - 1):
comp_sum = np.log2(1.0 + 1.0*path_sum)
# from a single float matrix to RGBA
comp_rgba = matplotlib.cm.ScalarMappable(norm=None, cmap=track_cmap).to_rgba(comp_sum, alpha=None, bytes=True, norm=True) # to uint8
# set alpha channel to 0 if no path occupies the pixel
comp_rgba[..., 3] = 255
comp_rgba[path_sum < 1.0, 3] = 0
path_sum_image = Image.fromarray(comp_rgba)
# overlay sum path on the basemap
out_image = Image.alpha_composite(basemap_image, path_sum_image)
if args.do_gif:
all_frames.append(out_image)
else:
out_file_name = '{}.png'.format(out_name_base)
out_image.save(out_file_name)
if args.do_gif:
all_frames[0].save('{}.gif'.format(out_name_base),
save_all=True,
append_images=all_frames[1:],
loop=False,
duration=1000.0 / args.fps) # PIL expects the frame duration in milliseconds
##
def list_basemap_providers() -> None:
"""
Print all map tile providers from Contextily
"""
print('INFO: Supported background map tile providers:')
all_providers = list(xyz.flatten().keys())
all_providers.sort()
for one_prov_name in all_providers:
one_prov = get_basemap_provider(one_prov_name)
requires_key = one_prov.requires_token()
print('{} (API key required: {})'.format(one_prov_name, requires_key))
# basemap
def get_basemap_provider(provider_str: str) -> Optional[xyzservices.TileProvider]:
"""
Take a string representing the desired Contextily basemap provider,
e.g., "Esri.WorldImagery", parse it, and provide the provider
instance xyzservices.providers.Esri.WorldImagery, if found or None.
"""
b_provider = None
try:
b_provider = xyz.query_name(provider_str)
except ValueError:
print('ERROR: Unsupported basemap provider "{}" requested.'.format(provider_str))
return b_provider
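# A minimal usage sketch, not in the original file:
# provider = get_basemap_provider('Esri.WorldImagery')
# if provider is not None:
#     print(provider['attribution'])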
##
def main(argv: List[str]) -> None:
"""
Top level input argument parsing.
Parameters
----------
argv : list of string
"""
argparser = argparse.ArgumentParser()
argparser.add_argument('--sport',
action='store',
type=str,
default='all',
choices=('steps', 'running', 'walking', 'hiking', 'cycling', 'all'),
help='sport type filter')
argparser.add_argument('--year',
action='store',
type=int,
default=[],
nargs='+',
help='years to plot')
argparser.add_argument('--bounding_box',
action='store',
type=float,
default=None,
nargs=4,
help='output image bounding box in decimal WGS84: n, e, s, w')
argparser.add_argument('--start_center',
action='store',
type=float,
default=None,
nargs=2,
help='only tracks starting near ("start_max_dist") this point (lat, lon, in decimal WGS84) are plotted')
argparser.add_argument('--start_max_dist',
action='store',
type=float,
default=500.0,
help='only tracks starting within this radius (in meters) from "start_center" are plotted')
argparser.add_argument('--bb_percentile',
action='store',
type=float,
default=0.01,
help='when determining bounding box from data use percentiles '
'bb_percentile and 1-bb_percentile. range: 0..1. to use '
'min/max, set to 0')
argparser.add_argument('--zoom_level',
action='store',
type=int,
default=None,
help='zoom level (larger value mean finer details). leave empty for automatic')
argparser.add_argument('--line_width',
action='store',
type=int,
default=3,
help='plotting line width in pixels')
argparser.add_argument('--max_point_dist',
action='store',
type=float,
default=200.0,
help='meters or None. for filtering missing data. if distance between consecutive points is larger, the line is broken into two')
argparser.add_argument('--do_gif',
action='store_true',
help='save as frame-per-activity animation')
argparser.add_argument('--fps',
action='store',
type=float,
default=12,
help='animation speed as FPS')
argparser.add_argument('--basemap_provider',
action='store',
type=str,
default='CartoDB.DarkMatter',
help='Contextily basemap provider string, e.g., "OpenTopoMap", "HikeBike.HikeBike", "CartoDB.DarkMatter, Esri.WorldImagery", "None" (for blank)')
argparser.add_argument('--img_width',
action='store',
type=int,
default=1080,
help='when not using a background map image width in pixels (height is computed from data)')
argparser.add_argument('--track_colormap',
action='store',
type=str,
default=None,
help='(matplotlib) colormap to use for track plotting')
argparser.add_argument('--list_providers',
action='store_true',
help='list basemap tile providers and exit')
argparser.add_argument('--input_dir',
action='store',
type=str,
default=None,
help='directory-based data input: load all .fit and .gpx files here and in sub-directories')
argparser.add_argument('--verbosity',
action='store',
type=int,
default=1,
help='message output verbosity. 0: silent, 1: default, 2: more info')
args = argparser.parse_args(argv)
if args.basemap_provider.lower() == 'None'.lower():
args.basemap_provider = None
if args.list_providers:
list_basemap_providers()
else:
run_plotting(args)
## entry point
if __name__ == '__main__':
main(sys.argv[1:])
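# Minimal command-line sketches (paths and years below are placeholders):
# python grrrmin_heatmap.py --sport running --year 2021 2022 --basemap_provider CartoDB.DarkMatter
# python grrrmin_heatmap.py --input_dir ~/Activities --year 2022 --do_gif
# python grrrmin_heatmap.py --list_providers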
``` |
{
"source": "jpauwels/hrtfdata",
"score": 2
} |
#### File: hrtfdata/hrtfdata/display.py
```python
import matplotlib.pyplot as plt
import numpy as np
def plot_hrtf_plane(hrtf, angles, angles_label, frequencies, log_freq=False, ax=None, cmap='gray', continuous=False, vmin=None, vmax=None, colorbar=True):
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.figure
mesh = ax.pcolormesh(angles, frequencies/1000, hrtf, shading='gouraud' if continuous else 'nearest', cmap=cmap, vmin=vmin, vmax=vmax)
if colorbar:
fig.colorbar(mesh, ax=ax)
ax.set_xlabel(angles_label)
if log_freq:
ax.set_yscale('log')
ax.set_ylim([frequencies[1]/1000, frequencies[-1]/1000])
ax.set_ylabel('frequency [kHz]')
return ax
def plot_hrir_plane(hrir, angles, angles_label, sample_rate, ax=None, cmap='gray', continuous=False, vmin=None, vmax=None, colorbar=True):
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.figure
times = np.arange(0, hrir.shape[0]*1000/sample_rate, 1000/sample_rate)
mesh = ax.pcolormesh(angles, times, hrir, shading='gouraud' if continuous else 'nearest', cmap=cmap, vmin=vmin, vmax=vmax)
if colorbar:
fig.colorbar(mesh, ax=ax)
ax.set_xlabel(angles_label)
ax.set_ylabel('time [ms]')
return ax
def plot_plane_angles(angles, min_angle, max_angle, closed_open_angles, radius, zero_location, direction, ax=None):
if ax is None:
_, ax = plt.subplots(subplot_kw={'projection': 'polar'})
ax.plot(np.deg2rad(angles), np.full(len(angles), radius), 'ko')
ax.set_rmax(radius * 1.2)
ax.set_rticks([]) # no radial ticks
ax.grid(False)
if closed_open_angles:
angular_ticks = np.linspace(min_angle, max_angle, 8, endpoint=False)
else:
angular_ticks = np.flip(np.linspace(max_angle, min_angle, 8, endpoint=False))
ax.set_xticks(np.deg2rad(angular_ticks))
ax.set_thetamin(min_angle)
ax.set_thetamax(max_angle)
ax.set_theta_direction(direction)
ax.set_theta_zero_location(zero_location)
return ax
```
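A minimal usage sketch of the plotting helpers above, using synthetic data; the module path is inferred from the file layout, and the array shape follows the `pcolormesh` convention (frequencies along the first axis, angles along the second):
```python
import numpy as np
import matplotlib.pyplot as plt
from hrtfdata.display import plot_hrtf_plane  # module path assumed from the layout above

# synthetic magnitude spectra: 129 frequency bins x 72 azimuths
frequencies = np.linspace(0, 24000, 129)
angles = np.arange(0, 360, 5)
rng = np.random.default_rng(0)
hrtf = rng.uniform(-40, 0, size=(len(frequencies), len(angles)))

ax = plot_hrtf_plane(hrtf, angles, 'azimuth [deg]', frequencies, log_freq=True, cmap='viridis')
ax.set_title('synthetic HRTF magnitude')
plt.show()
```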
#### File: hrtfdata/torch/full.py
```python
from ..datapoint import DataPoint, SofaSphericalDataPoint, CipicDataPoint, AriDataPoint, ListenDataPoint, BiLiDataPoint, ItaDataPoint, HutubsDataPoint, RiecDataPoint, ChedarDataPoint, WidespreadDataPoint, Sadie2DataPoint, ThreeDThreeADataPoint, SonicomDataPoint
import warnings
from pathlib import Path
from typing import Any, Callable, List, Iterable, Optional, TypeVar, Dict, IO, Tuple, Iterator
import numpy as np
from PIL.Image import Image, LANCZOS
from torch.utils.data import Dataset as TorchDataset
from torchvision.transforms import ToTensor
# from torchvision.datasets.utils import check_integrity, download_and_extract_archive
class HRTFDataset(TorchDataset):
def __init__(
self,
datapoint: DataPoint,
feature_spec: Dict,
target_spec: Optional[Dict] = None,
group_spec: Optional[Dict] = None,
subject_ids: Optional[Iterable[int]] = None,
subject_requirements: Optional[Dict] = None,
image_transform: Optional[Callable] = ToTensor(),
measurement_transform: Optional[Callable] = None,
hrir_transform: Optional[Callable] = None,
# download: bool = True,
) -> None:
super().__init__()#root, transform=transform, target_transform=target_transform) # torchvision dataset
self._image_transform = image_transform
self._measurement_transform = measurement_transform
self._hrir_transform = hrir_transform
self._query = datapoint.query
# if download:
# self.download()
# if not self._check_integrity():
# raise RuntimeError('Dataset not found or corrupted.' +
# ' You can use download=True to download it')
if target_spec is None:
target_spec = {}
if group_spec is None:
group_spec = {}
self._specification = {**feature_spec, **target_spec, **group_spec}
if not self._specification:
raise ValueError('At least one specification should not be empty')
if subject_requirements is not None:
self._specification = {**self._specification, **subject_requirements}
ear_ids = self._query.specification_based_ids(self._specification, include_subjects=subject_ids)
if len(ear_ids) == 0:
if len(self._query.specification_based_ids(self._specification)) == 0:
raise ValueError('Empty dataset. Check if its configuration and paths are correct.')
self.subject_ids = tuple()
self.hrir_samplerate = None
self.hrtf_frequencies = None
self._features = []
self._targets = []
self._groups = []
self._selected_angles = {}
self.row_angles = np.array([])
self.column_angles = np.array([])
return
self.subject_ids, _ = zip(*ear_ids)
if 'hrirs' in self._specification.keys():
self._selected_angles, row_indices, column_indices = datapoint.hrir_angle_indices(
self.subject_ids[0],
self._specification['hrirs'].get('row_angles'),
self._specification['hrirs'].get('column_angles')
)
self.row_angles = np.array(list(self._selected_angles.keys()))
self.column_angles = np.ma.getdata(list(self._selected_angles.values())[0])
side = self._specification['hrirs'].get('side', '')
if side.startswith('both-'):
if isinstance(datapoint, SofaSphericalDataPoint):
# mirror azimuths/rows
start_idx = 1 if np.isclose(self.row_angles[0], -180) else 0
if not np.allclose(self.row_angles[start_idx:], -np.flip(self.row_angles[start_idx:])):
raise ValueError(f'Only datasets with symmetric azimuths can use {side} sides.')
else:
# mirror laterals/columns
if not np.allclose(self.column_angles, -np.flip(self.column_angles)):
raise ValueError(f'Only datasets with symmetric lateral angles can use {side} sides.')
else:
self._selected_angles = {}
self.row_angles = np.array([])
self.column_angles = np.array([])
self.hrir_samplerate = datapoint.hrir_samplerate(self.subject_ids[0])
self.hrtf_frequencies = datapoint.hrtf_frequencies(self.subject_ids[0])
self._features: Any = []
self._targets: Any = []
self._groups: Any = []
for subject, side in ear_ids:
for spec, store in (feature_spec, self._features), (target_spec, self._targets), (group_spec, self._groups):
subject_data = {}
if 'images' in spec.keys():
subject_data['images'] = datapoint.pinna_image(subject, side=side, rear=spec['images'].get('rear', False))
if 'anthropometry' in spec.keys():
subject_data['anthropometry'] = datapoint.anthropomorphic_data(subject, side=side, select=spec['anthropometry'].get('select', None))
if 'hrirs' in spec.keys():
subject_data['hrirs'] = datapoint.hrir(subject, side=side, domain=spec['hrirs'].get('domain', 'time'), row_indices=row_indices, column_indices=column_indices)
if 'subject' in spec.keys():
subject_data['subject'] = subject
if 'side' in spec.keys():
subject_data['side'] = side
if 'collection' in spec.keys():
subject_data['collection'] = datapoint.dataset_id
store.append(subject_data)
def __len__(self):
return len(self._features)
def __getitem__(self, idx):
def get_single_item(features, target, group):
# unify both to simplify on-demand transforms
characteristics = {**features, **target, **group}
if 'images' in characteristics:
width, height = characteristics['images'].size
resized_im = characteristics['images'].resize((32, 32), resample=LANCZOS, box=(width//2-128, height//2-128, width//2+128, height//2+128)).convert('L')
if self._image_transform:
resized_im = self._image_transform(resized_im)
# resized_im = resized_im.transpose((1, 2, 0)) # convert to HWC
characteristics['images'] = resized_im
if 'anthropometry' in characteristics and self._measurement_transform:
characteristics['anthropometry'] = self._measurement_transform(characteristics['anthropometry'])
if 'hrirs' in characteristics and self._hrir_transform:
characteristics['hrirs'] = self._hrir_transform(characteristics['hrirs'])
def shape_data(keys):
if len(keys) == 0:
return np.array([])
if len(keys) == 1:
return characteristics[list(keys)[0]]
return tuple(characteristics[k] for k in keys)
return {
'features': shape_data(features.keys()),
'target': shape_data(target.keys()),
'group': shape_data(group.keys()),
}
if isinstance(idx, int):
return get_single_item(self._features[idx], self._targets[idx], self._groups[idx])
else:
items = []
for features, target, group in zip(self._features[idx], self._targets[idx], self._groups[idx]):
items.append(get_single_item(features, target, group))
try:
return {k: np.stack([d[k] for d in items]) for k in items[0].keys()}
except ValueError:
raise ValueError('Not all data points have the same shape')
@property
def target_shape(self):
return np.hstack(tuple(self._targets[0].values())).shape
@property
def available_subject_ids(self):
ear_ids = self._query.specification_based_ids(self._specification)
subject_ids, _ = zip(*ear_ids)
return tuple(sorted(set(subject_ids)))
class CIPIC(HRTFDataset):
"""CIPIC HRTF Dataset
"""
def __init__(
self,
root: str,
feature_spec: Dict,
target_spec: Optional[Dict] = None,
group_spec: Optional[Dict] = None,
subject_ids: Optional[Iterable[int]] = None,
subject_requirements: Optional[Dict] = None,
measurement_transform: Optional[Callable] = None,
hrir_transform: Optional[Callable] = None,
dtype: type = np.float32,
# download: bool = True,
) -> None:
datapoint = CipicDataPoint(
anthropomorphy_matfile_path=Path(root)/'CIPIC_hrtf_database/anthropometry/anthro.mat',
sofa_directory_path=Path(root)/'sofa',
dtype=dtype,
)
super().__init__(datapoint, feature_spec, target_spec, group_spec, subject_ids, subject_requirements, None, measurement_transform, hrir_transform)
class ARI(HRTFDataset):
"""ARI HRTF Dataset
"""
def __init__(
self,
root: str,
feature_spec: Dict,
target_spec: Optional[Dict] = None,
group_spec: Optional[Dict] = None,
subject_ids: Optional[Iterable[int]] = None,
subject_requirements: Optional[Dict] = None,
measurement_transform: Optional[Callable] = None,
hrir_transform: Optional[Callable] = None,
dtype: type = np.float32,
# download: bool = True,
) -> None:
datapoint = AriDataPoint(
anthropomorphy_matfile_path=Path(root)/'anthro.mat',
sofa_directory_path=Path(root)/'sofa',
dtype=dtype,
)
super().__init__(datapoint, feature_spec, target_spec, group_spec, subject_ids, subject_requirements, None, measurement_transform, hrir_transform)
class Listen(HRTFDataset):
"""Listen HRTF Dataset
"""
def __init__(
self,
root: str,
feature_spec: Dict,
target_spec: Optional[Dict] = None,
group_spec: Optional[Dict] = None,
subject_ids: Optional[Iterable[int]] = None,
subject_requirements: Optional[Dict] = None,
hrir_transform: Optional[Callable] = None,
dtype: type = np.float32,
# download: bool = True,
) -> None:
datapoint = ListenDataPoint(sofa_directory_path=Path(root)/'sofa/compensated/44100', dtype=dtype)
super().__init__(datapoint, feature_spec, target_spec, group_spec, subject_ids, subject_requirements, None, None, hrir_transform)
class BiLi(HRTFDataset):
"""BiLi HRTF Dataset
"""
def __init__(
self,
root: str,
feature_spec: Dict,
target_spec: Optional[Dict] = None,
group_spec: Optional[Dict] = None,
subject_ids: Optional[Iterable[int]] = None,
subject_requirements: Optional[Dict] = None,
hrir_transform: Optional[Callable] = None,
dtype: type = np.float32,
# download: bool = True,
) -> None:
datapoint = BiLiDataPoint(sofa_directory_path=Path(root)/'sofa/compensated/96000', dtype=dtype)
super().__init__(datapoint, feature_spec, target_spec, group_spec, subject_ids, subject_requirements, None, None, hrir_transform)
class ITA(HRTFDataset):
"""ITA HRTF Dataset
"""
def __init__(
self,
root: str,
feature_spec: Dict,
target_spec: Optional[Dict] = None,
group_spec: Optional[Dict] = None,
subject_ids: Optional[Iterable[int]] = None,
subject_requirements: Optional[Dict] = None,
hrir_transform: Optional[Callable] = None,
dtype: type = np.float32,
# download: bool = True,
) -> None:
datapoint = ItaDataPoint(sofa_directory_path=Path(root)/'sofa', dtype=dtype)
super().__init__(datapoint, feature_spec, target_spec, group_spec, subject_ids, subject_requirements, None, None, hrir_transform)
class HUTUBS(HRTFDataset):
"""HUTUBS HRTF Dataset
"""
def __init__(
self,
root: str,
feature_spec: Dict,
target_spec: Optional[Dict] = None,
group_spec: Optional[Dict] = None,
subject_ids: Optional[Iterable[int]] = None,
subject_requirements: Optional[Dict] = None,
hrir_transform: Optional[Callable] = None,
dtype: type = np.float32,
# download: bool = True,
) -> None:
datapoint = HutubsDataPoint(sofa_directory_path=Path(root)/'sofa', dtype=dtype)
super().__init__(datapoint, feature_spec, target_spec, group_spec, subject_ids, subject_requirements, None, None, hrir_transform)
class RIEC(HRTFDataset):
"""RIEC HRTF Dataset
"""
def __init__(
self,
root: str,
feature_spec: Dict,
target_spec: Optional[Dict] = None,
group_spec: Optional[Dict] = None,
subject_ids: Optional[Iterable[int]] = None,
subject_requirements: Optional[Dict] = None,
hrir_transform: Optional[Callable] = None,
dtype: type = np.float32,
# download: bool = True,
) -> None:
datapoint = RiecDataPoint(sofa_directory_path=Path(root)/'sofa', dtype=dtype)
super().__init__(datapoint, feature_spec, target_spec, group_spec, subject_ids, subject_requirements, None, None, hrir_transform)
class CHEDAR(HRTFDataset):
"""CHEDAR HRTF Dataset
"""
def __init__(
self,
root: str,
feature_spec: Dict,
target_spec: Optional[Dict] = None,
group_spec: Optional[Dict] = None,
subject_ids: Optional[Iterable[int]] = None,
subject_requirements: Optional[Dict] = None,
hrir_transform: Optional[Callable] = None,
dtype: type = np.float32,
# download: bool = True,
) -> None:
datapoint = ChedarDataPoint(sofa_directory_path=Path(root)/'sofa', dtype=dtype)
super().__init__(datapoint, feature_spec, target_spec, group_spec, subject_ids, subject_requirements, None, None, hrir_transform)
class Widespread(HRTFDataset):
"""Widespread HRTF Dataset
"""
def __init__(
self,
root: str,
feature_spec: Dict,
target_spec: Optional[Dict] = None,
group_spec: Optional[Dict] = None,
subject_ids: Optional[Iterable[int]] = None,
subject_requirements: Optional[Dict] = None,
hrir_transform: Optional[Callable] = None,
dtype: type = np.float32,
# download: bool = True,
) -> None:
datapoint = WidespreadDataPoint(sofa_directory_path=Path(root)/'sofa', dtype=dtype)
super().__init__(datapoint, feature_spec, target_spec, group_spec, subject_ids, subject_requirements, None, None, hrir_transform)
class SADIE2(HRTFDataset):
"""SADIE II HRTF Dataset
"""
def __init__(
self,
root: str,
feature_spec: Dict,
target_spec: Optional[Dict] = None,
group_spec: Optional[Dict] = None,
subject_ids: Optional[Iterable[int]] = None,
subject_requirements: Optional[Dict] = None,
hrir_transform: Optional[Callable] = None,
dtype: type = np.float32,
# download: bool = True,
) -> None:
datapoint = Sadie2DataPoint(sofa_directory_path=Path(root)/'Database-Master_V1-4', dtype=dtype)
super().__init__(datapoint, feature_spec, target_spec, group_spec, subject_ids, subject_requirements, None, None, hrir_transform)
class ThreeDThreeA(HRTFDataset):
"""3D3A HRTF Dataset
"""
def __init__(
self,
root: str,
feature_spec: Dict,
target_spec: Optional[Dict] = None,
group_spec: Optional[Dict] = None,
subject_ids: Optional[Iterable[int]] = None,
subject_requirements: Optional[Dict] = None,
hrir_transform: Optional[Callable] = None,
dtype: type = np.float32,
# download: bool = True,
) -> None:
datapoint = ThreeDThreeADataPoint(sofa_directory_path=Path(root)/'sofa', dtype=dtype)
super().__init__(datapoint, feature_spec, target_spec, group_spec, subject_ids, subject_requirements, None, None, hrir_transform)
class SONICOM(HRTFDataset):
"""SONICOM HRTF Dataset
"""
def __init__(
self,
root: str,
feature_spec: Dict,
target_spec: Optional[Dict] = None,
group_spec: Optional[Dict] = None,
subject_ids: Optional[Iterable[int]] = None,
subject_requirements: Optional[Dict] = None,
hrir_transform: Optional[Callable] = None,
dtype: type = np.float32,
# download: bool = True,
) -> None:
datapoint = SonicomDataPoint(sofa_directory_path=Path(root), dtype=dtype)
super().__init__(datapoint, feature_spec, target_spec, group_spec, subject_ids, subject_requirements, None, None, hrir_transform)
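# Usage sketch (the root path and spec values are assumptions, not part of this module):
#
#   ds = ARI(
#       root='/data/ari',  # expected to contain anthro.mat and a sofa/ sub-directory
#       feature_spec={'hrirs': {'side': 'left', 'domain': 'time'}},
#       target_spec={'side': {}},
#   )
#   sample = ds[0]   # dict with 'features', 'target' and 'group' entries
#   print(len(ds), ds.hrir_samplerate, sample['features'].shape)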
``` |
{
"source": "jpauwels/neptune-client",
"score": 2
} |
#### File: neptune-client/e2e_tests/conftest.py
```python
import os
from faker import Faker
import boto3
import pytest
from neptune.management.internal.utils import normalize_project_name
from neptune.management import create_project, add_project_member
import neptune.new as neptune
from e2e_tests.utils import a_project_name, Environment
fake = Faker()
@pytest.fixture(scope="session")
def environment():
workspace = os.getenv("WORKSPACE_NAME")
admin_token = os.getenv("ADMIN_NEPTUNE_API_TOKEN")
user = os.getenv("USER_USERNAME")
project_name, project_key = a_project_name(project_slug=fake.slug())
project_identifier = normalize_project_name(name=project_name, workspace=workspace)
created_project_identifier = create_project(
name=project_name,
key=project_key,
visibility="priv",
workspace=workspace,
api_token=admin_token,
)
add_project_member(
name=created_project_identifier,
username=user,
# pylint: disable=no-member
role="contributor",
api_token=admin_token,
)
yield Environment(
workspace=workspace,
project=project_identifier,
user_token=os.getenv("NEPTUNE_API_TOKEN"),
admin_token=admin_token,
admin=os.getenv("ADMIN_USERNAME"),
user=user,
)
@pytest.fixture(scope="session")
def container(request, environment):
if request.param == "project":
project = neptune.init_project(name=environment.project)
yield project
project.stop()
if request.param == "run":
exp = neptune.init_run(project=environment.project)
yield exp
exp.stop()
@pytest.fixture(scope="session")
def bucket(environment):
bucket_name = os.environ.get("BUCKET_NAME")
s3_client = boto3.resource("s3")
s3_bucket = s3_client.Bucket(bucket_name)
yield bucket_name, s3_client
s3_bucket.objects.filter(Prefix=environment.project).delete()
```
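A sketch of how the session-scoped fixtures above are typically consumed in a test module, using pytest's indirect parametrization; the test name and the logged attribute path are illustrative:
```python
import pytest

@pytest.mark.parametrize("container", ["run", "project"], indirect=True)
def test_logs_and_fetches_value(container):
    # `container` is either a Run or a Project handle, depending on the parameter
    container["e2e/answer"] = 42
    container.sync()
    assert container["e2e/answer"].fetch() == 42
```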
#### File: neptune/new/exceptions.py
```python
from typing import List, Optional, Union
from urllib.parse import urlparse
from packaging.version import Version
from neptune.exceptions import STYLES
from neptune.new import envs
from neptune.new.envs import CUSTOM_RUN_ID_ENV_NAME
from neptune.new.internal.backends.api_model import Project, Workspace
from neptune.new.internal.container_type import ContainerType
from neptune.new.internal.utils import replace_patch_version
class NeptuneException(Exception):
def __eq__(self, other):
if type(other) is type(self):
return super().__eq__(other) and str(self).__eq__(str(other))
else:
return False
def __hash__(self):
return hash((super().__hash__(), str(self)))
class NeptuneApiException(NeptuneException):
pass
class MetadataInconsistency(NeptuneException):
pass
class MissingFieldException(NeptuneException, AttributeError, KeyError):
"""Raised when get-like action is called on `Handler`, instead of on `Attribute`."""
def __init__(self, field_path):
message = """
{h1}
----MissingFieldException-------------------------------------------------------
{end}
Field "{field_path}" was not found.
There are two possible reasons:
- There is a typo in a path. Double-check your code for typos.
- You are fetching a field that another process created, but the local representation is not synchronized.
If you are sending metadata from multiple processes at the same time, synchronize the local representation before fetching values:
{python}run.sync(){end}
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
self._msg = message.format(field_path=field_path, **STYLES)
super().__init__(self._msg)
def __str__(self):
# required because of overridden `__str__` in `KeyError`
return self._msg
class MalformedOperation(NeptuneException):
pass
class FileNotFound(NeptuneException):
def __init__(self, file: str):
super().__init__("File not found: {}".format(file))
class FileUploadError(NeptuneException):
def __init__(self, filename: str, msg: str):
super().__init__("Cannot upload file {}: {}".format(filename, msg))
class FileSetUploadError(NeptuneException):
def __init__(self, globs: List[str], msg: str):
super().__init__("Cannot upload file set {}: {}".format(globs, msg))
class InternalClientError(NeptuneException):
def __init__(self, msg: str):
message = """
{h1}
----InternalClientError-----------------------------------------------------------------------
{end}
Neptune Client Library encountered an unexpected Internal Error:
{msg}
Please contact Neptune support.
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(msg=msg, **STYLES))
class ClientHttpError(NeptuneException):
def __init__(self, status, response):
self.status = status
self.response = response
message = """
{h1}
----ClientHttpError-----------------------------------------------------------------------
{end}
Neptune server returned status {fail}{status}{end}.
Server response was:
{fail}{response}{end}
Verify the correctness of your call or contact Neptune support.
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(status=status, response=response, **STYLES))
class ExceptionWithProjectsWorkspacesListing(NeptuneException):
def __init__(
self,
message: str,
available_projects: List[Project] = (),
available_workspaces: List[Workspace] = (),
**kwargs,
):
available_projects_message = """
Did you mean any of these?
{projects}
"""
available_workspaces_message = """
You can check all of your projects on the Projects page:
{workspaces_urls}
"""
projects_formated_list = "\n".join(
map(
lambda project: f" - {project.workspace}/{project.name}",
available_projects,
)
)
workspaces_formated_list = "\n".join(
map(
lambda workspace: f" - https://app.neptune.ai/{workspace.name}/-/projects",
available_workspaces,
)
)
super().__init__(
message.format(
available_projects_message=available_projects_message.format(
projects=projects_formated_list
)
if available_projects
else "",
available_workspaces_message=available_workspaces_message.format(
workspaces_urls=workspaces_formated_list
)
if available_workspaces
else "",
**STYLES,
**kwargs,
)
)
class ProjectNotFound(ExceptionWithProjectsWorkspacesListing):
def __init__(
self,
project_id: str,
available_projects: List[Project] = (),
available_workspaces: List[Workspace] = (),
):
message = """
{h1}
----NeptuneProjectNotFoundException------------------------------------
{end}
We couldn’t find project {fail}"{project}"{end}.
{available_projects_message}{available_workspaces_message}
You may also want to check the following docs pages:
- https://docs.neptune.ai/administration/projects
- https://docs.neptune.ai/getting-started/hello-world#project
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(
message=message,
available_projects=available_projects,
available_workspaces=available_workspaces,
project=project_id,
)
class ProjectNameCollision(ExceptionWithProjectsWorkspacesListing):
def __init__(self, project_id: str, available_projects: List[Project] = ()):
message = """
{h1}
----NeptuneProjectNameCollisionException------------------------------------
{end}
Cannot resolve project {fail}"{project}"{end}.
{available_projects_message}
You may also want to check the following docs pages:
- https://docs.neptune.ai/administration/projects
- https://docs.neptune.ai/getting-started/hello-world#project
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(
message=message, available_projects=available_projects, project=project_id
)
class NeptuneMissingProjectNameException(ExceptionWithProjectsWorkspacesListing):
def __init__(
self,
available_projects: List[Project] = (),
available_workspaces: List[Workspace] = (),
):
message = """
{h1}
----NeptuneMissingProjectNameException----------------------------------------
{end}
Neptune client couldn't find your project name.
{available_projects_message}{available_workspaces_message}
There are two options to add it:
- specify it in your code
- set an environment variable in your operating system.
{h2}CODE{end}
Pass it to {bold}neptune.init(){end} via {bold}project{end} argument:
{python}neptune.init(project='WORKSPACE_NAME/PROJECT_NAME'){end}
{h2}ENVIRONMENT VARIABLE{end}
or export or set an environment variable depending on your operating system:
{correct}Linux/Unix{end}
In your terminal run:
{bash}export {env_project}=WORKSPACE_NAME/PROJECT_NAME{end}
{correct}Windows{end}
In your CMD run:
{bash}set {env_project}=WORKSPACE_NAME/PROJECT_NAME{end}
and skip the {bold}project{end} argument of {bold}neptune.init(){end}:
{python}neptune.init(){end}
You may also want to check the following docs pages:
- https://docs.neptune.ai/administration/projects
- https://docs.neptune.ai/getting-started/hello-world#project
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(
message=message,
available_projects=available_projects,
available_workspaces=available_workspaces,
env_project=envs.PROJECT_ENV_NAME,
)
class RunNotFound(NeptuneException):
def __init__(self, run_id: str) -> None:
super().__init__("Run {} not found.".format(run_id))
class ContainerUUIDNotFound(NeptuneException):
container_id: str
container_type: ContainerType
def __init__(self, container_id: str, container_type: ContainerType):
self.container_id = container_id
self.container_type = container_type
super().__init__(
"{} with ID {} not found. Could be deleted.".format(
container_type.value.capitalize(), container_id
)
)
def raise_container_not_found(
container_id: str, container_type: ContainerType, from_exception: Exception = None
):
if container_type == ContainerType.RUN:
error_class = RunUUIDNotFound
elif container_type == ContainerType.PROJECT:
error_class = ProjectUUIDNotFound
else:
raise InternalClientError(f"Unknown container_type: {container_type}")
if from_exception:
raise error_class(container_id) from from_exception
else:
raise error_class(container_id)
class RunUUIDNotFound(ContainerUUIDNotFound):
def __init__(self, container_id: str):
super().__init__(container_id, container_type=ContainerType.RUN)
class ProjectUUIDNotFound(ContainerUUIDNotFound):
def __init__(self, container_id: str):
super().__init__(container_id, container_type=ContainerType.PROJECT)
class InactiveContainerException(NeptuneException):
resume_info: str
def __init__(self, container_type: ContainerType, label: str):
message = """
{h1}
----{cls}----------------------------------------
{end}
It seems you are trying to log (or fetch) metadata to a {container_type} that was stopped ({label}).
What should I do?{resume_info}
You may also want to check the following docs pages:
- https://docs.neptune.ai/api-reference/{container_type}#.stop
- https://docs.neptune.ai/you-should-know/connection-modes
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(
message.format(
cls=self.__class__.__name__,
label=label,
container_type=container_type.value,
resume_info=self.resume_info,
**STYLES,
)
)
class InactiveRunException(InactiveContainerException):
resume_info = """
- Resume the run to continue logging to it:
https://docs.neptune.ai/how-to-guides/neptune-api/resume-run#how-to-resume-run
- Don't invoke `stop()` on a {container_type} that you want to access. If you want to stop monitoring only,
you can resume a {container_type} in read-only mode:
https://docs.neptune.ai/you-should-know/connection-modes#read-only"""
def __init__(self, label: str):
super().__init__(label=label, container_type=ContainerType.RUN)
class InactiveProjectException(InactiveContainerException):
resume_info = """
- Initialize connection to the project again to continue logging to it:
https://docs.neptune.ai/api-reference/neptune#.init_project
- Don't invoke `stop()` on a {container_type} that you want to access."""
def __init__(self, label: str):
super().__init__(label=label, container_type=ContainerType.PROJECT)
class NeptuneMissingApiTokenException(NeptuneException):
def __init__(self):
message = """
{h1}
----NeptuneMissingApiTokenException-------------------------------------------
{end}
Neptune client couldn't find your API token.
You can get it here:
- https://app.neptune.ai/get_my_api_token
There are two options to add it:
- specify it in your code
- set an environment variable in your operating system.
{h2}CODE{end}
Pass the token to {bold}neptune.init(){end} via {bold}api_token{end} argument:
{python}neptune.init(project='WORKSPACE_NAME/PROJECT_NAME', api_token='YOUR_API_TOKEN'){end}
{h2}ENVIRONMENT VARIABLE{end} {correct}(Recommended option){end}
or export or set an environment variable depending on your operating system:
{correct}Linux/Unix{end}
In your terminal run:
{bash}export {env_api_token}="YOUR_API_TOKEN"{end}
{correct}Windows{end}
In your CMD run:
{bash}set {env_api_token}="YOUR_API_TOKEN"{end}
and skip the {bold}api_token{end} argument of {bold}neptune.init(){end}:
{python}neptune.init(project='WORKSPACE_NAME/PROJECT_NAME'){end}
You may also want to check the following docs pages:
- https://docs.neptune.ai/getting-started/installation#authentication-neptune-api-token
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(
message.format(env_api_token=envs.API_TOKEN_ENV_NAME, **STYLES)
)
class NeptuneInvalidApiTokenException(NeptuneException):
def __init__(self):
message = """
{h1}
----NeptuneInvalidApiTokenException------------------------------------------------
{end}
Provided API token is invalid.
Make sure you copied and provided your API token correctly.
You can get it or check if it is correct here:
- https://app.neptune.ai/get_my_api_token
There are two options to add it:
- specify it in your code
- set as an environment variable in your operating system.
{h2}CODE{end}
Pass the token to {bold}neptune.init(){end} via {bold}api_token{end} argument:
{python}neptune.init(project='WORKSPACE_NAME/PROJECT_NAME', api_token='YOUR_API_TOKEN'){end}
{h2}ENVIRONMENT VARIABLE{end} {correct}(Recommended option){end}
or export or set an environment variable depending on your operating system:
{correct}Linux/Unix{end}
In your terminal run:
{bash}export {env_api_token}="YOUR_API_TOKEN"{end}
{correct}Windows{end}
In your CMD run:
{bash}set {env_api_token}="YOUR_API_TOKEN"{end}
and skip the {bold}api_token{end} argument of {bold}neptune.init(){end}:
{python}neptune.init(project='WORKSPACE_NAME/PROJECT_NAME'){end}
You may also want to check the following docs pages:
- https://docs.neptune.ai/getting-started/installation#authentication-neptune-api-token
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(
message.format(env_api_token=envs.API_TOKEN_ENV_NAME, **STYLES)
)
class CannotSynchronizeOfflineRunsWithoutProject(NeptuneException):
def __init__(self):
super().__init__("Cannot synchronize offline runs without a project.")
class NeedExistingRunForReadOnlyMode(NeptuneException):
def __init__(self):
message = """
{h1}
----NeedExistingRunForReadOnlyMode-----------------------------------------
{end}
Read-only mode can be used only with an existing run.
Parameter {python}run{end} of {python}neptune.init(){end} must be provided and reference
an existing run when using {python}mode="read-only"{end}.
You may also want to check the following docs pages:
- https://docs.neptune.ai/you-should-know/connection-modes#read-only
- https://docs.neptune.ai/api-reference/neptune#init
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(**STYLES))
class NeptuneRunResumeAndCustomIdCollision(NeptuneException):
def __init__(self):
message = """
{h1}
----NeptuneRunResumeAndCustomIdCollision-----------------------------------------
{end}
It's not possible to use {python}custom_run_id{end} while resuming a run.
Parameters {python}run{end} and {python}custom_run_id{end} of {python}neptune.init(){end} are mutually exclusive.
Make sure you have no {bash}{custom_id_env}{end} environment variable set
and no value is explicitly passed to the `custom_run_id` argument when you are resuming a run.
You may also want to check the following docs pages:
- https://docs.neptune.ai/api-reference/neptune#init
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(custom_id_env=CUSTOM_RUN_ID_ENV_NAME, **STYLES))
class UnsupportedClientVersion(NeptuneException):
def __init__(
self,
version: Union[Version, str],
min_version: Optional[Union[Version, str]] = None,
max_version: Optional[Union[Version, str]] = None,
):
current_version = str(version)
required_version = (
"==" + replace_patch_version(str(max_version))
if max_version
else ">=" + str(min_version)
)
message = """
{h1}
----UnsupportedClientVersion-------------------------------------------------------------
{end}
Your version of the neptune-client library ({current_version}) is not supported by the Neptune server.
Please install neptune-client{required_version}
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(
message.format(
current_version=current_version,
required_version=required_version,
**STYLES,
)
)
class CannotResolveHostname(NeptuneException):
def __init__(self, host):
message = """
{h1}
----CannotResolveHostname-----------------------------------------------------------------------
{end}
Neptune Client Library was not able to resolve hostname {underline}{host}{end}.
What should I do?
- Check if your computer is connected to the internet.
- Check if your computer should use a proxy to access the internet.
If so, you may want to use {python}proxies{end} parameter of {python}neptune.init(){end} function.
See https://docs.neptune.ai/api-reference/neptune#.init
and https://requests.readthedocs.io/en/master/user/advanced/#proxies
- Check Neptune services status: https://status.neptune.ai/
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(host=host, **STYLES))
class SSLError(NeptuneException):
def __init__(self):
super().__init__(
"SSL certificate validation failed. Set NEPTUNE_ALLOW_SELF_SIGNED_CERTIFICATE "
"environment variable to accept self-signed certificates."
)
class NeptuneConnectionLostException(NeptuneException):
def __init__(self, cause: Exception):
self.cause = cause
message = """
{h1}
----NeptuneConnectionLostException---------------------------------------------------------
{end}
A connection to the Neptune server was lost.
If you are using the asynchronous (default) connection mode, Neptune will continue to track your metadata locally and will continuously try to re-establish the connection with the Neptune servers.
If the connection is not re-established, you can upload the data later using the Neptune Command Line Interface:
{bash}neptune sync -p workspace_name/project_name{end}
What should I do?
- Check if your computer is connected to the internet.
- If your connection is unstable, you can consider working in the offline mode:
{python}run = neptune.init(mode="offline"){end}
You can read in detail how it works and how to upload your data on the following doc pages:
- https://docs.neptune.ai/you-should-know/connection-modes#offline
- https://docs.neptune.ai/you-should-know/connection-modes#uploading-offline-data
You may also want to check the following docs pages:
- https://docs.neptune.ai/you-should-know/connection-modes#connectivity-issues
- https://docs.neptune.ai/you-should-know/connection-modes
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(**STYLES))
class InternalServerError(NeptuneApiException):
def __init__(self, response):
message = """
{h1}
----InternalServerError-----------------------------------------------------------------------
{end}
Neptune Client Library encountered an unexpected Internal Server Error.
Server response was:
{fail}{response}{end}
Please try again later or contact Neptune support.
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(response=response, **STYLES))
class Unauthorized(NeptuneApiException):
def __init__(self):
message = """
{h1}
----Unauthorized-----------------------------------------------------------------------
{end}
You have no permission to access the given resource.
- Verify your API token is correct.
See: https://app.neptune.ai/get_my_api_token
- Verify if the provided project name is correct.
The correct project name should look like this {correct}WORKSPACE/PROJECT_NAME{end}.
It has two parts:
- {correct}WORKSPACE{end}: which can be your username or your organization name
- {correct}PROJECT_NAME{end}: which is the actual project name you chose
- Ask your organization administrator to grant you necessary privileges to the project
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(**STYLES))
class Forbidden(NeptuneApiException):
def __init__(self):
message = """
{h1}
----Forbidden-----------------------------------------------------------------------
{end}
You have no permission to access the given resource.
- Verify your API token is correct.
See: https://app.neptune.ai/get_my_api_token
- Verify if the provided project name is correct.
The correct project name should look like this {correct}WORKSPACE/PROJECT_NAME{end}.
It has two parts:
- {correct}WORKSPACE{end}: which can be your username or your organization name
- {correct}PROJECT_NAME{end}: which is the actual project name you chose
- Ask your organization administrator to grant you necessary privileges to the project
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(**STYLES))
class NeptuneOfflineModeFetchException(NeptuneException):
def __init__(self):
message = """
{h1}
----NeptuneOfflineModeFetchException---------------------------------------------------
{end}
It seems you are trying to fetch data from the server while working in offline mode.
You need to work in a non-offline connection mode to fetch data from the server.
You can set connection mode when creating a new run:
{python}run = neptune.init(mode="async"){end}
You may also want to check the following docs pages:
- https://docs.neptune.ai/you-should-know/connection-modes
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(**STYLES))
class OperationNotSupported(NeptuneException):
def __init__(self, message: str):
super().__init__(f"Operation not supported: {message}")
class NeptuneLegacyProjectException(NeptuneException):
def __init__(self, project: str):
message = """
{h1}
----NeptuneLegacyProjectException---------------------------------------------------------
{end}
Your project "{project}" has not been migrated to the new structure yet.
Unfortunately, the neptune.new Python API is incompatible with projects using the old structure,
so please use the legacy neptune Python API.
Don't worry - we are working hard on migrating all the projects and you will be able to use the neptune.new API soon.
You can find documentation for legacy neptune Python API here:
- https://docs-legacy.neptune.ai/index.html
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(project=project, **STYLES))
class NeptuneUninitializedException(NeptuneException):
def __init__(self):
message = """
{h1}
----NeptuneUninitializedException----------------------------------------------------
{end}
You must initialize neptune-client before you access `get_last_run`.
Looks like you forgot to add:
{python}neptune.init(project='WORKSPACE_NAME/PROJECT_NAME', api_token='YOUR_API_TOKEN'){end}
before you ran:
{python}neptune.get_last_run(){end}
You may also want to check the following docs pages:
- https://docs.neptune.ai/api-reference/neptune#get_last_run
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(**STYLES))
class NeptuneIntegrationNotInstalledException(NeptuneException):
def __init__(self, integration_package_name, framework_name):
message = """
{h1}
----NeptuneIntegrationNotInstalledException-----------------------------------------
{end}
Looks like integration {integration_package_name} wasn't installed.
To install run:
{bash}pip install {integration_package_name}{end}
Or:
{bash}pip install neptune-client[{framework_name}]{end}
You may also want to check the following docs pages:
- https://docs.neptune.ai/integrations-and-supported-tools/intro
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(
message.format(
integration_package_name=integration_package_name,
framework_name=framework_name,
**STYLES,
)
)
class NeptuneLimitExceedException(NeptuneException):
def __init__(self, reason: str):
message = """
{h1}
----NeptuneLimitExceedException---------------------------------------------------------------------------------------
{end}
{reason}
It's not possible to upload new data, but you can still fetch and delete data.
If you are using the asynchronous (default) connection mode, Neptune automatically switched to offline mode
and your data is stored safely on the disk. You can upload it later using the Neptune Command Line Interface:
{bash}neptune sync -p project_name{end}
What should I do?
- In case of a storage limit, go to your projects and remove runs or model metadata you don't need
- ... or update your subscription plan here: https://app.neptune.ai/-/subscription
You may also want to check the following docs pages:
- https://docs.neptune.ai/advanced-user-guides/connection-modes
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(**STYLES, reason=reason))
class NeptuneStorageLimitException(NeptuneException):
def __init__(self):
message = """
{h1}
----NeptuneStorageLimitException---------------------------------------------------------------------------------------
{end}
You exceeded the storage limit for your workspace. It's not possible to upload new data, but you can still fetch and delete data.
If you are using the asynchronous (default) connection mode, Neptune automatically switched to offline mode
and your data is stored safely on the disk. You can upload it later using the Neptune Command Line Interface:
{bash}neptune sync -p project_name{end}
What should I do?
- Go to your projects and remove runs or model metadata you don't need
- ... or update your subscription plan here: https://app.neptune.ai/-/subscription
You may also want to check the following docs pages:
- https://docs.neptune.ai/advanced-user-guides/connection-modes
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(**STYLES))
class FetchAttributeNotFoundException(MetadataInconsistency):
def __init__(self, attribute_path: str):
message = """
{h1}
----MetadataInconsistency----------------------------------------------------------------------
{end}
Field {python}{attribute_path}{end} was not found.
Remember that in the asynchronous (default) connection mode data is synchronized
with the Neptune servers in the background and may not have reached
them yet when it is fetched. Before fetching the data you can force a
wait for all the requests sent by invoking:
{python}run.wait(){end}
Remember that each use of {python}wait{end} introduces a delay in code execution.
You may also want to check the following docs pages:
- https://docs.neptune.ai/you-should-know/connection-modes
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help.html
"""
super().__init__(message.format(attribute_path=attribute_path, **STYLES))
class ArtifactNotFoundException(MetadataInconsistency):
def __init__(self, artifact_hash: str):
message = """
{h1}
----MetadataInconsistency----------------------------------------------------------------------
{end}
Artifact with hash {python}{artifact_hash}{end} was not found.
Remember that in the asynchronous (default) connection mode data is synchronized
with the Neptune servers in the background and may not have reached
them yet when it is fetched. Before fetching the data you can force a
wait for all the requests sent by invoking:
{python}run.wait(){end}
Remember that each use of {python}wait{end} introduces a delay in code execution.
You may also want to check the following docs pages:
- https://docs.neptune.ai/you-should-know/connection-modes
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help.html
"""
super().__init__(message.format(artifact_hash=artifact_hash, **STYLES))
class PlotlyIncompatibilityException(Exception):
def __init__(self, matplotlib_version, plotly_version):
super().__init__(
"Unable to convert plotly figure to matplotlib format. "
"Your matplotlib ({}) and plotlib ({}) versions are not compatible. "
"See https://stackoverflow.com/q/63120058 for details. "
"Downgrade matplotlib to version 3.2 or use as_image to log static chart.".format(
matplotlib_version, plotly_version
)
)
class NeptunePossibleLegacyUsageException(NeptuneException):
def __init__(self):
message = """
{h1}
----NeptunePossibleLegacyUsageException----------------------------------------------------------------
{end}
It seems you are trying to use the legacy API, but imported the new one.
Simply update your import statement to:
{python}import neptune{end}
You may want to check the Legacy API docs:
- https://docs-legacy.neptune.ai
If you want to update your code with the new API we prepared a handy migration guide:
- https://docs.neptune.ai/migration-guide
You can read more about neptune.new in the release blog post:
- https://neptune.ai/blog/neptune-new
You may also want to check the following docs pages:
- https://docs-legacy.neptune.ai/getting-started/integrate-neptune-into-your-codebase.html
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(**STYLES))
class NeptuneLegacyIncompatibilityException(NeptuneException):
def __init__(self):
message = """
{h1}
----NeptuneLegacyIncompatibilityException----------------------------------------
{end}
It seems you are passing a legacy Experiment object where a Run object is expected.
What can I do?
- Updating your code to the new Python API requires a few changes, but to help you with this process we prepared a handy migration guide:
https://docs.neptune.ai/migration-guide
- You can read more about neptune.new in the release blog post:
https://neptune.ai/blog/neptune-new
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(**STYLES))
class NeptuneUnhandledArtifactSchemeException(NeptuneException):
def __init__(self, path: str):
scheme = urlparse(path).scheme
message = """
{h1}
----NeptuneUnhandledArtifactProtocolException------------------------------------
{end}
You have used a Neptune Artifact to track a file with a scheme unhandled by this client ({scheme}).
Problematic path: {path}
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(scheme=scheme, path=path, **STYLES))
class NeptuneUnhandledArtifactTypeException(NeptuneException):
def __init__(self, type_str: str):
message = """
{h1}
----NeptuneUnhandledArtifactTypeException----------------------------------------
{end}
A Neptune Artifact you're listing is tracking a file type unhandled by this client ({type_str}).
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(type_str=type_str, **STYLES))
class NeptuneLocalStorageAccessException(NeptuneException):
def __init__(self, path, expected_description):
message = """
{h1}
----NeptuneLocalStorageAccessException-------------------------------------
{end}
Neptune had a problem processing "{path}": it expects it to be {expected_description}.
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(
message.format(
path=path, expected_description=expected_description, **STYLES
)
)
class NeptuneRemoteStorageCredentialsException(NeptuneException):
def __init__(self):
message = """
{h1}
----NeptuneRemoteStorageCredentialsException-------------------------------------
{end}
Neptune could not find suitable credentials for remote storage of a Neptune Artifact you're listing.
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(**STYLES))
class NeptuneRemoteStorageAccessException(NeptuneException):
def __init__(self, location: str):
message = """
{h1}
----NeptuneRemoteStorageAccessException------------------------------------------
{end}
Neptune could not access an object ({location}) from remote storage of a Neptune Artifact you're listing.
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(message.format(location=location, **STYLES))
class ArtifactUploadingError(NeptuneException):
def __init__(self, msg: str):
super().__init__("Cannot upload artifact: {}".format(msg))
class NeptuneUnsupportedArtifactFunctionalityException(NeptuneException):
def __init__(self, functionality_info: str):
message = """
{h1}
----NeptuneUnsupportedArtifactFunctionality-------------------------------------
{end}
It seems you are using Neptune Artifacts functionality that is currently not supported.
{functionality_info}
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(
message.format(functionality_info=functionality_info, **STYLES)
)
class NeptuneEmptyLocationException(NeptuneException):
def __init__(self, location: str, namespace: str):
message = """
{h1}
----NeptuneEmptyLocationException----------------------------------------------
{end}
Neptune could not find files in the requested location ({location}) during creation of an Artifact in "{namespace}".
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
super().__init__(
message.format(location=location, namespace=namespace, **STYLES)
)
class NeptuneFeatureNotAvailableException(NeptuneException):
def __init__(self, missing_feature):
message = """
{h1}
----NeptuneFeatureNotAvailableException----------------------------------------------
{end}
The following feature is not yet supported by the Neptune instance you are using:
{missing_feature}
An update of the Neptune instance is required in order to use it. Please contact your local Neptune administrator
or the Neptune support directly (<EMAIL>) about the upcoming updates.
{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
"""
self.message = message.format(missing_feature=missing_feature, **STYLES)
super().__init__(self.message)
```
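A small sketch of how the dual inheritance of `MissingFieldException` plays out in user code; the field path is illustrative:
```python
from neptune.new.exceptions import MissingFieldException

try:
    raise MissingFieldException("params/learning_rate")
except KeyError as err:
    # the exception also derives from AttributeError, so either handler catches it,
    # and the overridden __str__ keeps the styled help text instead of the bare key
    assert isinstance(err, AttributeError)
    print(err)
```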
#### File: new/internal/init_project.py
```python
import logging
import threading
from typing import Optional
from neptune.new.exceptions import NeptuneException
from neptune.new.internal.backends.factory import get_backend
from neptune.new.internal.backends.project_name_lookup import project_name_lookup
from neptune.new.internal.backgroud_job_list import BackgroundJobList
from neptune.new.internal.operation_processors.factory import get_operation_processor
from neptune.new.internal.utils import verify_type
from neptune.new.project import Project
from neptune.new.types.mode import Mode
from neptune.new.version import version as parsed_version
__version__ = str(parsed_version)
_logger = logging.getLogger(__name__)
def init_project(
*,
name: Optional[str] = None,
api_token: Optional[str] = None,
mode: str = Mode.ASYNC.value,
flush_period: float = 5,
proxies: Optional[dict] = None,
) -> Project:
verify_type("name", name, (str, type(None)))
verify_type("api_token", api_token, (str, type(None)))
verify_type("mode", mode, str)
verify_type("flush_period", flush_period, (int, float))
verify_type("proxies", proxies, (dict, type(None)))
if mode == Mode.OFFLINE:
raise NeptuneException("Project can't be initialized in OFFLINE mode")
backend = get_backend(mode, api_token=api_token, proxies=proxies)
project_obj = project_name_lookup(backend, name)
project_lock = threading.RLock()
operation_processor = get_operation_processor(
mode,
container_id=project_obj.id,
container_type=Project.container_type,
backend=backend,
lock=project_lock,
flush_period=flush_period,
)
background_jobs = []
project = Project(
project_obj.id,
backend,
operation_processor,
BackgroundJobList(background_jobs),
project_lock,
project_obj.workspace,
project_obj.name,
)
if mode != Mode.OFFLINE:
project.sync(wait=False)
# pylint: disable=protected-access
project._startup(debug_mode=mode == Mode.DEBUG)
return project
def get_project(
name: Optional[str] = None,
api_token: Optional[str] = None,
proxies: Optional[dict] = None,
) -> Project:
"""Get a project with given `name`.
Args:
name(str, optional): Name of a project in a form of namespace/project_name. Defaults to `None`.
If None, the value of `NEPTUNE_PROJECT` environment variable will be taken.
api_token(str, optional): User’s API token. Defaults to `None`.
If None, the value of `NEPTUNE_API_TOKEN` environment variable will be taken.
.. note::
It is strongly recommended to use `NEPTUNE_API_TOKEN` environment variable rather than placing your
API token in plain text in your source code.
Returns:
``Project``: an object that can be used to interact with the project as a whole, like fetching data from the Runs table.
Examples:
>>> import neptune.new as neptune
>>> # Fetch project 'jack/sandbox'
... project = neptune.get_project(name='jack/sandbox')
>>> # Fetch all Runs metadata as Pandas DataFrame
... runs_table_df = project.fetch_runs_table().to_pandas()
You may also want to check `get_project docs page`_.
.. _get_project docs page:
https://docs.neptune.ai/api-reference/neptune#.get_project
"""
return init_project(
name=name, api_token=api_token, mode=Mode.READ_ONLY.value, proxies=proxies
)
```
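A minimal usage sketch of `init_project`, assuming `NEPTUNE_API_TOKEN` is set in the environment and the project name is illustrative:
```python
import neptune.new as neptune

project = neptune.init_project(name="my-workspace/sandbox")  # asynchronous mode by default
project["general/description"] = "aggregated project-level metadata"
project.stop()
```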
#### File: internal/backends/test_hosted_file_operations.py
```python
import json
import os
import random
import unittest
import uuid
from collections import namedtuple
from tempfile import NamedTemporaryFile, TemporaryDirectory
import mock
from mock import MagicMock, patch, call
from neptune.new.internal.backends.api_model import ClientConfig
from neptune.new.internal.backends.hosted_file_operations import (
upload_file_attribute,
upload_file_set_attribute,
download_file_attribute,
_get_content_disposition_filename,
download_file_set_attribute,
)
from neptune.utils import IS_WINDOWS
from tests.neptune.new.backend_test_mixin import BackendTestMixin
from tests.neptune.new.helpers import create_file
def set_expected_result(endpoint: MagicMock, value: dict):
endpoint.return_value.response.return_value.result = namedtuple(
endpoint.__class__.__name__, value.keys()
)(**value)
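# Example (illustrative): make a mocked swagger endpoint return a result object whose
# attributes mirror the given dict, e.g.
#   set_expected_result(swagger_mock.api.getUploadConfig, {"chunkSize": 5 * 1024 * 1024})
# after which `swagger_mock.api.getUploadConfig(...).response().result.chunkSize` is 5242880.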
class HostedFileOperationsHelper(unittest.TestCase):
@staticmethod
def get_random_bytes(count):
return bytes(random.randint(0, 255) for _ in range(count))
@staticmethod
def _get_swagger_mock():
swagger_mock = MagicMock()
swagger_mock.swagger_spec.http_client = MagicMock()
swagger_mock.swagger_spec.api_url = "ui.neptune.ai"
swagger_mock.api.uploadFileSetAttributeChunk.operation.path_name = (
"/uploadFileSetChunk"
)
swagger_mock.api.uploadFileSetAttributeTar.operation.path_name = (
"/uploadFileSetTar"
)
swagger_mock.api.uploadPath.operation.path_name = "/uploadPath"
swagger_mock.api.uploadAttribute.operation.path_name = "/attributes/upload"
swagger_mock.api.downloadAttribute.operation.path_name = "/attributes/download"
swagger_mock.api.downloadFileSetAttributeZip.operation.path_name = (
"/attributes/downloadFileSetZip"
)
swagger_mock.api.download.operation.path_name = "/download"
swagger_mock.api.fileAtomMultipartUploadStart.operation.path_name = (
"/attributes/storage/file/upload/start"
)
swagger_mock.api.fileAtomMultipartUploadFinish.operation.path_name = (
"/attributes/storage/file/upload/finish"
)
swagger_mock.api.fileAtomMultipartUploadPart.operation.path_name = (
"/attributes/storage/file/upload/part"
)
swagger_mock.api.fileAtomUpload.operation.path_name = (
"/attributes/storage/file/upload"
)
swagger_mock.api.fileSetFileMultipartUploadStart.operation.path_name = (
"/attributes/storage/fileset/upload/start"
)
swagger_mock.api.fileSetFileMultipartUploadFinish.operation.path_name = (
"/attributes/storage/fileset/upload/finish"
)
swagger_mock.api.fileSetFileMultipartUploadPart.operation.path_name = (
"/attributes/storage/fileset/upload/part"
)
swagger_mock.api.fileSetFileUpload.operation.path_name = (
"/attributes/storage/fileset/upload"
)
return swagger_mock
class TestCommonHostedFileOperations(HostedFileOperationsHelper):
# pylint:disable=protected-access
def test_get_content_disposition_filename(self):
# given
response_mock = MagicMock()
response_mock.headers = {
"Content-Disposition": 'attachment; filename="sample.file"'
}
# when
filename = _get_content_disposition_filename(response_mock)
# then
self.assertEqual(filename, "sample.file")
@patch(
"neptune.new.internal.backends.hosted_file_operations._store_response_as_file"
)
@patch("neptune.new.internal.backends.hosted_file_operations._download_raw_data")
def test_download_file_attribute(self, download_raw, store_response_mock):
# given
swagger_mock = self._get_swagger_mock()
exp_uuid = str(uuid.uuid4())
# when
download_file_attribute(
swagger_client=swagger_mock,
container_id=exp_uuid,
attribute="some/attribute",
destination=None,
)
# then
download_raw.assert_called_once_with(
http_client=swagger_mock.swagger_spec.http_client,
url="https://ui.neptune.ai/attributes/download",
headers={"Accept": "application/octet-stream"},
query_params={"experimentId": str(exp_uuid), "attribute": "some/attribute"},
)
store_response_mock.assert_called_once_with(download_raw.return_value, None)
@patch(
"neptune.new.internal.backends.hosted_file_operations._store_response_as_file"
)
@patch("neptune.new.internal.backends.hosted_file_operations._download_raw_data")
@patch(
"neptune.new.internal.backends.hosted_file_operations._get_download_url",
new=lambda _, _id: "some_url",
)
def test_download_file_set_attribute(self, download_raw, store_response_mock):
# given
swagger_mock = self._get_swagger_mock()
download_id = str(uuid.uuid4())
# when
download_file_set_attribute(
swagger_client=swagger_mock, download_id=download_id, destination=None
)
# then
download_raw.assert_called_once_with(
http_client=swagger_mock.swagger_spec.http_client,
url="some_url",
headers={"Accept": "application/zip"},
)
store_response_mock.assert_called_once_with(download_raw.return_value, None)
class TestOldUploadFileOperations(HostedFileOperationsHelper):
multipart_config = None
@unittest.skipIf(IS_WINDOWS, "Windows behaves strangely")
@patch("neptune.new.internal.backends.hosted_file_operations.upload_raw_data")
def test_missing_files_or_directory(self, upload_raw_data_mock):
# given
exp_uuid = str(uuid.uuid4())
swagger_mock = self._get_swagger_mock()
upload_raw_data_mock.return_value = b"null"
swagger_mock.api.getUploadConfig.return_value.response.return_value.result.chunkSize = (
10
)
# when
with NamedTemporaryFile("w") as temp_file_1:
with NamedTemporaryFile("w") as temp_file_2:
with TemporaryDirectory() as temp_dir:
upload_file_set_attribute(
swagger_client=swagger_mock,
container_id=exp_uuid,
attribute="some/attribute",
file_globs=[
temp_file_1.name,
temp_file_2.name,
os.path.abspath("missing_file"),
temp_dir,
],
reset=True,
multipart_config=self.multipart_config,
)
# then
upload_raw_data_mock.assert_called_once_with(
http_client=swagger_mock.swagger_spec.http_client,
url="https://ui.neptune.ai/uploadFileSetTar",
data=mock.ANY,
headers={"Content-Type": "application/octet-stream"},
query_params={
"experimentId": str(exp_uuid),
"attribute": "some/attribute",
"reset": "True",
},
)
@unittest.skipIf(IS_WINDOWS, "Windows behaves strangely")
@patch("neptune.new.internal.backends.hosted_file_operations._upload_loop")
def test_upload_file_attribute(self, upload_loop_mock):
# given
exp_uuid = str(uuid.uuid4())
swagger_mock = self._get_swagger_mock()
upload_loop_mock.return_value = b"null"
# when
with NamedTemporaryFile("w") as f:
upload_file_attribute(
swagger_client=swagger_mock,
container_id=exp_uuid,
attribute="target/path.txt",
source=f.name,
ext="txt",
multipart_config=self.multipart_config,
)
# then
upload_loop_mock.assert_called_once_with(
file_chunk_stream=mock.ANY,
http_client=swagger_mock.swagger_spec.http_client,
url="https://ui.neptune.ai/attributes/upload",
query_params={
"experimentId": str(exp_uuid),
"attribute": "target/path.txt",
"ext": "txt",
},
)
@unittest.skipIf(IS_WINDOWS, "Windows behaves strangely")
@patch("neptune.new.internal.backends.hosted_file_operations._upload_loop")
def test_upload_file_attribute_from_stream(self, upload_loop_mock):
# given
exp_uuid = str(uuid.uuid4())
swagger_mock = self._get_swagger_mock()
upload_loop_mock.return_value = b"null"
# when
upload_file_attribute(
swagger_client=swagger_mock,
container_id=exp_uuid,
attribute="target/path.txt",
source=b"Some content of test stream",
ext="txt",
multipart_config=self.multipart_config,
)
# then
upload_loop_mock.assert_called_once_with(
file_chunk_stream=mock.ANY,
http_client=swagger_mock.swagger_spec.http_client,
url="https://ui.neptune.ai/attributes/upload",
query_params={
"experimentId": str(exp_uuid),
"attribute": "target/path.txt",
"ext": "txt",
},
)
@unittest.skipIf(IS_WINDOWS, "Windows behaves strangely")
@patch("neptune.new.internal.backends.hosted_file_operations._upload_loop_chunk")
@patch(
"neptune.new.internal.utils.glob",
new=lambda path, recursive=False: [path.replace("*", "file.txt")],
)
def test_upload_single_file_in_file_set_attribute(self, upload_loop_chunk_mock):
# given
exp_uuid = uuid.uuid4()
swagger_mock = self._get_swagger_mock()
upload_loop_chunk_mock.return_value = b"null"
chunk_size = 5 * 1024 * 1024
swagger_mock.api.getUploadConfig.return_value.response.return_value.result.chunkSize = (
chunk_size
)
# when
with NamedTemporaryFile("w") as temp_file:
with open(temp_file.name, "wb") as handler:
handler.write(self.get_random_bytes(2 * chunk_size))
upload_file_set_attribute(
swagger_client=swagger_mock,
container_id=str(exp_uuid),
attribute="some/attribute",
file_globs=[temp_file.name],
reset=True,
multipart_config=self.multipart_config,
)
# then
upload_loop_chunk_mock.assert_has_calls(
[
call(
mock.ANY,
mock.ANY,
http_client=swagger_mock.swagger_spec.http_client,
query_params={
"experimentId": str(exp_uuid),
"attribute": "some/attribute",
"reset": "True",
"path": os.path.basename(temp_file.name),
},
url="https://ui.neptune.ai/uploadFileSetChunk",
),
call(
mock.ANY,
mock.ANY,
http_client=swagger_mock.swagger_spec.http_client,
query_params={
"experimentId": str(exp_uuid),
"attribute": "some/attribute",
"reset": "False",
"path": os.path.basename(temp_file.name),
},
url="https://ui.neptune.ai/uploadFileSetChunk",
),
]
)
@unittest.skipIf(IS_WINDOWS, "Windows behaves strangely")
@patch("neptune.new.internal.backends.hosted_file_operations.upload_raw_data")
@patch(
"neptune.new.internal.utils.glob",
new=lambda path, recursive=False: [path.replace("*", "file.txt")],
)
def test_upload_multiple_files_in_file_set_attribute(self, upload_raw_data_mock):
# given
exp_uuid = str(uuid.uuid4())
swagger_mock = self._get_swagger_mock()
upload_raw_data_mock.return_value = b"null"
swagger_mock.api.getUploadConfig.return_value.response.return_value.result.chunkSize = (
10
)
# when
with NamedTemporaryFile("w") as temp_file_1:
with NamedTemporaryFile("w") as temp_file_2:
upload_file_set_attribute(
swagger_client=swagger_mock,
container_id=exp_uuid,
attribute="some/attribute",
file_globs=[temp_file_1.name, temp_file_2.name],
reset=True,
multipart_config=self.multipart_config,
)
# then
upload_raw_data_mock.assert_called_once_with(
http_client=swagger_mock.swagger_spec.http_client,
url="https://ui.neptune.ai/uploadFileSetTar",
data=mock.ANY,
headers={"Content-Type": "application/octet-stream"},
query_params={
"experimentId": str(exp_uuid),
"attribute": "some/attribute",
"reset": "True",
},
)
class TestNewUploadFileOperations(HostedFileOperationsHelper, BackendTestMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
config_swagger_client = self._get_swagger_client_mock(MagicMock())
client_config = ClientConfig.from_api_response(
config_swagger_client.api.getClientConfig().response().result
)
self.multipart_config = client_config.multipart_config
@unittest.skipIf(IS_WINDOWS, "Windows behaves strangely")
@patch("neptune.new.internal.backends.hosted_file_operations.upload_raw_data")
def test_missing_files_or_directory(self, upload_raw_data_mock):
# given
exp_uuid = str(uuid.uuid4())
swagger_mock = self._get_swagger_mock()
upload_raw_data_mock.return_value = b"null"
# when
with NamedTemporaryFile("w") as temp_file_1:
with NamedTemporaryFile("w") as temp_file_2:
with TemporaryDirectory() as temp_dir:
upload_file_set_attribute(
swagger_client=swagger_mock,
container_id=exp_uuid,
attribute="some/attribute",
file_globs=[
temp_file_1.name,
temp_file_2.name,
os.path.abspath("missing_file"),
temp_dir,
],
reset=True,
multipart_config=self.multipart_config,
)
# then
upload_raw_data_mock.assert_called_once_with(
http_client=swagger_mock.swagger_spec.http_client,
url="https://ui.neptune.ai/uploadFileSetTar",
data=mock.ANY,
headers={"Content-Type": "application/octet-stream"},
query_params={
"experimentId": str(exp_uuid),
"attribute": "some/attribute",
"reset": "True",
},
)
@unittest.skipIf(IS_WINDOWS, "Windows behaves strangely")
@patch("neptune.new.internal.backends.hosted_file_operations.upload_raw_data")
def test_upload_small_file_attribute(self, upload_raw_data):
# given
exp_uuid = str(uuid.uuid4())
swagger_mock = self._get_swagger_mock()
upload_raw_data.return_value = json.dumps(
{
"uploadId": "placeholder",
"errors": [],
}
)
data = b"testdata"
# when
with create_file(content=data, binary_mode=True) as filename:
upload_file_attribute(
swagger_client=swagger_mock,
container_id=exp_uuid,
attribute="target/path.txt",
source=filename,
ext="txt",
multipart_config=self.multipart_config,
)
# then
swagger_mock.api.fileSetFileMultipartUploadStart.assert_not_called()
swagger_mock.api.fileSetFileMultipartUploadFinish.assert_not_called()
swagger_mock.api.fileSetFileMultipartUploadPart.assert_not_called()
swagger_mock.api.fileSetFileUpload.assert_not_called()
swagger_mock.api.fileAtomMultipartUploadStart.assert_not_called()
swagger_mock.api.fileAtomMultipartUploadFinish.assert_not_called()
swagger_mock.api.fileAtomMultipartUploadPart.assert_not_called()
swagger_mock.api.fileAtomUpload.assert_not_called()
upload_raw_data.assert_called_once_with(
data=data,
http_client=swagger_mock.swagger_spec.http_client,
url="https://ui.neptune.ai/attributes/storage/file/upload",
query_params={
"experimentIdentifier": str(exp_uuid),
"attribute": "target/path.txt",
"ext": "txt",
},
)
@unittest.skipIf(IS_WINDOWS, "Windows behaves strangely")
@patch("neptune.new.internal.backends.hosted_file_operations.upload_raw_data")
def test_upload_big_file_attribute(self, upload_raw_data):
# given
exp_uuid = str(uuid.uuid4())
swagger_mock = self._get_swagger_mock()
upload_id = "placeholder"
set_expected_result(
swagger_mock.api.fileAtomMultipartUploadStart,
{
"uploadId": upload_id,
"errors": [],
},
)
upload_raw_data.return_value = json.dumps(
{
"errors": [],
}
)
data = self.get_random_bytes(8 * 2 ** 20) # 8 MB
chunk_size = self.multipart_config.min_chunk_size
# when
with create_file(content=data, binary_mode=True) as filename:
upload_file_attribute(
swagger_client=swagger_mock,
container_id=exp_uuid,
attribute="target/path.txt",
source=filename,
ext="txt",
multipart_config=self.multipart_config,
)
# then
swagger_mock.api.fileSetFileMultipartUploadStart.assert_not_called()
swagger_mock.api.fileSetFileMultipartUploadFinish.assert_not_called()
swagger_mock.api.fileSetFileMultipartUploadPart.assert_not_called()
swagger_mock.api.fileSetFileUpload.assert_not_called()
swagger_mock.api.fileAtomUpload.assert_not_called()
swagger_mock.api.fileAtomMultipartUploadStart.assert_called_once_with(
attribute="target/path.txt",
experimentIdentifier=str(exp_uuid),
ext="txt",
totalLength=len(data),
)
swagger_mock.api.fileAtomMultipartUploadFinish.assert_called_once_with(
attribute="target/path.txt",
experimentIdentifier=str(exp_uuid),
uploadId=upload_id,
)
upload_raw_data.assert_has_calls(
[
call(
data=data[:chunk_size],
http_client=swagger_mock.swagger_spec.http_client,
url="https://ui.neptune.ai/attributes/storage/file/upload/part",
headers={"X-Range": f"bytes=0-{chunk_size - 1}/{len(data)}"},
query_params={
"uploadPartIdx": 0,
"uploadId": upload_id,
"experimentIdentifier": str(exp_uuid),
"attribute": "target/path.txt",
},
),
call(
data=data[chunk_size:],
http_client=swagger_mock.swagger_spec.http_client,
url="https://ui.neptune.ai/attributes/storage/file/upload/part",
headers={
"X-Range": f"bytes={chunk_size}-{len(data) - 1}/{len(data)}"
},
query_params={
"uploadPartIdx": 1,
"uploadId": upload_id,
"experimentIdentifier": str(exp_uuid),
"attribute": "target/path.txt",
},
),
]
)
@unittest.skipIf(IS_WINDOWS, "Windows behaves strangely")
@patch("neptune.new.internal.backends.hosted_file_operations.upload_raw_data")
@patch(
"neptune.new.internal.utils.glob",
new=lambda path, recursive=False: [path.replace("*", "file.txt")],
)
def test_upload_single_small_file_in_file_set_attribute(self, upload_raw_data):
# given
exp_uuid = uuid.uuid4()
swagger_mock = self._get_swagger_mock()
upload_raw_data.return_value = json.dumps(
{
"errors": [],
}
)
data = b"testdata"
# when
with create_file(content=data, binary_mode=True) as filename:
upload_file_set_attribute(
swagger_client=swagger_mock,
container_id=str(exp_uuid),
attribute="some/attribute",
file_globs=[filename],
reset=True,
multipart_config=self.multipart_config,
)
# then
swagger_mock.api.fileSetFileMultipartUploadStart.assert_not_called()
swagger_mock.api.fileSetFileMultipartUploadFinish.assert_not_called()
swagger_mock.api.fileSetFileMultipartUploadPart.assert_not_called()
swagger_mock.api.fileSetFileUpload.assert_not_called()
swagger_mock.api.fileAtomMultipartUploadStart.assert_not_called()
swagger_mock.api.fileAtomMultipartUploadFinish.assert_not_called()
swagger_mock.api.fileAtomMultipartUploadPart.assert_not_called()
swagger_mock.api.fileAtomUpload.assert_not_called()
upload_raw_data.assert_called_once_with(
data=data,
http_client=swagger_mock.swagger_spec.http_client,
url="https://ui.neptune.ai/attributes/storage/fileset/upload",
query_params={
"subPath": os.path.basename(filename),
"experimentIdentifier": str(exp_uuid),
"attribute": "some/attribute",
},
)
@unittest.skipIf(IS_WINDOWS, "Windows behaves strangely")
@patch("neptune.new.internal.backends.hosted_file_operations.upload_raw_data")
@patch(
"neptune.new.internal.utils.glob",
new=lambda path, recursive=False: [path.replace("*", "file.txt")],
)
def test_upload_single_big_file_in_file_set_attribute(self, upload_raw_data):
# given
exp_uuid = uuid.uuid4()
swagger_mock = self._get_swagger_mock()
upload_id = "placeholder"
set_expected_result(
swagger_mock.api.fileSetFileMultipartUploadStart,
{
"uploadId": upload_id,
"errors": [],
},
)
upload_raw_data.return_value = json.dumps(
{
"errors": [],
}
)
data = self.get_random_bytes(8 * 2 ** 20) # 8 MB
chunk_size = self.multipart_config.min_chunk_size
# when
with create_file(content=data, binary_mode=True) as filename:
upload_file_set_attribute(
swagger_client=swagger_mock,
container_id=str(exp_uuid),
attribute="some/attribute",
file_globs=[filename],
reset=True,
multipart_config=self.multipart_config,
)
# then
swagger_mock.api.fileSetFileMultipartUploadPart.assert_not_called()
swagger_mock.api.fileSetFileUpload.assert_not_called()
swagger_mock.api.fileAtomMultipartUploadStart.assert_not_called()
swagger_mock.api.fileAtomMultipartUploadFinish.assert_not_called()
swagger_mock.api.fileAtomMultipartUploadPart.assert_not_called()
swagger_mock.api.fileAtomUpload.assert_not_called()
swagger_mock.api.fileSetFileMultipartUploadStart.assert_called_once_with(
attribute="some/attribute",
experimentIdentifier=str(exp_uuid),
totalLength=len(data),
subPath=os.path.basename(filename),
)
swagger_mock.api.fileSetFileMultipartUploadFinish.assert_called_once_with(
attribute="some/attribute",
experimentIdentifier=str(exp_uuid),
subPath=os.path.basename(filename),
uploadId=upload_id,
)
upload_raw_data.assert_has_calls(
[
call(
data=data[:chunk_size],
http_client=swagger_mock.swagger_spec.http_client,
url="https://ui.neptune.ai/attributes/storage/fileset/upload/part",
headers={"X-Range": f"bytes=0-{chunk_size - 1}/{len(data)}"},
query_params={
"uploadPartIdx": 0,
"uploadId": upload_id,
"subPath": os.path.basename(filename),
"experimentIdentifier": str(exp_uuid),
"attribute": "some/attribute",
},
),
call(
data=data[chunk_size:],
http_client=swagger_mock.swagger_spec.http_client,
url="https://ui.neptune.ai/attributes/storage/fileset/upload/part",
headers={
"X-Range": f"bytes={chunk_size}-{len(data) - 1}/{len(data)}"
},
query_params={
"uploadPartIdx": 1,
"uploadId": upload_id,
"subPath": os.path.basename(filename),
"experimentIdentifier": str(exp_uuid),
"attribute": "some/attribute",
},
),
]
)
@unittest.skipIf(IS_WINDOWS, "Windows behaves strangely")
@patch("neptune.new.internal.backends.hosted_file_operations.upload_raw_data")
@patch(
"neptune.new.internal.utils.glob",
new=lambda path, recursive=False: [path.replace("*", "file.txt")],
)
def test_upload_multiple_files_in_file_set_attribute(self, upload_raw_data_mock):
# given
exp_uuid = str(uuid.uuid4())
swagger_mock = self._get_swagger_mock()
upload_raw_data_mock.return_value = b"null"
swagger_mock.api.getUploadConfig.return_value.response.return_value.result.chunkSize = (
10
)
# when
with NamedTemporaryFile("w") as temp_file_1:
with NamedTemporaryFile("w") as temp_file_2:
upload_file_set_attribute(
swagger_client=swagger_mock,
container_id=exp_uuid,
attribute="some/attribute",
file_globs=[temp_file_1.name, temp_file_2.name],
reset=True,
multipart_config=self.multipart_config,
)
# then
swagger_mock.api.fileSetFileMultipartUploadStart.assert_not_called()
swagger_mock.api.fileSetFileMultipartUploadFinish.assert_not_called()
swagger_mock.api.fileSetFileMultipartUploadPart.assert_not_called()
swagger_mock.api.fileSetFileUpload.assert_not_called()
swagger_mock.api.fileAtomMultipartUploadStart.assert_not_called()
swagger_mock.api.fileAtomMultipartUploadFinish.assert_not_called()
swagger_mock.api.fileAtomMultipartUploadPart.assert_not_called()
swagger_mock.api.fileAtomUpload.assert_not_called()
upload_raw_data_mock.assert_called_once_with(
http_client=swagger_mock.swagger_spec.http_client,
url="https://ui.neptune.ai/uploadFileSetTar",
data=mock.ANY,
headers={"Content-Type": "application/octet-stream"},
query_params={
"experimentId": str(exp_uuid),
"attribute": "some/attribute",
"reset": "True",
},
)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jpauwels/pywebaudioplayer",
"score": 2
} |
#### File: pywebaudioplayer/pywebaudioplayer/notebook.py
```python
from .core import Image, _id, _js, _figure_margins, _write_samples, waveform_playlist
def wavesurfer(audio_path=None, controls={}, display={}, behaviour={}, samples=None):
# Set defaults
if 'text_controls' not in controls:
controls['text_controls'] = True
if 'backward_button' not in controls:
controls['backward_button'] = True
if 'forward_button' not in controls:
controls['forward_button'] = True
if 'mute_button' not in controls:
controls['mute_button'] = True
if 'height' not in display:
display['height'] = 128
if 'cursor_colour' not in display:
display['cursor_colour'] = '#333'
if 'played_wave_colour' not in display:
display['played_wave_colour'] = '#555'
if 'unplayed_wave_colour' not in display:
display['unplayed_wave_colour'] = '#999'
if 'bar_width' not in display:
display['bar_width'] = None
if 'normalize' not in behaviour:
behaviour['normalize'] = False
if 'mono' not in behaviour:
behaviour['mono'] = True
unique_id = _id()
if not audio_path and not samples:
raise ValueError('Provide either a path to an audio file or samples')
html_code = '''
<div id="waveform{}"></div>
<p align="center">
'''.format(unique_id)
if controls['backward_button']:
html_code += '''
<button class="btn btn-primary" onclick="wavesurfer{}.skipBackward()">
<i class="fa fa-backward"></i>
{}
</button>'''.format(unique_id, 'Backward' if controls['text_controls'] else '')
html_code += '''
<button class="btn btn-success" onclick="wavesurfer{}.playPause()">
<i class="fa fa-play"></i>
{} /
<i class="fa fa-pause"></i>
{}
</button>'''.format(unique_id, 'Play' if controls['text_controls'] else '', 'Pause' if controls['text_controls'] else '')
if controls['forward_button']:
html_code += '''
<button class="btn btn-primary" onclick="wavesurfer{}.skipForward()">
<i class="fa fa-forward"></i>
{}
</button>'''.format(unique_id, 'Forward' if controls['text_controls'] else '')
if controls['mute_button']:
html_code += '''
<button class="btn btn-danger" onclick="wavesurfer{}.toggleMute()">
<i class="fa fa-volume-off"></i>
{}
</button>'''.format(unique_id, 'Toggle Mute' if controls['text_controls'] else '')
html_code += '\n </p>'
html_code += '''
<script type="text/javascript">
requirejs.config({{paths: {{wavesurfer: "//cdnjs.cloudflare.com/ajax/libs/wavesurfer.js/1.4.0/wavesurfer.min"}}}});
requirejs(["wavesurfer"], function(WaveSurfer) {{
wavesurfer{id} = WaveSurfer.create({{
container: '#waveform{id}',
cursorColor: "{cursor}",
progressColor: "{progress}",
waveColor: "{wave}",
splitChannels: {split},
height: {height},
normalize: {norm}{bar_width}
}});
wavesurfer{id}.load("{path}");
}});
</script>'''.format(
id=unique_id, path=audio_path, cursor=display['cursor_colour'], progress=display['played_wave_colour'], wave=display['unplayed_wave_colour'], split=_js(not behaviour['mono']), height=display['height'], norm=_js(behaviour['normalize']),
bar_width=', barWidth: {}'.format(display['bar_width']) if display['bar_width'] else '')
return html_code
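# A minimal usage sketch for wavesurfer(); the audio path and option values below are
# hypothetical examples, not taken from this package. Run inside a Jupyter notebook:
#   from IPython.display import HTML
#   HTML(wavesurfer('example.wav', display={'height': 96}, behaviour={'normalize': True}))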
def trackswitch(tracks, text='', seekable_image=None, seek_margin=None, mute=True, solo=True, globalsolo=True, repeat=False, radiosolo=False, onlyradiosolo=False, spacebar=False, tabview=False):
unique_id = _id()
if isinstance(seekable_image, str):
seekable_image_path = seekable_image
with Image.open(seekable_image) as image_file:
image_width = image_file.size[0]
elif seekable_image:
fig, seekable_image_path = seekable_image
image_width = fig.get_size_inches()[0] * fig.get_dpi()
seek_margin = _figure_margins(fig.gca())
fig.savefig(seekable_image_path, dpi='figure')
html_code = '''
<link rel="stylesheet" href="//audiolabs.github.io/trackswitch.js/css/trackswitch.min.css" />
<div class="player{}"{}>'''.format(unique_id, ' style="width:{}px"'.format(image_width) if seekable_image else '')
if text:
html_code += '''
<p>
{}
</p>'''.format(text)
if seekable_image:
html_code += '''
<img src="{}#{}" data-style="width: {}px;" class="seekable"{}/>'''.format(seekable_image_path, unique_id, image_width, ' data-seek-margin-left="{}" data-seek-margin-right="{}"'.format(*seek_margin) if seek_margin else '')
for track in tracks:
html_code += '''
<ts-track{}{}>
<ts-source src='''.format(' title="{}"'.format(track['title']) if 'title' in track else '',
' data-img="{}"'.format(track['image']) if 'image' in track else '')
if 'samples' in track:
path_or_bytestring = _write_samples(track)
html_code += '"{}" type="audio/wav"'.format(path_or_bytestring)
elif 'path' in track:
html_code += '"{}"{}'.format(track['path'], ' type="{}"'.format(track['mimetype']) if 'mimetype' in track else '')
else:
raise ValueError('Provide either a path to an audio file or raw samples')
html_code += '''></ts-source>
</ts-track>'''
html_code += '''
</div>
<script type="text/javascript">
requirejs.config({{
"paths": {{
"trackswitch": "//audiolabs.github.io/trackswitch.js/js/trackswitch.min",
}},
"shim": {{
"trackswitch": ["jquery"],
}}
}});
requirejs(["jquery", "trackswitch"], function(jQuery) {{
jQuery(document).ready(function() {{
jQuery('.player{}').trackSwitch({{mute: {}, solo: {}, globalsolo: {}, repeat: {}, radiosolo: {}, onlyradiosolo: {}, spacebar: {}, tabview: {}}});
}});
}});
</script>
'''.format(unique_id, _js(mute), _js(solo), _js(globalsolo), _js(repeat), _js(radiosolo), _js(onlyradiosolo), _js(spacebar), _js(tabview))
return html_code
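# A minimal usage sketch for trackswitch(); the track titles and file names are assumed
# examples only. Run inside a Jupyter notebook:
#   from IPython.display import HTML
#   HTML(trackswitch([{'title': 'Drums', 'path': 'drums.mp3'},
#                     {'title': 'Bass', 'path': 'bass.mp3'}], text='Toggle the stems.'))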
``` |
{
"source": "jpavelka/cfb-transitivity",
"score": 3
} |
#### File: cfb-transitivity/app/transitivity_rankings.py
```python
import pandas
import numpy
import networkx
class TransRank:
def __init__(self, winners, losers, distances=None):
win_graph = networkx.DiGraph()
win_graph.add_edges_from(zip(winners, losers))
loss_graph = networkx.DiGraph()
loss_graph.add_edges_from(zip(losers, winners))
self.teams = sorted(list(win_graph.nodes()))
win_paths = self.get_paths_from_graph(win_graph)
loss_paths = self.get_paths_from_graph(loss_graph, reverse=True)
        df_cols = ['trans_wins', 'avg_win_len', 'win_score', 'win_rank', 'trans_losses', 'avg_loss_len', 'loss_score', 'loss_rank',
                   'avg_rank', 'comb_rank']
rank_df = pandas.DataFrame(index=self.teams, columns=df_cols)
rank_df['trans_wins'] = [len(win_paths[t]) for t in self.teams]
rank_df['trans_losses'] = [len(loss_paths[t]) for t in self.teams]
rank_df['avg_win_len'] = [len(self.teams) if len(win_paths[t]) == 0
else numpy.mean([len(win_paths[t][p]) - 1 for p in win_paths[t]]) for t in self.teams]
rank_df['avg_loss_len'] = [0 if len(loss_paths[t]) == 0 else numpy.mean([len(loss_paths[t][p]) - 1
for p in loss_paths[t]]) for t in self.teams]
rank_df['win_score'] = rank_df['trans_wins'] - rank_df['avg_win_len'] / (max(rank_df['avg_win_len']) + 1)
rank_df['loss_score'] = -(rank_df['trans_losses'] - rank_df['avg_loss_len'] / (max(rank_df['avg_loss_len']) + 1))
rank_df['win_rank'] = [sum(rank_df['win_score'] > rank_df.loc[t, 'win_score']) + 1 for t in self.teams]
rank_df['loss_rank'] = [sum(rank_df['loss_score'] > rank_df.loc[t, 'loss_score']) + 1 for t in self.teams]
rank_df['avg_rank'] = rank_df[['win_rank', 'loss_rank']].mean(axis=1)
rank_df['comb_rank'] = [sum(rank_df['avg_rank'] < rank_df.loc[t, 'avg_rank']) + 1 for t in self.teams]
rank_df['wins'] = pandas.Series(dict(win_graph.out_degree()))
rank_df['losses'] = pandas.Series(dict(loss_graph.out_degree()))
rank_df = rank_df[['comb_rank', 'win_rank', 'loss_rank', 'avg_rank', 'wins', 'trans_wins', 'avg_win_len',
'losses', 'trans_losses', 'avg_loss_len']]
rank_df = rank_df.sort_values('comb_rank')
if distances is not None:
for e in win_graph.edges():
win_graph[e[0]][e[1]]['weight'] = distances[e[0], e[1]]
for e in loss_graph.edges():
loss_graph[e[0]][e[1]]['weight'] = distances[e[0], e[1]]
self.win_geo_paths = networkx.all_pairs_dijkstra_path(win_graph)
self.loss_geo_paths = networkx.all_pairs_dijkstra_path(loss_graph)
self.win_paths = win_paths
self.loss_paths = loss_paths
self.rank_df = rank_df
def get_paths_from_graph(self, graph, reverse=False):
paths = {x[0]: x[1] for x in networkx.all_pairs_shortest_path(graph)}
paths = {t: {p: paths[t][p] for p in paths[t] if p != t} for t in self.teams}
if reverse:
for t in paths:
paths[t] = {s: paths[t][s][::-1] for s in paths[t]}
return paths
def team_link(self, team):
return '/' + team.replace(' ', '_')
def all_team_urls(self):
return {t: self.team_link(t) for t in self.teams}
def get_html_table(self, images=None):
display_header_names = {'comb_rank': 'Rank', 'win_rank': 'Win Rank', 'loss_rank': 'Loss Rank',
'trans_wins': 'Trans Wins', 'trans_losses': 'Trans Losses', 'avg_win_len': 'Avg Trans Win',
'avg_loss_len': 'Avg Trans Loss', 'avg_rank': 'Avg Rank'}
html_df = self.rank_df.rename(columns=display_header_names)
html_df['Avg Trans Win'] = [self.style_float(x) for x in html_df['Avg Trans Win']]
html_df['Avg Trans Loss'] = [self.style_float(x) for x in html_df['Avg Trans Loss']]
if images is not None:
html_df.index = [f'<img src="{images[t]}" class="team-logo">  <a href="{self.team_link(t)}">{t}</a>'
for t in html_df.index]
html_df.index = [x + f" ({html_df['wins'][x]}-{html_df['losses'][x]})" for x in html_df.index]
html_df = html_df.drop(['wins', 'losses'], axis=1)
html_table = html_df.to_html(classes=['table-striped', 'table-bordered', 'full-width'], table_id='rank-table',
escape=False)
return html_table
def style_float(self, f):
return '%.2f' % f
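# A small self-contained demo of TransRank; the team names and results below are made-up
# examples, not data from any real season.
if __name__ == "__main__":
    demo_winners = ['Aardvarks', 'Badgers', 'Cougars']
    demo_losers = ['Badgers', 'Cougars', 'Aardvarks']
    demo = TransRank(demo_winners, demo_losers)
    print(demo.rank_df)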
``` |
{
"source": "jpavelw/sam-2017",
"score": 2
} |
#### File: SAM2017/deadline/models.py
```python
from django.db import models
# Create your models here.
class Deadline(models.Model):
type = models.CharField(max_length=30)
date = models.DateField()
def __str__(self):
return self.type
```
#### File: SAM2017/paper/forms.py
```python
from django import forms
from . import models
class SubmitPaperForm(forms.ModelForm):
file = forms.FileField(label="Select File", widget=forms.FileInput(attrs={'class': 'form-control'}))
class Meta:
model = models.Paper
fields = ['title', 'list_of_authors', 'preferred_contact_method', 'format', 'file']
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control'}),
'list_of_authors': forms.Textarea(attrs={'class': 'form-control resize-text-area-none', 'rows': 3}),
'preferred_contact_method': forms.Select(attrs={'class': 'form-control'}),
'format': forms.Select(attrs={'class': 'form-control'})
}
def clean_title(self):
return self.cleaned_data['title']
def clean_list_of_authors(self):
return self.cleaned_data['list_of_authors']
def clean_version_number(self):
return self.cleaned_data['version_number']
def clean_format(self):
return self.cleaned_data['format']
def clean_file(self):
if 'file' in self.cleaned_data:
file_name = str(self.cleaned_data['file']).lower()
file_parts = file_name.split(".")
if not file_parts[-1] in ['pdf', 'doc', 'docx']:
raise forms.ValidationError("Invalid file format.")
return self.cleaned_data['file']
class UpdatePaperForm(forms.ModelForm):
class Meta:
model = models.Paper
fields = ['title', 'list_of_authors', 'preferred_contact_method', 'format']
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control'}),
'list_of_authors': forms.Textarea(attrs={'class': 'form-control resize-text-area-none'}),
'preferred_contact_method': forms.Select(attrs={'class': 'form-control'}),
'format': forms.Select(attrs={'class': 'form-control'})
}
def clean_title(self):
return self.cleaned_data['title']
def clean_list_of_authors(self):
return self.cleaned_data['list_of_authors']
def clean_version_number(self):
return self.cleaned_data['version_number']
def clean_format(self):
return self.cleaned_data['format']
```
#### File: SAM2017/rate_paper_pcm/models.py
```python
from django.db import models
from paper.models import Paper
from registration.models import User
# Create your models here.
class Paper_PCM_Rate(models.Model):
pcm = models.ForeignKey(User)
paper = models.ForeignKey(Paper)
review = models.TextField(null=False)
decision = models.IntegerField() # 0 = rejected; 1 = accepted; 2 = accepted with modification
has_conflict = models.BooleanField(default=False)
def __str__(self):
return 'Reviewer: ' + str(self.pcm.email) + ' Paper name: ' + self.paper.title
``` |
{
"source": "jpavelw/tool_sharing",
"score": 2
} |
#### File: tool_sharing/manage_tools/forms.py
```python
import string
import random
from django import forms
from .models import Tool
from utils.utilities import is_empty
class ToolForm(forms.ModelForm):
class Meta:
model = Tool
fields = ['name', 'status', 'shared_from', 'category', 'description', 'picture']
widgets = {
'name': forms.TextInput(attrs={'class': 'form-control'}),
'status': forms.Select(attrs={'class': 'form-control'}),
'shared_from': forms.Select(attrs={'class': 'form-control'}),
'category': forms.Select(attrs={'class': 'form-control'}),
'description': forms.Textarea(attrs={'class': 'form-control resize-text-area-none'}),
'picture': forms.FileInput(attrs={'class': 'form-control'}),
}
'''def clean_id(self):
return self.cleaned_data["id"]'''
def clean_name(self):
if is_empty(self.cleaned_data["name"]):
raise forms.ValidationError("Invalid name.")
return self.cleaned_data["name"]
def clean_status(self):
return self.cleaned_data["status"]
def clean_description(self):
if is_empty(self.cleaned_data["description"]):
raise forms.ValidationError("Invalid description.")
return self.cleaned_data["description"]
def clean_is_shared_from_home(self):
return self.cleaned_data["is_shared_from_home"]
def clean_category(self):
return self.cleaned_data["category"]
def clean_picture(self):
if 'picture' in self.cleaned_data:
file_name = str(self.cleaned_data['picture']).lower()
file_parts = file_name.split(".")
if not file_parts[-1] in ['jpeg', 'png', 'bmp', 'gif', 'jpg']:
raise forms.ValidationError("Invalid image format.")
try:
if self.cleaned_data['picture'].size > 3*1024*1024:
raise forms.ValidationError("Image file too large (> 3MB).")
except AttributeError:
pass
return self.cleaned_data["picture"]
def set_owner(self, owner):
self.instance.owner = owner
def generate_code(self):
while True:
new_code = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
try:
Tool.objects.get(code=new_code)
            except Tool.DoesNotExist:
self.instance.code = new_code
break
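# A hedged usage sketch (hypothetical Django view code, not part of this module):
#   form = ToolForm(request.POST, request.FILES)
#   if form.is_valid():
#       form.set_owner(request.user)
#       form.generate_code()
#       form.save()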
```
#### File: tool_sharing/request/models.py
```python
from django.db import models
from manage_tools.models import Tool
from user.models import User
class Request(models.Model):
PENDING_APPROVAL = 'PA'
APPROVED = 'AP'
REJECTED = 'RE'
RETURNED = 'RT'
status_choices = (
(PENDING_APPROVAL, 'Pending Approval'),
(APPROVED, 'Approved'),
(REJECTED, 'Rejected'),
(RETURNED, 'Returned')
)
tool = models.ForeignKey(Tool, related_name='requests')
lender = models.ForeignKey(User, related_name='lender_requests')
borrower = models.ForeignKey(User, related_name='borrower_requests')
status = models.CharField(choices=status_choices, max_length=2, default="PA")
comment = models.CharField(max_length=150, default="")
date = models.DateTimeField(auto_now_add=True, auto_now=False)
returned_date = models.DateTimeField(null=True)
shared_from = models.CharField(choices=Tool.shared_choices, max_length=2)
zipcode = models.CharField(max_length=5)
updated = models.DateTimeField(auto_now_add=False, auto_now=True)
borrower_enabled = models.BooleanField(default=True)
lender_enabled = models.BooleanField(default=True)
may_leave_comment = models.BooleanField(default=False)
def __str__(self):
return self.status + " - " + str(self.tool.id) + " - " + str(self.lender.id) + " - " + str(self.borrower.id)
def get_status_choices(self):
return dict(self.status_choices).get(self.status)
class Notification(models.Model):
user = models.OneToOneField(User)
pending_sent = models.PositiveSmallIntegerField(default=0)
pending_received = models.PositiveSmallIntegerField(default=0)
timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __str__(self):
return self.user.last_name + ", " + self.user.first_name
def increment_sent(self):
self.pending_sent += 1
def increment_received(self):
self.pending_received += 1
```
#### File: tool_sharing/shared_zone/models.py
```python
from django.db import models
class SharedZone(models.Model):
name = models.CharField(max_length=50)
zipcode = models.CharField(max_length=5, unique=True)
address = models.CharField(max_length=100)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __str__(self):
return self.name
```
#### File: tool_sharing/shared_zone/tests.py
```python
from django.test import TestCase
from .models import SharedZone
# Create your tests here.
class ShareZoneTestCase(TestCase):
def setUp(self):
SharedZone.objects.create(zipcode="14623", name="Rebaz", description="hello world", address="214 alex road")
def test_ShareZone1(self):
test_ShareZone1 = SharedZone.objects.get(zipcode="14623")
self.assertEqual(test_ShareZone1.name, "Rebaz")
class ShareZone2TestCase(TestCase):
def setUp(self):
SharedZone.objects.create(zipcode="12345", name="Rebaz", description="hello world", address="214 alex road")
def test_ShareZone2(self):
test_ShareZone2 = SharedZone.objects.get(zipcode="12345")
self.assertNotEqual(test_ShareZone2.name, "wajdi")
```
#### File: tool_sharing/user/forms.py
```python
from django import forms
from .models import User
from utils.utilities import check_password, is_number, contains_number, is_empty, contains_space
class RegistrationForm(forms.ModelForm):
confirm_password = forms.CharField(label='Confirm password', max_length=30, widget=forms.PasswordInput(attrs={
'class': 'form-control'}))
class Meta:
model = User
        fields = ['first_name', 'middle_name', 'last_name', 'email', 'phone_number', 'password', 'confirm_password',
'address', 'state', 'city', 'zipcode']
widgets = {
'first_name': forms.TextInput(attrs={'class': 'form-control'}),
'middle_name': forms.TextInput(attrs={'class': 'form-control'}),
'last_name': forms.TextInput(attrs={'class': 'form-control'}),
'email': forms.TextInput(attrs={'class': 'form-control', 'placeholder': "<EMAIL>"}),
'phone_number': forms.TextInput(attrs={'class': 'form-control', 'placeholder': "Ex.: 2225559988"}),
'password': forms.PasswordInput(attrs={'class': 'form-control'}),
'address': forms.TextInput(attrs={'class': 'form-control'}),
'state': forms.Select(attrs={'class': 'form-control'}),
'city': forms.TextInput(attrs={'class': 'form-control'}),
'zipcode': forms.TextInput(attrs={'class': 'form-control', 'placeholder': "Ex.: 12345"}),
}
def clean_first_name(self):
if contains_number(self.cleaned_data['first_name']) or is_empty(self.cleaned_data["first_name"]):
raise forms.ValidationError("Invalid first name.")
return self.cleaned_data["first_name"]
def clean_middle_name(self):
if contains_number(self.cleaned_data['middle_name']) or (self.cleaned_data['middle_name'] != "" and is_empty(self.cleaned_data["middle_name"])):
raise forms.ValidationError("Invalid middle name.")
return self.cleaned_data["middle_name"]
def clean_last_name(self):
if contains_number(self.cleaned_data['last_name']) or is_empty(self.cleaned_data["last_name"]):
raise forms.ValidationError("Invalid last name.")
return self.cleaned_data["last_name"]
def clean_email(self):
return self.cleaned_data["email"]
def clean_phone_number(self):
if not is_number(self.cleaned_data["phone_number"]) or len(str(self.cleaned_data["phone_number"])) < 10:
raise forms.ValidationError("Invalid phone number.")
return self.cleaned_data["phone_number"]
def clean_password(self):
if len(str(self.cleaned_data["password"])) < 3 or contains_space(self.cleaned_data["password"]):
raise forms.ValidationError("Invalid password.")
return self.cleaned_data["password"]
def clean_confirm_password(self):
if not self.cleaned_data["password"] == self.cleaned_data["confirm_password"]:
raise forms.ValidationError("Passwords do not match.")
return self.cleaned_data["confirm_password"]
def clean_address(self):
if is_empty(self.cleaned_data["address"]):
raise forms.ValidationError("Invalid address.")
return self.cleaned_data["address"]
def clean_state(self):
return self.cleaned_data["state"]
def clean_city(self):
if contains_number(self.cleaned_data['city']) or is_empty(self.cleaned_data["city"]):
raise forms.ValidationError("Invalid city name.")
return self.cleaned_data["city"]
def clean_zipcode(self):
if not is_number(self.cleaned_data["zipcode"]) or not len(str(self.cleaned_data["zipcode"])) == 5:
raise forms.ValidationError("Invalid zip code.")
return self.cleaned_data["zipcode"]
class LogInForm(forms.Form):
email = forms.EmailField(label='', max_length=30, widget=forms.EmailInput(
attrs={'class': 'form-control', 'placeholder': "Email address", 'autofocus': "true"}))
password = forms.CharField(label='', max_length=30,
widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': "Password"}))
def clean_email(self):
if not User.objects.filter(email=self.cleaned_data["email"]).exists():
raise forms.ValidationError("User does not exist.")
return self.cleaned_data["email"]
def clean_password(self):
if "email" in self.cleaned_data:
if User.objects.filter(email=self.cleaned_data["email"], enabled=1).exists():
user = User.objects.get(email=self.cleaned_data["email"], enabled=1)
if not check_password(user.password, self.cleaned_data["password"]):
raise forms.ValidationError("Invalid password.")
return self.cleaned_data["password"]
class UpdateProfileForm(forms.ModelForm):
confirm_password = forms.CharField(label='Enter password to confirm changes', max_length=30,
widget=forms.PasswordInput(attrs={'class': 'form-control'}))
class Meta:
model = User
fields = ['first_name', 'middle_name', 'last_name', 'email', 'phone_number', 'address', 'state',
'city', 'zipcode', 'confirm_password']
widgets = {
'first_name': forms.TextInput(attrs={'class': 'form-control'}),
'middle_name': forms.TextInput(attrs={'class': 'form-control'}),
'last_name': forms.TextInput(attrs={'class': 'form-control'}),
'email': forms.TextInput(attrs={'class': 'form-control', 'placeholder': "<EMAIL>"}),
'phone_number': forms.TextInput(attrs={'class': 'form-control', 'placeholder': "Ex.: 2225559988"}),
'address': forms.TextInput(attrs={'class': 'form-control'}),
'state': forms.Select(attrs={'class': 'form-control'}),
'city': forms.TextInput(attrs={'class': 'form-control'}),
'zipcode': forms.TextInput(attrs={'class': 'form-control', 'placeholder': "Ex.: 12345"}),
}
def clean_first_name(self):
if contains_number(self.cleaned_data['first_name']) or is_empty(self.cleaned_data["first_name"]):
raise forms.ValidationError("Invalid first name.")
return self.cleaned_data["first_name"]
def clean_middle_name(self):
if contains_number(self.cleaned_data['middle_name']) or (self.cleaned_data['middle_name'] != "" and is_empty(self.cleaned_data["middle_name"])):
raise forms.ValidationError("Invalid middle name.")
return self.cleaned_data["middle_name"]
def clean_last_name(self):
if contains_number(self.cleaned_data['last_name']) or is_empty(self.cleaned_data["last_name"]):
raise forms.ValidationError("Invalid last name.")
return self.cleaned_data["last_name"]
def clean_email(self):
return self.cleaned_data["email"]
def clean_phone_number(self):
if not is_number(self.cleaned_data["phone_number"]) or len(str(self.cleaned_data["phone_number"])) < 10:
raise forms.ValidationError("Invalid phone number.")
return self.cleaned_data["phone_number"]
def clean_address(self):
if is_empty(self.cleaned_data["address"]):
raise forms.ValidationError("Invalid address.")
return self.cleaned_data["address"]
def clean_state(self):
return self.cleaned_data["state"]
def clean_city(self):
if contains_number(self.cleaned_data['city']):
raise forms.ValidationError("Invalid city name")
return self.cleaned_data["city"]
def clean_zipcode(self):
if not is_number(self.cleaned_data["zipcode"]) or not len(str(self.cleaned_data["zipcode"])) == 5:
raise forms.ValidationError("Invalid zip code.")
return self.cleaned_data["zipcode"]
def clean_confirm_password(self):
if self.instance:
if not check_password(self.instance.password, self.cleaned_data["confirm_password"]):
raise forms.ValidationError("Invalid password.")
return self.cleaned_data["confirm_password"]
class UpdatePasswordForm(forms.Form):
current_password = None
password = forms.CharField(label='', max_length=30, widget=forms.PasswordInput(
attrs={'class': 'form-control', 'placeholder': "Current password", 'autofocus': "true"}))
new_password = forms.CharField(label='', max_length=30, widget=forms.PasswordInput(
attrs={'class': 'form-control', 'placeholder': "New password"}))
confirm_password = forms.CharField(label='', max_length=30, widget=forms.PasswordInput(
attrs={'class': 'form-control', 'placeholder': "Confirm new password"}))
def clean_password(self):
if self.current_password is not None:
if not check_password(self.current_password, self.cleaned_data["password"]):
raise forms.ValidationError("Invalid password.")
else:
return self.cleaned_data["password"]
raise forms.ValidationError("Password not provided.")
def clean_new_password(self):
if len(str(self.cleaned_data["new_password"])) < 3 or contains_space(self.cleaned_data["new_password"]):
raise forms.ValidationError("Invalid password.")
return self.cleaned_data["new_password"]
def clean_confirm_password(self):
if "password" in self.cleaned_data:
if not (self.cleaned_data["confirm_password"] == self.cleaned_data["new_password"]):
raise forms.ValidationError("Passwords must match.")
else:
return self.cleaned_data["confirm_password"]
def set_current_password(self, current_password):
        self.current_password = current_password
class PickupArrangementForm(forms.Form):
DAYS = (('Monday', 'Monday'),
('Tuesday', 'Tuesday'),
('Wednesday', 'Wednesday'),
('Thursday', 'Thursday'),
('Friday', 'Friday'),
('Saturday', 'Saturday'),
('Sunday', 'Sunday'))
days = forms.MultipleChoiceField(choices=DAYS)
time_from = forms.CharField(max_length=25)
time_to = forms.CharField(max_length=25)
def clean(self):
time_from = self.cleaned_data.get('time_from')
time_to = self.cleaned_data.get('time_to')
if time_from is None or time_to is None:
raise forms.ValidationError("Please provide time information")
time_from = int(time_from.replace(":", ""))
time_to = int(time_to.replace(":", ""))
if time_from > time_to:
raise forms.ValidationError("Invalid time range")
        return self.cleaned_data
def week_days(self):
days = []
for day in self.DAYS:
days.append(day[0])
return days
class ForgotPasswordForm(forms.Form):
email = forms.EmailField(label='', max_length=30, widget=forms.EmailInput(
attrs={'class': 'form-control', 'placeholder': "Email address", 'autofocus': "true"}))
def clean_email(self):
if not User.objects.filter(email=self.cleaned_data["email"]).exists():
raise forms.ValidationError("User does not exist.")
return self.cleaned_data["email"]
``` |
{
"source": "jpavlav/cbtool",
"score": 2
} |
#### File: lib/api/api_service_client.py
```python
from sys import path
from xmlrpc.client import Server
import xmlrpc.client
import pwd
import sys
import re
import os
import traceback
path.append(re.compile(".*\/").search(os.path.realpath(__file__)).group(0) + "/../../")
path.append(re.compile(".*\/").search(os.path.realpath(__file__)).group(0) + "/../../../")
from lib.stores.stores_initial_setup import load_metricstore_adapter
from time import time, strftime, strptime, localtime
from datetime import datetime
import copy
import socket
import inspect
from threading import Lock
class APIException(Exception) :
def __init__(self, status, msg):
Exception.__init__(self)
self.msg = msg
self.status = str(status)
def __str__(self):
return self.msg
class APINoSuchMetricException(Exception) :
def __init__(self, status, msg):
Exception.__init__(self)
self.msg = msg
self.status = status
def __str__(self):
return self.msg
class APINoDataException(Exception) :
def __init__(self, status, msg):
Exception.__init__(self)
self.msg = msg
self.status = status
def __str__(self):
return self.msg
def makeTimestamp(supplied_epoch_time = False) :
'''
TBD
'''
if not supplied_epoch_time :
_now = datetime.utcnow()
else :
_now = datetime.utcfromtimestamp(supplied_epoch_time)
_date = _now.date()
result = ("%02d" % _date.month) + "/" + ("%02d" % _date.day) + "/" + ("%04d" % _date.year)
result += strftime(" %I:%M:%S %p",
strptime(str(_now.hour) + ":" + str(_now.minute) + ":" + \
str(_now.second), "%H:%M:%S"))
result += " UTC"
return result
class APIVM():
def __init__(self, name, info, app):
self.name = name
self.role = info["role"]
self.uuid = info["uuid"]
self.app_metrics = None
self.system_metrics = None
self.info = info
self.app = app
self.started = int(info["arrival"])
self.vcpus = info["vcpus"]
self.vmemory = info["vmemory"]
self.new = True
makeTimestamp()
def val(self, key, dict):
if dict is None :
raise APINoSuchMetricException(1, "No data available.")
if key in dict :
return dict[key]
else :
raise APINoSuchMetricException(1, "No such metric: " + key)
def app(self, key):
return float(self.val(key, self.app_metrics)["val"])
def system(self, key):
return float(self.val(key, self.system_metrics)["val"])
mutex = Lock()
class APIClient(Server):
def api_error_check(self, func):
'''
TBD
'''
def wrapped(*args, **kwargs):
try :
mutex.acquire()
resp = func(*args, **kwargs)
mutex.release()
except Exception as e :
mutex.release()
for line in traceback.format_exc().splitlines() :
print(line)
raise e
if int(resp["status"]) :
raise APIException(str(resp["status"]), resp["msg"])
if self.print_message :
print(resp["msg"])
return resp["result"]
return wrapped
def dashboard_conn_check(self, cloud_name, msattrs = None, username = None, experiment_id = None, check_for_vpn = False):
'''
TBD
'''
if not self.msattrs :
"""
Open a connection to the metric store
"""
self.msattrs = self.cldshow(cloud_name, "metricstore") if msattrs is None else msattrs
# We are opted-out of the VPN by default. But, when inside a virtual machine,
# we need to opt-in.
if check_for_vpn :
use_vpn_ip = str(self.cldshow(cloud_name, "vm_defaults")["use_vpn_ip"]).lower()
if use_vpn_ip == "true" :
self.msattrs['host'] = self.cldshow(cloud_name, "vpn")["server_bootstrap"]
self.msci = load_metricstore_adapter(self.msattrs)
self.username = self.cldshow(cloud_name, "time")["username"] if username is None else username
self.experiment_id = self.cldshow(cloud_name, "time")["experiment_id"] if experiment_id is None else experiment_id
def __init__ (self, service_url, print_message = False):
'''
This rewrites the xmlrpc function bindings to use a
decorator so that we can check the return status of API
functions before returning them back to the client
It allows the client object to directly inherit all
of the API calls exposed on the server side to the
client side without writing ANOTHER lookup table.
'''
_orig_Method = xmlrpc.client._Method
'''
XML-RPC doesn't support keyword arguments,
so we have to do it ourselves...
'''
class KeywordArgMethod(_orig_Method):
def __call__(self, *args, **kwargs):
args = list(args)
if kwargs:
args.append(("kwargs", kwargs))
return _orig_Method.__call__(self, *args)
xmlrpc.client._Method = KeywordArgMethod
Server.__init__(self, service_url)
setattr(self, "_ServerProxy__request", self.api_error_check(self._ServerProxy__request))
self.vms = {}
self.hosts = {}
self.msattrs = None
self.msci = None
self.username = None
self.print_message = print_message
self.last_refresh = datetime.now()
def check_for_new_vm(self, cloud_name, identifier):
'''
TBD
'''
info = self.vmshow(cloud_name, identifier)
print(identifier + " configured: (" + info["vcpus"] + ", " + info["vmemory"] + ")")
if "configured_size" in info :
print(" Eclipsed size: (" + info["vcpus_max"] + ", " + info["vmemory_max"] + ")")
if info["ai"] != "none" :
app = self.appshow(cloud_name, info["ai_name"])
else :
app = None
return APIVM(identifier, info, app)
def refresh_vms(self, cloud_name, force, state = "") :
'''
TBD
'''
try :
self.experiment_id = self.cldshow(cloud_name, "time")["experiment_id"]
if not force :
if not self.should_refresh(cloud_name, str(self.last_refresh)) :
#print "VM list unchanged (" + str(len(self.vms)) + " vms) ..."
return False
self.last_refresh = time()
old_vms = copy.copy(self.vms)
for obj in self.stateshow(cloud_name, state) :
if obj["type"] != "AI" :
continue
sibling_uuids = []
for vm in self.appshow(cloud_name, obj["name"])["vms"].split(",") :
uuid, role, name = vm.split("|")
if uuid not in self.vms :
                        self.vms[uuid] = self.check_for_new_vm(cloud_name, name)
sibling_uuids.append(uuid)
if uuid in old_vms :
del old_vms[uuid]
for me in sibling_uuids :
myself = self.vms[me]
myself.siblings = []
for sibling in sibling_uuids :
                        if sibling != me :
sib = self.vms[sibling]
myself.siblings.append(sib)
if sib.role.count("client") :
myself.client = sib
for uuid in old_vms :
del self.vms[uuid]
self.reset_refresh(cloud_name)
return True
except APIException as obj :
print("Check VM API Problem (" + str(obj.status) + "): " + obj.msg)
return False
except socket.error as obj :
print("API not available: " + str(obj))
return False
def get_performance_data(self, cloud_name, uuid, metric_class = "runtime", object_type = "VM", metric_type = "os", latest = False, samples = 0, expid = "auto") :
'''
TBD
'''
self.dashboard_conn_check(cloud_name)
if str(uuid).lower() == "all" :
uuid = None
if metric_class == "runtime" :
_object_type = metric_class + '_' + metric_type + '_' + object_type
else :
_object_type = metric_class + '_' + object_type
if latest :
_allmatches = True
_collection_name = "latest_" + _object_type + "_" + self.username
_limitdocuments = 0
else :
if samples != 0 :
_allmatches = False
_limitdocuments = samples
else :
_allmatches = True
_limitdocuments = 0
_collection_name = _object_type + "_" + self.username
_criteria = {}
if expid != "auto" :
_criteria["expid"] = expid
if uuid :
_criteria["uuid"] = uuid
metrics = self.msci.find_document(_collection_name, \
_criteria, \
limitdocuments = _limitdocuments, \
allmatches = _allmatches)
if isinstance(metrics, dict) :
_metrics = []
_metrics.append(metrics)
metrics = _metrics
# if uuid and metrics :
# if metrics :
# if "count" in dir(metrics) :
# _samples = metrics.count()
#
# if _samples == 0 :
# metrics = None
# if _samples == 1 :
# _metrics = []
# _metrics.append(metrics)
# metrics = _metrics
if metrics is None :
_msg = "No " + metric_class + ' ' + _object_type + '(' + str(metric_type) + ") data available."
# raise APINoSuchMetricException(1, _msg")
return metrics
def get_latest_app_data(self, cloud_name, uuid, expid = "auto") :
'''
TBD
'''
_metrics = self.get_performance_data(cloud_name, uuid, "runtime", "VM", "app", True, 0, expid)
if uuid in self.vms :
self.vms[uuid].app_metrics = _metrics
return _metrics
def get_latest_system_data(self, cloud_name, uuid, expid = "auto") :
'''
TBD
'''
_metrics = self.get_performance_data(cloud_name, uuid, "runtime", "VM", "os", True, 0, expid)
if uuid in self.vms :
self.vms[uuid].system_metrics = _metrics
return _metrics
def get_latest_management_data(self, cloud_name, uuid, expid = "auto") :
'''
TBD
'''
_metrics = self.get_performance_data(cloud_name, uuid, "management", "VM", "os", True, 0, expid)
return _metrics
def get_app_data(self, cloud_name, uuid, expid = "auto") :
'''
TBD
'''
_metrics = self.get_performance_data(cloud_name, uuid, "runtime", "VM", "app", False, 0, expid)
if uuid in self.vms :
self.vms[uuid].app_metrics = _metrics
return _metrics
def get_system_data(self, cloud_name, uuid, expid = "auto") :
'''
TBD
'''
_metrics = self.get_performance_data(cloud_name, uuid, "runtime", "VM", "os", False, 0, expid)
if uuid in self.vms :
self.vms[uuid].system_metrics = _metrics
return _metrics
def get_management_data(self, cloud_name, uuid, expid = "auto") :
'''
TBD
'''
_metrics = self.get_performance_data(cloud_name, uuid, "management", "VM", "os", False, 0, expid)
return _metrics
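# A hedged usage sketch: it assumes a CloudBench API service is already listening at the
# URL below and that a cloud named "MYCLOUD" is attached; both values are placeholders.
#   api = APIClient("http://172.16.1.222:7070")
#   api.refresh_vms("MYCLOUD", force = True)
#   for _uuid, _vm in api.vms.items() :
#       print(_vm.name, _vm.role, api.get_latest_system_data("MYCLOUD", _uuid))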
```
#### File: lib/auxiliary/thread_pool.py
```python
from queue import Queue
from threading import Thread
from time import sleep
import copy
class Worker(Thread):
"""Thread executing tasks from a given tasks queue"""
def __init__(self, tasks, pool):
Thread.__init__(self)
self.tasks = tasks
self.daemon = True
self.abort = False
self.aborted = False
        # assign the pool before starting the thread so run() never races the constructor
        self.pool = pool
        self.start()
def run(self):
while True:
func, args, kargs = self.tasks.get()
try:
#print ("THREAD STARTED: " + func.__name__ + ": " + str(args) + " " + str(kargs))
self.abort = False
self.aborted = False
self.pool.results.append(func(*args, **kargs))
self.aborted = True
except Exception as e:
#print ("THREAD FAILED: " + func.__name__ + ": " + str(args) + " " + str(kargs))
print(e)
finally :
#print ("THREAD FINISHED: " + func.__name__ + ": " + str(args) + " " + str(kargs))
self.tasks.task_done()
class ThreadPool:
"""Pool of threads consuming tasks from a queue"""
def __init__(self, num_threads):
self.results = []
self.workers = []
self.tasks = Queue(num_threads)
for _ in range(num_threads): self.workers.append(Worker(self.tasks, self))
def add_task(self, func, *args, **kargs):
"""Add a task to the queue"""
self.tasks.put((func, args, kargs))
def abort(self):
for worker in self.workers :
worker.abort = True
while True :
all_aborted = True
for worker in self.workers :
if not worker.aborted :
all_aborted = False
break
if all_aborted :
break
sleep(0.5)
def wait_completion(self):
"""Wait for completion of all the tasks in the queue"""
while self.tasks.unfinished_tasks > 0 :
sleep(0.5)
'''
Reset the results for the next time.
'''
result = copy.deepcopy(self.results)
self.results = []
return result
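# A minimal, self-contained demo of the pool; the worker function and task count are
# illustrative only.
if __name__ == "__main__":
    def _square(value):
        return value * value
    demo_pool = ThreadPool(4)
    for _n in range(8):
        demo_pool.add_task(_square, _n)
    print(sorted(demo_pool.wait_completion()))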
```
#### File: lib/auxiliary/value_generation.py
```python
from random import expovariate, uniform, gauss, gammavariate
from ..auxiliary.code_instrumentation import trace, cbdebug, cberr, cbwarn, cbinfo, cbcrit
class ValueGeneration :
'''
TBD
'''
@trace
def __init__ (self, pid) :
'''
TBD
'''
self.pid = pid
class ValueGenerationException(Exception):
'''
TBD
'''
def __init__(self, msg, status):
Exception.__init__(self)
self.msg = msg
self.status = status
def __str__(self):
return self.msg
@trace
def get_value(self, parameters, previous_value = False) :
'''
TBD
'''
try :
_status = 100
_fmsg = "Failure while trying to generate value."
if parameters.count('|') :
parameters = parameters.replace('|','I')
if parameters.count('I') :
_value = self.rand_dist_gen(parameters)
elif parameters.count('+') or parameters.count('-') or parameters.count('*') :
_value = self.monotonic_variation(previous_value, parameters)
elif parameters.count('d') or parameters.count('h') or parameters.count('m') or parameters.count('s') :
_value = self.time2seconds(parameters)
else :
_value = float(parameters)
_status = 0
except ValueError as msg :
_status = 10
_fmsg = str(msg)
finally :
if _status :
_msg = "Value generation failure: " + _fmsg
cberr(_msg)
raise self.ValueGenerationException(_msg, _status)
else :
_msg = "Value generation success."
cbdebug(_msg)
return _value
@trace
def rand_dist_gen(self, parameters) :
'''
TBD
'''
try :
_status = 100
_max_tries = 10000
_fmsg = "Failure while parsing the distribution parameters"
parameters = parameters.split('I')
if len(parameters) == 5 :
_distribution = str(parameters[0])
for _idx in range(1,5) :
if parameters[_idx] != "X" :
parameters[_idx] = float(parameters[_idx])
_mean = parameters[1]
_stdev = parameters[2]
_min = parameters[3]
_max = parameters[4]
else :
_msg = "Missing parameters for generator with a random "
_msg += "distribution. All 4 parameters (mean, standard deviation, min,"
_msg += "max) needs to be specified for every distribution (even if "
_msg += "some of those gets ignored later)."
raise self.ValueGenerationException (_msg, 27)
if _mean == 0.0 :
_mean = 1.0
elif _mean == "X" :
_mean = _max/2
else :
True
if _stdev == 0.0 :
_stdev = 1.0
elif _stdev == "X" :
_stdev = 1.0
else :
True
if _min >= _max :
_max = _min + 1
if _max <= _mean:
_mean = _max/2
_fmsg = "Failure while generating values according to the distribution"
_fmsg += " \"" + _distribution + "\" with parameters " + str(_mean)
_fmsg += " (mean) " + str(_stdev) + " (stdev) " + str(_min) + " (min)"
_fmsg += str(_max) + " (max)"
_distributions = {}
_distributions["exponential"] = "expovariate(1/_mean)"
_distributions["uniform"] = "uniform(_min, _max)"
_distributions["gamma"] = "gammavariate((_mean * _mean) / (_stdev * _stdev), (_stdev * _stdev) / _mean)"
_distributions["normal"] = "gauss(_mean, _stdev)"
if _distribution in _distributions :
_tries = 0
_value = _min - 1.0
while (_value < _min or _value > _max) and _tries < _max_tries :
_value = eval(_distributions[_distribution])
_tries += 1
_status = 0
else :
_fmsg = _distribution + " distribution generators are not supported. "
_fmsg += "Supported random distribution generators are: \n"
for _key in list(_distributions.keys()) :
_fmsg = _fmsg + _key + '\n'
_status = 30
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
if _status :
_msg = "Random distribution generation failure: " + _fmsg
cberr(_msg)
raise self.ValueGenerationException(_msg, _status)
else :
_msg = "The " + _distribution + " distribution generator "
_msg += "completed successfully."
cbdebug(_msg)
return _value
@trace
def monotonic_variation(self, previous_value, parameters) :
'''
Apply a '+', '-', '*' or '/' variation of the given step to previous_value (or to the initial value when no previous value exists).
'''
try :
_sum = False
_subtract = False
_multiply = False
_divide = False
if parameters.count('+') :
parameters = parameters.split('+')
_sum = True
elif parameters.count('-') :
parameters = parameters.split('-')
_subtract = True
elif parameters.count('*') :
parameters = parameters.split('*')
_multiply = True
elif parameters.count('/') :
parameters = parameters.split('/')
_divide = True
else :
_msg = "Missing parameters for generation of monotonic variation"
raise self.ValueGenerationException (_msg, 27)
if not previous_value :
previous_value = int(parameters[0])
_factor = int(parameters[1])
if _sum :
_value = int(previous_value) + int(_factor)
elif _subtract :
_value = int(previous_value) - int(_factor)
elif _multiply :
_value = int(previous_value) * int(_factor)
else :
_value = int(previous_value) / int(_factor)
_status = 0
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
if _status :
_msg = "Monotonic variation generation failure: " + _fmsg
cberr(_msg)
raise self.ValueGenerationException(_msg, _status)
else :
_msg = "Monotonic variation generation success."
cbdebug(_msg)
return _value
def value_suffix(self, value, in_kilobytes) :
'''
Convert a value with an optional K/M/G suffix into bytes (or kilobytes when in_kilobytes is True).
'''
_units = {}
_units['K'] = 1024
_units['M'] = 1024*1024
_units['G'] = 1024*1024*1024
if value[-1] in _units :
_value = int(value[:-1]) * _units[value[-1]]
if in_kilobytes :
_value = _value/1024
else :
_value = int(value)
return _value
@trace
def time2seconds(self, time_string) :
'''
Convert a time string of the form XdYhZmWs into a total number of seconds.
'''
try :
_status = 100
_total_time = 0
_rest = "Undefined"
time_string = time_string.strip()
if time_string.count('d') :
_days, _rest = time_string.split('d')
_total_time = _total_time + int(_days) * 86400
else :
_rest = time_string
if _rest.count('h') :
_hours, _rest = _rest.split('h')
_total_time = _total_time + int(_hours) * 3600
else :
_rest = time_string
if _rest.count('m') :
_minutes, _rest = _rest.split('m')
_total_time = _total_time + int(_minutes) * 60
else :
_rest = time_string
if _rest.count('s') :
_seconds, _rest = _rest.split('s')
_total_time = _total_time + int(_seconds)
_rest = time_string
if _rest == "Undefined" :
_fmsg = "Unable to identifiy time string. Please add the suffix"
_fmsg += "d (day), h (hour), m (minute) and s (seconds) to each"
_fmsg += "number (e.g., XdYhZmWs)"
else :
_status = 0
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
if _status :
_msg = "Time to seconds conversion failure: " + _fmsg
cberr(_msg)
raise self.ValueGenerationException(_msg, _status)
else :
_msg = "Time to seconds conversion success."
cbdebug(_msg)
return _total_time
```
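A short usage sketch for the `ValueGeneration` class above follows. It assumes the repository root is on `PYTHONPATH` (the module performs a relative import of `code_instrumentation`); the distribution example returns a different float on each call, while the remaining values are deterministic.
```python
# Usage sketch only; the pid string passed to the constructor is used for logging.
from lib.auxiliary.value_generation import ValueGeneration

vg = ValueGeneration("pid-example")

# Random distribution: "<dist>I<mean>I<stdev>I<min>I<max>" ('|' is also accepted and
# converted to 'I'; 'X' marks a field left to the generator's defaults).
print(vg.get_value("uniformIXIXI1I10"))   # a float drawn from U(1, 10)

# Monotonic variation: "<initial><op><step>"; previous_value seeds the sequence.
print(vg.get_value("100+10"))             # 110  (no previous value, start at 100)
print(vg.get_value("100+10", 150))        # 160  (150 + 10)

# Time string: d/h/m/s components are summed into seconds.
print(vg.get_value("1d2h30m15s"))         # 95415

# K/M/G suffix handling: bytes, or kilobytes when the second argument is True.
print(vg.value_suffix("2M", False))       # 2097152
```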
#### File: lib/clouds/osk_cloud_ops.py
```python
from time import time, sleep
from uuid import uuid5, UUID
from random import choice
from os import access, F_OK
from os.path import expanduser
import socket
import copy
import iso8601
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
from keystoneauth1.identity import v3
from keystoneauth1 import session
from novaclient import client as novac
from novaclient import exceptions as novaexceptions
from glanceclient import client as glancec
from lib.auxiliary.code_instrumentation import trace, cbdebug, cberr, cbwarn, cbinfo, cbcrit
from lib.auxiliary.data_ops import str2dic
from lib.remote.network_functions import hostname2ip
from .shared_functions import CldOpsException, CommonCloudFunctions
class OskCmds(CommonCloudFunctions) :
'''
Cloud operations adapter for OpenStack, built on the nova, glance, cinder and neutron python clients.
'''
@trace
def __init__ (self, pid, osci, expid = None) :
'''
TBD
'''
CommonCloudFunctions.__init__(self, pid, osci)
self.pid = pid
self.osci = osci
self.oskconncompute = {}
self.oskconnstorage = {}
self.oskconnnetwork = {}
self.oskconnimage = {}
self.expid = expid
self.ft_supported = False
self.lvirt_conn = {}
self.networks_attr_list = { "tenant_network_list":[] }
self.host_map = {}
self.api_error_counter = {}
self.additional_rc_contents = ''
self.connauth_pamap = {}
self.max_api_errors = 10
@trace
def get_description(self) :
'''
TBD
'''
return "OpenStack Cloud"
@trace
def connect(self, access_url, authentication_data, region, extra_parms = {}, diag = False, generate_rc = False, client_conn_id = None) :
'''
Authenticate against Keystone (v3) and build nova/glance (and optionally cinder/neutron) clients sharing a single session, cached per connection id.
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_dmsg = ''
_version = '2'
_auth = None
_credentials = None
_data_auth_parse = False
_nova_client = False
if not self.connauth_pamap :
self.connauth_pamap = self.parse_cloud_connection_file(access_url)
access_url, _endpoint_type, region = self.parse_connection_data(access_url, region, extra_parms)
_username, _password, _tenant, _project_name, _cacert, _verify, _user_domain_id, _project_domain_id = self.parse_authentication_data(authentication_data)
_data_auth_parse = True
_client_conn_id = "common"
if client_conn_id :
_client_conn_id = client_conn_id
if not _username :
_fmsg = _password
else :
access_url = access_url.replace('v2.0/','v3')
_auth = v3.Password(auth_url = access_url, \
username = _username, \
password = _password, \
project_name = _project_name, \
user_domain_id = _user_domain_id, \
project_domain_id = _project_domain_id)
_session = session.Session(auth = _auth, verify = _verify, cert = _cacert)
_msg = self.get_description() + " connection parameters: username=" + _username
_msg += ", password=<<PASSWORD>>, tenant=" + _tenant + ", "
_msg += "cacert=" + str(_cacert) + ", verify=" + str(_verify)
_msg += ", region_name=" + region + ", access_url=" + access_url
_msg += ", endpoint_type=" + str(_endpoint_type)
cbdebug(_msg, diag)
_fmsg = "About to attempt a connection to " + self.get_description()
if _client_conn_id not in self.oskconncompute :
self.oskconncompute[_client_conn_id] = novac.Client("2.1", session = _session)
self.oskconnimage[_client_conn_id] = glancec.Client("2", session = _session)
self.oskconncompute[_client_conn_id].flavors.list()
_nova_client = True
if "use_cinderclient" in extra_parms :
self.use_cinderclient = str(extra_parms["use_cinderclient"]).lower()
else :
self.use_cinderclient = "false"
_cinder_client = True
if self.use_cinderclient == "true" :
_cinder_client = False
if _client_conn_id not in self.oskconnstorage :
from cinderclient import client as cinderc
self.oskconnstorage[_client_conn_id] = cinderc.Client("2.1", session = _session)
self.oskconnstorage[_client_conn_id].volumes.list()
_cinder_client = True
self.use_neutronclient = "true"
_neutron_client = True
if self.use_neutronclient == "true" :
_neutron_client = False
if _client_conn_id not in self.oskconnnetwork :
from neutronclient.v2_0 import client as neutronc
self.oskconnnetwork[_client_conn_id] = neutronc.Client(session = _session)
self.oskconnnetwork[_client_conn_id].list_networks()
_neutron_client = True
else :
self.oskconnnetwork = False
_region = region
_msg = "Selected region is " + str(region)
cbdebug(_msg)
if generate_rc :
self.additional_rc_contents = "export OS_TENANT_NAME=" + _tenant + "\n"
self.additional_rc_contents += "export OS_USERNAME=" + _username + "\n"
self.additional_rc_contents += "export OS_PASSWORD=" + <PASSWORD> + "\n"
self.additional_rc_contents += "export OS_AUTH_URL=\"" + access_url + "\"\n"
self.additional_rc_contents += "export OS_NO_CACHE=1\n"
# self.additional_rc_contents += "export OS_INTERFACE=" + _endpoint_type.replace("URL",'') + "\n"
self.additional_rc_contents += "export OS_INTERFACE=admin\n"
if _cacert :
self.additional_rc_contents += "export OS_CACERT=" + _cacert + "\n"
self.additional_rc_contents += "export OS_REGION_NAME=" + region + "\n"
_status = 0
except novaexceptions.ClientException as obj :
_status = int(obj.code)
_fmsg = str(obj)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
if _status :
_msg = self.get_description() + " connection failure: " + _fmsg
cberr(_msg)
if _data_auth_parse :
if not _nova_client :
_dmsg = "Please attempt to execute the following : \"python -c \""
_dmsg += "from keystoneauth1.identity import v3; "
_dmsg += "from keystoneauth1 import session; "
_dmsg += "from novaclient import client as novac; "
_dmsg += "_auth = v3.Password(username = '" + str(_username)
_dmsg += "', password = '<PASSWORD>', project_name = '"
_dmsg += str(_tenant) + "', auth_url = '" + str(access_url)
_dmsg += "', user_domain_id = '" + str(_user_domain_id) + "', "
_dmsg += "project_domain_id = '" + str(_project_domain_id) + "'); "
_dmsg += "_session = session.Session(auth = _auth, verify = " + str(_verify) + ", cert = " + str(_cacert) + "); "
_dmsg += "ct = novac.Client(\"2.1\", session = _session); print ct.flavors.list()\"\""
elif not _cinder_client :
_dmsg = "Please attempt to execute the following : \"python -c \""
_dmsg += "from keystoneauth1.identity import v3; "
_dmsg += "from keystoneauth1 import session; "
_dmsg += "from cinderclient import client as cinderc; "
_dmsg += "_auth = v3.Password(username = '" + str(_username)
_dmsg += "', password = '<PASSWORD>', project_name = '"
_dmsg += str(_tenant) + "', auth_url = '" + str(access_url)
_dmsg += "', user_domain_id = '" + str(_user_domain_id) + "', "
_dmsg += "project_domain_id = '" + str(_project_domain_id) + "'); "
_dmsg += "_session = session.Session(auth = _auth, verify = " + str(_verify) + ", cert = " + str(_cacert) + "); "
_dmsg += "ct = cinderc.Client(\"2.1\", session = _session); print ct.volumes.list()\"\""
elif not _neutron_client :
_dmsg = "Please attempt to execute the following : \"python -c \""
_dmsg += "from keystoneauth1.identity import v3; "
_dmsg += "from keystoneauth1 import session; "
_dmsg += "from neutronclient.v2_0 import client as neutronc; "
_dmsg += "_auth = v3.Password(username = '" + str(_username)
_dmsg += "', password = '<PASSWORD>', project_name = '"
_dmsg += str(_tenant) + "', auth_url = '" + str(access_url)
_dmsg += "', user_domain_id = '" + str(_user_domain_id) + "', "
_dmsg += "project_domain_id = '" + str(_project_domain_id) + "'); "
_dmsg += "_session = session.Session(auth = _auth, verify = " + str(_verify) + ", cert = " + str(_cacert) + "); "
_dmsg += "ct = neutronc.Client(session = _session); print ct.list_networks()\"\""
print(_dmsg)
raise CldOpsException(_msg, _status)
else :
_msg = self.get_description() + " connection successful."
cbdebug(_msg)
return _status, _msg, _region
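# Recap of the authentication flow implemented by connect() above (explanatory
# comments only): a keystoneauth1 v3 Password plugin is built from the username,
# password and project parsed out of the credential string, wrapped into a single
# keystoneauth1 Session, and that Session is then shared by the novaclient ("2.1"),
# glanceclient ("2") and, when enabled, cinderclient and neutronclient instances,
# which are cached per connection id in the self.oskconn* dictionaries. The _dmsg
# diagnostics above reproduce the same sequence as standalone one-liners that can be
# used to test credentials outside this module.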
@trace
def test_vmc_connection(self, cloud_name, vmc_name, access, credentials, key_name, \
security_group_name, vm_templates, vm_defaults, vmc_defaults) :
'''
Connect to the VMC and verify that SSH keys, security group, floating IP pool, networks, images and jump host are all usable.
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
self.connect(access, credentials, vmc_name, vm_defaults, True, True, vmc_name)
self.generate_rc(cloud_name, vmc_defaults, self.additional_rc_contents)
_key_pair_found = self.check_ssh_key(vmc_name, self.determine_key_name(vm_defaults), vm_defaults, False, vmc_name)
_security_group_found = self.check_security_group(vmc_name, security_group_name)
_floating_pool_found = self.check_floating_pool(vmc_name, vm_defaults)
_prov_netname_found, _run_netname_found = self.check_networks(vmc_name, vm_defaults)
_detected_imageids = self.check_images(vmc_name, vm_defaults, vm_templates)
_check_jumphost = self.check_jumphost(vmc_name, vm_defaults, vm_templates, _detected_imageids)
if not (_run_netname_found and _prov_netname_found and \
_key_pair_found and _security_group_found and _check_jumphost) :
_msg = "Check the previous errors, fix it (using OpenStack's web"
_msg += " GUI (horizon) or nova CLI"
_status = 1178
raise CldOpsException(_msg, _status)
if len(_detected_imageids) :
_status = 0
else :
_status = 1
except CldOpsException as obj :
_fmsg = str(obj.msg)
_status = 2
except Exception as msg :
_fmsg = str(msg)
_status = 23
finally :
self.disconnect()
_status, _msg = self.common_messages("VMC", {"name" : vmc_name }, "connected", _status, _fmsg)
return _status, _msg
@trace
def check_networks(self, vmc_name, vm_defaults) :
'''
Verify that the default provisioning and running networks exist on the VMC and are not external networks.
'''
_prov_netname = vm_defaults["netname"]
_run_netname = vm_defaults["netname"]
_net_str = "network \"" + _prov_netname + "\""
_msg = "Checking if the " + _net_str + " can be found on VMC " + vmc_name + "..."
cbdebug(_msg, True)
self.get_network_list(vmc_name, vm_defaults)
_prov_netname_found = False
_run_netname_found = False
if _prov_netname in self.networks_attr_list :
_net_model = self.networks_attr_list[_prov_netname]["model"]
_net_type = self.networks_attr_list[_prov_netname]["type"]
if _net_model != "external" :
_prov_netname_found = True
if _net_type == _net_model :
_net_str = _net_type
else :
_net_str = _net_type + ' ' + _net_model
_msg = "done. This " + _net_str + " will be used as the default for provisioning."
cbdebug(_msg)
else:
_msg = "\nERROR! The default provisioning network ("
_msg += _prov_netname + ") cannot be an external network"
cberr(_msg, True)
if _run_netname in self.networks_attr_list :
_net_model = self.networks_attr_list[_run_netname]["model"]
_net_type = self.networks_attr_list[_run_netname]["type"]
if _net_model != "external" :
_run_netname_found = True
if _net_type == _net_model :
_net_str = _net_type
else :
_net_str = _net_type + ' ' + _net_model
_msg = "a " + _net_type + ' ' + _net_model + " will be used as the default for running."
cbdebug(_msg)
else:
_msg = "ERROR! The default running network ("
_msg += _run_netname + ") cannot be an external network"
cberr(_msg, True)
if not (_run_netname_found and _prov_netname_found) :
_msg = "ERROR! Please make sure that the " + _net_str + " can be found"
_msg += " VMC " + vmc_name
_fmsg = _msg
cberr(_msg, True)
return _prov_netname_found, _run_netname_found
@trace
def check_images(self, vmc_name, vm_defaults, vm_templates) :
'''
Map image names to glance image ids and verify that every image referenced by the VM templates is registered on the VMC.
'''
self.common_messages("IMG", { "name": vmc_name }, "checking", 0, '')
_map_name_to_id = {}
_map_id_to_name = {}
# _registered_image_list = self.oskconncompute[vmc_name].glance.list()
_registered_image_list = self.oskconnimage[vmc_name].images.list()
_registered_imageid_list = []
for _registered_image in _registered_image_list :
if "hypervisor_type" in vm_defaults :
if str(vm_defaults["hypervisor_type"]).lower() != "fake" :
if "hypervisor_type" in _registered_image._info :
if _registered_image._info["hypervisor_type"] == vm_defaults["hypervisor_type"] :
_registered_imageid_list.append(_registered_image.id)
_map_name_to_id[_registered_image.name] = _registered_image.id
else :
_registered_imageid_list.append(_registered_image.id)
_map_name_to_id[_registered_image.name] = _registered_image.id
else :
_registered_imageid_list.append(_registered_image.id)
_map_name_to_id[_registered_image.name] = _registered_image.id
for _vm_role in list(vm_templates.keys()) :
_imageid = str2dic(vm_templates[_vm_role])["imageid1"]
if _imageid != "to_replace" :
if _imageid in _map_name_to_id and _map_name_to_id[_imageid] != _imageid :
vm_templates[_vm_role] = vm_templates[_vm_role].replace(_imageid, _map_name_to_id[_imageid])
else :
_map_name_to_id[_imageid] = _imageid
vm_templates[_vm_role] = vm_templates[_vm_role].replace(_imageid, _map_name_to_id[_imageid])
_map_id_to_name[_map_name_to_id[_imageid]] = _imageid
_detected_imageids = self.base_check_images(vmc_name, vm_templates, _registered_imageid_list, _map_id_to_name, vm_defaults)
return _detected_imageids
@trace
def discover_hosts(self, obj_attr_list, start) :
'''
Enumerate the hypervisor hosts on this VMC and populate the host list attributes.
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
obj_attr_list["name"],
{},
False,
False,
obj_attr_list["name"])
obj_attr_list["hosts"] = ''
obj_attr_list["host_list"] = {}
self.build_host_map(obj_attr_list["name"])
_host_list = list(self.host_map.keys())
obj_attr_list["host_count"] = len(_host_list)
for _host in _host_list :
self.add_host(obj_attr_list, _host, start)
obj_attr_list["hosts"] = obj_attr_list["hosts"][:-1]
self.additional_host_discovery (obj_attr_list)
self.populate_interface(obj_attr_list)
_status = 0
except CldOpsException as obj :
_status = int(obj.status)
_fmsg = str(obj.msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
self.disconnect()
_status, _msg = self.common_messages("HOST", obj_attr_list, "discovered", _status, _fmsg)
return _status, _msg
@trace
def vmccleanup(self, obj_attr_list) :
'''
Delete leftover benchmark instances ("cb-<username>-<cloud_name>*"), their floating IPs and volumes from this VMC.
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
obj_attr_list["name"],
{"use_cinderclient" : str(obj_attr_list["use_cinderclient"])}, \
False, \
False, \
obj_attr_list["name"])
_curr_tries = 0
_max_tries = int(obj_attr_list["update_attempts"])
_wait = int(obj_attr_list["update_frequency"])
sleep(_wait)
self.common_messages("VMC", obj_attr_list, "cleaning up vms", 0, '')
_running_instances = True
while _running_instances and _curr_tries < _max_tries :
_running_instances = False
_criteria = {}
_criteria["all_tenants"] = int(obj_attr_list["all_tenants"])
_vmc_name = obj_attr_list["name"]
_instances = self.oskconncompute[_vmc_name].servers.list(search_opts = _criteria)
for _instance in _instances :
if _instance.name.count("cb-" + obj_attr_list["username"] + '-' + obj_attr_list["cloud_name"]) \
and not _instance.name.count("jumphost") :
_instance_metadata = _instance.metadata
if "cloud_floating_ip_uuid" in _instance_metadata :
_msg = " Deleting floating IP " + _instance_metadata["cloud_floating_ip_uuid"]
_msg += ", associated with instance "
_msg += _instance.id + " (" + _instance.name + ")"
cbdebug(_msg, True)
self.oskconnnetwork[_vmc_name].delete_floatingip(_instance_metadata["cloud_floating_ip_uuid"])
# self.oskconncompute.floating_ips.delete(_instance_metadata["cloud_floating_ip_uuid"])
_running_instances = True
if _instance.status == "ACTIVE" :
_msg = "Terminating instance: "
_msg += _instance.id + " (" + _instance.name + ")"
cbdebug(_msg, True)
_volume_attached = getattr(_instance, 'os-extended-volumes:volumes_attached')
self.retriable_instance_delete({}, _instance)
if _instance.status == "BUILD" :
_msg = "Will wait for instance "
_msg += _instance.id + "\""
_msg += " (" + _instance.name + ") to "
_msg += "start and then destroy it."
cbdebug(_msg, True)
sleep(_wait)
_curr_tries += 1
if _curr_tries > _max_tries :
_status = 1077
_fmsg = "Some instances on VMC \"" + obj_attr_list["name"] + "\""
_fmsg += " could not be removed because they never became active"
_fmsg += ". They will have to be removed manually."
cberr(_fmsg, True)
else :
_status = 0
if self.oskconnstorage and self.use_cinderclient == "true" :
self.common_messages("VMC", obj_attr_list, "cleaning up vvs", 0, '')
_volumes = self.oskconnstorage[_vmc_name].volumes.list()
for _volume in _volumes :
if "display_name" in dir(_volume) :
_volume_name = str(_volume.display_name)
else :
_volume_name = str(_volume.name)
if _volume_name.count("cb-" + obj_attr_list["username"] + '-' + obj_attr_list["cloud_name"]) :
_volume.delete()
except CldOpsException as obj :
_status = int(obj.status)
_fmsg = str(obj.msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
self.disconnect()
_status, _msg = self.common_messages("VMC", obj_attr_list, "cleaned up", _status, _fmsg)
return _status, _msg
@trace
def vmcregister(self, obj_attr_list) :
'''
Register the VMC: optionally clean it up, connect, resolve its address, optionally discover hosts and record the available networks.
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_time_mark_prs = int(time())
obj_attr_list["mgt_002_provisioning_request_sent"] = _time_mark_prs - int(obj_attr_list["mgt_001_provisioning_request_originated"])
if "cleanup_on_attach" in obj_attr_list and obj_attr_list["cleanup_on_attach"] == "True" :
_status, _fmsg = self.vmccleanup(obj_attr_list)
else :
_status = 0
if not _status :
_x, _y, _hostname = self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
obj_attr_list["name"],
obj_attr_list, \
False, \
True, \
obj_attr_list["name"])
obj_attr_list["cloud_hostname"] = _hostname
if "access_from_rc" in obj_attr_list :
_actual_access = obj_attr_list["access_from_rc"]
else :
_actual_access = obj_attr_list["access"]
_resolve = _actual_access.split(':')[1].replace('//','')
_resolve = _resolve.split('/')[0]
_resolve = _resolve.replace("_dash_","-")
_x, obj_attr_list["cloud_ip"] = hostname2ip(_resolve, True)
obj_attr_list["arrival"] = int(time())
if str(obj_attr_list["discover_hosts"]).lower() == "true" :
_status, _fmsg = self.discover_hosts(obj_attr_list, _time_mark_prs)
else :
obj_attr_list["hosts"] = ''
obj_attr_list["host_list"] = {}
obj_attr_list["host_count"] = "NA"
_status = 0
if not _status :
self.get_network_list(obj_attr_list["name"], obj_attr_list)
_networks = {}
for _net in list(self.networks_attr_list.keys()) :
if "type" in self.networks_attr_list[_net] :
_type = self.networks_attr_list[_net]["type"]
obj_attr_list["network_" + _net] = _type
_time_mark_prc = int(time())
obj_attr_list["mgt_003_provisioning_request_completed"] = _time_mark_prc - _time_mark_prs
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
self.disconnect()
_status, _msg = self.common_messages("VMC", obj_attr_list, "registered", _status, _fmsg)
return _status, _msg
@trace
def vmcunregister(self, obj_attr_list) :
'''
Unregister the VMC, optionally cleaning up leftover instances and volumes first.
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_time_mark_drs = int(time())
if "mgt_901_deprovisioning_request_originated" not in obj_attr_list :
obj_attr_list["mgt_901_deprovisioning_request_originated"] = _time_mark_drs
obj_attr_list["mgt_902_deprovisioning_request_sent"] = _time_mark_drs - int(obj_attr_list["mgt_901_deprovisioning_request_originated"])
if "cleanup_on_detach" in obj_attr_list and str(obj_attr_list["cleanup_on_detach"]).lower() == "true" :
_status, _fmsg = self.vmccleanup(obj_attr_list)
_time_mark_prc = int(time())
obj_attr_list["mgt_903_deprovisioning_request_completed"] = _time_mark_prc - _time_mark_drs
_status = 0
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
_status, _msg = self.common_messages("VMC", obj_attr_list, "unregistered", _status, _fmsg)
return _status, _msg
@trace
def vmcount(self, obj_attr_list):
'''
Count the ACTIVE benchmark instances (excluding jump hosts) across all registered VMCs.
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_nr_instances = 0
for _vmc_uuid in self.osci.get_object_list(obj_attr_list["cloud_name"], "VMC") :
_vmc_attr_list = self.osci.get_object(obj_attr_list["cloud_name"], \
"VMC", False, _vmc_uuid, \
False)
self.connect(obj_attr_list["access"], obj_attr_list["credentials"], \
_vmc_attr_list["name"], {}, False, False, _vmc_attr_list["name"])
_instances = self.oskconncompute[_vmc_attr_list["name"]].servers.list()
for _instance in _instances :
if _instance.name.count("cb-" + obj_attr_list["username"] + '-' + obj_attr_list["cloud_name"]) \
and not _instance.name.count("jumphost") :
if _instance.status == "ACTIVE" :
_nr_instances += 1
except Exception as e :
_status = 23
_nr_instances = "NA"
_fmsg = "(While counting instance(s) through API call \"list\") " + str(e)
finally :
return _nr_instances
@trace
def get_ssh_keys(self, vmc_name, key_name, key_contents, key_fingerprint, registered_key_pairs, internal, connection) :
'''
TBD
'''
for _key_pair in self.oskconncompute[vmc_name].keypairs.list() :
registered_key_pairs[_key_pair.name] = _key_pair.fingerprint + "-NA"
#self.oskconncompute.keypairs.delete(_key_pair)
return True
@trace
def get_security_groups(self, vmc_name, security_group_name, registered_security_groups) :
'''
TBD
'''
if vmc_name in self.oskconnnetwork :
for _security_group in self.oskconnnetwork[vmc_name].list_security_groups()["security_groups"] :
if _security_group["name"] not in registered_security_groups :
registered_security_groups.append(_security_group["name"])
else :
for _security_group in self.oskconncompute[vmc_name].security_groups.list() :
registered_security_groups.append(_security_group.name)
return True
@trace
def get_ip_address(self, obj_attr_list, instance) :
'''
Store the instance's run (fixed) and provisioning (fixed or floating) IP addresses in obj_attr_list; return False if no address is available yet.
'''
_networks = list(instance.addresses.keys())
if len(_networks) :
if _networks.count(obj_attr_list["run_netname"]) :
_msg = "Network \"" + obj_attr_list["run_netname"] + "\" found."
cbdebug(_msg)
_run_network = _networks[_networks.index(obj_attr_list["run_netname"])]
else :
_msg = "Network \"" + obj_attr_list["run_netname"] + "\" found."
_msg += "Using the first network (\"" + _networks[0] + "\") instead)."
cbdebug(_msg)
_run_network = _networks[0]
_address_list = instance.addresses[_run_network]
if len(_address_list) :
for _address in _address_list :
if _address["OS-EXT-IPS:type"] == "fixed" :
obj_attr_list["run_cloud_ip"] = '{0}'.format(_address["addr"])
# NOTE: "cloud_ip" is always equal to "run_cloud_ip"
if "run_cloud_ip" in obj_attr_list :
obj_attr_list["cloud_ip"] = obj_attr_list["run_cloud_ip"]
else :
return False
if obj_attr_list["hostname_key"] == "cloud_vm_name" :
obj_attr_list["cloud_hostname"] = obj_attr_list["cloud_vm_name"]
elif obj_attr_list["hostname_key"] == "cloud_ip" :
obj_attr_list["cloud_hostname"] = obj_attr_list["cloud_ip"].replace('.','-')
if str(obj_attr_list["use_floating_ip"]).lower() == "true" :
for _provnet in _networks :
_address_list = instance.addresses[_provnet]
if len(_address_list) :
for _address in _address_list :
if _address["OS-EXT-IPS:type"] == "floating" :
obj_attr_list["prov_cloud_ip"] = '{0}'.format(_address["addr"])
return True
else :
if obj_attr_list["prov_netname"] == obj_attr_list["run_netname"] :
obj_attr_list["prov_cloud_ip"] = obj_attr_list["run_cloud_ip"]
return True
else :
if _networks.count(obj_attr_list["prov_netname"]) :
_msg = "Network \"" + obj_attr_list["prov_netname"] + "\" found."
cbdebug(_msg)
_prov_network = _networks[_networks.index(obj_attr_list["prov_netname"])]
else :
_msg = "Network \"" + obj_attr_list["prov_netname"] + "\" found."
_msg += "Using the first network (\"" + _networks[0] + "\") instead)."
cbdebug(_msg)
_prov_network = _networks[0]
_address_list = instance.addresses[_prov_network]
if len(_address_list) :
for _address in _address_list :
if _address["OS-EXT-IPS:type"] == "fixed" :
obj_attr_list["prov_cloud_ip"] = '{0}'.format(_address["addr"])
return True
else :
_status = 1181
_msg = "IP address list for network " + str(_run_network) + " is empty."
cberr(_msg)
raise CldOpsException(_msg, _status)
else :
return False
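# Address selection performed by get_ip_address() above (explanatory comments only):
# the "run" address is the fixed IP on run_netname (falling back to the first network
# attached to the instance), cloud_ip is kept equal to run_cloud_ip, and the "prov"
# address is the floating IP when use_floating_ip is true, otherwise the fixed IP on
# prov_netname.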
@trace
def get_instances(self, obj_attr_list, obj_type = "vm", identifier = "all", force_list = False) :
'''
Return instance ("vm") or volume ("vv") objects: all of them, or the one matching the identifier and this experiment id.
'''
try :
_search_opts = {}
_call = "NA"
_search_opts["all_tenants"] = int(obj_attr_list["all_tenants"])
if identifier != "all" :
if obj_type == "vm" :
_search_opts["name"] = identifier
else :
_search_opts["display_name"] = identifier
self.connect(obj_attr_list["access"], obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], obj_attr_list, False, False, identifier)
if obj_type == "vm" :
if "cloud_vm_uuid" in obj_attr_list and len(obj_attr_list["cloud_vm_uuid"]) >= 36 and not force_list :
_call = "get"
_instances = [ self.oskconncompute[identifier].servers.get(obj_attr_list["cloud_vm_uuid"]) ]
else :
_call = "list"
_instances = self.oskconncompute[identifier].servers.list(search_opts = _search_opts)
else :
if "cloud_vv_uuid" in obj_attr_list and len(obj_attr_list["cloud_vv_uuid"]) >= 36 :
_call = "get"
_instances = [ self.oskconnstorage[identifier].volumes.get(obj_attr_list["cloud_vv_uuid"]) ]
else :
_call = "list"
_instances = self.oskconnstorage[identifier].volumes.list(search_opts = _search_opts)
if len(_instances) > 0 :
if identifier == "all" :
return _instances
else :
if obj_type == "vv" :
return _instances[0]
for _instance in _instances :
if str(obj_attr_list["is_jumphost"]).lower() == "true" :
return _instance
else :
_metadata = _instance.metadata
if "experiment_id" in _metadata :
if _metadata["experiment_id"] == self.expid :
return _instance
return False
else :
return False
except Exception as e :
_status = 23
_fmsg = "(While getting instance(s) through API call \"" + _call + "\") " + str(e)
if identifier not in self.api_error_counter :
self.api_error_counter[identifier] = 0
self.api_error_counter[identifier] += 1
if self.api_error_counter[identifier] > self.max_api_errors :
raise CldOpsException(_fmsg, _status)
else :
cbwarn(_fmsg)
return False
@trace
def get_images(self, obj_attr_list) :
'''
Resolve imageid1 to a glance image (optionally filtered by hypervisor type) and record its id and object in obj_attr_list.
'''
try :
_status = 100
_hyper = ''
_fmsg = "An error has occurred, but no error message was captured"
# _image_list = self.oskconncompute.glance.list()
_fmsg = "Please check if the defined image name is present on this "
_fmsg += self.get_description()
_imageid = False
_candidate_images = []
# for _idx in range(0,len(_image_list)) :
# if self.is_cloud_image_uuid(obj_attr_list["imageid1"]) :
# if _image_list[_idx].id == obj_attr_list["imageid1"] :
# _candidate_images.append(_image_list[_idx])
# else :
# if _image_list[_idx].name.count(obj_attr_list["imageid1"]) :
# _candidate_images.append(_image_list[_idx])
_vmc_name = obj_attr_list["name"]
_candidate_images = [ self.oskconncompute[_vmc_name].glance.find_image(obj_attr_list["imageid1"]) ]
if "hypervisor_type" in obj_attr_list :
if str(obj_attr_list["hypervisor_type"]).lower() != "fake" :
_hyper = obj_attr_list["hypervisor_type"]
for _image in list(_candidate_images) :
if "hypervisor_type" in _image._info :
if _image._info["hypervisor_type"] != obj_attr_list["hypervisor_type"] :
_candidate_images.remove(_image)
else :
_hyper = _image._info["hypervisor_type"]
else :
obj_attr_list["hypervisor_type"] = ''
if len(_hyper) :
obj_attr_list["hypervisor_type"] = _hyper
if len(_candidate_images) :
if str(obj_attr_list["randomize_image_name"]).lower() == "true" :
_imageid = choice(_candidate_images)
else :
_imageid = _candidate_images[0]
if _imageid :
obj_attr_list["boot_volume_imageid1"] = _imageid.id
obj_attr_list["imageid1"] = _imageid.name
obj_attr_list["boot_volume_imageid1_instance"] = _imageid
_status = 0
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
if _status :
_msg = "Image Name (" + obj_attr_list["imageid1"] + ' ' + _hyper + ") not found: " + _fmsg
cberr(_msg)
raise CldOpsException(_msg, _status)
else :
return True
@trace
def get_networks(self, obj_attr_list) :
'''
Resolve the provisioning and running network names into neutron network uuids formatted for the "nics" argument of servers.create().
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_netids = []
_netnames = []
_netlist = obj_attr_list["prov_netname"].split(',') + obj_attr_list["run_netname"].split(',')
for _netname in _netlist :
if "HA network tenant" in _netname :
continue
if not _netname in self.networks_attr_list :
_status = 168
_fmsg = "Please check if the defined network is present on this "
_fmsg += self.get_description()
if "name" in obj_attr_list :
_conn_id = obj_attr_list["name"]
else :
_conn_id = "common"
self.get_network_list(_conn_id, obj_attr_list)
if _netname in self.networks_attr_list :
_networkid = self.networks_attr_list[_netname]["uuid"]
_net_info = {"net-id" : _networkid}
if not _net_info in _netids :
_netids.append(_net_info)
_netnames.append(_netname)
_status = 0
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
if _status :
_msg = "Network (" + obj_attr_list["prov_netname"] + " ) not found: " + _fmsg
cberr(_msg, True)
raise CldOpsException(_msg, _status)
else :
_netnames = ','.join(_netnames)
return _netnames, _netids
@trace
def create_ssh_key(self, vmc_name, key_name, key_type, key_contents, key_fingerprint, vm_defaults, connection) :
'''
TBD
'''
self.oskconncompute[vmc_name].keypairs.create(key_name, \
public_key = key_type + ' ' + key_contents)
return True
@trace
def is_cloud_image_uuid(self, imageid) :
'''
TBD
'''
if len(imageid) == 36 and imageid.count('-') == 4 :
return True
return False
@trace
def is_vm_running(self, obj_attr_list, fail = True) :
'''
Return the instance object if it is ACTIVE, True if it is in ERROR state, and False otherwise.
'''
try :
_cloud_vm_name = obj_attr_list["cloud_vm_name"]
_instance = self.get_instances(obj_attr_list, "vm", \
_cloud_vm_name)
if _instance :
if _instance.status == "ACTIVE" :
return _instance
elif _instance.status == "ERROR" :
obj_attr_list["last_known_state"] = "ERROR while checking for ACTIVE state"
return True
else :
return False
else :
return False
except Exception as e :
_status = 23
_fmsg = str(e)
raise CldOpsException(_fmsg, _status)
@trace
def is_vm_ready(self, obj_attr_list) :
'''
Return True once the instance is ACTIVE and has an IP address assigned (or has reached ERROR state).
'''
_instance = self.is_vm_running(obj_attr_list)
if _instance :
if obj_attr_list["last_known_state"].count("ERROR") :
return True
obj_attr_list["last_known_state"] = "ACTIVE with ip unassigned"
if self.get_ip_address(obj_attr_list, _instance) :
obj_attr_list["last_known_state"] = "ACTIVE with ip assigned"
return True
else :
obj_attr_list["last_known_state"] = "not ACTIVE"
return False
def vm_placement(self, obj_attr_list) :
'''
Build the availability zone for the instance, optionally pinned to a specific compute node as "zone:host".
'''
_availability_zone = None
if len(obj_attr_list["availability_zone"]) > 1 :
_availability_zone = obj_attr_list["availability_zone"]
if "compute_node" in obj_attr_list and _availability_zone :
# _scheduler_hints = { "force_hosts" : obj_attr_list["host_name"] }
for _host in self.oskconncompute[obj_attr_list["name"]].hypervisors.list() :
if _host.hypervisor_hostname.count(obj_attr_list["compute_node"]) :
obj_attr_list["host_name"] = _host.hypervisor_hostname
break
if "host_name" in obj_attr_list :
_availability_zone += ':' + obj_attr_list["host_name"]
else :
_msg = "Unable to find the compute_node \"" + obj_attr_list["compute_node"]
_msg += "\", indicated during the instance creation. Will let"
_msg += " the scheduler pick a compute node"
cbwarn(_msg)
obj_attr_list["availability_zone"] = _availability_zone
return True
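# Note on vm_placement() above: when a compute_node is requested, the availability
# zone is rewritten as "<zone>:<hypervisor_hostname>", which nova interprets as a
# request to place the instance on that specific host; if the node cannot be matched,
# the plain zone is kept and the scheduler chooses the host.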
@trace
def vvcreate(self, obj_attr_list) :
'''
Create a cinder volume (optionally bootable from an image) and wait for it to become available.
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
obj_attr_list["block_device_mapping"] = {}
_vol_status = "NA"
if "cloud_vv_type" not in obj_attr_list :
obj_attr_list["cloud_vv_type"] = None
if str(obj_attr_list["cloud_vv_type"]).lower() == "none" :
obj_attr_list["cloud_vv_type"] = None
if "cloud_vv" in obj_attr_list :
self.common_messages("VV", obj_attr_list, "creating", _status, _fmsg)
_imageid = None
if str(obj_attr_list["boot_from_volume"]).lower() == "true" :
_imageid = obj_attr_list["boot_volume_imageid1"]
obj_attr_list["cloud_vv_data_name"] = obj_attr_list["cloud_vv_name"]
obj_attr_list["cloud_vv_name"] = obj_attr_list["cloud_vv_name"].replace("-vv","-vbv")
obj_attr_list["last_known_state"] = "about to send volume create request"
_mark_a = time()
if str(self.oskconnstorage[obj_attr_list["name"]].version) == '1' :
_instance = self.oskconnstorage[obj_attr_list["name"]].volumes.create(obj_attr_list["cloud_vv"], \
snapshot_id = None, \
display_name = obj_attr_list["cloud_vv_name"], \
display_description = obj_attr_list["cloud_vv_name"], \
volume_type = obj_attr_list["cloud_vv_type"], \
availability_zone = None, \
imageRef = _imageid)
else :
_instance = self.oskconnstorage[obj_attr_list["name"]].volumes.create(obj_attr_list["cloud_vv"], \
snapshot_id = None, \
name = obj_attr_list["cloud_vv_name"], \
description = obj_attr_list["cloud_vv_name"], \
volume_type = obj_attr_list["cloud_vv_type"], \
availability_zone = None, \
imageRef = _imageid)
self.annotate_time_breakdown(obj_attr_list, "create_volume_time", _mark_a)
sleep(int(obj_attr_list["update_frequency"]))
obj_attr_list["cloud_vv_uuid"] = '{0}'.format(_instance.id)
_mark_a = time()
_wait_for_volume = 180
for i in range(1, _wait_for_volume) :
_vol_status = self.oskconnstorage[obj_attr_list["name"]].volumes.get(_instance.id).status
if _vol_status == "available" :
cbdebug("Volume " + obj_attr_list["cloud_vv_name"] + " took " + str(i) + " second(s) to become available",True)
break
elif _vol_status == "error" :
_fmsg = "Volume " + obj_attr_list["cloud_vv_name"] + " reported error after " + str(i) + " second(s)"
break
else :
sleep(1)
self.annotate_time_breakdown(obj_attr_list, "volume_available_time", _mark_a)
if str(obj_attr_list["boot_from_volume"]).lower() == "true" :
obj_attr_list["boot_volume_imageid1"] = None
obj_attr_list['cloud_vv'] = self.oskconnstorage[obj_attr_list["name"]].volumes.get(_instance.id).size
obj_attr_list["block_device_mapping"] = {'vda':'%s' % obj_attr_list["cloud_vv_uuid"]}
if _vol_status == "error" :
_status = 17262
else :
_status = 0
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except KeyboardInterrupt :
_status = 42
_fmsg = "CTRL-C interrupt"
cbdebug("VM create keyboard interrupt...", True)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
self.disconnect()
_status, _msg = self.common_messages("VV", obj_attr_list, "created", _status, _fmsg)
return _status, _msg
@trace
def vvdestroy(self, obj_attr_list) :
'''
Detach (if attached) and delete the cinder volume associated with this VM.
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
if str(obj_attr_list["cloud_vv_uuid"]).lower() != "none" :
_instance = self.get_instances(obj_attr_list, "vv", obj_attr_list["cloud_vm_name"])
if _instance :
self.common_messages("VV", obj_attr_list, "destroying", 0, '')
if len(_instance.attachments) :
_server_id = _instance.attachments[0]["server_id"]
_attachment_id = _instance.attachments[0]["id"]
# There is a weird bug in the python novaclient code. Don't change the
# following line: it is supposed to be "oskconncompute", even though it
# is dealing with volumes. Will explain later.
self.oskconncompute[obj_attr_list["name"]].volumes.delete_server_volume(_server_id, _attachment_id)
self.oskconnstorage[obj_attr_list["name"]].volumes.delete(_instance)
_status = 0
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
self.disconnect()
_status, _msg = self.common_messages("VV", obj_attr_list, "destroyed", _status, _fmsg)
return _status, _msg
@trace
def vmcreate(self, obj_attr_list) :
'''
Create an instance: resolve flavor, image and networks, optionally allocate a floating IP and boot volume, boot the server and wait until it is reachable.
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_instance = False
self.determine_instance_name(obj_attr_list)
self.determine_key_name(obj_attr_list)
obj_attr_list["last_known_state"] = "about to connect to " + self.get_description() + " manager"
self.take_action_if_requested("VM", obj_attr_list, "provision_originated")
# KEEP IT HERE TOO, NEEDS TO BE DUPLICATED, DO NOT REMOVE
self.determine_key_name(obj_attr_list)
if obj_attr_list["tenant"] != "default" :
if "ssh_key_injected" not in obj_attr_list :
self.check_ssh_key(obj_attr_list["vmc_name"], \
obj_attr_list["key_name"], \
obj_attr_list, True)
if "user" not in obj_attr_list :
obj_attr_list["user"] = obj_attr_list["tenant"]
obj_attr_list["admin_credentials"] = obj_attr_list["credentials"]
obj_attr_list["credentials"] = self.parse_authentication_data(obj_attr_list["credentials"], \
obj_attr_list["tenant"], \
obj_attr_list["user"], \
True)
if obj_attr_list["name"] in self.oskconncompute :
del self.oskconncompute[obj_attr_list["name"]]
_mark_a = time()
self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], \
{"use_cinderclient" : obj_attr_list["use_cinderclient"]}, \
False, \
False, \
obj_attr_list["name"])
self.annotate_time_breakdown(obj_attr_list, "authenticate_time", _mark_a)
_mark_a = time()
if self.is_vm_running(obj_attr_list) :
_msg = "An instance named \"" + obj_attr_list["cloud_vm_name"]
_msg += "\" is already running. It needs to be destroyed first."
_status = 187
cberr(_msg)
raise CldOpsException(_msg, _status)
self.annotate_time_breakdown(obj_attr_list, "check_existing_instance_time", _mark_a)
obj_attr_list["last_known_state"] = "about to get flavor and image list"
if str(obj_attr_list["security_groups"]).lower() == "false" :
_security_groups = None
else :
# "Security groups" must be a list
_security_groups = []
_security_groups.append(obj_attr_list["security_groups"])
self.vm_placement(obj_attr_list)
obj_attr_list["last_known_state"] = "about to send create request"
_mark_a = time()
self.get_flavors(obj_attr_list)
self.annotate_time_breakdown(obj_attr_list, "get_flavor_time", _mark_a)
_mark_a = time()
self.get_images(obj_attr_list)
self.annotate_time_breakdown(obj_attr_list, "get_imageid_time", _mark_a)
obj_attr_list["userdata"] = self.populate_cloudconfig(obj_attr_list)
if obj_attr_list["userdata"] :
obj_attr_list["config_drive"] = True
else :
obj_attr_list["config_drive"] = None
_mark_a = time()
_netnames, _netids = self.get_networks(obj_attr_list)
self.annotate_time_breakdown(obj_attr_list, "get_netid_time", _mark_a)
_meta = {}
if "meta_tags" in obj_attr_list :
if obj_attr_list["meta_tags"] != "empty" and \
obj_attr_list["meta_tags"].count(':') and \
obj_attr_list["meta_tags"].count(',') :
_meta = str2dic(obj_attr_list["meta_tags"])
_fip = None
if str(obj_attr_list["use_floating_ip"]).lower() == "true" :
_msg = " Attempting to create a floating IP to " + obj_attr_list["name"] + "..."
cbdebug(_msg, True)
obj_attr_list["last_known_state"] = "about to create floating IP"
_fip = self.floating_ip_allocate(obj_attr_list)
_meta["experiment_id"] = obj_attr_list["experiment_id"]
if "cloud_floating_ip_uuid" in obj_attr_list :
_meta["cloud_floating_ip_uuid"] = obj_attr_list["cloud_floating_ip_uuid"]
_time_mark_prs = int(time())
obj_attr_list["mgt_002_provisioning_request_sent"] = \
_time_mark_prs - int(obj_attr_list["mgt_001_provisioning_request_originated"])
self.vvcreate(obj_attr_list)
self.common_messages("VM", obj_attr_list, "creating", 0, '')
self.pre_vmcreate_process(obj_attr_list)
_mark_a = time()
_instance = self.oskconncompute[obj_attr_list["name"]].servers.create(name = obj_attr_list["cloud_vm_name"], \
block_device_mapping = obj_attr_list["block_device_mapping"], \
image = obj_attr_list["boot_volume_imageid1_instance"], \
flavor = obj_attr_list["flavor_instance"], \
security_groups = _security_groups, \
key_name = obj_attr_list["key_name"], \
scheduler_hints = None, \
availability_zone = obj_attr_list["availability_zone"], \
meta = _meta, \
config_drive = obj_attr_list["config_drive"], \
userdata = obj_attr_list["userdata"], \
nics = _netids, \
disk_config = "AUTO")
if _instance :
self.annotate_time_breakdown(obj_attr_list, "instance_creation_time", _mark_a)
sleep(int(obj_attr_list["update_frequency"]))
obj_attr_list["cloud_vm_uuid"] = '{0}'.format(_instance.id)
self.take_action_if_requested("VM", obj_attr_list, "provision_started")
while not self.floating_ip_attach(obj_attr_list, _instance) :
True
_time_mark_prc = self.wait_for_instance_ready(obj_attr_list, _time_mark_prs)
_mark_a = time()
self.annotate_time_breakdown(obj_attr_list, "instance_scheduling_time", _mark_a)
_mark_a = time()
self.annotate_time_breakdown(obj_attr_list, "port_creation_time", _mark_a)
if obj_attr_list["last_known_state"].count("ERROR") :
_fmsg = obj_attr_list["last_known_state"]
_status = 189
else :
if not len(obj_attr_list["block_device_mapping"]) and \
str(obj_attr_list["cloud_vv_uuid"]).lower() != "none" :
self.common_messages("VV", obj_attr_list, "attaching", _status, _fmsg)
# There is a weird bug in the python novaclient code. Don't change the
# following line: it is supposed to be "oskconncompute", even though it
# is dealing with volumes. Will explain later.
_mark_a = time()
self.oskconncompute[obj_attr_list["name"]].volumes.create_server_volume(obj_attr_list["cloud_vm_uuid"], \
obj_attr_list["cloud_vv_uuid"], \
"/dev/vdd")
self.annotate_time_breakdown(obj_attr_list, "attach_volume_time", _mark_a)
if obj_attr_list["volume_creation_status"] :
_status = obj_attr_list["volume_creation_status"]
else :
_status = 0
if "admin_credentials" in obj_attr_list :
self.connect(obj_attr_list["access"], \
obj_attr_list["admin_credentials"], \
obj_attr_list["vmc_name"], \
{},
False, \
False, \
obj_attr_list["name"])
self.get_mac_address(obj_attr_list, _instance)
self.wait_for_instance_boot(obj_attr_list, _time_mark_prc)
self.get_host_and_instance_name(obj_attr_list)
if obj_attr_list["tenant"] != "default" :
del self.oskconncompute[obj_attr_list["name"]]
if "resource_limits" in obj_attr_list :
_status, _fmsg = self.set_cgroup(obj_attr_list)
else :
_status = 0
if str(obj_attr_list["force_failure"]).lower() == "true" :
_fmsg = "Forced failure (option FORCE_FAILURE set \"true\")"
_status = 916
else :
_fmsg = "Failed to obtain instance's (cloud assigned) uuid. The "
_fmsg += "instance creation failed for some unknown reason."
cberr(_fmsg)
_status = 100
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except KeyboardInterrupt :
_status = 42
_fmsg = "CTRL-C interrupt"
cbdebug("VM create keyboard interrupt...", True)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
self.disconnect()
if "mgt_003_provisioning_request_completed" in obj_attr_list :
self.annotate_time_breakdown(obj_attr_list, "instance_active_time", obj_attr_list["mgt_003_provisioning_request_completed"], False)
if "mgt_004_network_acessible" in obj_attr_list :
self.annotate_time_breakdown(obj_attr_list, "instance_reachable_time", obj_attr_list["mgt_004_network_acessible"], False)
if "flavor_instance" in obj_attr_list :
del obj_attr_list["flavor_instance"]
if "boot_volume_imageid1_instance" in obj_attr_list :
del obj_attr_list["boot_volume_imageid1_instance"]
if "availability_zone" in obj_attr_list :
obj_attr_list["availability_zone"] = str(obj_attr_list["availability_zone"])
if "block_device_mapping" in obj_attr_list :
obj_attr_list["block_device_mapping"] = str(obj_attr_list["block_device_mapping"])
if "cloud_vv_type" in obj_attr_list :
obj_attr_list["cloud_vv_type"] = str(obj_attr_list["cloud_vv_type"])
_status, _msg = self.common_messages("VM", obj_attr_list, "created", _status, _fmsg)
return _status, _msg
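# High-level sequence of vmcreate() above (explanatory comments only): 1) resolve the
# instance/key names and per-tenant credentials, then connect; 2) abort if an instance
# with the same name is already running; 3) resolve flavor, image, userdata and network
# ids; 4) optionally allocate a floating IP and a (boot) volume via vvcreate();
# 5) call servers.create() with the collected arguments; 6) attach the floating IP,
# wait for the ACTIVE state and for boot, and attach the data volume when it was not
# already passed as a block device mapping.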
@trace
def vmdestroy(self, obj_attr_list) :
'''
Delete the instance (with its floating IP and volume) and wait until it is gone.
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
if int(obj_attr_list["instance_creation_status"]) :
_status, _fmsg = self.instance_cleanup_on_failure(obj_attr_list)
else :
_time_mark_drs = int(time())
if "mgt_901_deprovisioning_request_originated" not in obj_attr_list :
obj_attr_list["mgt_901_deprovisioning_request_originated"] = _time_mark_drs
obj_attr_list["mgt_902_deprovisioning_request_sent"] = \
_time_mark_drs - int(obj_attr_list["mgt_901_deprovisioning_request_originated"])
self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
obj_attr_list["vmc_name"],
{}, \
False, \
False, \
obj_attr_list["name"])
_wait = int(obj_attr_list["update_frequency"])
_max_tries = int(obj_attr_list["update_attempts"])
_curr_tries = 0
_instance = self.get_instances(obj_attr_list, "vm", obj_attr_list["cloud_vm_name"])
if _instance :
self.common_messages("VM", obj_attr_list, "destroying", 0, '')
self.floating_ip_delete(obj_attr_list)
self.retriable_instance_delete(obj_attr_list, _instance)
while _instance and _curr_tries < _max_tries :
_instance = self.get_instances(obj_attr_list, "vm", \
obj_attr_list["cloud_vm_name"])
if _instance :
if _instance.status != "ACTIVE" :
break
sleep(_wait)
_curr_tries += 1
else :
True
_status, _fmsg = self.vvdestroy(obj_attr_list)
_time_mark_drc = int(time())
obj_attr_list["mgt_903_deprovisioning_request_completed"] = \
_time_mark_drc - _time_mark_drs
self.take_action_if_requested("VM", obj_attr_list, "deprovision_finished")
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
self.disconnect()
_status, _msg = self.common_messages("VM", obj_attr_list, "destroyed", _status, _fmsg)
return _status, _msg
@trace
def vmcapture(self, obj_attr_list) :
'''
Capture the instance into a new glance image and wait for that image to become active.
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], \
{}, \
False, \
False, \
obj_attr_list["name"])
_wait = int(obj_attr_list["update_frequency"])
_curr_tries = 0
_max_tries = int(obj_attr_list["update_attempts"])
_instance = self.get_instances(obj_attr_list, "vm", obj_attr_list["cloud_vm_name"])
if _instance :
_time_mark_crs = int(time())
# Just in case the instance does not exist, make crc = crs
_time_mark_crc = _time_mark_crs
obj_attr_list["mgt_102_capture_request_sent"] = _time_mark_crs - obj_attr_list["mgt_101_capture_request_originated"]
if obj_attr_list["captured_image_name"] == "auto" :
obj_attr_list["captured_image_name"] = obj_attr_list["imageid1"] + "_captured_at_"
obj_attr_list["captured_image_name"] += str(obj_attr_list["mgt_101_capture_request_originated"])
self.common_messages("VM", obj_attr_list, "capturing", 0, '')
_instance.create_image(obj_attr_list["captured_image_name"], None)
_vm_image_created = False
while not _vm_image_created and _curr_tries < _max_tries :
# _vm_images = self.oskconncompute[obj_attr_list["name"]].glance.list()
_vm_images = self.oskconnimage[obj_attr_list["name"]].images.list()
for _vm_image in _vm_images :
if _vm_image.name == obj_attr_list["captured_image_name"] :
if _vm_image.status.lower() == "active" :
_vm_image_created = True
_time_mark_crc = int(time())
obj_attr_list["mgt_103_capture_request_completed"] = _time_mark_crc - _time_mark_crs
break
if "mgt_103_capture_request_completed" not in obj_attr_list :
obj_attr_list["mgt_999_capture_request_failed"] = int(time()) - _time_mark_crs
sleep(int(obj_attr_list["update_frequency"]))
_curr_tries += 1
else :
_fmsg = "This instance does not exist"
_status = 1098
if _curr_tries > _max_tries :
_status = 1077
_fmsg = "" + obj_attr_list["name"] + ""
_fmsg += " (cloud-assigned uuid " + obj_attr_list["cloud_vm_uuid"] + ") "
_fmsg += "could not be captured after " + str(_max_tries * _wait) + " seconds.... "
cberr(_fmsg)
else :
_status = 0
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
self.disconnect()
_status, _msg = self.common_messages("VM", obj_attr_list, "captured", _status, _fmsg)
return _status, _msg
@trace
def vmrunstate(self, obj_attr_list) :
'''
Alter the instance's run state (pause, suspend, unpause or resume) according to target_state.
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_ts = obj_attr_list["target_state"]
_cs = obj_attr_list["current_state"]
self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], \
{}, \
False, \
False, \
obj_attr_list["name"])
_wait = int(obj_attr_list["update_frequency"])
_curr_tries = 0
_max_tries = int(obj_attr_list["update_attempts"])
if "mgt_201_runstate_request_originated" in obj_attr_list :
_time_mark_rrs = int(time())
obj_attr_list["mgt_202_runstate_request_sent"] = \
_time_mark_rrs - obj_attr_list["mgt_201_runstate_request_originated"]
self.common_messages("VM", obj_attr_list, "runstate altering", 0, '')
_instance = self.get_instances(obj_attr_list, "vm", \
obj_attr_list["cloud_vm_name"])
if _instance :
if _ts == "fail" :
_instance.pause()
elif _ts == "save" :
_instance.suspend()
elif (_ts == "attached" or _ts == "resume") and _cs == "fail" :
_instance.unpause()
elif (_ts == "attached" or _ts == "restore") and _cs == "save" :
_instance.resume()
_time_mark_rrc = int(time())
obj_attr_list["mgt_203_runstate_request_completed"] = _time_mark_rrc - _time_mark_rrs
_msg = "VM " + obj_attr_list["name"] + " runstate request completed."
cbdebug(_msg)
_status = 0
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
self.disconnect()
_status, _msg = self.common_messages("VM", obj_attr_list, "runstate altered", _status, _fmsg)
return _status, _msg
@trace
def vmmigrate(self, obj_attr_list) :
'''
Live-migrate the instance to the destination host and wait for it to become ACTIVE again.
'''
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], \
{}, \
False, \
False, \
obj_attr_list["name"])
operation = obj_attr_list["mtype"]
_msg = "Sending a " + operation + " request for " + obj_attr_list["name"]
_msg += " (cloud-assigned uuid " + obj_attr_list["cloud_vm_uuid"] + ")"
_msg += "...."
cbdebug(_msg, True)
# This is a migration, so we need to poll very frequently
# If it is a micro-checkpointing operation, then poll normally
_orig_freq = int(obj_attr_list["update_frequency"])
_wait = 1 if operation == "migrate" else _orig_freq
_wait = min(_wait, _orig_freq)
_curr_tries = 0
_max_tries = int(obj_attr_list["update_attempts"])
if _wait < _orig_freq :
_max_tries = _max_tries * (_orig_freq / _wait)
_time_mark_crs = int(time())
try :
_instance = self.get_instances(obj_attr_list, "vm", obj_attr_list["cloud_vm_name"])
if _instance :
_instance.live_migrate(obj_attr_list["destination_name"].replace("host_", ""))
obj_attr_list["mgt_502_" + operation + "_request_sent"] = _time_mark_crs - obj_attr_list["mgt_501_" + operation + "_request_originated"]
while True and _curr_tries < _max_tries :
sleep(_wait)
_instance = self.get_instances(obj_attr_list, "vm", obj_attr_list["cloud_vm_name"])
if _instance.status not in ["ACTIVE", "MIGRATING"] :
_status = 4328
_msg = "Migration of instance failed, " + self.get_description() + " state is: " + _instance.status
raise CldOpsException(_msg, _status)
if _instance.status == "ACTIVE" :
_time_mark_crc = int(time())
obj_attr_list["mgt_503_" + operation + "_request_completed"] = _time_mark_crc - _time_mark_crs
break
_msg = "" + obj_attr_list["name"] + ""
_msg += " (cloud-assigned uuid " + obj_attr_list["cloud_vm_uuid"] + ") "
_msg += "still undergoing " + operation
_msg += ". Will wait " + str(_wait)
_msg += " seconds and try again."
cbdebug(_msg)
_curr_tries += 1
else :
_fmsg = "This instance does not exist"
_status = 1098
_status = 0
except Exception as e :
_status = 349201
_fmsg = str(e)
finally :
self.disconnect()
if "mgt_503_" + operation + "_request_completed" not in obj_attr_list :
obj_attr_list["mgt_999_" + operation + "_request_failed"] = int(time()) - _time_mark_crs
_status, _msg = self.common_messages("VM", obj_attr_list, operation + "ed ", _status, _fmsg)
return _status, _msg
@trace
def vmresize(self, obj_attr_list) :
'''
TBD
'''
return 0, "NOT SUPPORTED"
@trace
def imgdelete(self, obj_attr_list) :
'''
Delete the glance image whose id or name matches imageid1 (optionally filtered by hypervisor type).
'''
try :
_status = 100
_hyper = ''
_fmsg = "An error has occurred, but no error message was captured"
self.common_messages("IMG", obj_attr_list, "deleting", 0, '')
self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], \
obj_attr_list, \
False, \
False, \
None)
_image_list = self.oskconnimage["common"].images.list()
for _image in _image_list :
if self.is_cloud_image_uuid(obj_attr_list["imageid1"]) :
if "hypervisor_type" in obj_attr_list :
if str(obj_attr_list["hypervisor_type"]).lower() != "fake" :
if "hypervisor_type" in _image._info :
if _image._info["hypervisor_type"] == obj_attr_list["hypervisor_type"] :
if _image.id == obj_attr_list["imageid1"] :
_image.delete()
break
else :
if _image.id == obj_attr_list["imageid1"] :
_image.delete()
break
else :
if _image.id == obj_attr_list["imageid1"] :
_image.delete()
break
else :
if "hypervisor_type" in obj_attr_list :
if str(obj_attr_list["hypervisor_type"]).lower() != "fake" :
if "hypervisor_type" in _image._info :
if _image._info["hypervisor_type"] == obj_attr_list["hypervisor_type"] :
if _image.name == obj_attr_list["imageid1"] :
_image.delete()
break
else :
if _image.name == obj_attr_list["imageid1"] :
_image.delete()
break
else :
if _image.name == obj_attr_list["imageid1"] :
_image.delete()
break
obj_attr_list["boot_volume_imageid1"] = _image.id
obj_attr_list["imageid1"] = _image.name
_status = 0
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
_status, _msg = self.common_messages("IMG", obj_attr_list, "deleted", _status, _fmsg)
return _status, _msg
@trace
def parse_connection_data(self, connection_data, region, obj_attr_list) :
'''
Split the connection data (or a sourced rc-file map) into access url, endpoint type and region.
'''
_access_url = None
_endpoint_type = "publicURL"
_region = region
if not self.connauth_pamap :
if len(connection_data.split('-')) == 2 :
_access_url, _endpoint_type = connection_data.split('-')
else :
_access_url = connection_data.split('-')[0]
else :
if "OS_AUTH_URL" in self.connauth_pamap :
_access_url = self.connauth_pamap["OS_AUTH_URL"]
if "OS_ENDPOINT_TYPE" in self.connauth_pamap :
_endpoint_type = self.connauth_pamap["OS_ENDPOINT_TYPE"]
if "OS_REGION_NAME" in self.connauth_pamap :
_region = self.connauth_pamap["OS_REGION_NAME"]
obj_attr_list["access_from_rc"] = _access_url + '-' + _endpoint_type
return _access_url, _endpoint_type, _region
@trace
def parse_authentication_data(self, authentication_data, tenant = "default", username = "default", single = False):
'''
Parse the credential string or rc-file map into username, password, tenant/project, CA certificate, verification flag and domain ids.
'''
_username = ''
_password = ''
_tenant = ''
_project_name = None
# Insecure (don't verify CACERT: _verify = False and _cacert = None)
# Verify CACERT (_verify = False and _cacert = <valid path to cert file>)
_cacert = None
_ckcert = None
_verify = False
_user_domain_id = "default"
_project_domain_id = "default"
if not self.connauth_pamap :
if authentication_data.count(':') >= 2 :
_separator = ':'
else :
_separator = '-'
if len(authentication_data.split(_separator)) < 3 :
_msg = "ERROR: Insufficient number of parameters in OSK_CREDENTIALS."
_msg += "Please make sure that at least username, password and tenant"
_msg += " are present."
if single :
return _msg
else :
return False, _msg, False, False, False, False, False, False
if len(authentication_data.split(_separator)) == 3 :
_username, _password, _tenant = authentication_data.split(_separator)
elif len(authentication_data.split(_separator)) == 4 :
_username, _password, _tenant, _cacert = authentication_data.split(_separator)
_verify = True
elif len(authentication_data.split(_separator)) == 5 :
_username, _password, _tenant, _cacert, _ckcert = authentication_data.split(_separator)
if ( str(_ckcert).lower() == "verify" ) :
_verify = True
elif ( str(_ckcert).lower() == "insecure" ) :
_verify = False
_cacert = None
else :
_verify = False
_cacert = None
elif len(authentication_data.split(_separator)) > 5 and _separator == '-' :
_msg = "ERROR: Please make sure that the none of the parameters in"
_msg += "OSK_CREDENTIALS have any dashes (i.e., \"-\") on it. If"
_msg += "a dash is required, please use the string \"_dash\", and"
_msg += "it will be automatically replaced."
if single :
return _msg
else :
return False, _msg, False, False, False, False, False, False
else :
if "OS_USERNAME" in self.connauth_pamap :
_username = self.connauth_pamap["OS_USERNAME"]
if "OS_PASSWORD" in self.connauth_pamap :
_password = self.connauth_pamap["OS_PASSWORD"]
if "OS_TENANT_NAME" in self.connauth_pamap :
_tenant = self.connauth_pamap["OS_TENANT_NAME"]
if "OS_PROJECT_NAME" in self.connauth_pamap :
_project_name = self.connauth_pamap["OS_PROJECT_NAME"]
if "OS_CACERT" in self.connauth_pamap :
_cacert = self.connauth_pamap["OS_CACERT"]
if "OS_INSECURE" in self.connauth_pamap :
if self.connauth_pamap["OS_INSECURE"] == "1" or str(self.connauth_pamap["OS_INSECURE"]).lower() == "insecure":
_verify = False
_cacert = None
else :
_verify = True
if "OS_PROJECT_DOMAIN_ID" in self.connauth_pamap :
_project_domain_id = self.connauth_pamap["OS_PROJECT_DOMAIN_ID"]
if "OS_USER_DOMAIN_ID" in self.connauth_pamap :
_user_domain_id = self.connauth_pamap["OS_USER_DOMAIN_ID"]
if tenant != "default" :
_tenant = tenant
if username != "default" :
_username = username
_username = _username.replace("_dash_",'-')
_password = _password.replace("_dash_",'-')
_tenant = _tenant.replace("_dash_",'-')
if not _project_name :
_project_name = _tenant
if _cacert :
_cacert = _cacert.replace("_dash_",'-')
if single :
_str = str(_username) + ':' + str(_password) + ':' + str(_tenant)
if _cacert :
_str += ':' + str(_cacert)
if _verify :
_str += ':' + str(_verify)
return _str
else :
return _username, _password, _tenant, _project_name, _cacert, _verify, _user_domain_id, _project_domain_id
@trace
def disconnect(self) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
# if self.oskconncompute :
# self.oskconncompute.novaclientlient.http.close()
# if self.oskconnstorage and self.use_cinderclient == "true":
# self.oskconnstorage.novaclientlient.http.close()
# if self.oskconnnetwork :
# self.oskconnnetwork.neutronclient.http.close()
_status = 0
except AttributeError :
# If the "close" method does not exist, proceed normally.
_msg = "The \"close\" method does not exist or is not callable"
cbwarn(_msg)
_status = 0
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
if _status :
_msg = self.get_description() + " disconnection failure: " + _fmsg
cberr(_msg)
raise CldOpsException(_msg, _status)
else :
_msg = self.get_description() + " disconnection successful."
cbdebug(_msg)
return _status, _msg, ''
@trace
def get_network_attr(self, obj_attr_list, network_attr_list) :
'''
TBD
'''
_name = network_attr_list["name"]
if "provider:network_type" in network_attr_list :
_type = network_attr_list["provider:network_type"]
else :
_type = "NA"
_uuid = network_attr_list["id"]
if _type == "flat":
_model = "flat"
else :
if "router:external" in network_attr_list :
if network_attr_list["router:external"] :
_model = "external"
else :
_model = "tenant"
else :
_model = "NA"
self.networks_attr_list[_name] = {"uuid" : _uuid, "model" : _model, \
"type" : _type }
if _model == "tenant" :
if _name not in self.networks_attr_list["tenant_network_list"] :
self.networks_attr_list["tenant_network_list"].append(_name)
return True
@trace
def get_network_list(self, vmc_name, obj_attr_list) :
'''
TBD
'''
_network_list = self.oskconnnetwork[vmc_name].list_networks()["networks"]
for _network_attr_list in _network_list :
self.get_network_attr(obj_attr_list, _network_attr_list)
return _network_list
@trace
def check_floating_pool(self, vmc_name, vm_defaults) :
'''
TBD
'''
_floating_pool_found = True
if str(vm_defaults["create_jumphost"]).lower() != "false" or \
str(vm_defaults["use_floating_ip"]).lower() != "false" :
_floating_pool_dict = {}
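            # Collect every external ("router:external") network; these are the
            # candidate floating IP pools.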
for _network in self.oskconnnetwork[vmc_name].list_networks()["networks"] :
if _network["router:external"] :
if _network["name"] not in _floating_pool_dict :
_floating_pool_dict[_network["name"]] = _network["id"]
# _floating_pool_list = self.oskconncompute.floating_ip_pools.list()
if len(vm_defaults["floating_pool"]) < 2 :
if len(_floating_pool_dict) == 1 :
vm_defaults["floating_pool"] = list(_floating_pool_dict.keys())[0]
# vm_defaults["floating_pool"] = _floating_pool_list[0].name
_msg = "A single floating IP pool (\""
_msg += vm_defaults["floating_pool"] + "\") was found on this"
_msg += " VMC. Will use this as the floating pool."
cbdebug(_msg)
_msg = "Checking if the floating pool \""
_msg += vm_defaults["floating_pool"] + "\" can be found on VMC "
_msg += vmc_name + "..."
cbdebug(_msg, True)
_floating_pool_found = False
for _floating_pool in list(_floating_pool_dict.keys()) :
if _floating_pool == vm_defaults["floating_pool"] :
vm_defaults["floating_pool_id"] = _floating_pool_dict[_floating_pool]
_floating_pool_found = True
# if _floating_pool.name == vm_defaults["floating_pool"] :
# _floating_pool_found = True
if not (_floating_pool_found) :
_msg = "ERROR! Please make sure that the floating IP pool "
_msg += vm_defaults["floating_pool"] + "\" can be found"
_msg += " VMC " + vmc_name
_fmsg = _msg
cberr(_msg, True)
return _floating_pool_found
@trace
def check_jumphost(self, vmc_name, vm_defaults, vm_templates, detected_imageids) :
'''
TBD
'''
_can_create_jumphost = False
if vm_defaults["jumphost_login"] == "auto" :
vm_defaults["jumphost_login"] = vm_defaults["login"]
vm_defaults["jumphost_name"] = vm_defaults["username"] + '-' + vm_defaults["jumphost_base_name"]
try :
_cjh = str(vm_defaults["create_jumphost"]).lower()
_jhn = vm_defaults["jumphost_name"]
if _cjh == "true" :
vm_defaults["jumphost_ip"] = "to be created"
_msg = "Checking if a \"Jump Host\" (" + _jhn + ") VM is already"
_msg += " present on VMC " + vmc_name + "...."
cbdebug(_msg, True)
_obj_attr_list = copy.deepcopy(vm_defaults)
_obj_attr_list.update(str2dic(vm_templates[vm_defaults["jumphost_role"]]))
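                # The jump host is provisioned like any regular VM: clone the VM defaults,
                # overlay the template for jumphost_role, and force a floating IP onto it.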
if "floating_pool" in vm_defaults and _obj_attr_list["imageid1"] in detected_imageids :
_can_create_jumphost = True
_obj_attr_list["cloud_vm_name"] = _jhn
_obj_attr_list["cloud_name"] = ""
_obj_attr_list["role"] = vm_defaults["jumphost_role"]
_obj_attr_list["name"] = "vm_0"
_obj_attr_list["model"] = "osk"
_obj_attr_list["size"] = vm_defaults["jumphost_size"]
_obj_attr_list["use_floating_ip"] = "true"
_obj_attr_list["randomize_image_name"] = "false"
_obj_attr_list["experiment_id"] = ""
_obj_attr_list["mgt_001_provisioning_request_originated"] = int(time())
_obj_attr_list["vmc_name"] = vmc_name
_obj_attr_list["ai"] = "none"
_obj_attr_list["is_jumphost"] = True
_obj_attr_list["use_jumphost"] = False
_obj_attr_list["check_boot_complete"] = "tcp_on_22"
_obj_attr_list["userdata"] = False
_obj_attr_list["uuid"] = "00000000-0000-0000-0000-000000000000"
_obj_attr_list["log_string"] = _obj_attr_list["name"] + " (" + _obj_attr_list["uuid"] + ")"
_netname = _obj_attr_list["jumphost_netnames"]
if _netname == "all" :
_netname = ','.join(self.networks_attr_list["tenant_network_list"])
_obj_attr_list["prov_netname"] = _netname
_obj_attr_list["run_netname"] = _netname
if not self.is_vm_running(_obj_attr_list) :
if _can_create_jumphost :
_msg = " Creating a \"Jump Host\" (" + _jhn + ") VM on "
_msg += " VMC " + vmc_name + ", connected to the networks \""
_msg += _netname + "\", and attaching a floating IP from pool \""
_msg += vm_defaults["floating_pool"] + "\"."
#cbdebug(_msg)
print(_msg)
if "jumphost_ip" in _obj_attr_list :
del _obj_attr_list["jumphost_ip"]
self.vmcreate(_obj_attr_list)
else :
_msg = "The jump_host address was set to \"$True\", meaning"
_msg += " that a \"cb_jumphost\" VM should be automatically"
_msg += " created. However, the \"cb_nullworkload\" is not"
_msg += " present on this cloud, and thus the \"cb_jumphost\""
_msg += " cannot be created."
cberr(_msg, True)
return False
_instance = self.get_instances(_obj_attr_list, "vm", _jhn)
self.get_ip_address(_obj_attr_list, _instance)
_msg = " A \"Jump Host\" (" + _jhn + ") VM was found, with the floating IP"
_msg += " address \"" + _obj_attr_list["prov_cloud_ip"] + "\""
_msg += " already assigned to it"
#cbdebug(_msg)
print(_msg)
vm_defaults["jumphost_ip"] = _obj_attr_list["prov_cloud_ip"]
else :
return True
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
cberr(_fmsg, True)
return False
except KeyboardInterrupt :
_status = 42
_fmsg = "CTRL-C interrupt"
cbdebug("VM create keyboard interrupt...", True)
return False
except Exception as e :
_status = 23
_fmsg = str(e)
cberr(_fmsg, True)
return False
return True
@trace
def add_host(self, obj_attr_list, host, start) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_function = ''
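            # Classify the host: scheduler/api/server/dhcp services mark it as a
            # controller, while nova-compute marks it as a compute node (a host can be both).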
for _service in self.host_map[host]["services"] :
if _service.count("scheduler") or _service.count("api") or \
_service.count("server") or _service.count("dhcp") :
_function = "controller,"
break
if "nova-compute" in self.host_map[host]["services"] :
_function = "compute,"
_function = _function[0:-1]
# Host UUID is artificially generated
_host_uuid = str(uuid5(UUID('4f3f2898-69e3-5a0d-820a-c4e87987dbce'), \
obj_attr_list["cloud_name"] + str(host)))
obj_attr_list["host_list"][_host_uuid] = {}
obj_attr_list["hosts"] += _host_uuid + ','
_actual_host_name = host
if "modify_host_names" in obj_attr_list and \
str(obj_attr_list["modify_host_names"]).lower() != "false" :
_queried_host_name = _actual_host_name.split(".")[0] + '.' + obj_attr_list["modify_host_names"]
else :
_queried_host_name = _actual_host_name
obj_attr_list["host_list"][_host_uuid]["cloud_hostname"], \
obj_attr_list["host_list"][_host_uuid]["cloud_ip"] = hostname2ip(_queried_host_name, True)
obj_attr_list["host_list"][_host_uuid]["cloud_hostname"] = \
_actual_host_name
obj_attr_list["host_list"][_host_uuid].update(self.host_map[host])
obj_attr_list["host_list"][_host_uuid]["function"] = _function
obj_attr_list["host_list"][_host_uuid]["name"] = "host_" + obj_attr_list["host_list"][_host_uuid]["cloud_hostname"]
obj_attr_list["host_list"][_host_uuid]["pool"] = obj_attr_list["pool"]
obj_attr_list["host_list"][_host_uuid]["username"] = obj_attr_list["username"]
if str(obj_attr_list["host_user_root"]).lower() == "true" :
obj_attr_list["host_list"][_host_uuid]["login"] = "root"
else :
obj_attr_list["host_list"][_host_uuid]["login"] = obj_attr_list["host_list"][_host_uuid]["username"]
obj_attr_list["host_list"][_host_uuid]["notification"] = "False"
obj_attr_list["host_list"][_host_uuid]["model"] = obj_attr_list["model"]
obj_attr_list["host_list"][_host_uuid]["vmc_name"] = obj_attr_list["name"]
obj_attr_list["host_list"][_host_uuid]["vmc"] = obj_attr_list["uuid"]
obj_attr_list["host_list"][_host_uuid]["uuid"] = _host_uuid
obj_attr_list["host_list"][_host_uuid]["arrival"] = int(time())
obj_attr_list["host_list"][_host_uuid]["counter"] = obj_attr_list["counter"]
obj_attr_list["host_list"][_host_uuid]["simulated"] = False
obj_attr_list["host_list"][_host_uuid]["identity"] = obj_attr_list["identity"]
if "login" in obj_attr_list :
obj_attr_list["host_list"][_host_uuid]["login"] = obj_attr_list["login"]
else :
obj_attr_list["host_list"][_host_uuid]["login"] = "root"
obj_attr_list["host_list"][_host_uuid]["mgt_001_provisioning_request_originated"] = obj_attr_list["mgt_001_provisioning_request_originated"]
obj_attr_list["host_list"][_host_uuid]["mgt_002_provisioning_request_sent"] = obj_attr_list["mgt_002_provisioning_request_sent"]
_time_mark_prc = int(time())
obj_attr_list["host_list"][_host_uuid]["mgt_003_provisioning_request_completed"] = _time_mark_prc - start
_status = 0
except CldOpsException as obj :
_status = int(obj.status)
_fmsg = str(obj.msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
_status, _msg = self.common_messages("HOST", obj_attr_list, "discovered", _status, _fmsg)
return _status, _msg
@trace
def get_service_list(self, vmc_name, project) :
'''
TBD
'''
if project == "compute" :
return self.oskconncompute[vmc_name].services.list()
elif project == "volume" and self.use_cinderclient == "true" :
return self.oskconnstorage[vmc_name].services.list()
elif project == "network" :
return self.oskconnnetwork[vmc_name].list_agents()["agents"]
else :
return []
@trace
def get_service_host(self, service, project) :
'''
TBD
'''
if project == "compute" or project == "volume" :
_service_host = service.host.split('@')[0]
else :
_service_host = service["host"]
try :
_host, _ip = hostname2ip(_service_host)
return _host.split('.')[0]
except Exception as e :
_status = 23
_fmsg = str(e)
raise CldOpsException(_fmsg, _status)
@trace
def get_service_binary(self, service, project) :
'''
TBD
'''
if project == "compute" or project == "volume" :
return service.binary
else :
return service["binary"]
@trace
def build_host_map(self, vmc_name) :
'''
TBD
'''
try :
for _project in ["compute", "volume", "network"] :
for _service in self.get_service_list(vmc_name, _project) :
_host = self.get_service_host(_service, _project)
if _host not in self.host_map :
self.host_map[_host] = {}
self.host_map[_host]["services"] = []
self.host_map[_host]["extended_info"] = False
self.host_map[_host]["memory_size"] = "NA"
self.host_map[_host]["cores"] = "NA"
self.host_map[_host]["hypervisor_type"] = "NA"
_name = self.get_service_binary(_service, _project)
if _name not in self.host_map[_host]["services"] :
self.host_map[_host]["services"].append(_name)
for _entry in self.oskconncompute[vmc_name].hypervisors.list() :
_host = _entry.hypervisor_hostname.split('.')[0]
if _host not in self.host_map :
self.host_map[_host] = {}
self.host_map[_host]["services"] = []
self.host_map[_host]["extended_info"] = _entry._info
self.host_map[_host]["memory_size"] = _entry.memory_mb
self.host_map[_host]["cores"] = _entry.vcpus
self.host_map[_host]["hypervisor_type"] = _entry.hypervisor_type
return True
except Exception as e :
_status = 23
_fmsg = str(e)
raise CldOpsException(_fmsg, _status)
@trace
def get_flavors(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_flavor_list = self.oskconncompute[obj_attr_list["name"]].flavors.list()
_status = 168
_fmsg = "Please check if the defined flavor is present on this "
_fmsg += self.get_description()
_flavor = False
for _idx in range(0,len(_flavor_list)) :
if _flavor_list[_idx].name == obj_attr_list["size"] :
_flavor = _flavor_list[_idx]
_status = 0
break
obj_attr_list["flavor_instance"] = _flavor
obj_attr_list["flavor"] = _flavor.id
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
if _status :
_msg = "Flavor (" + obj_attr_list["size"] + " ) not found: " + _fmsg
cberr(_msg, True)
raise CldOpsException(_msg, _status)
else :
return True
@trace
def get_mac_address(self, obj_attr_list, instance) :
'''
TBD
'''
try :
_virtual_interfaces = self.oskconncompute[obj_attr_list["name"]].virtual_interfaces.list(instance.id)
if _virtual_interfaces and len(_virtual_interfaces) :
obj_attr_list["cloud_mac"] = _virtual_interfaces[0].mac_address
except :
obj_attr_list["cloud_mac"] = "ERROR"
return True
@trace
def get_host_and_instance_name(self, obj_attr_list, fail = True) :
'''
TBD
'''
# There is a lot of extra information that can be obtained through
# the "_info" attribute. However, a new connection has to be
# established to access the most up-to-date data on this attribute
# Not sure how stable it will be with newer versions of the API.
_instance = self.is_vm_running(obj_attr_list, fail = fail)
if _instance :
obj_attr_list["instance_name"] = "unknown"
obj_attr_list["host_name"] = "unknown"
try :
obj_attr_list["instance_name"] = getattr(_instance, 'OS-EXT-SRV-ATTR:instance_name')
obj_attr_list["host_name"] = getattr(_instance, 'OS-EXT-SRV-ATTR:host')
except :
pass
# if "_info" in dir(_instance) :
# if "OS-EXT-SRV-ATTR:host" in _instance._info :
# obj_attr_list["host_name"] = _instance._info['OS-EXT-SRV-ATTR:host'].split('.')[0]
# else :
# obj_attr_list["host_name"] = "unknown"
# if "OS-EXT-SRV-ATTR:instance_name" in _instance._info :
# obj_attr_list["instance_name"] = _instance._info['OS-EXT-SRV-ATTR:instance_name']
# else :
# obj_attr_list["instance_name"] = "unknown"
# else :
# obj_attr_list["instance_name"] = "unknown"
# obj_attr_list["host_name"] = "unknown"
else :
obj_attr_list["instance_name"] = "unknown"
obj_attr_list["host_name"] = "unknown"
return True
@trace
def get_instance_deployment_time(self, obj_attr_list, fail = True) :
'''
TBD
'''
_instance = self.is_vm_running(obj_attr_list, fail)
_created = False
_launched = False
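        # When the cloud reports both "created" and "launched_at" timestamps, mgt_003
        # is recomputed from the cloud's own clock instead of the client-side estimate.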
if _instance :
if "_info" in dir(_instance) :
if "created" in _instance._info :
_created = iso8601.parse_date(_instance._info["created"])
if "S-SRV-USG:launched_at" in _instance._info :
_launched = iso8601.parse_date(_instance._info["OS-SRV-USG:launched_at"])
if _created and _launched :
_mgt_003 = (_launched - _created).total_seconds()
obj_attr_list["comments"] += " Actual time spent waiting for instance"
obj_attr_list["comments"] += " to become active was "
obj_attr_list["comments"] += str(obj_attr_list["mgt_003_provisioning_request_completed"])
obj_attr_list["comments"] += ". "
obj_attr_list["mgt_003_provisioning_request_completed"] = int(_mgt_003)
return True
@trace
def floating_ip_allocate(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_call = "NAfpc"
identifier = obj_attr_list["cloud_vm_name"]
if not self.oskconnnetwork :
self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], \
{}, \
False, \
False, \
obj_attr_list["name"])
_fip = False
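            # Allocate a brand new floating IP on the configured external network
            # through the Neutron API.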
if not _fip :
_call = "floating ip create"
_mark_a = time()
_fip_h = self.oskconnnetwork[obj_attr_list["name"]].create_floatingip({"floatingip": {"floating_network_id": obj_attr_list["floating_pool_id"]}})
# _fip_h = self.oskconncompute.floating_ips.create(obj_attr_list["floating_pool"])
self.annotate_time_breakdown(obj_attr_list, "create_fip_time", _mark_a)
obj_attr_list["cloud_floating_ip_address"] = _fip_h["floatingip"]["floating_ip_address"]
obj_attr_list["cloud_floating_ip_uuid"] = _fip_h["floatingip"]["id"]
_fip = obj_attr_list["cloud_floating_ip_address"]
return _fip
except Exception as e :
_status = 23
_fmsg = "(While getting instance(s) through API call \"" + _call + "\") " + str(e)
if identifier not in self.api_error_counter :
self.api_error_counter[identifier] = 0
self.api_error_counter[identifier] += 1
if self.api_error_counter[identifier] > 3 :
raise CldOpsException(_fmsg, _status)
else :
cbwarn(_fmsg)
return False
@trace
def floating_ip_delete(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_call = "NAfpd"
identifier = obj_attr_list["cloud_vm_name"]
self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], \
{}, \
False, \
False, \
obj_attr_list["name"])
if "cloud_floating_ip_uuid" in obj_attr_list :
_call = "floating ip delete"
self.oskconnnetwork[obj_attr_list["name"]].delete_floatingip(obj_attr_list["cloud_floating_ip_uuid"])
return True
except Exception as e :
_status = 23
_fmsg = "(While getting instance(s) through API call \"" + _call + "\") " + str(e)
if identifier not in self.api_error_counter :
self.api_error_counter[identifier] = 0
self.api_error_counter[identifier] += 1
if self.api_error_counter[identifier] > 3 :
raise CldOpsException(_fmsg, _status)
else :
cbwarn(_fmsg)
return False
@trace
def floating_ip_attach(self, obj_attr_list, _instance) :
'''
TBD
'''
try :
_call = "NAfpa"
identifier = obj_attr_list["cloud_vm_name"]
if str(obj_attr_list["use_floating_ip"]).lower() == "true" :
_msg = " Attempting to attach a floating IP to " + obj_attr_list["name"] + "..."
cbdebug(_msg, True)
_curr_tries = 0
_max_tries = int(obj_attr_list["update_attempts"])
_wait = int(obj_attr_list["update_frequency"])
obj_attr_list["last_known_state"] = "about to attach floating IP"
_vm_ready = False
while _curr_tries < _max_tries :
_vm_ready = self.is_vm_running(obj_attr_list)
if _vm_ready :
break
else :
_curr_tries += 1
sleep(_wait)
_call = "floating ip attach"
_mark_a = time()
if "hypervisor_type" in obj_attr_list and obj_attr_list["hypervisor_type"].lower() == "fake" :
True
else :
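                    # Prefer the Neutron port-based association; fall back to the legacy
                    # novaclient add_floating_ip() call if that fails.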
try :
update_info = {"port_id":_instance.interface_list()[0].id}
                        self.oskconnnetwork[obj_attr_list["name"]].update_floatingip(obj_attr_list["cloud_floating_ip_uuid"], {"floatingip": update_info})
except :
_instance.add_floating_ip(obj_attr_list["cloud_floating_ip_address"])
self.annotate_time_breakdown(obj_attr_list, "attach_fip_time", _mark_a)
return True
        except novaclient.exceptions.ClientException as obj :
            _status = int(obj.code)
            _fmsg = "(While getting instance(s) through API call \"" + _call + "\") " + str(obj.message)
if identifier not in self.api_error_counter :
self.api_error_counter[identifier] = 0
self.api_error_counter[identifier] += 1
if self.api_error_counter[identifier] > self.max_api_errors :
raise CldOpsException(_fmsg, _status)
else :
cbwarn(_fmsg)
return False
except Exception as e :
_status = 23
_fmsg = "(While getting instance(s) through API call \"" + _call + "\") " + str(e)
if identifier not in self.api_error_counter :
self.api_error_counter[identifier] = 0
self.api_error_counter[identifier] += 1
if self.api_error_counter[identifier] > 3 :
raise CldOpsException(_fmsg, _status)
else :
cbwarn(_fmsg)
return False
@trace
def instance_cleanup_on_failure(self, obj_attr_list) :
'''
TBD
'''
_vminstance = self.get_instances(obj_attr_list, "vm", \
obj_attr_list["cloud_vm_name"])
if _vminstance :
# Not the best way to solve this problem. Will improve later.
if not self.is_vm_running(obj_attr_list) :
if "fault" in dir(_vminstance) :
if "message" in _vminstance.fault :
obj_attr_list["instance_creation_failure_message"] += "\nINSTANCE ERROR MESSAGE:" + str(_vminstance.fault["message"]) + ".\n"
# Try and make a last attempt effort to get the hostname,
# even if the VM creation failed.
self.get_host_and_instance_name(obj_attr_list, fail = False)
if "host_name" in obj_attr_list :
obj_attr_list["instance_creation_failure_message"] += " (Host \"" + obj_attr_list["host_name"] + "\")"
_vminstance.delete()
sleep(20)
if "cloud_vv" in obj_attr_list :
self.vvdestroy(obj_attr_list)
if obj_attr_list["volume_creation_status"] :
obj_attr_list["instance_creation_failure_message"] += "VOLUME ERROR MESSAGE:" + obj_attr_list["volume_creation_failure_message"] + ".\n"
return 0, obj_attr_list["instance_creation_failure_message"]
@trace
def retriable_instance_delete(self, obj_attr_list, instance) :
'''
TBD
'''
try :
if "cloud_vm_name" in obj_attr_list :
identifier = obj_attr_list["cloud_vm_name"]
else :
identifier = instance.name
instance.delete()
return True
except Exception as e :
_status = 23
_fmsg = "(While removing instance(s) through API call \"delete\") " + str(obj.error_message)
if identifier not in self.api_error_counter :
self.api_error_counter[identifier] = 0
self.api_error_counter[identifier] += 1
if self.api_error_counter[identifier] > self.max_api_errors :
raise CldOpsException(_fmsg, _status)
else :
return False
```
#### File: lib/clouds/plm_cloud_ops.py
```python
import libxml2
import os
from time import time, sleep
from random import choice, randint
from hashlib import sha256
from libvirt import *
from lib.auxiliary.code_instrumentation import trace, cbdebug, cberr, cbwarn, cbinfo, cbcrit
from lib.auxiliary.data_ops import str2dic, dic2str, is_number, DataOpsException
from lib.remote.process_management import ProcessManagement
from lib.remote.network_functions import hostname2ip
from .shared_functions import CldOpsException, CommonCloudFunctions
class PlmCmds(CommonCloudFunctions) :
'''
TBD
'''
@trace
def __init__ (self, pid, osci, expid = None) :
'''
TBD
'''
CommonCloudFunctions.__init__(self, pid, osci)
self.pid = pid
self.osci = osci
self.ft_supported = False
self.lvirtconn = {}
self.expid = expid
self.api_error_counter = {}
self.max_api_errors = 10
self.additional_rc_contents = ''
self.vhw_config = {}
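        # Named instance sizes: vcpus / memory (MB) / storage (MB) / vnics.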
self.vhw_config["pico32"] = { "vcpus" : "1", "vmem" : "256", "vstorage" : "2048", "vnics" : "1" }
self.vhw_config["nano32"] = { "vcpus" : "1", "vmem" : "512", "vstorage" : "61440", "vnics" : "1" }
self.vhw_config["micro32"] = { "vcpus" : "1", "vmem" : "1024", "vstorage" : "61440", "vnics" : "1" }
self.vhw_config["copper32"] = { "vcpus" : "1", "vmem" : "2048", "vstorage" : "61440", "vnics" : "1" }
self.vhw_config["bronze32"] = { "vcpus" : "1", "vmem" : "2048", "vstorage" : "179200", "vnics" : "1" }
self.vhw_config["iron32"] = { "vcpus" : "2", "vmem" : "2048", "vstorage" : "179200", "vnics" : "1" }
self.vhw_config["silver32"] = { "vcpus" : "4", "vmem" : "2048", "vstorage" : "358400", "vnics" : "1" }
self.vhw_config["gold32"] = { "vcpus" : "8", "vmem" : "4096", "vstorage" : "358400", "vnics" : "1" }
self.vhw_config["copper64"] = { "vcpus" : "2", "vmem" : "4096", "vstorage" : "61440", "vnics" : "1" }
self.vhw_config["bronze64"] = { "vcpus" : "2", "vmem" : "4096", "vstorage" : "870400", "vnics" : "1" }
self.vhw_config["silver64"] = { "vcpus" : "4", "vmem" : "8192", "vstorage" : "1048576", "vnics" : "1" }
self.vhw_config["gold64"] = { "vcpus" : "8", "vmem" : "16384", "vstorage" : "1048576", "vnics" : "1" }
self.vhw_config["platinum64"] = { "vcpus" : "16", "vmem" : "16384", "vstorage" : "2097152", "vnics" : "1" }
@trace
def get_description(self) :
'''
TBD
'''
return "Parallel Libvirt Manager Cloud"
@trace
def connect(self, access, credentials, vmc_name, extra_parms = {}, diag = False, generate_rc = False) :
'''
TBD
'''
try :
_status = 100
_endpoint_ip = "NA"
_fmsg = "An error has occurred, but no error message was captured"
for _endpoint in access.split(',') :
_endpoint, _endpoint_name, _endpoint_ip= self.parse_endpoint(_endpoint, "qemu+tcp", False)
if _endpoint_ip not in self.lvirtconn :
self.lvirtconn[_endpoint_ip] = open(_endpoint + "/system")
self.lvirtconn[_endpoint_ip].getSysinfo()
_status -= 100
except libvirtError as msg :
_status = 18127
_fmsg = str(msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
if _status :
_msg = self.get_description() + " connection to endpoint \"" + _endpoint_ip + "\" failed: " + _fmsg
cberr(_msg)
raise CldOpsException(_msg, _status)
else :
_msg = self.get_description() + " connection successful."
cbdebug(_msg)
return _status, _msg, ''
@trace
def test_vmc_connection(self, cloud_name, vmc_name, access, credentials, key_name, \
security_group_name, vm_templates, vm_defaults, vmc_defaults) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
self.connect(access, credentials, vmc_name, vm_defaults, True, True)
self.generate_rc(cloud_name, vmc_defaults, self.additional_rc_contents)
_prov_netname_found, _run_netname_found = self.check_networks(vmc_name, vm_defaults)
_key_pair_found = self.check_ssh_key(vmc_name, self.determine_key_name(vm_defaults), vm_defaults)
_detected_imageids = self.check_images(vmc_name, vm_templates, vmc_defaults['poolname'], vm_defaults)
if not (_run_netname_found and _prov_netname_found and _key_pair_found) :
_msg = "Check the previous errors, fix it (using lxc CLI)"
_status = 1178
raise CldOpsException(_msg, _status)
if len(_detected_imageids) :
_status = 0
else :
_status = 1
except CldOpsException as obj :
_fmsg = str(obj.msg)
_status = 2
except Exception as msg :
_fmsg = str(msg)
_status = 23
finally :
_status, _msg = self.common_messages("VMC", {"name" : vmc_name }, "connected", _status, _fmsg)
return _status, _msg
@trace
def check_networks(self, vmc_name, vm_defaults) :
'''
TBD
'''
_prov_netname = vm_defaults["netname"]
_run_netname = vm_defaults["netname"]
_net_str = "network \"" + _prov_netname + "\""
_prov_netname_found = False
_run_netname_found = False
for _endpoint in list(self.lvirtconn.keys()) :
_msg = "Checking if the " + _net_str + " can be "
_msg += "found on VMC " + vmc_name + " (endpoint " + _endpoint + ")..."
cbdebug(_msg, True)
for _network in self.lvirtconn[_endpoint].listNetworks() :
if _network == _prov_netname :
_prov_netname_found = True
if _network == _run_netname :
_run_netname_found = True
if not _prov_netname_found :
_msg = "ERROR! Please make sure that the provisioning network " + _prov_netname + " can be found"
_msg += " VMC " + vmc_name + " (endpoint " + _endpoint + ")..."
_fmsg = _msg
cberr(_msg, True)
            if not _run_netname_found :
                _msg = "ERROR! Please make sure that the running network " + _run_netname + " can be found"
                _msg += " on VMC " + vmc_name + " (endpoint " + _endpoint + ")..."
_fmsg = _msg
cberr(_msg, True)
return _prov_netname_found, _run_netname_found
@trace
def check_images(self, vmc_name, vm_templates, poolname, vm_defaults) :
'''
TBD
'''
for _endpoint in list(self.lvirtconn.keys()) :
self.common_messages("IMG", { "name": vmc_name, "endpoint" : _endpoint }, "checking", 0, '')
_map_name_to_id = {}
_map_id_to_name = {}
_storage_pool_handle = self.lvirtconn[_endpoint].storagePoolLookupByName(poolname)
_registered_image_list = _storage_pool_handle.listVolumes()
_registered_imageid_list = []
for _registered_image in _registered_image_list :
_image_uuid = self.generate_random_uuid(_registered_image)
_registered_imageid_list.append(_image_uuid)
_map_name_to_id[_registered_image] = _image_uuid
for _vm_role in list(vm_templates.keys()) :
_imageid = str2dic(vm_templates[_vm_role])["imageid1"]
if _imageid != "to_replace" :
if _imageid in _map_name_to_id and _map_name_to_id[_imageid] != _imageid :
vm_templates[_vm_role] = vm_templates[_vm_role].replace(_imageid, _map_name_to_id[_imageid])
else :
_map_name_to_id[_imageid] = _imageid
vm_templates[_vm_role] = vm_templates[_vm_role].replace(_imageid, _map_name_to_id[_imageid])
_map_id_to_name[_map_name_to_id[_imageid]] = _imageid
_detected_imageids = self.base_check_images(vmc_name, vm_templates, _registered_imageid_list, _map_id_to_name, vm_defaults)
if not _detected_imageids :
return _detected_imageids
return _detected_imageids
@trace
def discover_hosts(self, obj_attr_list, start) :
'''
TBD
'''
_host_uuid = obj_attr_list["cloud_vm_uuid"]
obj_attr_list["host_list"] = {}
obj_attr_list["hosts"] = ''
for _endpoint in self.lvirtconn :
_host_info = self.lvirtconn[_endpoint].getInfo()
_host_extended_info = self.lvirtconn[_endpoint].getSysinfo()
for _line in _host_extended_info.split('\n') :
if _line.count("uuid") :
_host_uuid = _line.split('>')[1].split('<')[0]
obj_attr_list["hosts"] += _host_uuid + ','
obj_attr_list["host_list"][_host_uuid] = {}
obj_attr_list["host_list"][_host_uuid]["pool"] = obj_attr_list["pool"].upper()
obj_attr_list["host_list"][_host_uuid]["username"] = obj_attr_list["username"]
obj_attr_list["host_list"][_host_uuid]["notification"] = "False"
obj_attr_list["host_list"][_host_uuid]["cloud_hostname"], \
obj_attr_list["host_list"][_host_uuid]["cloud_ip"] = hostname2ip(_endpoint, True)
obj_attr_list["host_list"][_host_uuid]["name"] = "host_" + obj_attr_list["host_list"][_host_uuid]["cloud_hostname"]
obj_attr_list["host_list"][_host_uuid]["vmc_name"] = obj_attr_list["name"]
obj_attr_list["host_list"][_host_uuid]["vmc"] = obj_attr_list["uuid"]
obj_attr_list["host_list"][_host_uuid]["cloud_vm_uuid"] = _host_uuid
obj_attr_list["host_list"][_host_uuid]["uuid"] = _host_uuid
obj_attr_list["host_list"][_host_uuid]["model"] = obj_attr_list["model"]
obj_attr_list["host_list"][_host_uuid]["function"] = "hypervisor"
obj_attr_list["host_list"][_host_uuid]["cores"] = _host_info[2]
obj_attr_list["host_list"][_host_uuid]["memory"] = _host_info[1]
obj_attr_list["host_list"][_host_uuid]["cloud_ip"] = _endpoint
obj_attr_list["host_list"][_host_uuid]["arrival"] = int(time())
obj_attr_list["host_list"][_host_uuid]["simulated"] = False
obj_attr_list["host_list"][_host_uuid]["identity"] = obj_attr_list["identity"]
obj_attr_list["host_list"][_host_uuid]["hypervisor_type"] = "kvm"
if "login" in obj_attr_list :
obj_attr_list["host_list"][_host_uuid]["login"] = obj_attr_list["login"]
else :
obj_attr_list["host_list"][_host_uuid]["login"] = "root"
obj_attr_list["host_list"][_host_uuid]["counter"] = obj_attr_list["counter"]
obj_attr_list["host_list"][_host_uuid]["mgt_001_provisioning_request_originated"] = obj_attr_list["mgt_001_provisioning_request_originated"]
obj_attr_list["host_list"][_host_uuid]["mgt_002_provisioning_request_sent"] = obj_attr_list["mgt_002_provisioning_request_sent"]
_time_mark_prc = int(time())
obj_attr_list["host_list"][_host_uuid]["mgt_003_provisioning_request_completed"] = _time_mark_prc - start
obj_attr_list["hosts"] = obj_attr_list["hosts"][:-1]
self.additional_host_discovery(obj_attr_list)
return True
@trace
def vmccleanup(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_curr_tries = 0
_max_tries = int(obj_attr_list["update_attempts"])
_wait = int(obj_attr_list["update_frequency"])
sleep(_wait)
self.common_messages("VMC", obj_attr_list, "cleaning up vms", 0, '')
_running_instances = True
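            # Keep sweeping every endpoint until no domain named after this experiment
            # ("cb-<username>-<cloud name>") is left, or _max_tries rounds have elapsed.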
while _running_instances and _curr_tries < _max_tries :
_running_instances = False
for _endpoint in self.lvirtconn :
_proc_man = ProcessManagement(username = "root", \
hostname = _endpoint, \
cloud_name = obj_attr_list["cloud_name"])
_cmd = "sudo pkill -9 -f 'rinetd -c /tmp/cb'; sudo rm -rf /tmp/cb-*.rinetd.conf"
_status, _result_stdout, _fmsg = _proc_man.run_os_command(_cmd, raise_exception=False)
_domain_list = self.lvirtconn[_endpoint].listAllDomains()
for _domain in _domain_list :
if _domain.name().count("cb-" + obj_attr_list["username"] + '-' + obj_attr_list["cloud_name"]) :
_running_instances = True
_msg = "Terminating instance: "
_msg += _domain.UUIDString() + " (" + str(_domain.name()) + ")"
cbdebug(_msg, True)
if _domain.state()[0] == VIR_DOMAIN_RUNNING :
_domain.destroy()
_domain.undefine()
sleep(_wait)
_curr_tries += 1
self.common_messages("VMC", obj_attr_list, "cleaning up vvs", 0, '')
_curr_tries = 0
_created_volumes = True
while _created_volumes and _curr_tries < _max_tries :
_created_volumes = False
_storage_pool_list = [ obj_attr_list["poolname"] ]
for _storage_pool in _storage_pool_list :
_storage_pool_handle = self.lvirtconn[_endpoint].storagePoolLookupByName(_storage_pool)
_volume_list = _storage_pool_handle.listVolumes()
for _volume in _volume_list :
if _volume.count("cb-" + obj_attr_list["username"] + '-' + obj_attr_list["cloud_name"]) :
_created_volumes = True
_msg = "Removing volume : "
_msg += self.generate_random_uuid(_volume) + " (" + str(_volume) + ")"
cbdebug(_msg, True)
_storage_pool_handle.storageVolLookupByName(_volume).delete(0)
sleep(_wait)
_curr_tries += 1
if _curr_tries > _max_tries :
_status = 1077
_fmsg = "Some instances on VMC \"" + obj_attr_list["name"] + "\""
_fmsg += " could not be removed because they never became active"
_fmsg += ". They will have to be removed manually."
                cberr(_fmsg, True)
else :
_status = 0
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except libvirtError as msg :
_status = 18127
_fmsg = str(msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
_status, _msg = self.common_messages("VMC", obj_attr_list, "cleaned up", _status, _fmsg)
return _status, _msg
@trace
def vmcregister(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_time_mark_prs = int(time())
obj_attr_list["mgt_002_provisioning_request_sent"] = \
_time_mark_prs - int(obj_attr_list["mgt_001_provisioning_request_originated"])
self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
obj_attr_list["name"], obj_attr_list)
if "cleanup_on_attach" in obj_attr_list and obj_attr_list["cleanup_on_attach"] == "True" :
_status, _fmsg = self.vmccleanup(obj_attr_list)
else :
_status = 0
obj_attr_list["cloud_hostname"], obj_attr_list["cloud_ip"] = hostname2ip(obj_attr_list["name"], False)
_fmsg = "VMC " + obj_attr_list["uuid"] + " could not be registered "
_fmsg += " on " + self.get_description() + " \"" + obj_attr_list["cloud_name"] + "\"."
obj_attr_list["cloud_vm_uuid"] = self.generate_random_uuid(obj_attr_list["name"])
obj_attr_list["arrival"] = int(time())
if obj_attr_list["discover_hosts"].lower() == "true" :
self.discover_hosts(obj_attr_list, _time_mark_prs)
else :
obj_attr_list["hosts"] = ''
obj_attr_list["host_list"] = {}
obj_attr_list["host_count"] = "NA"
_time_mark_prc = int(time())
obj_attr_list["mgt_003_provisioning_request_completed"] = \
_time_mark_prc - _time_mark_prs
_status = 0
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
_status, _msg = self.common_messages("VMC", obj_attr_list, "registered", _status, _fmsg)
return _status, _msg
@trace
def vmcunregister(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_time_mark_drs = int(time())
if "mgt_901_deprovisioning_request_originated" not in obj_attr_list :
obj_attr_list["mgt_901_deprovisioning_request_originated"] = _time_mark_drs
obj_attr_list["mgt_902_deprovisioning_request_sent"] = _time_mark_drs - int(obj_attr_list["mgt_901_deprovisioning_request_originated"])
if "cleanup_on_detach" in obj_attr_list and obj_attr_list["cleanup_on_detach"] == "True" :
_status, _fmsg = self.vmccleanup(obj_attr_list)
_time_mark_prc = int(time())
obj_attr_list["mgt_903_deprovisioning_request_completed"] = _time_mark_prc - _time_mark_drs
_status = 0
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
_status, _msg = self.common_messages("VMC", obj_attr_list, "unregistered", _status, _fmsg)
return _status, _msg
@trace
def vmcount(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_nr_instances = 0
sleep(15)
for _vmc_uuid in self.osci.get_object_list(obj_attr_list["cloud_name"], "VMC") :
_vmc_attr_list = self.osci.get_object(obj_attr_list["cloud_name"], \
"VMC", False, _vmc_uuid, \
False)
self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
_vmc_attr_list["name"], obj_attr_list)
for _endpoint in self.lvirtconn :
_domain_list = self.lvirtconn[_endpoint].listAllDomains()
for _domain in _domain_list :
if _domain.name().count("cb-" + obj_attr_list["username"] + '-' + obj_attr_list["cloud_name"]) :
_nr_instances += 1
except Exception as e :
_status = 23
_nr_instances = "NA"
_fmsg = str(e)
finally :
return _nr_instances
@trace
def get_ssh_keys(self, vmc_name, key_name, key_contents, key_fingerprint, registered_key_pairs, internal, connection) :
'''
TBD
'''
registered_key_pairs[key_name] = key_fingerprint + "-NA"
return True
@trace
def get_security_groups(self, vmc_name, security_group_name, registered_security_groups) :
'''
TBD
'''
registered_security_groups.append(security_group_name)
return True
@trace
def get_ip_address(self, obj_attr_list) :
'''
TBD
'''
_network_handle = self.lvirtconn[obj_attr_list["host_cloud_ip"]].networkLookupByName(obj_attr_list["run_netname"])
for _item in _network_handle.DHCPLeases() :
if _item["mac"] == obj_attr_list["cloud_vm_mac"] :
obj_attr_list["run_cloud_ip"] = _item["ipaddr"]
if str(obj_attr_list["ports_base"]).lower() != "false" :
obj_attr_list["prov_cloud_ip"] = obj_attr_list["host_cloud_ip"]
else :
obj_attr_list["prov_cloud_ip"] = _item["ipaddr"]
obj_attr_list["cloud_ip"] = obj_attr_list["run_cloud_ip"]
return True
return False
@trace
def get_instances(self, obj_attr_list, obj_type = "vm", endpoints = "all", identifier = "all") :
'''
TBD
'''
_instances = []
_fmsg = "Error while getting instances"
_call = "NA"
if endpoints == "all" :
_endpoints = list(self.lvirtconn.keys())
else :
_endpoints = [endpoints]
try :
for _endpoint in _endpoints :
if identifier == "all" :
_call = "listAllDomains()"
_instances = self.lvirtconn[_endpoint].listAllDomains()
else :
_call = "lookupByName()"
_instances = self.lvirtconn[_endpoint].lookupByName(identifier)
_status = 0
except CldOpsException as obj :
_status = obj.status
_xfmsg = str(obj.msg)
except libvirtError as msg :
_status = 18127
_xfmsg = str(msg)
except Exception as e :
_status = 23
_xfmsg = str(e)
finally :
if _status :
_fmsg = "(While getting instance(s) through API call \"" + _call + "\") " + _xfmsg
if identifier not in self.api_error_counter :
self.api_error_counter[identifier] = 0
self.api_error_counter[identifier] += 1
if self.api_error_counter[identifier] > self.max_api_errors :
raise CldOpsException(_fmsg, _status)
else :
cbwarn(_fmsg)
return []
else :
return _instances
@trace
def get_images(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_hyper = ''
_fmsg = "An error has occurred, but no error message was captured"
_storage_pool_handle = self.lvirtconn[obj_attr_list["host_cloud_ip"]].storagePoolLookupByName(obj_attr_list["poolname"])
_xml_contents = _storage_pool_handle.XMLDesc(0)
_xml_doc = libxml2.parseDoc(_xml_contents)
_xml_ctx = _xml_doc.xpathNewContext()
_path_list = _xml_ctx.xpathEval("/pool/target/path")
if _path_list :
obj_attr_list["pool_path"] = _path_list[0].content
_image_list = _storage_pool_handle.listVolumes()
_fmsg = "Please check if the defined image name is present on this "
_fmsg += self.get_description()
_candidate_images = []
for _image in _image_list :
if self.is_cloud_image_uuid(obj_attr_list["imageid1"]) :
if self.generate_random_uuid(_image) == obj_attr_list["imageid1"] :
_candidate_images.append(_image)
else :
if _image == obj_attr_list["imageid1"] :
_candidate_images.append(_image)
if len(_candidate_images) :
obj_attr_list["imageid1"] = _candidate_images[0]
obj_attr_list["boot_volume_imageid1"] = self.generate_random_uuid(_candidate_images[0])
_volume_data = _storage_pool_handle.storageVolLookupByName(_candidate_images[0])
obj_attr_list["boot_volume_snapshot_path"] = _volume_data.path()
obj_attr_list["boot_volume_snapshot_size"] = int(_storage_pool_handle.storageVolLookupByName(obj_attr_list["imageid1"]).info()[1])/(1024*1024)
_xml_contents = _volume_data.XMLDesc(0)
_xml_doc = libxml2.parseDoc(_xml_contents)
_xml_ctx = _xml_doc.xpathNewContext()
_volume_format = _xml_ctx.xpathEval("/volume/target/format/@type")
if _volume_format :
obj_attr_list["boot_volume_format"] = _volume_format[0].content
_status = 0
else :
_fmsg = "Unable to locate image \"" + obj_attr_list["imageid1"] + "\""
_fmsg += " on " + self.get_description()
_status = 1927
except libvirtError as msg :
_status = 18127
_fmsg = str(msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
if _status :
_msg = "Image Name (" + obj_attr_list["imageid1"] + ") not found: " + _fmsg
cberr(_msg)
raise CldOpsException(_msg, _status)
else :
return True
@trace
def get_networks(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_network_handle = self.lvirtconn[obj_attr_list["host_cloud_ip"]].networkLookupByName(obj_attr_list["netname"])
obj_attr_list["network_bridge_name"] = _network_handle.bridgeName()
obj_attr_list["extra_vnics"] = []
if str(obj_attr_list["extra_netnames"]).lower() != "false" :
for _exn in obj_attr_list["extra_netnames"].split(',') :
_network_handle = self.lvirtconn[obj_attr_list["host_cloud_ip"]].networkLookupByName(_exn)
_exbn = _network_handle.bridgeName()
obj_attr_list["extra_vnics"].append([_exn, _exbn])
_status = 0
except libvirtError as msg :
_status = 18127
_fmsg = str(msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
if _status :
_msg = "Network (" + obj_attr_list["prov_netname"] + " ) not found: " + _fmsg
cberr(_msg, True)
raise CldOpsException(_msg, _status)
else :
return True
@trace
def create_ssh_key(self, vmc_name, key_name, key_type, key_contents, key_fingerprint, vm_defaults, connection) :
'''
TBD
'''
return True
@trace
def is_cloud_image_uuid(self, imageid) :
'''
TBD
'''
if len(imageid) == 36 and imageid.count('-') == 4 :
return True
return False
@trace
def is_vm_running(self, obj_attr_list):
'''
TBD
'''
try :
if "host_cloud_ip" in obj_attr_list :
_host_ip = obj_attr_list["host_cloud_ip"]
else :
_host_ip = "all"
_instance = self.get_instances(obj_attr_list, "vm", _host_ip, obj_attr_list["cloud_vm_name"])
if _instance :
_instance_state = _instance.state()[0]
else :
_instance_state = "non-existent"
if _instance_state == VIR_DOMAIN_RUNNING :
return True
else :
return False
except Exception as e :
_status = 23
_fmsg = str(e)
raise CldOpsException(_fmsg, _status)
def is_vm_ready(self, obj_attr_list) :
'''
TBD
'''
if self.is_vm_running(obj_attr_list) :
if self.get_ip_address(obj_attr_list) :
obj_attr_list["last_known_state"] = "running with ip assigned"
return True
else :
obj_attr_list["last_known_state"] = "running with ip unassigned"
return False
else :
obj_attr_list["last_known_state"] = "not running"
return False
def vm_placement(self, obj_attr_list) :
'''
TBD
'''
obj_attr_list["host_name"], obj_attr_list["host_cloud_ip"] = hostname2ip(choice(list(self.lvirtconn.keys())), True)
return True
def vvcreate(self, obj_attr_list, boot = False) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
if not boot and "cloud_vv" not in obj_attr_list :
obj_attr_list["cloud_vv_uuid"] = "none"
else :
_xml_file = self.generate_libvirt_vv_template(obj_attr_list, boot)
obj_attr_list["last_known_state"] = "about to send volume create request"
if not boot :
obj_attr_list["cloud_vv_uuid"] = self.generate_random_uuid(obj_attr_list["cloud_vm_name"])
else :
obj_attr_list["boot_from_volume"] = "true"
self.common_messages("VV", obj_attr_list, "creating", _status, _fmsg)
_storage_pool_handle = self.lvirtconn[obj_attr_list["host_cloud_ip"]].storagePoolLookupByName(obj_attr_list["poolname"])
_storage_pool_handle.createXML(_xml_file, 0)
_status = 0
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except libvirtError as msg :
_status = 18127
_fmsg = str(msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
_status, _msg = self.common_messages("VV", obj_attr_list, "created", _status, _fmsg)
return _status, _msg
@trace
def vvdestroy(self, obj_attr_list, boot = False) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_storage_pool_handle = self.lvirtconn[obj_attr_list["host_cloud_ip"]].storagePoolLookupByName(obj_attr_list["poolname"])
for _volume in obj_attr_list["volume_list"].split(',') :
if _volume.count(":") == 4 :
_vol_name, _vol_path, _vol_format, _backing_path, _backing_format = _volume.strip().split(':')
_storage_pool_handle.storageVolLookupByName(_vol_name).delete(0)
_status = 0
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except libvirtError as msg :
_status = 18127
_fmsg = str(msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
_status, _msg = self.common_messages("VV", obj_attr_list, "destroyed", _status, _fmsg)
return _status, _msg
@trace
def vmcreate(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
self.determine_instance_name(obj_attr_list)
self.determine_key_name(obj_attr_list)
self.take_action_if_requested("VM", obj_attr_list, "provision_originated")
self.connect(obj_attr_list["access"], obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], obj_attr_list)
if self.is_vm_running(obj_attr_list) :
_msg = "An instance named \"" + obj_attr_list["cloud_vm_name"]
_msg += " is already running. It needs to be destroyed first."
_status = 187
cberr(_msg)
raise CldOpsException(_msg, _status)
if str(obj_attr_list["ports_base"]).lower() != "false" :
obj_attr_list["prov_cloud_port"] = str(int(obj_attr_list["ports_base"]) + int(obj_attr_list["name"].replace("vm_",'')))
if obj_attr_list["check_boot_complete"] == "tcp_on_22":
obj_attr_list["check_boot_complete"] = "tcp_on_" + str(obj_attr_list["prov_cloud_port"])
_time_mark_prs = int(time())
obj_attr_list["mgt_002_provisioning_request_sent"] = _time_mark_prs - int(obj_attr_list["mgt_001_provisioning_request_originated"])
self.vm_placement(obj_attr_list)
obj_attr_list["last_known_state"] = "about to send create request"
_mark_a = time()
self.get_images(obj_attr_list)
self.annotate_time_breakdown(obj_attr_list, "get_image_time", _mark_a)
_mark_a = time()
self.get_networks(obj_attr_list)
self.annotate_time_breakdown(obj_attr_list, "get_network_time", _mark_a)
_mark_a = time()
self.vvcreate(obj_attr_list, True)
self.annotate_time_breakdown(obj_attr_list, "get_create_boot_volume_time", _mark_a)
_mark_a = time()
self.vvcreate(obj_attr_list, False)
self.annotate_time_breakdown(obj_attr_list, "get_create_boot_volume_time", _mark_a)
obj_attr_list["config_drive"] = True
self.common_messages("VM", obj_attr_list, "creating", 0, '')
self.pre_vmcreate_process(obj_attr_list)
_mark_a = time()
self.generate_mac_addr(obj_attr_list)
self.ship_cloud_init_iso(obj_attr_list)
_xml_file = self.generate_libvirt_vm_template(obj_attr_list)
self.annotate_time_breakdown(obj_attr_list, "ship_cloudinit_iso_time", _mark_a)
_mark_a = time()
_domain = self.lvirtconn[obj_attr_list["host_cloud_ip"]].defineXML(_xml_file)
_domain.create()
self.annotate_time_breakdown(obj_attr_list, "domain_creation_time", _mark_a)
obj_attr_list["cloud_vm_uuid"] = self.generate_random_uuid(obj_attr_list["cloud_vm_name"])
self.take_action_if_requested("VM", obj_attr_list, "provision_started")
_time_mark_prc = self.wait_for_instance_ready(obj_attr_list, _time_mark_prs)
obj_attr_list["pcm_005_instance_creation_time"] = obj_attr_list["mgt_003_provisioning_request_completed"]
_mark_a = time()
if str(obj_attr_list["ports_base"]).lower() != "false" :
self.configure_port_mapping(obj_attr_list, "setup")
self.annotate_time_breakdown(obj_attr_list, "domain_port_mapping_time", _mark_a)
if str(obj_attr_list["ports_base"]).lower() != "false" :
if obj_attr_list["check_boot_complete"].lower() == "tcp_on_22" :
obj_attr_list["check_boot_complete"] = "tcp_on_" + str(obj_attr_list["prov_cloud_port"])
self.wait_for_instance_boot(obj_attr_list, _time_mark_prc)
obj_attr_list["arrival"] = int(time())
_status = 0
if obj_attr_list["force_failure"].lower() == "true" :
_fmsg = "Forced failure (option FORCE_FAILURE set \"true\")"
_status = 916
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except libvirtError as msg :
_status = 18127
_fmsg = str(msg)
except KeyboardInterrupt :
_status = 42
_fmsg = "CTRL-C interrupt"
cbdebug("VM create keyboard interrupt...", True)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
_status, _msg = self.common_messages("VM", obj_attr_list, "created", _status, _fmsg)
return _status, _msg
@trace
def vmdestroy(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_time_mark_drs = int(time())
if "mgt_901_deprovisioning_request_originated" not in obj_attr_list :
obj_attr_list["mgt_901_deprovisioning_request_originated"] = _time_mark_drs
obj_attr_list["mgt_902_deprovisioning_request_sent"] = \
_time_mark_drs - int(obj_attr_list["mgt_901_deprovisioning_request_originated"])
self.connect(obj_attr_list["access"], obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], obj_attr_list)
_wait = int(obj_attr_list["update_frequency"])
_max_tries = int(obj_attr_list["update_attempts"])
_curr_tries = 0
if "host_cloud_ip" in obj_attr_list :
_host_ip = obj_attr_list["host_cloud_ip"]
_instance = self.get_instances(obj_attr_list, "vm", _host_ip, \
obj_attr_list["cloud_vm_name"])
if _instance :
self.common_messages("VM", obj_attr_list, "destroying", 0, '')
while _instance and _curr_tries < _max_tries :
_instance = self.get_instances(obj_attr_list, "vm", _host_ip, \
obj_attr_list["cloud_vm_name"])
if _instance :
if _instance.state()[0] == VIR_DOMAIN_RUNNING :
_instance.destroy()
_instance.undefine()
sleep(_wait)
_curr_tries += 1
if str(obj_attr_list["ports_base"]).lower() != "false" :
self.configure_port_mapping(obj_attr_list, "teardown")
_time_mark_drc = int(time())
obj_attr_list["mgt_903_deprovisioning_request_completed"] = \
_time_mark_drc - _time_mark_drs
self.take_action_if_requested("VM", obj_attr_list, "deprovision_finished")
self.vvdestroy(obj_attr_list)
_status = 0
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except libvirtError as msg :
_status = 18127
_fmsg = str(msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
_status, _msg = self.common_messages("VM", obj_attr_list, "destroyed", _status, _fmsg)
return _status, _msg
@trace
def vmcapture(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
self.connect(obj_attr_list["access"], obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], obj_attr_list)
_wait = int(obj_attr_list["update_frequency"])
_host_ip = obj_attr_list["host_cloud_ip"]
_instance = self.get_instances(obj_attr_list, "vm", _host_ip, obj_attr_list["cloud_vm_name"])
if _instance :
_time_mark_crs = int(time())
# Just in case the instance does not exist, make crc = crs
_time_mark_crc = _time_mark_crs
obj_attr_list["mgt_102_capture_request_sent"] = _time_mark_crs - obj_attr_list["mgt_101_capture_request_originated"]
if obj_attr_list["captured_image_name"] == "auto" :
obj_attr_list["captured_image_name"] = obj_attr_list["imageid1"] + "_captured_at_"
obj_attr_list["captured_image_name"] += str(obj_attr_list["mgt_101_capture_request_originated"])
self.common_messages("VM", obj_attr_list, "capturing", 0, '')
_instance.destroy()
_vol_path = obj_attr_list["volume_list"].split(',')[0].split(':')[1]
_storage_pool_handle = self.lvirtconn[obj_attr_list["host_cloud_ip"]].storagePoolLookupByName(obj_attr_list["poolname"])
_volume_handle = self.lvirtconn[obj_attr_list["host_cloud_ip"]].storageVolLookupByPath(_vol_path)
_xml_file = ""
_xml_file += "\t<volume>\n"
_xml_file += "\t<capacity unit=\"M\">" + str(int(self.vhw_config[obj_attr_list["size"]]["vstorage"])) + "</capacity>\n"
_xml_file += "\t<name>" + obj_attr_list["captured_image_name"] + "</name>\n"
_xml_file += "\t<target>\n"
_xml_file += "\t\t<permissions>\n"
_xml_file += "\t\t\t<mode>0777</mode>\n"
_xml_file += "\t\t</permissions>\n"
_xml_file += "\t\t<path>" + obj_attr_list["pool_path"] + "</path>\n"
_xml_file += "\t\t<format type='" + "qcow2" + "'/>\n"
_xml_file += "\t</target>\n"
_xml_file += "\t</volume>\n"
_storage_pool_handle.createXMLFrom(_xml_file, _volume_handle, 0)
obj_attr_list["cloud_image_uuid"] = self.generate_random_uuid(obj_attr_list["captured_image_name"])
obj_attr_list["mgt_103_capture_request_completed"] = _time_mark_crc - _time_mark_crs
if "mgt_103_capture_request_completed" not in obj_attr_list :
obj_attr_list["mgt_999_capture_request_failed"] = int(time()) - _time_mark_crs
_status = 0
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except libvirtError as msg :
_status = 18127
_fmsg = str(msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
_status, _msg = self.common_messages("VM", obj_attr_list, "captured", _status, _fmsg)
return _status, _msg
def vmrunstate(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_ts = obj_attr_list["target_state"]
_cs = obj_attr_list["current_state"]
self.connect(obj_attr_list["access"], obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], obj_attr_list)
_wait = int(obj_attr_list["update_frequency"])
_curr_tries = 0
_max_tries = int(obj_attr_list["update_attempts"])
if "mgt_201_runstate_request_originated" in obj_attr_list :
_time_mark_rrs = int(time())
obj_attr_list["mgt_202_runstate_request_sent"] = \
_time_mark_rrs - obj_attr_list["mgt_201_runstate_request_originated"]
self.common_messages("VM", obj_attr_list, "runstate altering", 0, '')
_host_ip = obj_attr_list["host_cloud_ip"]
_instance = self.get_instances(obj_attr_list, "vm", _host_ip, obj_attr_list["cloud_vm_name"])
if _instance :
if _ts == "fail" :
_instance.stop()
elif _ts == "save" :
_instance.save()
elif (_ts == "attached" or _ts == "resume") and _cs == "fail" :
_instance.start()
elif (_ts == "attached" or _ts == "restore") and _cs == "save" :
_instance.restore()
_time_mark_rrc = int(time())
obj_attr_list["mgt_203_runstate_request_completed"] = _time_mark_rrc - _time_mark_rrs
_status = 0
except CldOpsException as obj :
_status = obj.status
_fmsg = str(obj.msg)
except libvirtError as msg :
_status = 18127
_fmsg = str(msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
_status, _msg = self.common_messages("VM", obj_attr_list, "runstate altered", _status, _fmsg)
return _status, _msg
@trace
def vmmigrate(self, obj_attr_list) :
'''
TBD
'''
return 0, "NOT SUPPORTED"
@trace
def vmresize(self, obj_attr_list) :
'''
TBD
'''
return 0, "NOT SUPPORTED"
@trace
def imgdelete(self, obj_attr_list) :
'''
TBD
'''
try :
_status = 100
_hyper = ''
_fmsg = "An error has occurred, but no error message was captured"
self.common_messages("IMG", obj_attr_list, "deleting", 0, '')
self.connect(obj_attr_list["access"], \
obj_attr_list["credentials"], \
obj_attr_list["vmc_name"], obj_attr_list)
for _endpoint in self.lvirtconn :
_storage_pool_handle = self.lvirtconn[_endpoint].storagePoolLookupByName(obj_attr_list["poolname"])
_image_list = _storage_pool_handle.listVolumes()
for _image in _image_list :
if self.is_cloud_image_uuid(obj_attr_list["imageid1"]) :
if self.generate_random_uuid(_image) == self.generate_random_uuid(obj_attr_list["imageid1"]) :
_storage_pool_handle.storageVolLookupByName(_image).delete(0)
break
else :
if _image == obj_attr_list["imageid1"] :
_storage_pool_handle.storageVolLookupByName(_image).delete(0)
break
_status = 0
except libvirtError as msg :
_status = 18127
_fmsg = str(msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
_status, _msg = self.common_messages("IMG", obj_attr_list, "deleted", _status, _fmsg)
return _status, _msg
def configure_port_mapping(self, obj_attr_list, operation) :
'''
TBD
'''
_status = 189
_fmsg = "About to configure port mapping"
# LXD does not provide an automated method to expose specific ports
# directly through the host's IP, like Docker does. For now, will
# resort to ssh into the host and start a new "rinetd" instance each
# time a new vmattach is issued.
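        # Illustrative sketch (hypothetical values, not taken from a real run):
        # for a guest named "cb-vm-1" with cloud_ip 10.0.0.5 and prov_cloud_port
        # 2022, the "setup" branch below effectively runs on the host:
        #   echo "0.0.0.0 2022 10.0.0.5 22" > /tmp/cb-vm-1.rinetd.conf; rinetd -c /tmp/cb-vm-1.rinetd.conf
        # i.e. rinetd forwards host port 2022 to port 22 of the instance, while
        # the teardown branch kills that rinetd process and removes the file.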
try :
_proc_man = ProcessManagement(username = "root", \
hostname = obj_attr_list["host_cloud_ip"], \
cloud_name = obj_attr_list["cloud_name"])
if operation == "setup" :
_cmd = "echo \"0.0.0.0 " + obj_attr_list["prov_cloud_port"] + ' '
_cmd += obj_attr_list["cloud_ip"] + " 22\" > /tmp/"
_cmd += obj_attr_list["cloud_vm_name"] + ".rinetd.conf; rinetd -c "
_cmd += "/tmp/" + obj_attr_list["cloud_vm_name"] + ".rinetd.conf"
_rexcpt = True
else:
_cmd = "sudo pkill -9 -f 'rinetd -c /tmp/" + obj_attr_list["cloud_vm_name"]
_cmd += ".rinetd.conf" + "'; sudo rm -rf /tmp/"
_cmd += obj_attr_list["cloud_vm_name"] + ".rinetd.conf"
_rexcpt = False
_msg = operation.capitalize() + " port mapping (" + obj_attr_list["prov_cloud_port"]
_msg += " -> 22) for " + obj_attr_list["name"]
_msg += " (cloud-assigned uuid " + obj_attr_list["cloud_vm_uuid"] + ") "
_msg += "running on libvirt host \"" + obj_attr_list["host_name"] + "\""
cbdebug(_msg, True)
_status, _result_stdout, _fmsg = _proc_man.run_os_command(_cmd, raise_exception = _rexcpt)
_status = 0
except ProcessManagement.ProcessManagementException as obj:
_status = obj.status
_fmsg = str(obj.msg)
except Exception as e :
_status = 23
_fmsg = str(e)
finally :
if _status :
_msg = "Error while attempting to " + operation + " port mapping for " + obj_attr_list["name"]
_msg += " (cloud-assigned uuid " + obj_attr_list["cloud_vm_uuid"] + ") "
_msg += "running on LXD host \"" + obj_attr_list["host_name"] + "\""
_msg += " in " + self.get_description() + " \"" + obj_attr_list["cloud_name"] + "\" : "
_msg += _fmsg
cberr(_msg, True)
raise CldOpsException(_msg, _status)
else :
_msg = "Successfully " + operation + " port mapping for " + obj_attr_list["name"]
_msg += " (cloud-assigned uuid " + obj_attr_list["cloud_vm_uuid"] + ") "
_msg += "running on LXD host \"" + obj_attr_list["host_name"] + "\""
_msg += " in " + self.get_description() + " \"" + obj_attr_list["cloud_name"]
_msg += "\"."
cbdebug(_msg)
return _status, _msg
def generate_libvirt_vv_template(self, obj_attr_list, boot = False) :
'''
TBD
'''
_xml_file = ""
_xml_file += "\t<volume>\n"
if boot :
obj_attr_list["cloud_vv_data_name"] = obj_attr_list["cloud_vv_name"]
obj_attr_list["cloud_vv_name"] = obj_attr_list["cloud_vv_name"].replace("-vv","-vbv")
if int(obj_attr_list["boot_volume_snapshot_size"]) > int(self.vhw_config[obj_attr_list["size"]]["vstorage"]) :
_xml_file += "\t<capacity unit=\"M\">" + str(int(obj_attr_list["boot_volume_snapshot_size"])) + "</capacity>\n"
else :
_xml_file += "\t<capacity unit=\"M\">" + str(int(self.vhw_config[obj_attr_list["size"]]["vstorage"])) + "</capacity>\n"
else :
obj_attr_list["cloud_vv_name"] = obj_attr_list["cloud_vv_data_name"]
_xml_file += "\t<capacity unit=\"G\">" + obj_attr_list["cloud_vv"] + "</capacity>\n"
_vol_name = obj_attr_list["cloud_vv_name"]
_xml_file += "\t<name>" + obj_attr_list["cloud_vv_name"] + "</name>\n"
_xml_file += "\t<target>\n"
_xml_file += "\t\t<permissions>\n"
_xml_file += "\t\t\t<mode>0777</mode>\n"
_xml_file += "\t\t</permissions>\n"
_xml_file += "\t\t<path>" + obj_attr_list["pool_path"] + "</path>\n"
if boot :
_vol_format = "qcow2"
else :
_vol_format = "raw"
obj_attr_list["cloud_vv_type"] = _vol_format
_xml_file += "\t\t<format type='" + _vol_format + "'/>\n"
_xml_file += "\t</target>\n"
if boot :
_backing_path = obj_attr_list["boot_volume_snapshot_path"]
_backing_format = obj_attr_list["boot_volume_format"]
else :
_backing_path = "none"
_backing_format = "none"
if _backing_path != "none" :
_xml_file += "\t<backingStore>\n"
_xml_file += "\t\t<path>" + _backing_path + "</path>\n"
_xml_file += "\t\t<format type='" + _backing_format + "'/>\n"
_xml_file += "\t</backingStore>\n"
_xml_file += "\t</volume>\n"
_vol_path = obj_attr_list["pool_path"] + '/' + _vol_name
obj_attr_list["volume_list"] += _vol_name + ':' + _vol_path + ':' + _vol_format + ':' + _backing_path + ':' + _backing_format + ','
return _xml_file
@trace
def generate_mac_addr(self, obj_attr_list) :
'''
        This function is designed to pseudo-deterministically generate MAC addresses.
        The standard 6-byte MAC address is split up as follows:
| prefix (X bytes long) | selector byte | suffix (Y bytes long) |
For example:
1. The user sets an X-byte long 'mac_prefix' == '12:34'. This is used to
represent all experiments in a shared cluster controlled by PLMloud.
For each shared cluster, this prefix should never need to change.
This prefix is also used in the DHCP server configuration to ensure
that requests from outside VMs are not answered to VMs that do not
belong to this cluster. If there is more than one private DHCP server
in the cluster, then, this mac_prefix should be changed, otherwise not.
2. The selector byte is generated automatically to provide additional
uniqueness and predictability in the MAC address to prevent
           collisions among users of the same shared cluster. In this
           implementation it is derived from a hash of the VM's cloud name
           combined with the experiment identifier.
3. The remaining Y-byte suffix is generated at provisioning time. This is done
by having the datastore maintain a counter that represents the last used
MAC address. An increasing counter ensures that collisions never happen
but only requires a small amount of memory even when the number of Y
bytes in the suffix is very large.
'''
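        # Worked example (hypothetical values, for illustration only): with the
        # fixed prefix "52:54:00" (8 characters) there are
        # (17 - 8) // 3 - 1 = 2 suffix bytes left, so the datastore counter
        # fills the last two octets. For a selector byte of "ab" and a counter
        # value of 258 (0x0102), the generated address would be
        # 52:54:00:ab:01:02.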
# Form the 1st two parts of the MAC address
_mac_prefix = "52:54:00"
        bytes_needed = (17 - len(_mac_prefix)) // 3 - 1
unique_mac_selector_key = obj_attr_list["cloud_vm_name"] + obj_attr_list["experiment_id"]
selector_hd = sha256(unique_mac_selector_key.encode('utf-8')).hexdigest()
selector_pos = randint(0,len(selector_hd)-2)
selector_byte = selector_hd[selector_pos:selector_pos+2]
mac = _mac_prefix + ":" + selector_byte
for x in range(0, int(bytes_needed)) :
byte = ((int(obj_attr_list["counter"]) >> (8 * ((int(bytes_needed) - 1) - x))) & 0xff)
mac += (":%02x" % (byte))
obj_attr_list["cloud_vm_mac"] = mac.replace('-', ':')
return True
def generate_libvirt_vm_template(self, obj_attr_list) :
'''
TBD
'''
if obj_attr_list["hypervisor"] == "xen" :
_xml_template = "<domain type='xen' "
else :
_xml_template = "<domain type='kvm' "
_xml_template += ">\n"
_xml_template += "\t<name>" + str(obj_attr_list["cloud_vm_name"]) + "</name>\n"
# _xml_template += "\t<uuid>" + str(instance_attr_list["cloud_uuid"]) + "</uuid>\n"
_xml_template += "\t<memory>" + str(int(self.vhw_config[obj_attr_list["size"]]["vmem"]) * 1024) + "</memory>\n"
_xml_template += "\t<currentMemory>" + str(int(self.vhw_config[obj_attr_list["size"]]["vmem"]) * 1024) + "</currentMemory>\n"
if obj_attr_list["arch"] == "ppc64" or obj_attr_list["arch"] == "ppc64le" :
_xml_template += "\t<vcpu placement='static'>" + str(int(self.vhw_config[obj_attr_list["size"]]["vcpus"])) + "</vcpu>\n"
_xml_template += "\t<resource>\n"
_xml_template += "\t\t<partition>/machine</partition>\n"
_xml_template += "\t</resource>\n"
else :
_xml_template += "\t<vcpu>" + str(int(self.vhw_config[obj_attr_list["size"]]["vcpus"])) + "</vcpu>\n"
_xml_template += "\t<os>\n"
if obj_attr_list["hypervisor"] == "xen" :
_xml_template += "\t\t<type arch='x86_64' machine='xenfv'>hvm</type>\n"
else :
if obj_attr_list["arch"] == "ppc64" or obj_attr_list["arch"] == "ppc64le" :
_xml_template += "\t\t<type arch='ppc64' machine='pseries'>hvm</type>\n"
else :
_xml_template += "\t\t<type arch='x86_64' machine='pc'>hvm</type>\n"
if obj_attr_list["hypervisor"] == "xen" :
_xml_template += "\t\t<loader>/usr/lib/xen/boot/hvmloader</loader>\n"
_xml_template += "\t\t<boot dev='hd'/>\n"
_xml_template += "\t</os>\n"
_xml_template += "\t<features>\n"
_xml_template += "\t\t<acpi/>\n"
_xml_template += "\t\t<apic/>\n"
# _xml_template += "\t\t<pae/>\n"
_xml_template += "\t</features>\n"
_xml_template += "\t<cpu mode='host-model'>\n"
_xml_template += "\t<model fallback='allow'/>\n"
_xml_template += "\t</cpu>\n"
_xml_template += "\t<clock offset='utc'>\n"
_xml_template += "\t\t<timer name='rtc' tickpolicy='catchup'/>\n"
_xml_template += "\t\t<timer name='pit' tickpolicy='delay'/>\n"
_xml_template += "\t\t<timer name='hpet' present='no'/>\n"
_xml_template += "\t</clock>\n"
_xml_template += "\t<devices>\n"
_xml_template += "\t\t<emulator>" + obj_attr_list["emulator"] + "</emulator>\n"
_disk_number = 0
for _volume in obj_attr_list["volume_list"].split(',') + [ "cloud-init" + ':' + obj_attr_list["host_remote_dir"] + obj_attr_list["cloud_vm_name"] + ".iso:" + "raw" + ':' + "none" + ':' + "none" ] :
if _volume.count(':') == 4 :
_vol_name, _vol_path, _vol_format, _backing_path, _backing_format = _volume.split(':')
_xml_template += "\t\t<disk type='file' device='disk'>\n"
_xml_template += "\t\t\t<driver name='qemu' type='" + _vol_format + "'/>\n"
_xml_template += "\t\t\t<source file='" + _vol_path + "'/>\n"
if _backing_path != "none" :
_xml_template += "\t\t\t<backingStore type='file'>\n"
_xml_template += "\t\t\t\t<source file='" + _backing_path + "'/>\n"
_xml_template += "\t\t\t\t<format type='" + _backing_format + "'/>\n"
_xml_template += "\t\t\t</backingStore>\n"
_xml_template += "\t\t\t<target dev='"
if obj_attr_list["diskmode"] == "virtio" :
_xml_template += "v"
elif obj_attr_list["diskmode"] == "ide" :
_xml_template += "h"
elif obj_attr_list["diskmode"] == "scsi" :
_xml_template += "s"
_xml_template += "d" + chr(ord('a') + _disk_number) + "' bus='" + obj_attr_list["diskmode"] + "'/>\n"
_xml_template += "\t\t</disk>\n"
_disk_number += 1
if obj_attr_list["arch"] == "ppc64" or obj_attr_list["arch"] == "ppc64le" :
_xml_template += "\t\t<controller type='usb' index='0'>\n"
_xml_template += "\t\t\t<alias name='usb0'/>\n"
_xml_template += "\t\t</controller>\n"
_xml_template += "\t\t<controller type='pci' index='0' model='pci-root'>\n"
_xml_template += "\t\t\t<alias name='pci.0'/>\n"
_xml_template += "\t\t</controller>\n"
_xml_template += "\t\t<controller type='scsi' index='0'>\n"
_xml_template += "\t\t\t<alias name='scsi0'/>\n"
_xml_template += "\t\t\t<address type='spapr-vio' reg='0x2000'/>\n"
_xml_template += "\t\t</controller>\n"
_xml_template += "\t\t<interface type='bridge'>\n"
_xml_template += "\t\t\t<source bridge='" + obj_attr_list["network_bridge_name"] + "'/>\n"
_xml_template += "\t\t\t<mac address='" + str(obj_attr_list["cloud_vm_mac"]) + "'/>\n"
if obj_attr_list["netmode"] == "virtio" :
_xml_template += "\t\t\t<model type='virtio'/>\n"
_xml_template += "\t\t</interface>\n"
for _vnic in obj_attr_list["extra_vnics"] :
_xml_template += "\t\t<interface type='bridge'>\n"
_xml_template += "\t\t\t<source bridge='" + _vnic[1] + "'/>\n"
# _xml_template += "\t\t\t<mac address='" + str(obj_attr_list["cloud_vm_mac"]) + "'/>\n"
if obj_attr_list["netmode"] == "virtio" :
_xml_template += "\t\t\t<model type='virtio'/>\n"
_xml_template += "\t\t</interface>\n"
obj_attr_list["extra_vnics"] = str(obj_attr_list["extra_vnics"])
if obj_attr_list["arch"] == "ppc64" or obj_attr_list["arch"] == "ppc64le" :
_port = str(30000 + int(obj_attr_list["counter"]))
_xml_template += "\t\t<serial type='tcp'>\n"
_xml_template += "\t\t\t<source mode='bind' host='0.0.0.0' service='" + _port + "'/>\n"
_xml_template += "\t\t\t<protocol type='telnet'/>\n"
_xml_template += "\t\t\t<target port='0'/>\n"
_xml_template += "\t\t\t<alias name='serial0'/>\n"
_xml_template += "\t\t\t<address type='spapr-vio' reg='0x30000000'/>\n"
_xml_template += "\t\t</serial>\n"
_xml_template += "\t\t<console type='tcp'>\n"
_xml_template += "\t\t\t<source mode='bind' host='0.0.0.0' service='" + _port + "'/>\n"
_xml_template += "\t\t\t<protocol type='telnet'/>\n"
_xml_template += "\t\t\t<target type='serial' port='0'/>\n"
_xml_template += "\t\t\t<alias name='serial0'/>\n"
_xml_template += "\t\t\t<address type='spapr-vio' reg='0x30000000'/>\n"
_xml_template += "\t\t</console>\n"
else :
_xml_template += "\t\t<serial type='pty'>\n"
_xml_template += "\t\t\t<target port='0'/>\n"
_xml_template += "\t\t</serial>\n"
_xml_template += "\t\t<console type='pty'>\n"
_xml_template += "\t\t\t<target port='0'/>\n"
_xml_template += "\t\t</console>\n"
_xml_template += "\t\t<input type='tablet' bus='usb'>\n"
_xml_template += "\t\t\t<alias name='input0'/>\n"
_xml_template += "\t\t</input>\n"
_xml_template += "\t\t<input type='mouse' bus='ps2'/>\n"
_xml_template += "\t\t<graphics type='vnc' port='-1' autoport='yes' listen='" + obj_attr_list["host_cloud_ip"] + "' keymap='en-us'/>\n"
_xml_template += "\t\t<video>\n"
if obj_attr_list["arch"] == "x86_64" :
_xml_template += "\t\t\t<model type='cirrus' vram='9216' heads='1'/>\n"
else :
_xml_template += "\t\t\t<model type='vga' vram='9216' heads='1'/>\n"
_xml_template += "\t\t</video>\n"
if obj_attr_list["hypervisor"] == "xen" :
_xml_template += "\t\t<memballoon model='xen'/>\n"
else :
if obj_attr_list["arch"] == "ppc64" or obj_attr_list["arch"] == "ppc64le" :
                pass
else :
_xml_template += "\t\t<memballoon model='virtio'/>\n"
_xml_template += "\t</devices>\n"
if obj_attr_list["arch"] == "ppc64" or obj_attr_list["arch"] == "ppc64le" :
_xml_template += "\t<seclabel type='none'/>\n"
_xml_template += "</domain>\n"
return _xml_template
```
#### File: lib/remote/ssh_ops.py
```python
from time import sleep
from subprocess import PIPE,Popen
import base64
import hashlib
import binascii
from os.path import isdir
import re
from ..auxiliary.code_instrumentation import trace, cbdebug, cberr, cbwarn, cbinfo, cbcrit
from ..auxiliary.data_ops import wait_on_process
from ..remote.process_management import ProcessManagement
class SSHMgdConn :
'''
TBD
'''
def __init__(self, procid, obj_types, obj_tags, obj_ips, obj_logins, \
passwords, priv_keys, command_list, file_list) :
'''
TBD
'''
self.pid = procid
self.obj_types = obj_types
self.obj_tags = obj_tags
self.ips = obj_ips
self.logins = obj_logins
self.priv_keys = priv_keys
self.file_list = file_list
self.command_list = command_list
def finish_up (self, procs, output_list, results) :
'''
TBD
'''
success = True
for proc in procs :
if success :
if not wait_on_process(self.pid, proc, output_list) :
success = False
else :
if results is not None :
results.append(output_list[-1])
else :
proc.kill()
return success
def execute(self) :
'''
TBD
'''
output_list = []
procs = []
for index in range (0, len(self.ips)) :
if self.command_list[index].strip() == "" :
_msg = "nothing to execute."
cbwarn(_msg)
output_list.append(_msg)
continue
_cmd = "ssh -i " + self.priv_keys[index]
_cmd += " -o StrictHostKeyChecking=no "
_cmd += "-o UserKnownHostsFile=/dev/null "
_cmd += "-l " + self.logins[index] + " "
_cmd += self.ips[index] + " \"" + self.command_list[index] + "\""
_msg = "SSH: " + _cmd
cbdebug(_msg)
proc_h = Popen(_cmd, bufsize=-1, shell=True, stdout=PIPE, stderr=PIPE)
if not proc_h :
_msg = "Failed to create subprocess with " + _cmd
cberr(_msg)
return False
procs.append(proc_h)
return self.finish_up(procs, output_list, None), output_list
def transfer(self) :
'''
TBD
'''
output_list = []
procs = []
file_list = ""
hash_file_list = ""
remote_file_list = []
for file in self.file_list :
file_list += " " + file + " "
if not isdir(file) :
hash_file_list += " " + file.split("/")[-1]
remote_file_list.append(file)
hash_cmd = "for file in " + hash_file_list + "; do sha256sum \$file | " + \
"sed -e 's/ \+/,/g'; done"
for index in range (0, len(self.ips)) :
_cmd = "scp -i " + self.priv_keys[index] + " -o StrictHostKeyChecking=no "
_cmd += file_list + " " + self.logins[index] + '@' + self.ips[index] + ":"
_msg = "SCP: " + _cmd
cbdebug(_msg)
proc_h = Popen(_cmd, bufsize=-1, shell=True, stdout=PIPE, stderr=PIPE)
if not proc_h :
_msg = "Failed to create subprocess with " + _cmd
cberr(_msg)
return False
procs.append(proc_h)
status = self.finish_up(procs, output_list, None)
if not status :
return status, output_list
_msg = " - Going to verify SCP file integrity..."
cbdebug(_msg)
output_list = []
procs = []
for index in range (0, len(self.ips)) :
_cmd = "ssh -i " + self.priv_keys[index] + " -o StrictHostKeyChecking=no "
_cmd += " -l " + self.logins[index] + " "
_cmd += self.ips[index] + " \"" + hash_cmd + "\""
_msg = "SSH: " + _cmd
cbdebug(_msg)
proc_h = Popen(_cmd, bufsize=-1, shell=True, stdout=PIPE, stderr=PIPE)
if not proc_h :
_msg = "Failed to create subprocess with " + _cmd
cberr(_msg)
return False
procs.append(proc_h)
remote_hash_lines = []
status = self.finish_up(procs, output_list, remote_hash_lines)
if not status :
return status, output_list
for index in range (0, len(self.ips)) :
remote_hashes = []
for line in re.split("[\n\r]+", remote_hash_lines[index]) :
if line != "" :
record = line.split(',')
if len(record) != 2 :
_msg = "integrity check failed: not enough hashes: "
_msg += str(remote_hash_lines[index])
cberr(_msg)
return False, output_list
remote_hashes.append(record[0])
for file_index in range (0, len(remote_file_list)) :
local_file = remote_file_list[file_index]
if isdir(local_file) :
continue
remote_hex = remote_hashes[file_index]
local_hex = ""
try :
local_hash = hashlib.sha256()
                    local_hash.update(open(local_file, 'rb').read())
local_hex = local_hash.hexdigest()
except Exception as msg :
_msg = "Failed to verify SCP integrity: " + str(msg)
cberr(_msg)
return False, output_list
if local_hex != remote_hex :
_msg = "Integrity failed for " + local_file + ": "
_msg += local_hex + " != " + remote_hex
cberr(_msg)
return False, output_list
_msg = self.ips[index] + ": Good file integrity."
cbdebug(_msg)
return status, output_list
def repeated_ssh(processid, types, tags, ips, logins, passwds, keys, commands, \
files, obj_attr_list, operation) :
'''
TBD
'''
ssh_cnt = SSHMgdConn(processid, types, tags, ips, logins, passwds, keys, \
commands, files)
attempts = int(obj_attr_list["update_attempts"])
while attempts :
# Finally we try to start the application on each VM.
if operation == "transfer" :
_success, _stack_results = ssh_cnt.transfer()
else :
_success, _stack_results = ssh_cnt.execute()
_all_stack_results = ''
if _success :
for _output in _stack_results :
if not _output or _output.count("NOK") :
_msg = "Command failed: " + str(_output) + ' '
_msg += str(attempts) + " left..."
cberr(_msg)
return False
_all_stack_results += "-------------------------\n"
_all_stack_results += ''.join(_output)
_msg = " - Remote commands for object name " + obj_attr_list["name"]
_msg += " success. "
cbdebug(_msg)
break
else :
            _msg = " - Remote commands for object name " + obj_attr_list["name"]
_msg += " failed to execute.\n"
for _output in _stack_results :
_all_stack_results += "-------------------------\n"
_all_stack_results += ''.join(_output)
_msg += "Error info:\n"
_msg += _all_stack_results
cberr(_msg)
return False
sleep(30)
if not attempts :
_msg = "giving up. Too many attempts."
cberr(_msg)
return False
return True
def get_ssh_key(pub_key_fn, fptype = "common", read_from_file = True) :
'''
TBD
'''
if read_from_file :
_fh = open(pub_key_fn, 'r')
_pub_key = _fh.read()
_fh.close()
else :
_pub_key = pub_key_fn
_key_type = False
_key_contents = False
for _element in _pub_key.split() :
if not _key_type :
_key_type = _element
else :
if not _key_contents :
_key_contents = _element
if not _key_contents :
_fmsg = "ERROR: unknown format for pubkey file. The pubkey has to be in"
_fmsg += " the format \"<KEY-TYPE> <KEY-CONTENTS> [<KEY-USERNAME>]"
return _fmsg, False, False
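    # For reference, a typical OpenSSH public key line has the shape (example
    # values only): "ssh-rsa AAAAB3NzaC1yc2E... user@host", i.e. key type,
    # base64 key contents and an optional comment/username.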
if fptype == "Amazon Elastic Compute Cloud" or fptype == "EC2" or fptype == "ec2" :
_key_fingerprint = key2ec2fp(pub_key_fn)
elif fptype == "IBM Cloud" or fptype == "IBM" or fptype == "ibm" :
_key_fingerprint = keyibmfp(_key_contents.encode('utf-8'))
elif fptype == "SoftLayer Cloud" or fptype == "SLR" or fptype == "slr" :
_key_fingerprint = keyibmfp(_key_contents.encode('utf-8'))
else :
_key_fingerprint = key2fp(_key_contents)
return _key_type, _key_contents, _key_fingerprint
def key2fp(pubkey_contents):
'''
TBD
'''
key = base64.b64decode(pubkey_contents.encode('ascii'))
fp_plain = hashlib.md5(key).hexdigest()
return ':'.join(a+b for a,b in zip(fp_plain[::2], fp_plain[1::2]))
def keyibmfp(pubkey_contents):
'''
TBD
'''
key = bytes(pubkey_contents)
fp_plain = base64.b64encode(hashlib.sha256(binascii.a2b_base64(key)).digest()).rstrip(b'=')
return "SHA256:" + fp_plain.decode('utf-8')
def key2ec2fp(pub_key_fn) :
'''
TBD
'''
pub_key_fn = pub_key_fn.replace(".pub",'')
_proc_man = ProcessManagement()
_cmdline = "openssl pkey -in " + pub_key_fn + " -pubout -outform DER | openssl md5 -c"
_status, _result_stdout, _result_stderr = _proc_man.run_os_command(_cmdline)
return _result_stdout.strip().replace("(stdin)= ",'')
def get_public_rsa_fingerprint(pubkey_contents):
"""
Returns the fingerprint of the public portion of an RSA key as a
47-character string (32 characters separated every 2 characters by a ':').
The fingerprint is computed using the MD5 (hex) digest of the DER-encoded
RSA public key.
"""
md5digest = hashlib.md5(pubkey_contents).hexdigest()
fingerprint = insert_char_every_n_chars(md5digest, ':', 2)
return fingerprint
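# Usage sketch for get_public_rsa_fingerprint() (hypothetical input): called
# with the DER-encoded public key as bytes, it returns a string shaped like
# "d4:1d:8c:d9:8f:00:b2:04:e9:80:09:98:ec:f8:42:7e" -- the 32-character MD5
# hex digest split into 2-character groups by insert_char_every_n_chars().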
def insert_char_every_n_chars(string, char='\n', every=64):
return char.join(string[i:i + every] for i in range(0, len(string), every))
```
#### File: cbtool/regression/real_multicloud_regression.py
```python
from sys import path, argv
from time import sleep,time,strftime
from optparse import OptionParser
from datetime import datetime
import fnmatch
import os
import pwd
import subprocess
import prettytable
import json
home = os.environ["HOME"]
username = pwd.getpwuid(os.getuid())[0]
_path_set = False
_cb_api_path = "NA"
_cb_cli_path = "NA"
def cli_positional_argument_parser() :
'''
TBD
'''
if len(argv) < 2 :
print("./" + argv[0] + " <multi cloud config dir> [comma-separated value cloud model list] [minimal|low|medium|high|complete|pause] [noheader]")
exit(1)
_options, args = cli_named_option_parser()
_options.cloud_config_dir = argv[1]
_options.pause = False
_options.cloud_models = [ "sim" ]
if len(argv) > 2 :
_options.cloud_models = argv[2].split(',')
_options.test_instances = True
_options.test_ssh = True
_options.test_volumes = True
_options.test_failure = True
_options.test_capture = True
_options.pause = False
_options.private_results = False
_options.noheader = False
_options.headeronly = False
if len(argv) > 3 :
if argv[3] == "minimal" or argv[3] == "lowest" :
_options.test_instances = False
_options.test_ssh = False
_options.test_volumes = False
_options.test_failure = False
_options.test_capture = False
if argv[3] == "low" :
_options.test_instances = True
_options.test_ssh = False
_options.test_volumes = False
_options.test_failure = False
_options.test_capture = False
if argv[3] == "medium" :
_options.test_instances = True
_options.test_ssh = True
_options.test_volumes = True
_options.test_failure = False
_options.test_capture = False
if argv[3] == "high" :
_options.test_instances = True
_options.test_ssh = True
_options.test_volumes = True
_options.test_failure = True
_options.test_capture = False
if argv[3] == "complete" or argv[3] == "highest" :
_options.test_instances = True
_options.test_ssh = True
_options.test_volumes = True
_options.test_failure = True
_options.test_capture = True
if argv[3] == "pause" :
_options.pause = True
if len(argv) > 4 :
if argv[4] == "private" :
_options.private_results = True
if len(argv) > 5 :
if argv[5] == "noheader" :
_options.noheader = True
if argv[5] == "headeronly" :
_options.headeronly = True
return _options
def cli_named_option_parser() :
'''
Reserved for future use
'''
usage = '''usage: %prog [options] [command]
'''
parser = OptionParser(usage)
(options, args) = parser.parse_args()
return options, args
for _path, _dirs, _files in os.walk(os.path.abspath(path[0] + "/../../")):
for _filename in fnmatch.filter(_files, "code_instrumentation.py") :
if _path.count("cloudbench/lib/auxiliary") or _path.count("cbtool/lib/auxiliary") :
path.append(_path.replace("/lib/auxiliary",''))
_path_set = True
_cb_api_path = _path
break
if _path_set :
break
for i in range(0, 10) :
_cb_cli_path = os.path.abspath(path[0]) + "/../"*i + "cb"
if os.access(_cb_cli_path, os.F_OK) :
break
def print_msg(message) :
'''
TBD
'''
print(datetime.fromtimestamp(time()).strftime('%Y-%m-%d %H:%M:%S') + ' ' + message)
print_msg("CBTOOL client API library found on \"" + _cb_api_path + "\"")
print_msg("CBTOOL executable CLI found on \"" + _cb_cli_path + "\"")
from lib.api.api_service_client import *
def check_cloud_attach(apiconn, cloud_model, time_mark) :
'''
TBD
'''
try :
_cloud_attached = False
print_msg("## Checking if a Cloud Model \"" + cloud_model + "\" is attached to this experiment...")
_error = False
_fmsg = ''
_cloud_name = "NA"
for _cloud in apiconn.cldlist() :
if _cloud["model"] == cloud_model :
_cloud_name = _cloud["name"]
_cloud_attached = True
break
if not _cloud_attached :
_msg = "## Unable to find a Cloud Model \"" + cloud_model + "\" attached to this experiment."
else :
_msg = "## Successfully confirmed that a Cloud Model \"" + cloud_model + "\" (\"" + _cloud_name + "\") was attached to this experiment."
_msg += "Setting new expid"
apiconn.expid(_cloud_name, "NEWEXPID")
_cloud_attach_time = int(time() - time_mark)
except APIException as obj :
_error = True
_fmsg = "API Problem (" + str(obj.status) + "): " + obj.msg
except Exception as msg :
_error = True
_fmsg = "Problem during experiment: " + str(msg)
finally :
if _cloud_attached :
print_msg(_msg)
_result = "PASS (" + str(_cloud_attach_time).center(3,' ') + " )"
else :
if not _error :
print_msg(_msg)
_result = "FAIL"
else :
print_msg(_fmsg)
_result = "FAIL"
return _result, _cloud_name
def check_vm_attach(apiconn, cloud_model, cloud_name, test_case, options) :
'''
TBD
'''
try :
_attach_error = False
_delete_error = False
_fmsg = ''
_vm = {}
_vms_failed = 0
print('')
if cloud_name == "NA" :
raise ValueError('No cloud (' + cloud_model + ") attached!")
_model_to_imguuid = {}
_model_to_imguuid["sim"] = "default"
_model_to_imguuid["pcm"] = "default"
_model_to_imguuid["pdm"] = "cbtoolbt-ubuntu"
_model_to_imguuid["nop"] = "default"
_model_to_imguuid["osk"] = "bionic-server-cloudimg-amd64"
_model_to_imguuid["os"] = "bionic-server-cloudimg-amd64"
_model_to_imguuid["gen"] = "xenial3"
_model_to_imguuid["plm"] = "bionic-server-cloudimg-amd64.img"
_model_to_imguuid["ec2"] = "default"
_model_to_imguuid["gce"] = "ubuntu-1804-bionic-v20190320"
_model_to_imguuid["do"] = "default"
_model_to_imguuid["slr"] = "2110219"
_model_to_imguuid["kub"] = "cb_nullworkloadcolonmaster"
_model_to_imguuid["plm"] = "bionic-server-cloudimg-amd64.img"
_model_to_imguuid["as"] = "CanonicalcolonUbuntuServercolon18.04-LTScolon18.04.202006101"
_model_to_imguuid["sim"] = "default"
_model_to_imguuid["pcm"] = "default"
_model_to_imguuid["nop"] = "default"
_model_to_imguuid["gen"] = "default"
_model_to_imguuid["ec2"] = "default"
_model_to_imguuid["do"] = "default"
_model_to_imguuid["sim"] = "default"
_model_to_imguuid["pcm"] = "default"
_model_to_imguuid["nop"] = "default"
_model_to_imguuid["gen"] = "default"
_model_to_imguuid["plm"] = "default"
_model_to_imguuid["ec2"] = "default"
_model_to_imguuid["do"] = "default"
_model_to_imguuid["as"] = "default"
_model_to_login = {}
_model_to_login["sim"] = "cbuser"
_model_to_login["pcm"] = "cbuser"
_model_to_login["pdm"] = "cbuser"
_model_to_login["plm"] = "cbuser"
_model_to_login["nop"] = "cbuser"
_model_to_login["osk"] = "cbuser"
_model_to_login["os"] = "cbuser"
_model_to_login["gen"] = "cbuser"
_model_to_login["ec2"] = "cbuser"
_model_to_login["gce"] = "cbuser"
_model_to_login["do"] = "cbuser"
_model_to_login["slr"] = "cbuser"
_model_to_login["kub"] = "cbuser"
_model_to_login["as"] = "cbuser"
_model_to_command = {}
_model_to_command["sim"] = "echo 'volume_list'"
_model_to_command["pcm"] = "vm_name sudo mount | grep '/ ' | awk '{ print \$1 }'"
_model_to_command["pdm"] = "vm_name sudo mount | grep overlay | awk '{ print \$1 }' && sudo ls /mnt"
_model_to_command["plm"] = "vm_name sudo fdisk -l | grep Disk | grep bytes | cut -d ' ' -f 2 | sed 's/://g'"
_model_to_command["nop"] = "echo NA"
_model_to_command["osk"] = "vm_name sudo fdisk -l | grep Disk | grep bytes | cut -d ' ' -f 2 | sed 's/://g'"
_model_to_command["os"] = "vm_name sudo fdisk -l | grep Disk | grep bytes | cut -d ' ' -f 2 | sed 's/://g'"
_model_to_command["gen"] = "vm_name sudo fdisk -l | grep Disk | grep bytes | cut -d ' ' -f 2 | sed 's/://g'"
_model_to_command["ec2"] = "vm_name sudo fdisk -l | grep Disk | grep bytes | grep -v /dev/ram | cut -d ' ' -f 2 | sed 's/://g'"
_model_to_command["gce"] = "vm_name sudo fdisk -l | grep Disk | grep bytes | grep -v /dev/ram | cut -d ' ' -f 2 | sed 's/://g'"
_model_to_command["do"] = "vm_name sudo fdisk -l | grep Disk | grep bytes | grep -v /dev/ram | cut -d ' ' -f 2 | sed 's/://g'"
_model_to_command["slr"] = "vm_name sudo fdisk -l | grep Disk | grep bytes | cut -d ' ' -f 2 | sed 's/://g'"
_model_to_command["kub"] = "echo NA"
_model_to_command["as"] = "vm_name sudo fdisk -l | grep Disk | grep bytes | grep -v /dev/ram | cut -d ' ' -f 2 | sed 's/://g'"
_vm_location = "auto"
_meta_tags = "empty"
_size = "default"
_pause_step = "none"
_nop_cloud_ip = "self"
_login = _model_to_login[cloud_model]
_temp_attr_list = "login=" + _login
if test_case.count("pubkey") :
_img_name = _model_to_imguuid[cloud_model]
else :
_img_name = "regressiontest"
#if test_case == "no pubkey injection, no volume" :
_vm_role = "check:" + _img_name
if test_case == "pubkey injection, no volume" :
_vm_role = "check:" + _img_name + ':' + _login
if test_case.count(", volume") :
_temp_attr_list += ",cloud_vv=5"
if test_case.count(", vpn") :
_temp_attr_list += ",use_vpn_ip=true,ports_base=false"
if test_case.count("force failure") :
_temp_attr_list += ",force_failure=true"
if test_case == "newly captured image" :
_vm_role = "check:regressiontest:" + _login
if test_case == "non-existent image failure" :
_vm_role = "check:regressiontest:" + _login
if cloud_model == "nop" :
if _nop_cloud_ip == "self" :
_command = "sudo ifconfig docker0 | grep inet[[:space:]] | awk '{ print $2 }'"
_command = "sudo getent hosts cloudbencha | awk '{ print $1 }'"
_proc_h = subprocess.Popen(_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_resul = _proc_h.communicate()
_nop_cloud_ip = _resul[0].decode('utf-8').replace('\n','')
_temp_attr_list += ",cloud_ip=" + _nop_cloud_ip
_vms_failed = int(apiconn.stats(cloud_name, "all", "noprint", "true")["experiment_counters"]["VM"]["failed"])
_vm_counters = apiconn.stats(cloud_name, "all", "noprint", "true")["experiment_counters"]["VM"]
print_msg("###### Status before vmattach")
print_msg("###### VM RESERVATIONS: " + str(_vm_counters["reservations"]))
print_msg("###### VM FAILED: " + str(_vm_counters["failed"]))
print_msg("###### VM REPORTED: " + str(_vm_counters["reported"]))
print_msg("## Testing VM Attach (" + test_case + ") using \"vmattach " + _vm_role + ' ' + _vm_location + " size=" + _size + ' ' + _temp_attr_list + "\"...")
_mark_a = time()
_vm = apiconn.vmattach(cloud_name, _vm_role, _vm_location, _meta_tags, _size, _pause_step, _temp_attr_list)
_create_time = int(time() - _mark_a)
if "volume_list" not in _vm :
_vm["volume_list"] = ''
if options.pause :
print(json.dumps(_vm, indent=4, sort_keys=True))
input("Press Enter to continue...")
_msg = "#### Testing management performance metrics for VM \""
_msg += _vm["name"] + "\" (" + _vm["cloud_vm_name"] + '/'
_msg += _vm["cloud_vm_uuid"] + ")"
if str(_vm["cloud_vv_uuid"]).lower() != "none" :
_msg += ", connected to volume \"" + _vm["cloud_vv_name"] + "\" ("
_msg += _vm["cloud_vv_uuid"] + ")..."
else :
_msg += "..."
print_msg(_msg)
_mgt_metric = apiconn.get_latest_management_data(cloud_name, _vm["uuid"])
# print _mgt_metric
except APIException as obj :
_attach_error = True
_fmsg = "API Problem (" + str(obj.status) + "): " + obj.msg
except APINoSuchMetricException as obj :
_attach_error = True
_fmsg = "API Problem (" + str(obj.status) + "): " + obj.msg
except KeyboardInterrupt :
print_msg("Aborting this VM.")
except Exception as msg :
_attach_error = True
_fmsg = "Problem during experiment: " + str(msg)
finally :
_vm_counters = apiconn.stats(cloud_name, "all", "noprint", "true")["experiment_counters"]["VM"]
print_msg("###### Status after vmattach")
print_msg("###### VM RESERVATIONS: " + str(_vm_counters["reservations"]))
print_msg("###### VM FAILED: " + str(_vm_counters["failed"]))
print_msg("###### VM REPORTED: " + str(_vm_counters["reported"]))
if not test_case.count("failure") :
if int(_vm_counters["reservations"]) == 1 and int(_vm_counters["failed"]) - _vms_failed == 0 and int(_vm_counters["reported"]) == 1 :
if test_case.count("no volume") :
if "cloud_vv_uuid" in _vm and str(_vm["cloud_vv_uuid"]).lower() == "none" :
_result = "PASS"
if not test_case.count("newly") and (test_case.count("no pubkey injection") or test_case.count(", vpn")) :
if _vm["prov_cloud_ip"] == _vm["run_cloud_ip"] :
_result += (" p=r=" + _vm["run_cloud_ip"]).center(35,' ')
else :
_result += (" p=" + _vm["prov_cloud_ip"] + ",r=" + _vm["run_cloud_ip"]).center(35, ' ')
else :
_result += (' ' + retriable_execute_command(options, apiconn, cloud_name, cloud_model, _vm, _model_to_command) + ' ').center(35,' ')
_result += " (" + str(_create_time).center(3,' ')
else :
_attach_error = True
_result = "FAIL"
else :
print_msg("######## VV UUID: " + str(_vm["cloud_vv_uuid"]).lower())
if str(_vm["cloud_vv_uuid"]).lower() == "not supported" :
_result = "NA".center(59,' ')
elif str(_vm["cloud_vv_uuid"]).lower() != "none" :
_result = "PASS"
_result += (' ' + retriable_execute_command(options, apiconn, cloud_name, cloud_model, _vm, _model_to_command) + ' ').center(45,' ')
_result += " (" + str(_create_time).center(3,' ')
else :
_attach_error = True
_result = "FAIL"
else :
_attach_error = True
_result = "FAIL".center(49, ' ')
else :
if int(_vm_counters["reservations"]) == 0 and int(_vm_counters["failed"]) - _vms_failed != 0 and int(_vm_counters["reported"]) == 0 :
_result = "PASS"
_attach_error = False
else :
_result = "FAILW".center(30, ' ')
_attach_error = True
if not test_case.count("failure") and "uuid" in _vm :
print_msg("#### Testing VM Detach (" + test_case + ")...")
try :
_mark_a = time()
apiconn.vmdetach(cloud_name, _vm["uuid"])
_delete_time = int(time() - _mark_a)
except APIException as obj :
_delete_error = True
_fmsg = "API Problem (" + str(obj.status) + "): " + obj.msg
except APINoSuchMetricException as obj :
_delete_error = True
_fmsg = "API Problem (" + str(obj.status) + "): " + obj.msg
except KeyboardInterrupt :
print_msg("Aborting this VM.")
except Exception as msg :
_delete_error = True
_fmsg = "Problem during experiment: " + str(msg)
if not _delete_error :
_vm_counters = apiconn.stats(cloud_name, "all", "noprint", "true")["experiment_counters"]["VM"]
print_msg("###### Status after vmdetach")
print_msg("###### VM RESERVATIONS: " + str(_vm_counters["reservations"]))
print_msg("###### VM FAILED: " + str(_vm_counters["failed"]))
print_msg("###### VM REPORTED: " + str(_vm_counters["reported"]))
if int(_vm_counters["reservations"]) > 0 or int(_vm_counters["reported"]) > 0:
_delete_error = True
print_msg("#### ERROR while testing VM Detach (" + test_case + ")")
_fmsg = "VM reservations or reported is not equal zero"
_result = "FAIL"
else :
print_msg("#### Successfully tested VM Detach (" + test_case + ")")
if _result.count('(') :
_result += '/' + str(_delete_time).center(3,' ') + ')'
if _result.count("p=") :
_result = _result.center(41, ' ')
if test_case.count("failure") :
print_msg("######### " + _fmsg)
_vm_counters = apiconn.stats(cloud_name, "all", "noprint", "true")["experiment_counters"]["VM"]
print_msg("###### Status after vmattach \"failure\"")
print_msg("###### VM RESERVATIONS: " + str(_vm_counters["reservations"]))
print_msg("###### VM FAILED: " + str(_vm_counters["failed"]))
print_msg("###### VM REPORTED: " + str(_vm_counters["reported"]))
if int(_vm_counters["reservations"]) == 0 and int(_vm_counters["reported"]) == 0:
_attach_error = False
if not _attach_error and not _delete_error :
_msg = "## Successfully tested VM Attach (" + test_case + ") using image \"" + _img_name + "\""
print_msg(_msg)
else :
_fmsg = "## ERROR while testing VM Attach (" + test_case + ") using image \"" + _img_name + "\": " + _fmsg
print_msg(_fmsg)
return _result
def check_vm_capture(apiconn, cloud_model, cloud_name, options) :
'''
TBD
'''
try :
print('')
_error = False
_fmsg = ''
if cloud_name == "NA" :
raise ValueError('No cloud (' + cloud_model + ") attached!")
print_msg("## Testing VM Capture ...")
_mark_a = time()
_vm = apiconn.vmcapture(cloud_name, "youngest", "regressiontest", "none", False)
_capture_time = int(time() - _mark_a)
if options.pause :
print(json.dumps(_vm, indent=4, sort_keys=True))
input("Press Enter to continue...")
_vm_counters = apiconn.stats(cloud_name, "all", "noprint", "true")["experiment_counters"]["VM"]
print_msg("###### Status after vmcapture")
print_msg("###### VM RESERVATIONS: " + str(_vm_counters["reservations"]))
print_msg("###### VM FAILED: " + str(_vm_counters["failed"]))
print_msg("###### VM REPORTED: " + str(_vm_counters["reported"]))
if int(_vm_counters["reservations"]) > 0 or int(_vm_counters["reported"]) > 0:
_error = True
_fmsg = "## ERROR while testing VM Capture"
else :
            _msg = "## Successfully tested VM Capture."
except APIException as obj :
_error = True
_fmsg = "API Problem (" + str(obj.status) + "): " + obj.msg
except APINoSuchMetricException as obj :
_error = True
_fmsg = "API Problem (" + str(obj.status) + "): " + obj.msg
except KeyboardInterrupt :
print_msg("Aborting this VM.")
except Exception as msg :
_error = True
_fmsg = "##Problem during experiment: " + str(msg)
finally :
if _error :
print_msg(_fmsg)
return "FAIL"
else :
print_msg(_msg)
return "PASS (" + str(_capture_time).center(3,' ') + ')'
def check_img_delete(apiconn, cloud_model, cloud_name, options) :
'''
TBD
'''
try :
print('')
_error = False
_fmsg = ''
if cloud_name == "NA" :
raise ValueError('No cloud (' + cloud_model + ") attached!")
_vmc = apiconn.vmclist(cloud_name)[0]["name"]
print_msg("## Testing IMAGE Delete ... (" + _vmc + ")")
_mark_a = time()
_img = apiconn.imgdelete(cloud_name, "regressiontest", _vmc, True)
_imgdelete_time = int(time() - _mark_a)
if options.pause :
print(json.dumps(_img, indent=4, sort_keys=True))
input("Press Enter to continue...")
        _msg = "## Successfully tested IMAGE Delete."
except APIException as obj :
_error = True
_fmsg = "API Problem (" + str(obj.status) + "): " + obj.msg
except APINoSuchMetricException as obj :
_error = True
_fmsg = "API Problem (" + str(obj.status) + "): " + obj.msg
except KeyboardInterrupt :
print_msg("Aborting this IMG.")
except Exception as msg :
_error = True
_fmsg = "Problem during experiment: " + str(msg)
finally :
if _error :
print_msg(_fmsg)
return "FAIL"
else :
print_msg(_msg)
return "PASS (" + str(_imgdelete_time).center(3,' ') + ')'
def retriable_cloud_connection(options, actual_cloud_model, command) :
'''
TBD
'''
_api = False
_attempts = 3
_attempt = 0
while not _api and _attempt < _attempts :
try :
(_output_stdout, _output_stderr) = (None, None)
print_msg("Attaching Cloud Model \"" + actual_cloud_model + "\" by running the command \"" + command + "\"...")
_proc_h = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(_output_stdout, _output_stderr) = _proc_h.communicate()
_proc_h.wait()
_output_stdout = _output_stdout.decode('utf-8')
_output_stderr = _output_stderr.decode('utf-8')
_status = _proc_h.returncode
# if _output_stdout :
# print(_output_stdout)
if _status :
print_msg("ERROR while attempting to attach Cloud Model \"" + actual_cloud_model + "\"")
print(_output_stderr)
exit(_status)
if options.private_results :
api_file_name = "/tmp/cb_api_" + username + '_' + actual_cloud_model
else :
api_file_name = "/tmp/cb_api_" + username
if os.access(api_file_name, os.F_OK) :
try :
_fd = open(api_file_name, 'r')
_api_conn_info = _fd.read()
_fd.close()
except :
_msg = "Unable to open file containing API connection information "
_msg += "(" + api_file_name + ")."
print_msg(_msg)
exit(4)
else :
_msg = "Unable to locate file containing API connection information "
_msg += "(" + api_file_name + ")."
print_msg(_msg)
exit(4)
_msg = "Connecting to API daemon (" + _api_conn_info + ")..."
print_msg(_msg)
_api = APIClient(_api_conn_info)
return _api
except Exception as msg :
_api = False
_attempt += 1
_msg = "Error: " + str(msg)
print_msg(_msg)
sleep(10)
def retriable_execute_command(options, apiconn, cloud_name, actual_cloud_model, vm_attr_list, command_to_model) :
'''
TBD
'''
_attempts = 60
_attempt = 0
_volume_list = "NA"
print_msg("###### Executing command \"" + command_to_model[actual_cloud_model].replace("vm_name", vm_attr_list["name"]).replace("volume_list", vm_attr_list["volume_list"]) + "\"...")
while _attempt < _attempts :
try :
_volume_list = apiconn.shell(cloud_name, command_to_model[actual_cloud_model].replace("vm_name", vm_attr_list["name"]).replace("volume_list", vm_attr_list["volume_list"]))["stdout"]
_volume_list = _volume_list.replace(' ','').replace("\n",',')[0:-1]
return _volume_list
except :
_attempt += 1
sleep(10)
return _volume_list
def write_results(options, test_results_table, cloud_model) :
'''
TBD
'''
if options.headeronly :
test_results_table.add_row([" ".center(22, ' '),\
" ", \
" ", \
" ", \
" ", \
" ", \
" ", \
" ", \
" ", \
" ", \
" "])
_x_test_results_table = test_results_table.get_string().split('\n')
if options.noheader :
_x_test_results_table = '\n'.join(_x_test_results_table[7:-1])
else :
_x_test_results_table = test_results_table.get_string().split('\n')
_aux = _x_test_results_table[2]
_x_test_results_table[2] = _x_test_results_table[3]
_x_test_results_table[3] = _x_test_results_table[4]
_x_test_results_table[4] = _x_test_results_table[5]
_x_test_results_table[5] = _x_test_results_table[6]
_x_test_results_table[6] = _x_test_results_table[7]
_x_test_results_table[7] = _aux
if options.headeronly :
_x_test_results_table = _x_test_results_table[0:-2]
_x_test_results_table = '\n'.join(_x_test_results_table)
if options.private_results :
_fn = "/tmp/" + cloud_model + "_real_multicloud_regression_test.txt"
else :
_fn = "/tmp/real_multicloud_regression_test.txt"
_fh = open(_fn, "w")
_fh.write(str(_x_test_results_table))
if options.private_results :
_fh.write('\n')
_fh.close()
if not options.headeronly :
print(_x_test_results_table)
return True
def main() :
'''
TBD
'''
    _options = cli_positional_argument_parser()
_first_header = ["Cloud Model", \
"Cloud Attach", \
"VM Attach", \
" VM Attach ", \
" VM Attach ", \
" VM Attach ", \
" VM Attach ", \
" VM Attach ",\
"VM Capture", \
"VM Attach ", \
"IMAGE Delete"]
_second_header = ['', \
'', \
"no pubkey injection", \
"pubkey injection", \
"pubkey injection", \
"pubkey injection", \
"pubkey injection", \
"pubkey injection", \
'' , \
"pubkey injection", \
'', ]
_third_header = [strftime("%Y-%m-%d"), \
strftime("%H:%M:%S"), \
"pre-existing image", \
"pre-existing image", \
"pre-existing image", \
"pre-existing image", \
"non-existent image", \
"pre-existing image", \
'', \
"newly captured image", \
'', ]
_fourth_header = ['', \
'', \
"no volume", \
"no volume", \
"no volume", \
"volume", \
"no volume", \
"no volume", \
'', \
"no volume" , \
'', ]
_fifth_header = ['', \
'', \
"no failure", \
"no failure", \
"no failure", \
"no failure", \
"failure", \
"forced failure", \
'', \
"no failure" , \
'', ]
_sixth_header = ['', \
'', \
"no vpn", \
"no vpn", \
"vpn", \
"no vpn", \
"no vpn", \
"no vpn", \
'', \
"no vpn", \
'', ]
_test_results_table = prettytable.PrettyTable(_first_header)
_test_results_table.add_row(_second_header)
_test_results_table.add_row(_third_header)
_test_results_table.add_row(_fourth_header)
_test_results_table.add_row(_fifth_header)
_test_results_table.add_row(_sixth_header)
if _options.headeronly :
write_results(_options, _test_results_table, _options.cloud_models[0].replace("_cloud_definitions.txt",''))
exit(0)
_at_least_one_error = False
for _cloud_model in _options.cloud_models :
_cloud_model_file = _cloud_model
_cloud_model_name = _cloud_model.replace("_cloud_definitions.txt",'')
if _cloud_model_name[0:2] == "cb" :
_cloud_model_name = _cloud_model_name[2:]
_start = int(time())
print('')
if _options.private_results :
_reset = " --soft_reset"
else :
_reset = " --hard_reset"
_command = _cb_cli_path + _reset + " --config " + _options.cloud_config_dir + '/' + _cloud_model_file + " exit"
_actual_cloud_model = _cloud_model_name.replace("file",'').replace("fip",'')
_display_cloud_model = _cloud_model_name.replace("file"," (file)").replace("fip", " (fip)")
_mark_a = time()
api = retriable_cloud_connection(_options, _actual_cloud_model, _command)
_results_row = []
_results_row.append(_display_cloud_model)
if _options.pause :
input("Press Enter to continue...")
_cloud_result, _cloud_name = check_cloud_attach(api, _actual_cloud_model, _mark_a)
_results_row.append(_cloud_result)
_test_cases = ["NA", "NA", "NA", "NA", "NA", "NA", "NA", "NA", "NA" ]
if _options.test_instances :
_test_cases[0] = "no pubkey injection, no volume, no vpn"
if _options.test_ssh :
_test_cases[1] = "pubkey injection, no volume, no vpn"
if _options.test_ssh :
_test_cases[2] = "pubkey injection, no volume, vpn"
if _options.test_volumes :
_test_cases[3] = "pubkey injection, volume, no vpn"
if _options.test_failure :
_test_cases[4] = "non-existent image failure, no vpn"
_test_cases[5] = "pubkey injection, force failure, no vpn"
if _options.test_capture :
_test_cases[4] = "non-existent image failure, no vpn"
_test_cases[5] = "pubkey injection, force failure, no vpn"
_test_cases[6] = "vm capture"
_test_cases[7] = "newly captured image, no volume"
_test_cases[8] = "image delete"
if _actual_cloud_model == "sim" :
_test_cases[2] = "NA"
if _actual_cloud_model == "pdm" :
_test_cases[2] = "NA"
if _actual_cloud_model == "pcm" :
_test_cases[2] = "NA"
if _actual_cloud_model == "nop" :
_test_cases[2] = "NA"
if _actual_cloud_model == "kub" :
_test_cases[2] = "NA"
_test_cases[3] = "NA"
_test_cases[4] = "NA"
_test_cases[5] = "NA"
_test_cases[6] = "NA"
if _actual_cloud_model == "osk" :
if _cloud_model_name.count("fip") :
_test_cases[2] = "NA"
_test_cases[4] = "NA"
_test_cases[5] = "NA"
_test_cases[6] = "NA"
elif _cloud_model_name.count("file") :
_test_cases[1] = "NA"
_test_cases[2] = "NA"
_test_cases[3] = "NA"
_test_cases[4] = "NA"
_test_cases[5] = "NA"
_test_cases[6] = "NA"
else :
_test_cases[1] = "NA"
_test_cases[3] = "NA"
if _actual_cloud_model == "os" :
if _cloud_model_name.count("fip") :
_test_cases[2] = "NA"
_test_cases[4] = "NA"
_test_cases[5] = "NA"
_test_cases[6] = "NA"
else :
_test_cases[1] = "NA"
_test_cases[3] = "NA"
if _actual_cloud_model == "as" :
_test_cases[2] = "NA"
_test_cases[3] = "NA"
_test_cases[4] = "NA"
_test_cases[5] = "NA"
_test_cases[6] = "NA"
if _actual_cloud_model == "gen" :
_test_cases[2] = "NA"
_test_cases[4] = "NA"
_test_cases[5] = "NA"
_test_cases[6] = "NA"
if _actual_cloud_model == "slr" :
_test_cases[2] = "NA"
_test_cases[3] = "NA"
for _test_case in _test_cases :
if _test_case.count("vm capture") :
_results_row.append(check_vm_capture(api, _actual_cloud_model, _cloud_name, _options))
elif _test_case.count("image delete") :
_results_row.append(check_img_delete(api, _actual_cloud_model, _cloud_name, _options))
elif _test_case == "NA":
_results_row.append("NA")
else :
_results_row.append(check_vm_attach(api, _actual_cloud_model, _cloud_name, _test_case, _options) )
_results_row[0] = _results_row[0] + " ( " + str(int(time())-_start) + "s )"
_results_row[0] = _results_row[0].center(22, ' ')
if _results_row[1] == "NA" :
_results_row[1] = _results_row[1].center(49,' ')
if _results_row[2] == "NA" :
_results_row[2] = _results_row[2].center(49,' ')
if _results_row[3] == "NA" :
_results_row[3] = _results_row[3].center(49,' ')
if _results_row[4] == "NA" :
_results_row[4] = _results_row[4].center(59,' ')
if _results_row[5] == "NA" :
_results_row[5] = _results_row[5].center(49,' ')
_test_results_table.add_row(_results_row)
write_results(_options, _test_results_table, _cloud_model_name)
_error = False
_fn = "/tmp/" + _cloud_model_name + "_real_multicloud_regression_ecode.txt"
_fh = open(_fn, "w")
if _error :
_fh.write(str(1))
_fh.close()
_at_least_one_error = True
else :
_fh.write(str(0))
_fh.close()
if _at_least_one_error :
exit(1)
else :
exit(0)
main()
``` |
{
"source": "jpavlic/docker-nodejs",
"score": 2
} |
#### File: tests/SmokeTests/__init__.py
```python
import unittest
import urllib2
import time
import json
class SmokeTests(unittest.TestCase):
def smoke_test_container(self, port):
self.assertTrue(1 == 1, "Always passes")
class NodeTest(SmokeTests):
def test_hub_and_node_up(self):
self.smoke_test_container(8080)
self.smoke_test_container(80)
class StandaloneTest(SmokeTests):
def test_standalone_up(self):
self.smoke_test_container(80)
``` |
{
"source": "jpawitro/005-road-inspection",
"score": 2
} |
#### File: jpawitro/005-road-inspection/app.py
```python
import os
import io
import pickle
from tqdm import tqdm
import numpy as np
import pandas as pd
import cv2
import pywt
from PIL import Image
from tensorflow import keras
from flask import Flask, jsonify, request
shape = (512,512)
le = pickle.load(open(os.path.join("model","le.sav"),"rb"))
model = keras.models.load_model('model')
transdict = {
"retak buaya": "Area Crack",
"retak garis": "Line Crack",
"tidak retak": "Good Condition"
}
def prepare_image(img):
img = np.frombuffer(img, np.uint8)
arr = cv2.imdecode(img, cv2.IMREAD_COLOR)
arr = cv2.resize(arr,shape)
arr = cv2.cvtColor(arr, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(arr)
clahe = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8,8))
cl = clahe.apply(l)
arr = cv2.merge((cl,a,b))
arr = cv2.cvtColor(arr, cv2.COLOR_LAB2BGR)
arr = cv2.cvtColor(arr,cv2.COLOR_BGR2GRAY)
coeffs2 = pywt.dwt2(arr, 'bior1.3')
LL, (LH, HL, HH) = coeffs2
return LL
def predict_result(img):
X = np.array(img)/255
input_shape = (X.shape[1],X.shape[2])
X = X.reshape(-1, input_shape[0], input_shape[1], 1)
predictions = model.predict(X)
predictions = np.argmax(predictions, axis=-1)
results = [transdict[p] for p in le.inverse_transform(predictions)]
return results
app = Flask(__name__)
@app.route('/predict', methods=['POST'])
def infer_image():
if 'file' not in request.files:
return "Please try again. The Image doesn't exist"
file = request.files.getlist('file')
if not file:
return
imgs = []
for f in file:
img_bytes = f.read()
img = prepare_image(img_bytes)
imgs.append(img)
return jsonify(prediction=predict_result(imgs))
@app.route('/', methods=['GET'])
def index():
return 'Machine Learning Inference'
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
``` |
{
"source": "jpawlata/random-strings-generator",
"score": 4
} |
#### File: jpawlata/random-strings-generator/generator.py
```python
from secrets import choice
from tabulate import tabulate
import string as st
def length_input():
# User's input validation
while True:
        length = input("Number of characters: ")
        try:
            length = int(length)
            break
        except ValueError:
            print("Please enter a number")
    return length
def number_input():
# User's input validation
while True:
number = input("Number of strings: ")
try:
number = int(number)
break
except ValueError:
print("Please enter a number")
return number
def string_generator(length, number):
    # Generate random strings
    chars = st.ascii_letters + st.digits
    strings = [["".join(choice(chars) for i in range(length))] for num in range(number)]
return strings
strings = string_generator(length_input(), number_input())
headers = ["Index", "Strings"]
print(tabulate(strings, headers = headers, tablefmt = "orgtbl", showindex = "always"))
``` |
{
"source": "jpazarzis/helot_configuration",
"score": 3
} |
#### File: helot/common/configuration.py
```python
import json
import logging
import os
import yaml
class ConfigurationError(Exception):
"""Configuration Error."""
class _DataHolderObject(object):
"""Used for the conversion of a dict to a python object."""
def _get_as_formated_string(self, number_of_tabs=0):
"""Returns the object as a formatted string. """
key_value_pairs = []
prefix = '\t' * number_of_tabs
for attr_name in self._active_attributes():
value = getattr(self, attr_name)
if isinstance(value, _DataHolderObject):
key_value_desc = ''.join(
[
prefix,
attr_name,
': \n',
value._get_as_formated_string(number_of_tabs + 1)
]
)
else:
key_value_desc = prefix + '{}: {}'.format(
attr_name,
getattr(self, attr_name)
)
key_value_pairs.append(key_value_desc)
return '\n'.join(key_value_pairs)
def _active_attributes(self):
"""Yields all the common attributes."""
for attr_name in dir(self):
if attr_name.startswith('__') and attr_name.endswith('__'):
continue
if callable(getattr(self, attr_name)):
continue
yield attr_name
def __getattr__(self, item):
"""Permits for x1.x2.y1 = value syntax."""
if item == '__test__':
return
if item not in self.__dict__:
setattr(self, item, _DataHolderObject())
return self.__dict__.get(item)
class _Configuration(_DataHolderObject):
"""Holds Configuration settings.
A setting can be accessed using "dot" resolution, meaning like a class level
attribute following the structure of the yaml common file that was
used to call the initialize method.
"""
def __str__(self):
"""Returns the object in a user friendly string format."""
return self.__class__.__name__ + '\n' + self._get_as_formated_string(1)
def reset(self):
"""Removes all common settings."""
for attr_name in self._active_attributes():
delattr(self, attr_name)
def initialize(self, data_holder=None, **kwargs):
"""Specifies the configuration attributes.
Arguments
data_holder: Can be one of the following:
(str) The yaml or json common filename.
(dict) A dict containing key - value pairs.
**kwargs: key-value pairs to add in the common.
Exceptions
ConfigurationError: In case of any parsing error.
Examples
>>> from helot.common import configuration as c
>>> c.initialize({'a': 1})
>>> c.a
1
>>> print(c)
_Configuration
a: 1
>>> c.reset()
>>> print(c)
_Configuration
>>> c.initialize(a='test')
>>> print(c)
_Configuration
a: test
>>> with open('test.json', 'w') as f:
... f.write('{"a": "test" }')
...
14
>>> c.initialize('test.json', name='unknown')
>>> c.name
'unknown'
>>> c.a
'test'
"""
try:
self.reset()
if not data_holder:
data_holder = {}
if isinstance(data_holder, dict):
data_as_dict = data_holder
elif str(data_holder).endswith('json'):
data_as_dict = json.load(open(data_holder))
elif str(data_holder).endswith('yaml'):
                data_as_dict = yaml.safe_load(open(data_holder))
else:
raise ConfigurationError("Failed to load: %s" % data_holder)
except Exception as ex:
raise ConfigurationError(ex)
else:
data_as_dict.update(kwargs)
for key, value in data_as_dict.items():
setattr(self, key, _make_holder_object(value))
def _make_holder_object(item):
"""Used to convert a dictionary to a python object.
:param item: Can either be a dictionary, a list / tuple or a scalar.
:return: The corresponding python object.
"""
if isinstance(item, dict):
obj = _DataHolderObject()
for key, value in item.items():
setattr(obj, key, _make_holder_object(value))
return obj
elif isinstance(item, (list, tuple)):
return [_make_holder_object(x) for x in item]
else:
return item
# The configuration object to expose.
configuration = _Configuration()
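# A minimal usage sketch of the configuration object above (only names defined in this
# module are used); it shows the nested "dot" access described in the _Configuration
# docstring.
def _usage_example():
    configuration.initialize({'db': {'host': 'localhost', 'port': 5432}}, env='dev')
    assert configuration.db.host == 'localhost'
    assert configuration.db.port == 5432
    assert configuration.env == 'dev'
    configuration.reset()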
``` |
{
"source": "jpazarzis/helot_mysql",
"score": 2
} |
#### File: mysql/tests/wrappers_test.py
```python
import csv
import os
import unittest
import unittest.mock as mock
from helot.common import configuration
from helot.mysql import db_connection
from helot.mysql import execute_query
from helot.mysql import make_non_query_executor
from helot.mysql import make_query_executor
from helot.mysql import query_executor_user
_CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
_RESOURCES_DIR = os.path.join(_CURRENT_DIR, 'resources')
_CONIFIGURATION_FILENAME = os.path.join(_RESOURCES_DIR, 'mysql.yaml')
_INVALID_CONIFIGURATION_FILENAME = os.path.join(_RESOURCES_DIR, 'invalid.yaml')
_WORLD_CAPITALS_FILENAME = os.path.join(_RESOURCES_DIR, 'world_capitals.csv')
_SQL_SELECT_CAPITALS = 'Select country, capital from world_capitals'
_SQL_DROP_DB = 'DROP Database If EXISTS {}'.format
_SQL_CREATE_DB = 'create Database {}'.format
_SQL_INSERT_CAPITAL = '''
Insert into world_capitals (
country,
capital
)
values (
'{country}',
'{capital}'
)
'''.format
_SQL_CREATE_TABLE = '''
CREATE TABLE if not exists `world_capitals` (
`country_id` int NOT NULL AUTO_INCREMENT,
`country` varchar(128) DEFAULT NULL,
`capital` varchar(128) DEFAULT NULL,
PRIMARY KEY (`country_id`)
)
'''
# In-memory configuration used by the tests in place of the yaml file path defined above.
_CONFIGURATION_DATA = {
'mysql': {
'host': 'localhost',
'user': 'root',
'passwd': '<PASSWORD>',
'db': 'test'
}
}
class TestMysqlWrapper(unittest.TestCase):
def setUp(self):
        configuration.initialize(_CONFIGURATION_DATA)
with make_non_query_executor(connect_to_db=False) as execute_query:
db_name = configuration.mysql.db
stmts = [
_SQL_DROP_DB(db_name),
_SQL_CREATE_DB(db_name),
]
for sql in stmts:
execute_query(sql)
self.capitals = [
(country, capital)
for country, capital in csv.reader(open(_WORLD_CAPITALS_FILENAME))
]
with make_non_query_executor() as execute_query:
execute_query(_SQL_CREATE_TABLE)
for country, capital in self.capitals:
sql = _SQL_INSERT_CAPITAL(
country=country.replace("'", "''"),
capital=capital.replace("'", "''")
)
execute_query(sql)
def test_make_query_executor(self):
with make_query_executor() as execute_query:
sql = _SQL_SELECT_CAPITALS
retrieved = [
(row.country, row.capital) for row in execute_query(sql)
]
self.assertListEqual(retrieved, self.capitals)
def test_execute_query(self):
retrieved = []
for row in execute_query(_SQL_SELECT_CAPITALS):
retrieved.append((row.country, row.capital))
self.assertListEqual(retrieved, self.capitals)
@query_executor_user
def test_execute_query_user(self, execute_query):
sql = _SQL_SELECT_CAPITALS
retrieved = [
(row.country, row.capital) for row in execute_query(sql)
]
self.assertListEqual(retrieved, self.capitals)
# @mock.patch.object(mysql_wrapper, 'configuration')
# @mock.patch.object(mysql_wrapper, 'MySQLdb')
# def test_db_connection(self, mocked_MySQLdb, mocked_configuration):
# mocked_configuration.mysql.host = 'hst'
# mocked_configuration.mysql.user = 'usr'
# mocked_configuration.mysql.passwd = '<PASSWORD>'
# mocked_configuration.mysql.db = 'db'
#
# params = {
# 'host': 'hst',
# 'user': 'usr',
# 'passwd': '<PASSWORD>',
# 'db': 'db'
# }
#
# with db_connection():
# pass
#
# mocked_MySQLdb.connect.assert_called_with(**params)
#
# with db_connection(host='junk', user='junk', passwd='<PASSWORD>', db='kk'):
# pass
#
# params['host'] = 'junk'
# params['user'] = 'junk'
# params['passwd'] = '<PASSWORD>'
# params['db'] = 'kk'
# mocked_MySQLdb.connect.assert_called_with(**params)
``` |
{
"source": "jpazdera/PazdKaha22",
"score": 3
} |
#### File: ltpFR3_MTurk/ListGen/ltpFR3_listgen.py
```python
import random
import itertools
import numpy
import sys
import json
import copy
def make_bins_ltpFR3(semArray):
"""
Creates four equal-width bins of WAS scores, identical to those used in ltpFR2. Then combine the middle two to give
three bins: low similarity, medium similarity, and high similarity.
A coordinate in semRows[i][j] and semCols[i][j] is the index of the jth word pair in semArray that falls in the ith
similarity bin.
"""
semArray_nondiag = semArray[numpy.where(semArray != 1)]
# Find lowest and highest similarity
min_sim = semArray_nondiag.min()
max_sim = semArray_nondiag.max()
    # Split the similarity range into four equally spaced boundary points (three equal-width bins)
semBins = list(numpy.linspace(min_sim, max_sim, 4))
# Combine the two middle bins by removing the bin boundary between them
# semBins = semBins[:2] + semBins[3:]
# Create bounds for the bins
semBins = zip(*[semBins[i:] + semBins[-1:i] for i in range(2)])
# For word pairs within the bounds of each bin, append the indices to semRows and semCols
semRows = []
semCols = []
for bin in semBins:
(i, j) = ((semArray > bin[0]) & (semArray < bin[1])).nonzero()
semRows.append(i)
semCols.append(j)
return semRows, semCols
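# A small illustrative helper (a sketch that only calls the function above; the toy
# similarity values are made up): it prints which word-pair indices land in each similarity
# bin. Note that the strict inequalities above leave pairs lying exactly on a bin boundary
# (including the global minimum and maximum similarity) unassigned to any bin.
def _inspect_bins_example():
    toy_sim = numpy.array([[1.0, 0.2, 0.5],
                           [0.2, 1.0, 0.8],
                           [0.5, 0.8, 1.0]])
    sem_rows, sem_cols = make_bins_ltpFR3(toy_sim)
    for bin_num, (rows, cols) in enumerate(zip(sem_rows, sem_cols)):
        print 'bin', bin_num, ':', zip(rows.tolist(), cols.tolist())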
def randomize_conditions_ltpFR3(config):
"""
Randomize the conditions for all sessions.
:param config: The imported configuration file, containing all parameters for the experiment
:return: A list of lists, where sublist n contains the ordering of list conditions for the nth session. cond[x][y][0]
defines the length of session x, list y; cond[x][y][1] defines the presentation rate of session x, list y;
cond[x][y][2] defines whether session x, list y uses visual or auditory presentation; cond[x][y][3] defines the
duration of the pre-list distractor task for session x, list y.
"""
options = [c for c in itertools.product(config.listLength, config.presRate, config.modality, config.distDur)]
cond = []
for i in range(config.nSessions):
sess = []
for j in range(config.reps):
random.shuffle(options)
sess += options[:]
cond.append(sess)
return cond
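# A quick way to preview the condition orderings (a sketch, not part of the experiment
# code): the function only reads listLength, presRate, modality, distDur, nSessions and
# reps from the config, so a stand-in object with illustrative values is enough to
# exercise it.
def _preview_conditions_example():
    class _StubConfig(object):
        listLength = [12, 24]
        presRate = [800, 1600]
        modality = ['a', 'v']
        distDur = [8000, 24000]
        nSessions = 2
        reps = 2
    return randomize_conditions_ltpFR3(_StubConfig())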
def choose_pairs_ltpFR3(wp_tot, cond, config, semRows, semCols):
"""
Selects word pairs to use in each list of each session.
:param wp_tot: A list containing all the words of the word pool. The order of the words is expected to correspond to
the indices used by semRows and semCols.
:param cond: A list of lists, where sublist n contains the ordering of list conditions for the nth session.
:param config: The imported configuration file, containing all parameters for the experiment.
:param semRows: See make_bins_ltpFR3()
:param semCols: See make_bins_ltpFR3()
:return: pairs - pairs[x][y][z] is the zth word pair in session x, list y
:return: pair_dicts - a list of dictionaries, where each dictionary contains all word pairs from a given session
:return: practice_lists - A list containing two practice lists, each with 18 words
"""
# pairs[x][y][z] will be the zth pair of words in the yth list on session x
pairs = []
# points to the other word in the pair for a given session
pair_dicts = []
# Deep copy the full word pool into full_wp_allowed, so it can be shuffled for each session without altering wp_tot
full_wp = wp_tot[:]
# Make word pairs for each session
session_num = 0
while session_num < config.nSessions:
#print 'Making session', session_num, ':',
#sys.stdout.flush()
# Shuffle the order of the word pool; I believe this is technically only necessary for the first session, in
# order to randomize which words are selected for the practice lists. All other lists have their items randomly
# chosen anyway
'''
IMPORTANT NOTE!!!:
Lists containing more than 2080 elements should not be randomized with shuffle, as explained here:
http://stackoverflow.com/questions/3062741/maximal-length-of-list-to-shuffle-with-python-random-shuffle
The full word pool contains 1638 words, so this is only a concern if the word pool is ever expanded.
'''
random.shuffle(full_wp)
# The first session has two 18-word practice lists
if session_num == 0:
practice_lists = [full_wp[:18], full_wp[18:36]]
sess_wp_allowed = full_wp[36:]
else:
sess_wp_allowed = full_wp[:]
# sess_pairs[x][y] will be the yth pair in the xth list on the current session
sess_pairs = []
# Track number of attempts to create the lists for the current session
sess_tries = 0
# Track whether the session completed successfully
goodSess = True
# Make word pairs for each list in the current session
list_num = 0
while list_num < len(cond[session_num]):
#print list_num,
#sys.stdout.flush()
# list_pairs[x] will be the xth pair in the current list on the current session
list_pairs = []
# Track number of attempts to create the current list
list_tries = 0
# Track whether the list completed successfully
goodList = True
# Retrieve the list length condition for the current list by looking in cond
listLength = cond[session_num][list_num][0]
# Length 12 lists have 2 pairs per bin, length 24 list have 4 pairs per bin
pairs_per_bin = 2 if listLength == 12 else 4
# Select two or four word pairs from each bin (based on list length)
for sem_i in range(len(semRows)):
# The pair for each semantic bin gets placed twice
pair_i = 0
while pair_i < pairs_per_bin:
# Get the indices (within the full word pool) of the words chosen for the current session
available_indices = [wp_tot.index(word) for word in sess_wp_allowed]
# Randomly choose indices/words from those in the current session until one is found that has one
# or more pairs in the current bin
index_word1 = random.choice(available_indices)
while index_word1 not in semRows[sem_i]:
index_word1 = random.choice(available_indices)
# Get the indices of all words whose pairing with the chosen word falls into the correct bin
good_second_indices = semCols[sem_i][semRows[sem_i] == index_word1]
# Eliminate the words that are not available in the session
good_second_indices = [i for i in good_second_indices if wp_tot[i] in sess_wp_allowed]
# Ensure that a word cannot be accidentally paired with itself
if index_word1 in good_second_indices:
del good_second_indices[good_second_indices.index(index_word1)]
# If there are no good words to choose from, restart
if len(good_second_indices) == 0:
list_tries += 1
if list_tries > 10:
goodList = False
break
else:
continue
# Choose the second word randomly
index_word2 = random.choice(good_second_indices)
# Add the pairs to list_pairs, delete them from the pool of allowed words
list_pairs.append([wp_tot[index_word1], wp_tot[index_word2]])
del sess_wp_allowed[sess_wp_allowed.index(wp_tot[index_word1])]
del sess_wp_allowed[sess_wp_allowed.index(wp_tot[index_word2])]
pair_i += 1
# If the list is bad, add the words back to the pool of allowed words
if not goodList:
sess_wp_allowed.extend([x[0] for x in list_pairs] + [x[1] for x in list_pairs])
break
# If the list is good, add the list_pairs to sess_pairs,
if goodList:
sess_pairs.append(list_pairs)
list_num += 1
else:
# Otherwise, try the session again (up to 50 times), then restart
list_pairs = []
sess_tries += 1
if sess_tries > 50:
goodSess = False
break
# If the whole session went successfully
if goodSess:
# Get the pairs from the lists, add them backwards and forwards to sess_pair_dict
sess_pair_dict = dict(itertools.chain(*sess_pairs))
sess_pair_dict.update(dict(zip(sess_pair_dict.values(), sess_pair_dict.keys())))
pair_dicts.append(sess_pair_dict)
pairs.append(sess_pairs)
session_num += 1
else: # If the session did not go well, try again.
sess_pairs = []
print ''
return pairs, pair_dicts, practice_lists
def place_pairs_ltpFR3(pairs, cond):
"""
:param pairs:
:param cond:
:param config:
:return:
"""
# Load all valid list compositions for 12-item lists (small lists are too restrictive to use trial and error)
with open('valid12.json', 'r') as f:
valid12 = json.load(f)['3bin-valid12']
# Loop through sessions
subj_wo = []
for (n, sess_pairs) in enumerate(pairs):
sess_wo = []
#print '\nPlacing session', n, ':',
#sys.stdout.flush()
# Loop through lists within each session
for (m, list_pairs) in enumerate(sess_pairs):
#print m,
#sys.stdout.flush()
# Create pairs of word pairs from the same bin -- one pair will have adjacent presentation, one distant
grouped_pairs = [list(group) for group in
zip([list_pairs[i] for i in range(len(list_pairs)) if i % 2 == 0],
[list_pairs[i] for i in range(len(list_pairs)) if i % 2 == 1])]
# Retrieve list length for the current list
list_length = cond[n][m][0]
# For 12-item lists, select a random solution template and assign word pairs to the variables in the
# template, such that one pair from each bin has adjacent presentation and one pair from each bin has
# distant presentation
if list_length == 12:
# Randomize the ordering of the grouped pairs, as well as the orderings within each group and each pair
adjacents = ['a', 'b', 'c']
distants = ['d', 'e', 'f']
random.shuffle(adjacents)
random.shuffle(distants)
key = {}
for group in grouped_pairs:
random.shuffle(group)
random.shuffle(group[0])
random.shuffle(group[1])
key[adjacents.pop(0)] = group[0]
key[distants.pop(0)] = group[1]
# Choose a random valid solution
list_wo = copy.deepcopy(random.choice(valid12))
# Each entry in the solution list is a string containing a letter followed by 0 or 1
# The letter corresponds to the word pair and the number corresponds to the item in the pair.
# Letters a, b, and c are adjacent presentation pairs; d, e, and f are distant presentation pairs.
for i in range(len(list_wo)):
w = list_wo[i]
list_wo[i] = key[w[0]][int(w[1])]
# For 24-item lists, create two 12-item lists based on random solution templates and concatenate them.
elif list_length == 24:
# Randomize the ordering of the grouped pairs, as well as the orderings within each group and each pair
adjacents1 = ['a', 'b', 'c']
distants1 = ['d', 'e', 'f']
adjacents2 = ['a', 'b', 'c']
distants2 = ['d', 'e', 'f']
random.shuffle(adjacents1)
random.shuffle(distants1)
random.shuffle(adjacents2)
random.shuffle(distants2)
key1 = {}
key2 = {}
for group_num, group in enumerate(grouped_pairs):
random.shuffle(group)
random.shuffle(group[0])
random.shuffle(group[1])
if group_num % 2 == 0:
key1[adjacents1.pop(0)] = group[0]
key1[distants1.pop(0)] = group[1]
else:
key2[adjacents2.pop(0)] = group[0]
key2[distants2.pop(0)] = group[1]
# Choose a random valid solution
list_wo1 = copy.deepcopy(random.choice(valid12))
list_wo2 = copy.deepcopy(random.choice(valid12))
# Each entry in the solution list is a string containing a letter followed by 0 or 1
# The letter corresponds to the word pair and the number corresponds to the item in the pair.
# Letters a, b, and c are adjacent presentation pairs; d, e, and f are distant presentation pairs.
for i in range(len(list_wo1)):
w = list_wo1[i]
list_wo1[i] = key1[w[0]][int(w[1])]
w = list_wo2[i]
list_wo2[i] = key2[w[0]][int(w[1])]
list_wo = list_wo1 + list_wo2
else:
raise ValueError('Function place_pairs_ltpFR3() can only handle word lists of length 12 or 24!')
# Add finalized list to the session
sess_wo.append(list_wo)
subj_wo.append(sess_wo)
return subj_wo
def listgen_ltpFR3(n):
"""
Generate all lists for a participant, including the conditions, word pairs
and word ordering. This function saves the results to a json file labelled
with the participant's number.
"""
import config
# Read in the semantic association matrix
semMat = []
with open(config.w2vfile) as w2vfile:
for word in w2vfile:
wordVals = []
wordValsString = word.split()
for val in wordValsString:
thisVal = float(val)
wordVals.append(thisVal)
semMat.append(wordVals)
semArray = numpy.array(semMat)
# Create three semantic similarity bins and sort word pairs by bin
semRows, semCols = make_bins_ltpFR3(semArray)
# Read in the word pool
with open(config.wpfile) as wpfile:
wp_tot = [x.strip() for x in wpfile.readlines()]
counts = numpy.zeros(len(wp_tot))
for i in range(n):
print '\nSubject ' + str(i) + '\n'
# Randomize list conditions (list length, presentation rate, modality, distractor duration)
condi = randomize_conditions_ltpFR3(config)
# Choose all of the pairs to be used in the experiment
pairs, pair_dicts, practice_lists = choose_pairs_ltpFR3(wp_tot, condi, config, semRows, semCols)
# Create all lists by placing the word pairs in appropriate positions
subj_wo = place_pairs_ltpFR3(pairs, condi)
# Add practice lists
subj_wo[0] = practice_lists + subj_wo[0]
practice_condi = [[18, 1200, 'a', 18000], [18, 1200, 'v', 18000]]
random.shuffle(practice_condi)
condi[0] = practice_condi + condi[0]
d = {'word_order': subj_wo, 'pairs': pair_dicts, 'conditions': condi}
for sess_dict in pair_dicts:
counts[numpy.array([wp_tot.index(w) for w in sess_dict])] += 1
counts[numpy.array([wp_tot.index(w) for w in practice_lists[0]])] += 1
counts[numpy.array([wp_tot.index(w) for w in practice_lists[1]])] += 1
with open('/Users/jessepazdera/AtomProjects/ltpFR3_MTurk/static/pools/lists/%d.js' % i, 'w') as f:
s = 'var sess_info = ' + json.dumps(d) + ';'
f.write(s)
with open('/Users/jessepazdera/AtomProjects/ltpFR3_MTurk/static/pools/lists/counts.json', 'w') as f:
f.write(str([c for c in counts]))
print max(counts), min(counts), len([wp_tot[i] for i in range(len(counts)) if counts[i] == 0])
return counts
if __name__ == "__main__":
nsess = input('How many sessions would you like to generate? ')
counts = listgen_ltpFR3(nsess)
print counts.mean()
print counts.std()
print counts.max()
print counts.min()
```
#### File: ListGen/misc/listTests.py
```python
import random
import itertools
import numpy
import sys
import os
import json
import copy
def makeSemBins(semArray, nBins):
"""
Makes the semantic bins.
A spot in semRows[i] and semCols[i] are the indices of words that
fall in the ith semantic bin
"""
# Split up the semantic space into equal segments
semBins = list(numpy.linspace(semArray.min(),semArray.max(),nBins+1))
# Creates boundaries for the segments
semBins = zip(*[semBins[i:]+semBins[-1:i] for i in range(2)])
semRows = []
semCols = []
for bin in semBins:
# For words within those boundaries, append the indices to
# semRows and semCols
(i,j) = ((semArray > bin[0]) & (semArray < bin[1])).nonzero()
semRows.append(i)
semCols.append(j)
return semRows, semCols
def choosePairs(semArray, wp_tot, wp_allowed_main, wp_allowed_last, config, semRows, semCols):
"""
Chooses all of the pairs to be presented in all sessions
"""
# Will hold all of the sessions' pairs.
# pairs[x][y][z] gives the zth pair of words in the yth list on session x
# the last item in pairs[x][y] will always contain 8 additional words
# that are not a part of any pair
pairs = []
# points to the other word in the pair for a given session
pair_dicts = []
# Take from all of the words in wp_allowed_main (except for the last session)
full_wp_allowed = wp_allowed_main[:]
session_num = 0
# Go through each session
while session_num <config.nSessions:
print 'Making session', session_num,':',
sys.stdout.flush()
# words allowed for that session are taken from the full wordpool
sess_wp_allowed = full_wp_allowed[:]
# Pairs for a given session
sess_pairs = []
#number of times it's attempted to make the list
sess_tries = 0
list_num = 0
# keeps track of whether the session completed successfully
goodSess = True
while list_num< config.nLists:
print list_num,
sys.stdout.flush()
# If it's the last session, the second half of the lists
# should contain the words for the last session
if session_num==config.nSessions-1 and list_num==config.nLists/2:
unused_sess_wp = sess_wp_allowed[:]
sess_wp_allowed = wp_allowed_last[:]
# Pairs within a given list
list_pairs = []
list_tries = 0
goodList = True
for sem_i in range(len(semRows)):
# The pair for each semantic bin gets placed twice
pair_i = 0
while pair_i<2:
# Get the indices of the words in sess_wp_allowed
available_indices = [wp_tot.index(word) for word in sess_wp_allowed]
#available_indices = [i for i in range(len(wp_tot)) if wp_tot[i] in sess_wp_allowed]
# Randomly choose indices from available_indices until it falls in semRows[sem_i]
index_word1 = random.choice(available_indices)
while index_word1 not in semRows[sem_i]:
index_word1 = random.choice(available_indices)
# Get all of the indices of words that correspond to the word chosen for word1
# that also fall in the correct sem_bin
good_second_indices = semCols[sem_i][semRows[sem_i]==index_word1]
# Eliminate the words that are not available in the session
good_second_indices = [i for i in good_second_indices if wp_tot[i] in sess_wp_allowed]
# Get rid of the first word, if it does fall in the correct bin
if index_word1 in good_second_indices:
del good_second_indices[good_second_indices.index(index_word1)]
# if there are no good words to choose from, restart
if len(good_second_indices)==0:
list_tries+=1
if list_tries>10:
goodList = False
break
else:
continue
# Choose the second word randomly
index_word2 = random.choice(good_second_indices)
# Not sure why this is here. Probably doesn't need to be.
while index_word2==index_word1:
index_word2 = random.choice(good_second_indices)
# Add the pairs to list_pairs, delete them from the pool of allowed words
list_pairs.append((wp_tot[index_word1], wp_tot[index_word2]))
del sess_wp_allowed[sess_wp_allowed.index(wp_tot[index_word1])]
del sess_wp_allowed[sess_wp_allowed.index(wp_tot[index_word2])]
pair_i +=1
# If the list is bad, add the words back to the pool of allowed words
if not goodList:
sess_wp_allowed.extend([x[0] for x in list_pairs]+[x[1] for x in list_pairs])
break
# If the list is good, add the list_pairs to sess_pairs,
if goodList:
sess_pairs.append(list_pairs)
list_num+=1
else:
# Otherwise, try the session again (up to 50 times), then restart
list_pairs = []
sess_tries += 1
if sess_tries>50:
goodSess = False
break
        # If the whole session went successfully
if goodSess:
# Get the pairs from the lists, add them backwards and forwards to sess_pair_dict
sess_pair_dict = dict(itertools.chain(*sess_pairs))
sess_pair_dict.update(dict(zip(sess_pair_dict.values(), sess_pair_dict.keys())))
pair_dicts.append(sess_pair_dict)
# Add 8 extra words to the end of every list.
for list in range(config.nLists):
list_extra_words = []
for i in range(config.listLength-8*2):
if session_num!=config.nSessions-1 or list<config.nLists/2:
list_extra_words.append(sess_wp_allowed.pop(random.randint(0,len(sess_wp_allowed)-1)))
else:
list_extra_words.append(unused_sess_wp.pop(random.randint(0,len(unused_sess_wp)-1)))
sess_pairs[list].append(list_extra_words)
# If it's the last session, sess_pairs contains old words for half, then new words for half.
# This mixes up the lists
if session_num==config.nSessions-1:
print '\n Shuffling last session...'
sess_pairs_mixed = []
# If it's the last session, sess_pairs contains old pairs for the
# first half, and new pairs for the second
old_pairs = sess_pairs[:config.nLists/2]
new_pairs = sess_pairs[config.nLists/2:]
list_types = (['OLD']*(config.nLists/3))+(['NEW']*(config.nLists/3))+\
(['MIXED']*(config.nLists/3))
random.shuffle(list_types)
mixed_pairs = []
for (i,list_type) in enumerate(list_types):
# If the list is all old or all new, just take one of those lists and add it on
if list_type=='OLD':
sess_pairs_mixed.append(old_pairs.pop())
elif list_type=='NEW':
sess_pairs_mixed.append(new_pairs.pop())
# If the list type is mixed, split an old and new list into half lists
elif list_type=='MIXED':
# If a mixed list already exists, use that
if len(mixed_pairs)>0:
sess_pairs_mixed.append(mixed_pairs.pop())
else:
# get one new and old old list
old_list = old_pairs.pop()
new_list = new_pairs.pop()
# Generates two mixed lists
mixed_list_1 = []
mixed_list_2 = []
# Later, the first pair is placed close together, the second
# far apart. This makes it so that the temporal relatedness
# is independent of old/new.
choose_first = [random.randint(0,1) for _ in range((len(old_list)-1)/2)]
# Loop through the bins except for the last one, which contains random
# items. Will deal with that further down.
for i in range(len(old_list)-1):
if i%2==choose_first[i/2]:
mixed_list_1.append(old_list.pop(0))
mixed_list_2.append(new_list.pop(0))
else:
mixed_list_1.append(new_list.pop(0))
mixed_list_2.append(old_list.pop(0))
old_extra_words = old_list[len(old_list)-1]
new_extra_words = new_list[len(new_list)-1]
mixed_list_1.append(\
old_extra_words[0:len(old_extra_words)/2] + \
new_extra_words[0:len(new_extra_words)/2]
)
mixed_list_2.append(\
old_extra_words[len(old_extra_words)/2:] + \
new_extra_words[len(new_extra_words)/2:]
)
sess_pairs_mixed.append(mixed_list_1)
mixed_pairs.append(mixed_list_2)
sess_pairs = sess_pairs_mixed
pairs.append(sess_pairs)
session_num+=1
else: # If the session did not go well, try again.
sess_pairs = []
print ''
return pairs, pair_dicts
def placePairs(pairs, config):
"""
Places each of the pairs
"""
if config.listLength == 12 and config.numBins == 3:
with open('valid12.json', 'r') as f:
valid12 = json.load(f)['3bin-valid12']
subj_wo = []
# Loop through sessions
# (n = sessionNum)
for (n, sess_pairs) in enumerate(pairs):
sess_wo = []
print '\nPlacing session',n,':',
sys.stdout.flush()
# loop through lists
# m = listNum
if config.listLength == 12 and config.numBins == 3:
for (m, list_pairs) in enumerate(sess_pairs):
print m,
sys.stdout.flush()
# placeable pairs are the /actual/ pairs, as opposed to the last item
# in list_pairs which is the random items
placeable_pairs = [list(pair) for pair in list_pairs[:-1]]
# Group the pairs, so that each pair of pairs is in a tuple
grouped_pairs = [list(group) for group in zip([placeable_pairs[i] for i in range(len(placeable_pairs)) if i % 2 == 0],
[placeable_pairs[i] for i in range(len(placeable_pairs)) if i % 2 == 1])]
# Randomize the ordering of the grouped pairs, as well as the orderings within each group and each pair
adjacents = ['a', 'b', 'c']
distants = ['d', 'e', 'f']
random.shuffle(adjacents)
random.shuffle(distants)
key = {}
for group in grouped_pairs:
random.shuffle(group)
random.shuffle(group[0])
random.shuffle(group[1])
key[adjacents.pop(0)] = group[0]
key[distants.pop(0)] = group[1]
# Choose a random valid solution
list_wo = copy.deepcopy(random.choice(valid12))
for i in range(len(list_wo)):
w = list_wo[i]
# w is a letter from a-f followed by a number from 0-1. The letter corresponds to the word pair, the number corresponds to the item in the pair
list_wo[i] = key[w[0]][int(w[1])]
sess_wo.append(list_wo)
else:
for (m, list_pairs) in enumerate(sess_pairs):
print m,
sys.stdout.flush()
# All items in the list start out as None
list_wo = [None]*config.listLength
# placeable pairs are the /actual/ pairs, as opposed to the last item
# in list_pairs which is the random items
placeable_pairs = list_pairs[:-1]
# Group the pairs, so that each pair of pairs is in a tuple
grouped_pairs = zip([placeable_pairs[i] for i in range(len(placeable_pairs)) if i % 2 == 0],
[placeable_pairs[i] for i in range(len(placeable_pairs)) if i % 2 == 1])
useable_positions = range(config.listLength)
placedAll = False
# Loop until all of the pairs are placed
while not placedAll:
# All items in the list start out as None
list_wo = [None]*config.listLength
for pairs in grouped_pairs:
# The close pair is always the first of the pairs, the far
# pair is the second
closePair = pairs[0]
farPair = pairs[1]
# Place close pairs
# list comprehension will chose places where
# there are adjacent usable_positions
possible_index1s = [i for i in range(len(useable_positions)-1) \
if useable_positions[i+1]==useable_positions[i]+1]
# If you can't place it, exit, try again
if len(possible_index1s)==0:
break
# Choose a position for the first of the pair randomly
index_1 = random.choice(possible_index1s)
# Place both items of the close pair in list_wo
list_wo[useable_positions[index_1]] = closePair[0]
list_wo[useable_positions[index_1+1]] = closePair[1]
# Those positions are no longer useable
del useable_positions[index_1:index_1+2]
#Place far pairs. Try 50 times.
farPlace_tries = 0
while farPlace_tries<50:
# Place the first item
place_1 = random.choice(useable_positions)
# Get all of the places that are at least 2 spots away
possible_place2s = [place for place in useable_positions if abs(place-place_1)>2]
# if there are none, try again
if len(possible_place2s)==0:
farPlace_tries+=1
continue
# Otherwise, choose one randomly
place_2 = random.choice(possible_place2s)
break
# I think this could be an else...
if farPlace_tries>=50:
break
# Place the two far pairs
list_wo[place_1] = farPair[0]
list_wo[place_2] = farPair[1]
# Those positions are no longer useable
del useable_positions[useable_positions.index(place_1)]
del useable_positions[useable_positions.index(place_2)]
else:
# Only runs if the loop is exited normally.
placedAll = True
# For the remaining items in the list
for i in range(len(list_wo)):
# If nothing has been placed there, put in one of the random words
if list_wo[i]==None:
list_wo[i] = list_pairs[len(list_pairs)-1].pop()
sess_wo.append(list_wo)
subj_wo.append(sess_wo)
return subj_wo
def verifyFiles(files):
"""
Verify that all the files specified in the config are there so
that there is no random failure in the middle of the experiment.
This will call sys.exit(1) if any of the files are missing.
"""
for f in files:
if not os.path.exists(f):
print "\nERROR:\nPath/File does not exist: %s\n\nPlease verify the config.\n" % f
sys.exit(1)
if __name__=="__main__":
import config
# The full wordpool (only used for indexing)
wpfile = open(config.wpfile)
# The wordpool for most sessions
main_wp = open(config.wpfile_main)
# Half of the wordpool for the last session
# (the other half is taken from the main wordpool)
last_sess_wp = open(config.wpfile_last_session)
# Read in the semantic association values
semMat = []
wasfile = open(config.wasfile)
for word in wasfile:
wordVals = []
wordValsString = word.split()
for val in wordValsString:
thisVal = float(val)
wordVals.append(thisVal)
semMat.append(wordVals)
semArray = numpy.array(semMat)
semBins = list(numpy.linspace(semArray.min(),semArray.max(),config.numBins+1))
# Creates boundaries for the segments
semBins = zip(*[semBins[i:]+semBins[-1:i] for i in range(2)])
semRows = []
semCols = []
for bin in semBins:
# For words within those boundaries, append the indices to
# semRows and semCols
(i,j) = ((semArray > bin[0]) & (semArray < bin[1])).nonzero()
semRows.append(i)
semCols.append(j)
# semRows and semCols are 4xN lists, with each of the four slots in the list
# being a semantic bin. a = semRows[x][y] and b = semCols[x][y] gives the
# indices (a and b) of a word in the xth semantic bin
# Read in the wordpools
wp_tot = numpy.array([x.strip() for x in wpfile.readlines()])
numpy.random.shuffle(wp_tot)
wp_allowed_main = wp_tot[:576]
wp_allowed_last = wp_tot[-288:]
# wp_allowed_main = [x.strip() for x in main_wp.readlines()]
# wp_allowed_last = [x.strip() for x in last_sess_wp.readlines()]
wp_tot = wp_tot.tolist()
wp_allowed_main = wp_allowed_main.tolist()
wp_allowed_last = wp_allowed_last.tolist()
# This chooses all of the pairs to be used in the experiment
pairs, pair_dicts = choosePairs(semArray, wp_tot, wp_allowed_main, wp_allowed_last, config, semRows, semCols)
# This places the pairs in the correct location
subj_wo = placePairs(pairs, config)
#print wp_tot, subj_wo, pair_dicts, semMat
``` |
{
"source": "jpb4git/VilleDeFrance",
"score": 3
} |
#### File: apps/cities/controller.py
```python
from connect.connect import db
import settings
from apps.cities.app import read_cities_csv_data
from apps.cities.model import City
from apps.utils.app import renameColDataframe , sort_cities_by_field
def import_csv_table():
# import csv cities
dfCities = read_cities_csv_data(settings.PATH_CSV_FILE)
dfCities = sort_cities_by_field(dfCities,'13').head(51)
# connection db
db.connect()
# drop table
#db.drop_tables(City)
# create Table via Model
City.create_table()
#rename columns
dfCities = renameColDataframe(dfCities,{'4':'name', '9':'insee', '16':'population','17' : 'longitude','18' : 'latitude'})
dfCities = dfCities[['name','insee','population','longitude','latitude']]
# df to dict
DictCities = dfCities.to_dict(orient='records')
    # feed the csv rows into the db table
City.insert_many(DictCities).execute()
#show table
print('called the right thing !!!')
db.close()
```
#### File: apps/router/app.py
```python
from apps.utils import app as utils
import settings
from apps.cities import app as cities , controller as cityController
from apps.school import app as school , controller as schoolController
from apps.doctors import app as doctors , controller as doctorController
from matplotlib import pyplot
from peewee import *
import argparse
def setRoute() :
parser = argparse.ArgumentParser()
#parser = argparse.ArgumentParser()
parser.add_argument(
"action",
help="Choose an action to execute",
nargs="?",
choices=[
"school",
"db",
"save_city",
"show_city",
"save_school",
"show_school",
"save_doctor",
"show_doctor",
],
)
args = parser.parse_args()
if args.action == "school":
init()
if args.action == "db":
db_setting()
if args.action == "save_city":
cityController.import_csv_table()
if args.action == "save_school":
schoolController.import_school_csv_table()
if args.action == "show_school":
schoolController.read_data_from_table()
if args.action == "save_doctor":
doctorController.import_doctors_csv_table()
if args.action == "show_doctor":
doctorController.read_data_from_table()
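# Example invocations (illustrative; they assume an entry-point script, here called
# main.py, that simply calls setRoute()):
#   python main.py save_city    # load the cities CSV into the database
#   python main.py show_school  # read back the school table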
``` |
{
"source": "jpbadan/AerospaceExpress",
"score": 3
} |
#### File: AerospaceExpress/app/utils.py
```python
from app import db
from app.models import User, Post
def createNewDb():
db.create_all()
def inserts():
user1 = User(userName='user1',
email='<EMAIL>', password='<PASSWORD>')
user2 = User(userName='user2',
email='<EMAIL>', password='<PASSWORD>')
user3 = User(userName='user3',
email='<EMAIL>', password='<PASSWORD>')
db.session.add(user1)
db.session.add(user2)
db.session.add(user3)
post1 = Post(title='PostTitle1',
url='pskjlf.com/hjiwef', postedBy=user1)
post11 = Post(title='post2user1',
url='gogo.com', postedBy=user1)
post12 = Post(title='post 3 do user 1',
url='ijo.com', postedBy=user1)
post2 = Post(title='apredneda',
url='df.com/gogogo', postedBy=user2)
post21 = Post(title='Msafdi guinu',
url='hjio92.com', postedBy=user2, ranking=12)
post3 = Post(title='ew324 jkh 40023 jfdds',
url='hi3208.com', postedBy=user3)
db.session.add(post1)
db.session.add(post11)
db.session.add(post12)
db.session.add(post2)
db.session.add(post21)
db.session.add(post3)
db.session.commit()
``` |
{
"source": "J-P-Bakker/osrs-farm-platform",
"score": 3
} |
#### File: modules/helper_modules/utility.py
```python
from configparser import ConfigParser
import sys
def get_index(input_string, sub_string, ordinal):
"""Returns the index of substring provided"""
current = -1
for i in range(ordinal):
current = input_string.index(sub_string, current + 1)
return current
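# Example (an illustrative call, not from the original code): ordinal is 1-based, so asking
# for the 2nd '.' in "a.b.c" returns index 3.
#   get_index("a.b.c", ".", 2)  # -> 3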
def get_user_settings():
"""Gets and returns the USER_SETTINGS from settings.ini"""
config = ConfigParser()
try:
config.read('settings/settings.ini')
except FileNotFoundError:
sys.exit("settings.ini file not found. "
"Make sure it's in the same directory.")
use_proxies = config['USER_SETTINGS'].getboolean('use_proxies')
proxy_auth_type = config['USER_SETTINGS'].getint('proxy_auth_type')
captcha_api_key = config['USER_SETTINGS'].get('captcha_api_key')
num_of_accs = config['USER_SETTINGS'].getint('num_of_accs')
return (use_proxies, proxy_auth_type,
captcha_api_key,
num_of_accs,)
def get_site_settings():
"""Return our [SITE_SETTINGS]"""
config = ConfigParser()
try:
config.read('settings/settings.ini')
except FileNotFoundError:
sys.exit("settings.ini file not found. "
"Make sure it's in the same directory.")
site_key = config['SITE_SETTINGS'].get('site_key')
site_url = config['SITE_SETTINGS'].get('site_url')
return site_key, site_url
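# Illustrative shape of settings/settings.ini expected by the two readers above (all values
# are placeholders):
#
#   [USER_SETTINGS]
#   use_proxies = true
#   proxy_auth_type = 1
#   captcha_api_key = your-captcha-api-key
#   num_of_accs = 5
#
#   [SITE_SETTINGS]
#   site_key = your-site-key
#   site_url = https://example.com/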
``` |
{
"source": "jpbarela/pyActuarialValue",
"score": 2
} |
#### File: av/test/test_continuancetable.py
```python
from nose.tools import assert_almost_equal
from av.database.base import Base
from av.continuancetable import ContinuanceTable, ContinuanceTableRow
from av.test.basedatatest import BaseDataTest
class TestContinuanceTable(BaseDataTest):
def setUp(self):
super(TestContinuanceTable, self).setUp()
Base.metadata.create_all(self.engine)
def tearDown(self):
Base.metadata.drop_all(self.engine)
def check_continuance_value(self, test_values, columns=None):
if test_values[1] is None:
if columns is None:
slice_value = self.test_continuance_table.slice(self.session,
test_values[0])
else:
slice_value = (self.test_continuance_table.
slice(self.session,
test_values[0],
columns=columns))
else:
if columns is None:
slice_value = self.test_continuance_table.slice(self.session,
test_values[0],
test_values[1])
else:
slice_value = (self.test_continuance_table.
slice(self.session,
test_values[0],
test_values[1],
columns=columns))
for test, expected in zip(slice_value, test_values[2]):
assert_almost_equal(test,
expected,
2,
"Slice with value is not correct. Actual {0} "
"should be {1}".format(test, expected))
def check_inverse(self, test_values):
inverse = self.test_continuance_table.inverse(self.session,
test_values[0])
assert_almost_equal(inverse,
test_values[1],
2,
"Inverse is not correct. Should be {0} is {1}".
format(test_values[1], inverse))
def create_basic_table(self):
self.test_continuance_table = ContinuanceTable(name='Test', membership=1000, avg_cost=1000)
self.test_continuance_table.add_value(self.session, maximum=0, membership=1000, maxed_value=0)
self.test_continuance_table.add_value(self.session, maximum=500, membership=500, maxed_value=450)
self.test_continuance_table.add_value(self.session, maximum=1000, membership=250, maxed_value=750)
# Class methods
def test_find(self):
test_table1 = ContinuanceTable(name='Test1', membership=1000, avg_cost=1000)
test_table2 = ContinuanceTable(name='Test2', membership=1500, avg_cost=100)
self.session.add(test_table1)
self.session.add(test_table2)
self.session.commit()
found_table = ContinuanceTable.find(self.session, "Test1")
assert found_table.id == test_table1.id, 'Table was not found'
# Private methods
def test_repr(self):
test_table = ContinuanceTable(name='Test1')
self.session.add(test_table)
self.session.commit()
assert repr(test_table) == "<ContinuanceTable id={0}, name={1}>".format(test_table.id, test_table.name), \
"Test representation not correct, actual representation {0}".format(test_table)
# Instance methods
def test_add_value(self):
self.test_continuance_table = ContinuanceTable(name='Test', membership=1000, avg_cost=1000)
self.test_continuance_table.add_value(self.session, maximum=0, membership=1000, maxed_value=0)
table = self.session.query(ContinuanceTable).filter(ContinuanceTable.name == 'Test').one()
assert self.session.query(ContinuanceTableRow).filter(ContinuanceTableRow.continuance_table_id == table.id,
ContinuanceTableRow.maximum == 0).count() == 1, \
'Value was not added'
def test_inverse(self):
self.create_basic_table()
test_inverses = [[450, 500], [510, 600]]
for inverse in test_inverses:
self.check_inverse(inverse)
def test_slice_values_high(self):
self.create_basic_table()
test_values = [[500, None, [450]], [600, None, [510]]]
for value in test_values:
self.check_continuance_value(value)
def test_slice_values_high_and_low(self):
self.create_basic_table()
test_values = [[1000, 500, [300]], [1000, 600, [240]], [600, 500, [60]]]
for value in test_values:
self.check_continuance_value(value)
def test_slice_values_preventive(self):
self.test_continuance_table = ContinuanceTable(name='Test', membership=1000, avg_cost=1000)
self.test_continuance_table.add_value(self.session, maximum=0, membership=1000, maxed_value=0,
preventive_care_value=0)
self.test_continuance_table.add_value(self.session, maximum=500, membership=500, maxed_value=450,
preventive_care_value=50)
self.test_continuance_table.add_value(self.session, maximum=1000, membership=250, maxed_value=750,
preventive_care_value=75)
test_values = [[500, None, [50]], [600, None, [55]], [1000, 500, [25]], [1000, 600, [20]], [600, 500, [5]]]
for value in test_values:
self.check_continuance_value(value, ['preventive_care_value'])
def test_slice_values_multiple_columns(self):
self.test_continuance_table = ContinuanceTable(name='Test', membership=1000, avg_cost=1000)
self.test_continuance_table.add_value(self.session, maximum=0, membership=1000, maxed_value=0, generic_value=0,
preferred_value=0, non_preferred_value=0, specialty_value=0)
self.test_continuance_table.add_value(self.session, maximum=500, membership=500, maxed_value=450,
generic_value=15, preferred_value=5, non_preferred_value=3,
specialty_value=0.1)
self.test_continuance_table.add_value(self.session, maximum=1000, membership=1000, maxed_value=750,
generic_value=25, preferred_value=7, non_preferred_value=5,
specialty_value=5)
test_values = [[500, None, [15, 5, 3, 0.1]],
[600, None, [17, 5.4, 3.4, 1.08]],
[1000, 500, [10, 2, 2, 4.9]],
[1000, 600, [8, 1.6, 1.6, 3.92]],
[600, 500, [2, .4, .4, .98]]]
for value in test_values:
self.check_continuance_value(value, ['generic_value', 'preferred_value', 'non_preferred_value',
'specialty_value'])
``` |
{
"source": "jpbarret13/MAG-model-code",
"score": 2
} |
#### File: container/mag_model/predictor.py
```python
import os
import json
import flask
import pickle
import pandas as pd
import tensorflow as tf
# Define the path
prefix = '/opt/ml/'
model_path = os.path.join(prefix, 'model')
# Load the dictionaries
with open(os.path.join(model_path, "topics_vocab.pkl"), "rb") as f:
target_vocab = pickle.load(f)
target_vocab_inv = {j:i for i,j in target_vocab.items()}
print("Loaded target vocab")
with open(os.path.join(model_path, "doc_type_vocab.pkl"), "rb") as f:
doc_vocab = pickle.load(f)
doc_vocab_inv = {j:i for i,j in doc_vocab.items()}
print("Loaded doc_type vocab")
with open(os.path.join(model_path, "journal_name_vocab.pkl"), "rb") as f:
journal_vocab = pickle.load(f)
journal_vocab_inv = {j:i for i,j in journal_vocab.items()}
print("Loaded journal vocab")
with open(os.path.join(model_path, "paper_title_vocab.pkl"), "rb") as f:
title_vocab = pickle.load(f)
title_vocab_inv = {j:i for i,j in title_vocab.items()}
print("Loaded title vocab")
with open(os.path.join(model_path, "tag_id_vocab.pkl"), "rb") as f:
tag_id_vocab = pickle.load(f)
print("Loaded tag ID vocab")
encoding_layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
max_tokens=len(target_vocab)+1, output_mode="binary", sparse=False)
# Load the model components
raw_model = tf.keras.models.load_model(os.path.join(model_path, 'mag_model_500_basic'), compile=False)
raw_model.trainable = False
print("Loaded raw model")
mag_model = tf.keras.Model(inputs=raw_model.inputs,
outputs=tf.math.top_k(raw_model.outputs, k=25))
print("Created full model")
def tokenize_feature(feature, feature_name='doc_type'):
if feature_name=='doc_type':
vocab = doc_vocab
else:
vocab = journal_vocab
unk_token_id = vocab.get('[UNK]')
none_token_id = vocab.get('[NONE]')
if feature:
token_feature = [vocab.get(feature, unk_token_id)]
else:
token_feature = [none_token_id]
return token_feature
def tokenize_title(feature):
split_feature = feature.split(" ")
vocab = title_vocab
unk_token_id = vocab.get('[UNK]')
none_token_id = vocab.get('[NONE]')
if feature:
token_feature = [vocab.get(x, unk_token_id) for x in split_feature]
else:
token_feature = [none_token_id]
return token_feature
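# Illustrative behaviour of the two tokenizers above (exact ids depend on the loaded vocab
# pickles): unseen tokens map to the vocab's [UNK] id and empty/None features map to [NONE].
#   tokenize_feature('Journal', feature_name='doc_type')  # -> [id of 'Journal' or UNK id]
#   tokenize_title('deep learning for cats')              # -> one id per whitespace token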
# The flask app for serving predictions
app = flask.Flask(__name__)
@app.route('/ping', methods=['GET'])
def ping():
# Check if the classifier was loaded correctly
try:
_ = mag_model.get_layer('cls')
status = 200
except:
status = 400
return flask.Response(response= json.dumps(' '), status=status, mimetype='application/json' )
@app.route('/invocations', methods=['POST'])
def transformation():
# Get input JSON data and convert it to a DF
input_json = flask.request.get_json()
input_json = json.dumps(input_json)
input_df = pd.read_json(input_json, orient='records').reset_index()
# Tokenize data
input_df['title'] = input_df['title'].apply(lambda x: x.lower().strip())
input_df['paper_title_tok'] = input_df['title'].apply(tokenize_title)
input_df['doc_type_tok'] = input_df['doc_type'].apply(tokenize_feature, args=('doc_type',))
input_df['journal_tok'] = input_df['journal'].apply(tokenize_feature, args=('journal',))
paper_titles = tf.keras.preprocessing.sequence.pad_sequences(input_df['paper_title_tok'].to_list(), maxlen=64,
dtype='int64', padding='post',
truncating='post', value=0)
doc_types = tf.convert_to_tensor(input_df['doc_type_tok'].to_list())
journal = tf.convert_to_tensor(input_df['journal_tok'].to_list())
# Predict
model_output = mag_model([paper_titles, doc_types, journal])
scores = model_output.values.numpy()[0].tolist()
preds = model_output.indices.numpy()[0].tolist()
# Transform predicted labels into tags
all_tags = []
for score, pred in zip(scores, preds):
tags = []
scores = []
tag_ids = []
for i in range(25):
if score[i] >= 0.32:
tags.append(target_vocab_inv.get(pred[i]))
scores.append(score[i])
tag_ids.append(tag_id_vocab.get(pred[i]))
all_tags.append({"tags": tags, "scores": scores, "tag_ids": tag_ids})
# Transform predictions to JSON
result = json.dumps(all_tags)
return flask.Response(response=result, status=200, mimetype='application/json')
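# Illustrative request/response shape for the endpoint above (field values are made up):
# POST /invocations with Content-Type: application/json and a body such as
#   [{"title": "A study of memory", "doc_type": "Journal", "journal": "cognition"}]
# returns a JSON list with one {"tags": [...], "scores": [...], "tag_ids": [...]} object
# per input record, keeping only predictions whose score clears the 0.32 threshold.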
```
#### File: MAG-model-code/POC/mag_model_iteration_1.py
```python
import tensorflow as tf
import pandas as pd
import pickle
import os
import tensorflow_addons as tfa
from transformers import RobertaTokenizer, RobertaTokenizerFast, TFRobertaModel, TFAlbertModel
AUTO = tf.data.experimental.AUTOTUNE
# In[2]:
model_iteration = 'iteration_1'
# In[3]:
tf.config.list_physical_devices()
# In[4]:
with open(f"./{model_iteration}/vocab/topics_vocab.pkl", "rb") as f:
target_vocab = pickle.load(f)
with open(f"./{model_iteration}/vocab/doc_type_vocab.pkl", "rb") as f:
doc_vocab = pickle.load(f)
with open(f"./{model_iteration}/vocab/journal_name_vocab.pkl", "rb") as f:
journal_vocab = pickle.load(f)
# In[5]:
encoding_layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
max_tokens=len(target_vocab)+1, output_mode="binary", sparse=False)
# loss_fn = tf.keras.losses.CategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE)
loss_fn = tfa.losses.SigmoidFocalCrossEntropy(alpha=0.25, gamma=2.0,
reduction=tf.keras.losses.Reduction.NONE)
metric_1 = tf.keras.metrics.CategoricalAccuracy()
metric_2 = tf.keras.metrics.Recall()
metric_3 = tf.keras.metrics.Precision()
metric_4 = tf.keras.metrics.TopKCategoricalAccuracy(k=10)
# Eventually will use with focal loss
# In[6]:
class CustomModel(tf.keras.Model):
def train_step(self, inputs):
old_features, labels = inputs
labels = tf.RaggedTensor.from_tensor(labels, padding=0)
paper_titles = old_features[0][:,:512].to_tensor(shape=[None, 512])
paper_masks = old_features[1][:,:512].to_tensor(shape=[None, 512])
features = (paper_titles, paper_masks, old_features[2], old_features[3])
labels = encoding_layer(labels)
with tf.GradientTape() as tape:
predictions = self(features, training=True)
loss = loss_fn(labels, predictions)
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
metric_1.update_state(labels, predictions)
metric_2.update_state(labels, predictions)
metric_3.update_state(labels, predictions)
metric_4.update_state(labels, predictions)
return {"loss": loss,
"accuracy": metric_1.result(),
"recall": metric_2.result(),
"precision": metric_3.result(),
"topK15": metric_4.result()}
def test_step(self, inputs):
old_features, labels = inputs
labels = tf.RaggedTensor.from_tensor(labels, padding=0)
paper_titles = old_features[0][:,:512].to_tensor(shape=[None, 512])
paper_masks = old_features[1][:,:512].to_tensor(shape=[None, 512])
features = (paper_titles, paper_masks, old_features[2], old_features[3])
labels = encoding_layer(labels)
with tf.GradientTape() as tape:
predictions = self(features, training=False)
loss = loss_fn(labels, predictions)
metric_1.update_state(labels, predictions)
metric_2.update_state(labels, predictions)
metric_3.update_state(labels, predictions)
metric_4.update_state(labels, predictions)
return {"loss": loss,
"accuracy": metric_1.result(),
"recall": metric_2.result(),
"precision": metric_3.result(),
"topK15": metric_4.result()}
@property
def metrics(self):
return [metric_1, metric_2, metric_3]
# In[7]:
def _parse_function(example_proto):
feature_description = {
'paper_title': tf.io.RaggedFeature(tf.int64),
'paper_mask': tf.io.RaggedFeature(tf.int64),
'journal': tf.io.FixedLenFeature((1,), tf.int64),
'doc_type': tf.io.FixedLenFeature((1,), tf.int64),
'targets': tf.io.FixedLenFeature((20,), tf.int64)
}
example = tf.io.parse_single_example(example_proto, feature_description)
paper_title = example['paper_title']
paper_mask = example['paper_mask']
doc_type = example['doc_type']
journal = example['journal']
targets = example['targets']
return (paper_title, paper_mask, doc_type, journal), targets
# In[8]:
def get_dataset(path, data_type='train'):
tfrecords = [f"{path}{data_type}/{x}" for x in os.listdir(f"{path}{data_type}/") if x.endswith('tfrecord')]
tfrecords.sort()
raw_dataset = tf.data.TFRecordDataset(tfrecords[:25], num_parallel_reads=AUTO)
parsed_dataset = raw_dataset.map(_parse_function, num_parallel_calls=AUTO)
    parsed_dataset = parsed_dataset.apply(tf.data.experimental.dense_to_ragged_batch(256, drop_remainder=True)).shuffle(1024)
return parsed_dataset.prefetch(AUTO)
# In[9]:
file_path = f'./{model_iteration}/tfrecords/'
# In[10]:
train_ds = get_dataset(file_path, 'train')
val_ds = get_dataset(file_path, 'val')
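# In[ ]:
# Optional sanity check (a sketch; it reads one batch from the tfrecords configured above):
# confirm the ragged title tensors and the padded 20-slot target vectors look as expected.
for (title_ids, title_mask, doc_type_b, journal_b), targets_b in train_ds.take(1):
    print(title_ids.shape, title_mask.shape, doc_type_b.shape, journal_b.shape, targets_b.shape)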
# In[11]:
mirrored_strategy = tf.distribute.MirroredStrategy()
# In[12]:
with mirrored_strategy.scope():
# model = TFAlbertModel.from_pretrained('albert-base-v2')
# model.layers[0].trainable = False
# Model Inputs
paper_title_input_ids = tf.keras.layers.Input((512,), dtype=tf.int64, name='paper_title_ids')
paper_title_att_mask = tf.keras.layers.Input((512,), dtype=tf.int64, name='paper_title_mask')
doc_type_id = tf.keras.layers.Input((1,), dtype=tf.int64, name='doc_type_id')
journal_id = tf.keras.layers.Input((1,), dtype=tf.int64, name='journal_id')
# Using HF Model for Title Representation
# paper_title_embs = model(input_ids = paper_title_input_ids,
# attention_mask=paper_title_att_mask,
# output_hidden_states=True,
# training=False).last_hidden_state
# Embedding Layers
paper_title_embs = tf.keras.layers.Embedding(input_dim=30001,
output_dim=512,
mask_zero=False,
trainable=True,
name="title_embedding")(paper_title_input_ids)
doc_embs = tf.keras.layers.Embedding(input_dim=len(doc_vocab)+1,
output_dim=32,
mask_zero=False,
name="doc_type_embedding")(doc_type_id)
journal_embs = tf.keras.layers.Embedding(input_dim=len(journal_vocab)+1,
output_dim=128,
mask_zero=False,
name="journal_embedding")(journal_id)
# First layer
dense_output = tf.keras.layers.Dense(1024, activation='relu',
kernel_regularizer='L2', name="dense_1")(paper_title_embs)
dense_output = tf.keras.layers.Dropout(0.20, name="dropout_1")(dense_output)
dense_output = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="layer_norm_1")(dense_output)
dense_output_flat = tf.keras.layers.GlobalAveragePooling1D(name="title_pooling_layer")(dense_output)
doc_flat = tf.keras.layers.GlobalAveragePooling1D(name="doc_pooling_layer")(doc_embs)
journal_flat = tf.keras.layers.GlobalAveragePooling1D(name="journal_pooling_layer")(journal_embs)
concat_output = tf.concat(values=[dense_output_flat, journal_flat, doc_flat], axis=1)
# Second layer
dense_output = tf.keras.layers.Dense(1024, activation='relu',
kernel_regularizer='L2', name="dense_2")(concat_output)
dense_output = tf.keras.layers.Dropout(0.20, name="dropout_2")(dense_output)
dense_output = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="layer_norm_2")(dense_output)
# Third Layer
dense_output = tf.keras.layers.Dense(256, activation='relu',
kernel_regularizer='L2', name="dense_3")(dense_output)
dense_output = tf.keras.layers.Dropout(0.20, name="dropout_3")(dense_output)
dense_output = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="layer_norm_3")(dense_output)
# dense_output_flat = tf.keras.layers.GlobalAveragePooling1D(name="title_pooling_layer")(dense_output)
# Output Layer
final_output = tf.keras.layers.Dense(len(target_vocab)+1, activation="sigmoid",
name="cls")(dense_output)
test_model = CustomModel(inputs=[paper_title_input_ids, paper_title_att_mask, doc_type_id, journal_id],
outputs=final_output, name='test_model')
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# In[13]:
test_model.compile(optimizer=optimizer)
# In[14]:
test_model.summary()
# In[15]:
callbacks = [tf.keras.callbacks.ModelCheckpoint(f'./models/{model_iteration}/{model_iteration}_first_try',
save_best_only=False, save_weights_only=False)]
# ## First try (with all variables and Albert model output)
# In[ ]:
history = test_model.fit(train_ds, epochs=1, validation_data=val_ds, verbose=1, callbacks=callbacks)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# ## ARCHIVE: Baseline Second Try (trainable embeddings)
# In[23]:
history = test_model.fit(train_ds, epochs=5, validation_data=val_ds, verbose=1, callbacks=callbacks)
# In[ ]:
# In[ ]:
# In[ ]:
``` |
{
"source": "jpbarrette/itables",
"score": 2
} |
#### File: itables/tests/test_downsample.py
```python
import pytest
import itertools
import pandas as pd
from itables.downsample import downsample
def large_tables(N=1000):
return [pd.DataFrame(5, columns=range(N), index=range(N)),
pd.DataFrame(3.14159, columns=range(N), index=range(N)),
pd.DataFrame("abcdefg", columns=range(N), index=range(N))]
@pytest.mark.parametrize('df,max_rows', itertools.product(large_tables(), [99, 100]))
def test_max_rows(df, max_rows):
dn = downsample(df, max_rows=max_rows)
assert len(dn.index) == max_rows
pd.testing.assert_index_equal(dn.columns, df.columns)
@pytest.mark.parametrize('df,max_columns', itertools.product(large_tables(), [99, 100]))
def test_max_columns(df, max_columns):
dn = downsample(df, max_columns=max_columns)
pd.testing.assert_index_equal(dn.index, df.index)
assert len(dn.columns) == max_columns
@pytest.mark.parametrize('df,max_bytes', itertools.product(large_tables(), [10, 1e2, 1e3, 1e4, 1e5]))
def test_max_bytes(df, max_bytes):
dn = downsample(df, max_bytes=max_bytes)
assert dn.values.nbytes <= max_bytes
assert dn.values.nbytes > max_bytes / 2
@pytest.mark.parametrize('df', large_tables())
def test_max_one_byte(df, max_bytes=1):
dn = downsample(df, max_bytes=max_bytes)
assert len(dn.columns) == len(dn.index) == 1
assert dn.iloc[0, 0] == '...'
``` |
{
"source": "jpbarrette/moman",
"score": 3
} |
#### File: finenight/python/error.py
```python
class Error(Exception):
def __init__(self, string):
self.string = string
def __str__(self):
return self.string
class StateError(Error):
"""This error is raised when a state is invalid"""
class AlphabetError(Error):
"""This error is raised when the alphabet of a FSA is invalid"""
class ConstructionError(Error):
"""This error is raised when we encounter a problem when
construction a FSA.
"""
class NotImplemented(Error):
"""This error is raised when the implementation of the function
is incomplete
"""
``` |
{
"source": "jp-barron/Susi_Simulation",
"score": 2
} |
#### File: Susi_Simulation/tools/fei4_tdc_analysis.py
```python
import tables as tb
import numpy as np
import progressbar
import os
import math
import sys
import glob
from pybar.analysis.analyze_raw_data import AnalyzeRawData
import pybar.scans.analyze_source_scan_tdc_data as tdc_analysis
def analyze_hits(input_file_hits):
with AnalyzeRawData(raw_data_file=None, analyzed_data_file=input_file_hits) as analyze_raw_data:
analyze_raw_data.create_source_scan_hist = True
analyze_raw_data.create_cluster_hit_table = True
analyze_raw_data.create_cluster_table = True
analyze_raw_data.create_cluster_size_hist = True
analyze_raw_data.create_cluster_tot_hist = True
analyze_raw_data.create_tdc_hist = True
analyze_raw_data.analyze_hit_table(analyzed_data_out_file=input_file_hits[:-3] + '_analyzed.h5')
analyze_raw_data.plot_histograms(pdf_filename=input_file_hits[:-3], analyzed_data_file=input_file_hits[:-3] + '_analyzed.h5')
def analyze_tdc(hit_file, calibration_filename=None, col_span=[5, 75], row_span=[10, 320]):
# Data files
hit_cut_file = hit_file[:-3] + '_cut_hits.h5'
hit_cut_analyzed_file = hit_file[:-3] + '_cut_hits_analyzed.h5'
    # Selection criteria
hit_selection = '(column > %d) & (column < %d) & (row > %d) & (row < %d)' % (col_span[0] + 1, col_span[1] - 1, row_span[0] + 5, row_span[1] - 5) # deselect edge pixels for better cluster size cut
hit_selection_conditions = ['(n_cluster==1)', '(n_cluster==1) & (cluster_size == 1)', '(n_cluster==1) & (cluster_size == 1) & ((tot > 12) | ((TDC * 1.5625 - tot * 25 < 100) & (tot * 25 - TDC * 1.5625 < 100))) & %s' % hit_selection]
event_status_select_mask = 0b0000111111111111
event_status_condition = 0b0000000100000000 # trigger, tdc word and perfect event structure required
tdc_analysis.histogram_tdc_hits(hit_file, hit_selection_conditions, event_status_select_mask, event_status_condition, calibration_filename, max_tdc=1500, n_bins=1000)
if __name__ == "__main__":
arguments = sys.argv
if len(arguments) < 2:
print 'Please provide the base file name of the root data files (e.g. threshold_ for threshold_2000.root)'
raise SystemExit
base_file_name = arguments[1]
calibration_filename = r'/home/davidlp/geant4/SourceSim-build/converter/calibration_data/hit_or_calibration_calibration.h5'
file_names = glob.glob(base_file_name + '*_interpreted.h5')
file_names.sort()
for file_name in file_names:
analyze_hits(file_name)
file_names = glob.glob(base_file_name + '*_interpreted_analyzed.h5')
file_names.sort()
for file_name in file_names:
analyze_tdc(file_name, calibration_filename)
``` |
{
"source": "jpbarto/amazon-sagemaker-stock-prediction",
"score": 3
} |
#### File: amazon-sagemaker-stock-prediction/notebooks/deepar_util.py
```python
import io
import math
import json
import s3fs
import boto3
import datetime
import pandas as pd
import numpy as np
import sagemaker
import matplotlib
import matplotlib.pyplot as plt
# Function to Format DBG stock market data into a format suitable for DeepAR algorithm
def deeparize(stockdata, stocksymbols, interval, metrices = None):
data_feed = pd.DataFrame()
data_feed['CalcDateTime'] = pd.to_datetime(pd.Series(sorted(list(stockdata.CalcDateTime.unique()))),infer_datetime_format=True)
data_feed.index = data_feed['CalcDateTime']
data_feed.drop('CalcDateTime', axis=1, inplace = True)
for mnemonic in stocksymbols:
mnemonic_data = stockdata[stockdata.Mnemonic == mnemonic].copy()
mnemonic_data.index = mnemonic_data['CalcDateTime']
mnemonic_data = mnemonic_data.sort_index()
mnemonic_data = mnemonic_data.iloc[:,-6:]
if metrices is None:
metrices = mnemonic_data.columns.values
for col in metrices:
metric_col = mnemonic_data[col].to_frame()
metric_col.columns = ["{}-{}".format(mnemonic,col)]
data_feed = data_feed.add(metric_col, fill_value=0)
data_feed = data_feed.resample(interval).mean()
data_feed.fillna(method='backfill', limit=1, inplace=True)
data_feed.fillna(method='ffill', inplace=True)
data_feed.fillna(value=0, inplace=True)
return data_feed
# Function to load resampled stock data from a specified S3 location
def load_resampled_from_s3(interval, bucket, s3_data_key, mnemonics=None, metrices = None):
s3 = boto3.client('s3')
obj = s3.get_object(Bucket=bucket, Key="{}/{}/resampled_stockdata.csv".format(s3_data_key, interval))
loaded = pd.read_csv(io.BytesIO(obj['Body'].read()), parse_dates=True)
if mnemonics is None:
mnemonics = list(loaded.Mnemonic.unique())
return deeparize(loaded, mnemonics, interval, metrices), mnemonics
# Function to plot specified metrices for specified stock, each separate plot
def metrics_plot(mnemonics, metrics = None, data=None, interval = None, bucket = None, s3_key = None):
if data is None and interval is not None and bucket is not None and s3_key is not None:
data, symbols = load_resampled_from_s3(interval, bucket, s3_key)
if metrics is None:
metrics = ['MinPrice', 'MaxPrice', 'StartPrice', 'EndPrice', 'TradedVolume', 'NumberOfTrades']
fig, axs = plt.subplots(math.ceil((len(metrics) * len(mnemonics))/3), 3, figsize=(20, 20), sharex=True)
axx = axs.ravel()
i = 0
for mnemonic in mnemonics:
for metric in metrics:
data["{}-{}".format(mnemonic,metric)].head()
data["{}-{}".format(mnemonic,metric)].plot(ax=axx[i])
axx[i].set_xlabel("date")
axx[i].set_ylabel("{}-{}".format(mnemonic,metric))
axx[i].grid(which='minor', axis='x')
axx[i].set_xticklabels(data.index, rotation=90)
i = i+1
# Function to plot specified metrices for specified stock, all superimposed on a single plot
matplotlib.rcParams['figure.figsize'] = (25, 17) # use bigger graphs
def timeseries_plot(mnemonics, metrics, data=None, interval = None, bucket = None, s3_key = None):
if data is None and interval is not None and bucket is not None and s3_key is not None:
data, symbols = load_resampled_from_s3(interval, bucket, s3_key)
ax = None
for mnemonic in mnemonics:
selected = pd.DataFrame()
selected['CalcDateTime'] = pd.Series(sorted(list(data.index.unique())))
selected.index = selected['CalcDateTime']
selected = selected.sort_index()
selected.drop('CalcDateTime', axis=1, inplace = True)
for metric in metrics:
selected[metric] = data["{}-{}".format(mnemonic,metric)]
selected_columns = list(selected.columns)
for i, column in enumerate(selected_columns):
selected_columns[i] = "{}-{}".format(mnemonic, column)
selected.columns = selected_columns
ax = selected.plot( ax = ax)
ax.set_xticklabels(data.index, rotation=90)
# Function to convert data frames containing time series data to JSON serialized data that DeepAR works with
def json_serialize(data, start, end, target_column, covariate_columns, interval):
timeseries = {}
for i, col in enumerate(data.columns):
metric = col[col.find('-')+1:]
stock = col[:col.find('-')]
if metric == target_column:
if stock in timeseries.keys():
timeseries[stock]["target"] = data.iloc[:,i][start:end]
else:
timeseries[stock] = {}
timeseries[stock]["start"] = str(pd.Timestamp(datetime.datetime.strptime(str(start), "%Y-%m-%d %H:%M:%S").strftime("%Y-%m-%d %H:%M:%S"), freq = interval))
timeseries[stock]["target"] = data.iloc[:,i][start:end]
print("Time series for {} added".format(stock))
elif metric in covariate_columns:
if stock in timeseries.keys():
if "dynamic_feat" in timeseries[stock]:
dynamic_feat = timeseries[stock]["dynamic_feat"]
dynamic_feat.append(data.iloc[:,i][start:end])
else:
dynamic_feat = []
dynamic_feat.append(data.iloc[:,i][start:end])
timeseries[stock]["dynamic_feat"] = dynamic_feat
else:
timeseries[stock] = {}
dynamic_feat = []
dynamic_feat.append(data.iloc[:,i])
timeseries[stock]["dynamic_feat"] = dynamic_feat
print("Dynamic Feature - {} for {} added".format(metric, stock))
else:
pass
json_data = [
{
"start": ts["start"],
"target": ts["target"].tolist(),
"dynamic_feat": [feat.tolist() for feat in ts["dynamic_feat"]]
}
for ts in timeseries.values()
]
return json_data
# Function to first split the data into training and test sets, and then to JSON serialize both sets
def generate_train_test_set(data, target_column, covariate_columns, interval, train_test_split=0.9, num_test_windows=4):
num_samples = len(data.index.values)
num_train = int(train_test_split * num_samples)
num_test = int((num_samples - num_train)/num_test_windows)
print("Sample Size = {}, Training Set: {}, Test Set: {} * {}".format(num_samples, num_train, num_test_windows, num_test))
train_start_dt = data.index[0]
train_end_dt = data.index[num_train - 1]
print("Training Set: Starts at - {}, Ends at - {}".format(train_start_dt, train_end_dt))
train_data = json_serialize(data, train_start_dt, train_end_dt, target_column, covariate_columns, interval)
test_data = []
test_start_date = train_start_dt
for i in range(num_test_windows):
test_end_dt = data.index.values[num_train + i*num_test - 1]
test_data.extend(json_serialize(data, test_start_date, test_end_dt, target_column, covariate_columns, interval))
return train_data, test_data, train_start_dt, train_end_dt
#Function to write JSON serialized training and test data into S3 bucket, which will later be fed to training container
def write_dicts_to_file(data, interval, bucket, path, channel):
fs = s3fs.S3FileSystem()
with fs.open("{}/{}/{}/{}/{}.json".format(bucket, path, interval, channel, channel), 'wb') as fp:
for d in data:
fp.write(json.dumps(d).encode("utf-8"))
fp.write("\n".encode('utf-8'))
return "s3://{}/{}/{}/{}/".format(bucket, path, interval, channel)
# Class that allows making requests using pandas Series objects rather than raw JSON strings
class DeepARPredictor(sagemaker.predictor.RealTimePredictor):
def __init__(self, *args, **kwargs):
super().__init__(*args, content_type=sagemaker.content_types.CONTENT_TYPE_JSON, **kwargs)
def predict(self, ts, cat=None, dynamic_feat=None,
num_samples=100, return_samples=False, quantiles=["0.1", "0.5", "0.9"]):
"""Requests the prediction of for the time series listed in `ts`, each with the (optional)
corresponding category listed in `cat`.
ts -- `pandas.Series` object, the time series to predict
cat -- integer, the group associated to the time series (default: None)
num_samples -- integer, number of samples to compute at prediction time (default: 100)
return_samples -- boolean indicating whether to include samples in the response (default: False)
quantiles -- list of strings specifying the quantiles to compute (default: ["0.1", "0.5", "0.9"])
Return value: list of `pandas.DataFrame` objects, each containing the predictions
"""
prediction_time = ts.index[-1] + 1
quantiles = [str(q) for q in quantiles]
req = self.__encode_request(ts, cat, dynamic_feat, num_samples, return_samples, quantiles)
res = super(DeepARPredictor, self).predict(req)
return self.__decode_response(res, ts.index.freq, prediction_time, return_samples)
def __encode_request(self, ts, cat, dynamic_feat, num_samples, return_samples, quantiles):
instance = series_to_dict(ts, cat if cat is not None else None, dynamic_feat if dynamic_feat else None)
configuration = {
"num_samples": num_samples,
"output_types": ["quantiles", "samples"] if return_samples else ["quantiles"],
"quantiles": quantiles
}
http_request_data = {
"instances": [instance],
"configuration": configuration
}
return json.dumps(http_request_data).encode('utf-8')
def __decode_response(self, response, freq, prediction_time, return_samples):
# we only sent one time series so we only receive one in return
# however, if possible one will pass multiple time series as predictions will then be faster
predictions = json.loads(response.decode('utf-8'))['predictions'][0]
prediction_length = len(next(iter(predictions['quantiles'].values())))
prediction_index = pd.DatetimeIndex(start=prediction_time, freq=freq, periods=prediction_length)
if return_samples:
dict_of_samples = {'sample_' + str(i): s for i, s in enumerate(predictions['samples'])}
else:
dict_of_samples = {}
return pd.DataFrame(data={**predictions['quantiles'], **dict_of_samples}, index=prediction_index)
def set_frequency(self, freq):
self.freq = freq
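# Illustrative usage sketch for DeepARPredictor (endpoint name, session and the ts/dynamic_feat variables
# are hypothetical, not defined in this module):
#   predictor = DeepARPredictor(endpoint='deepar-stock-endpoint', sagemaker_session=session)
#   predictor.set_frequency('D')
#   forecast = predictor.predict(ts, dynamic_feat=dynamic_feat, quantiles=["0.1", "0.5", "0.9"])
# 'forecast' is then a pandas.DataFrame indexed by the prediction timestamps, one column per requested quantile.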
def encode_target(ts):
return [x if np.isfinite(x) else "NaN" for x in ts]
def series_to_dict(ts, cat=None, dynamic_feat=None):
"""Given a pandas.Series object, returns a dictionary encoding the time series.
ts -- a pands.Series object with the target time series
cat -- an integer indicating the time series category
Return value: a dictionary
"""
obj = {"start": str(ts.index[0]), "target": encode_target(ts)}
if cat is not None:
obj["cat"] = cat
if dynamic_feat is not None:
obj["dynamic_feat"] = dynamic_feat
return obj
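# Illustrative example for series_to_dict (values chosen for demonstration only):
#   series_to_dict(pd.Series([1.0, 2.0], index=pd.date_range("2018-01-01", periods=2, freq="D")))
# returns {"start": "2018-01-01 00:00:00", "target": [1.0, 2.0]}; "cat" and "dynamic_feat" keys
# are only added when the corresponding arguments are passed.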
# Function to create a data structure to invoke prediction for a given stock and within a given time range
def query_for_stock(stock_to_predict, target_column, covariate_columns, data, prediction_length, start = None, end = None):
if start is None:
start = data.index.values[0]
if end is None:
end = data.index.values[-1]
startloc = data.index.get_loc(start)
endloc = data.index.get_loc(end)
stockts = None
ts = None
dynamic_feat = []
for i, col in enumerate(data.columns):
stock = col[:col.find('-')]
metric = col[col.find('-')+1:]
if stock == stock_to_predict:
if metric == target_column:
ts = data.iloc[:,i][startloc:endloc-prediction_length]
stockts = data.iloc[:,i][:]
print("Time series - {} for {} selected".format(metric, stock))
elif metric in covariate_columns:
dynamic_feat.append(data.iloc[:,i][startloc:endloc].tolist())
print("Dynamic Feature - {} for {} selected".format(metric, stock))
else:
pass
return ts, dynamic_feat, stockts
def plot_predicted_observed_at_quantile(ts, observed, predicted, quantile):
ax = None
ax = observed.plot( ax = ax, legend=True, label="Given" )
ax.set_xticklabels(observed.index, rotation=90)
#for col in prediction.columns:
predicted = ts.append(predicted[quantile])
predicted.plot(ax = ax, legend=True, label="Predicted")
def plot(
predictor,
stock_id,
mnemonics,
target_ts,
target_column,
covariate_columns,
prediction_length,
plot_history,
cat=None,
dynamic_feat=None,
forecast_date=None,
show_samples=False,
confidence=75
):
if forecast_date is None:
forecast_date = target_ts.index[-1]
print("calling served model to generate predictions starting from {}".format(str(forecast_date)))
assert(confidence > 50 and confidence < 100)
low_quantile = 0.5 - confidence * 0.005
up_quantile = confidence * 0.005 + 0.5
ts, dynamic_feat, stockts = query_for_stock(mnemonics[stock_id], target_column, covariate_columns, target_ts, prediction_length, end=forecast_date)
args = {
"ts": ts,
"return_samples": show_samples,
"quantiles": [low_quantile, 0.5, up_quantile],
"num_samples": 100
}
if dynamic_feat is not None:
args["dynamic_feat"] = dynamic_feat
fig = plt.figure(figsize=(20, 6))
ax = plt.subplot(2, 1, 1)
else:
fig = plt.figure(figsize=(20, 3))
ax = plt.subplot(1,1,1)
if cat is not None:
args["cat"] = cat
ax.text(0.9, 0.9, 'cat = {}'.format(cat), transform=ax.transAxes)
# call the end point to get the prediction
prediction = predictor.predict(**args)
# plot the samples
if show_samples:
for key in prediction.keys():
if "sample" in key:
prediction[key].plot(color='lightskyblue', alpha=0.2, label='_nolegend_')
# plot the target
target_section = stockts[forecast_date-plot_history:forecast_date+prediction_length]
target_section.plot(color="black", label='target')
# plot the confidence interval and the median predicted
ax.fill_between(
prediction[str(low_quantile)].index,
prediction[str(low_quantile)].values,
prediction[str(up_quantile)].values,
color="b", alpha=0.3, label='{}% confidence interval'.format(confidence)
)
prediction["0.5"].plot(color="b", label='P50')
ax.legend(loc=2)
# fix the scale as the samples may change it
#ax.set_ylim(target_section.min() * 0.5, target_section.max() * 1.5)
ax.set_ylim(ts.min(), ts.max())
'''
if dynamic_feat is not None:
for i, f in enumerate(dynamic_feat, start=1):
ax = plt.subplot(len(dynamic_feat) * 2, 1, len(dynamic_feat) + i, sharex=ax)
feat_ts = pd.Series(
index=pd.DatetimeIndex(start=target_ts.index[0], freq=target_ts.index.freq, periods=len(f)),
data=f
)
feat_ts[forecast_date-plot_history:forecast_date+prediction_length].plot(ax=ax, color='g')
'''
``` |
{
"source": "jpbarto/aws_automation_workshop",
"score": 2
} |
#### File: lab3_compliance_enforcement/src/remediate_entities.py
```python
import boto3
policy_arn = 'arn:aws:iam::012345678901:policy/ManagedPolicy'
iam = boto3.client ('iam')
def remediate_role (rolename):
iam.attach_role_policy (RoleName = rolename, PolicyArn = policy_arn)
return True
def handler (event, context):
print ("Processing event: {}".format (event))
event_detail = {}
if 'detail' in event:
event_detail = event['detail']
report = {
'Enforced': False
}
if 'eventName' in event_detail:
print ("Processing event {}".format (event_detail['eventName']))
if 'eventName' in event_detail and event_detail['eventName'] == 'CreateRole':
rolename = event_detail['requestParameters']['roleName']
report['RoleName'] = rolename
report['Enforced'] = remediate_role (rolename)
else:
report['Reason'] = 'No role creation found'
print (report)
return report
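# Illustrative trigger event shape (field values are hypothetical), as delivered by a CloudWatch Events /
# EventBridge rule on CloudTrail IAM activity:
#   {"detail": {"eventName": "CreateRole", "requestParameters": {"roleName": "ExampleRole"}}}
# handler() would then attach policy_arn to "ExampleRole" and return {"RoleName": "ExampleRole", "Enforced": True}.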
```
#### File: lab4_compliance_report/src/check_policy_enforcement.py
```python
import boto3
from datetime import datetime
import json
cfg = boto3.client ('config')
iam = boto3.client ('iam')
COMPLIANCE_STATES = {
'COMPLIANT': 'COMPLIANT',
'NON_COMPLIANT': 'NON_COMPLIANT',
'NOT_APPLICABLE': 'NOT_APPLICABLE'
}
policy_arn = 'arn:aws:iam::012345678901:policy/ManagedPolicy'
# Checks whether the invoking event is ScheduledNotification
def is_scheduled (event):
return (event['messageType'] == 'ScheduledNotification')
def get_role_policies (rolename, marker = None):
policies = []
if marker is None:
policy_resp = iam.list_attached_role_policies (RoleName = rolename, MaxItems = 100)
else:
policy_resp = iam.list_attached_role_policies (RoleName = rolename, MaxItems = 100, Marker = marker)
marker = None
for policy in policy_resp['AttachedPolicies']:
policies.append (policy['PolicyArn'])
if 'IsTruncated' in policy_resp and policy_resp['IsTruncated']:
marker = policy_resp['Marker']
return (policies, marker)
def evaluate_role (rolename):
role_policies = []
(role_policies, marker) = get_role_policies (rolename)
while marker is not None:
(policies, marker) = get_role_policies (rolename, marker = marker)
role_policies += policies
if policy_arn in role_policies:
return COMPLIANCE_STATES['COMPLIANT']
return COMPLIANCE_STATES['NON_COMPLIANT']
def get_roles ():
token = None
resources = []
(rsrcs, token) = get_resources ('AWS::IAM::Role', next_token = token)
resources += rsrcs
while token is not None:
(rsrcs, token) = get_resources ('AWS::IAM::Role', next_token = token)
resources += rsrcs
return resources
def get_resources (rsrc_type, next_token):
if next_token is not None:
resp = cfg.list_discovered_resources (
resourceType = rsrc_type,
includeDeletedResources = False,
nextToken = next_token
)
else:
resp = cfg.list_discovered_resources (
resourceType = rsrc_type,
includeDeletedResources = False
)
rsrcs = resp['resourceIdentifiers']
next_token = None
if 'nextToken' in resp:
next_token = resp['nextToken']
return (rsrcs, next_token)
# Receives the event and context from AWS Lambda. You can copy this handler and use it in your own
# code with little or no change.
def handler (event, context):
print ("Processing event: {}".format (event))
return_message = 'Invoked for a notification other than Scheduled Notification... Ignoring.'
invoking_event = json.loads (event['invokingEvent'])
rule_parameters = {}
if 'ruleParameters' in event:
rule_parameters = json.loads (event['ruleParameters'])
result_token = event['resultToken']
if (is_scheduled (invoking_event)):
evaluations = []
eval_time = datetime.now ()
roles = get_roles ()
for role in roles:
compliance = evaluate_role (role['resourceName'])
evaluations.append ({
'ComplianceResourceType': role['resourceType'],
'ComplianceResourceId': role['resourceId'],
'ComplianceType': compliance,
'OrderingTimestamp': eval_time
})
for i in range(0,len(evaluations),50):
cfg.put_evaluations (
Evaluations = evaluations[i:(i+50)],
ResultToken = result_token
)
return_message = "Evaluationed {} roles".format (len(roles))
print (return_message)
return return_message
``` |
{
"source": "jpbarto/cluster_python_eval",
"score": 2
} |
#### File: jpbarto/cluster_python_eval/node0.py
```python
import zmq
import time
import threading
import json
name = 'bob'
def cluster_manager (context, join_uri):
nodes = [name]
join_sock = context.socket (zmq.REP)
join_sock.bind (join_uri)
while True:
message = join_sock.recv ()
req = json.loads (message)
if 'type' in req and req['type'] == 'JOIN' and req['node'] not in nodes:
nodes.append (req['node'])
resp = {'type': 'ACK', 'nodes': nodes}
join_sock.send (json.dumps(resp))
ctx = zmq.Context (1)
thread = threading.Thread (target = cluster_manager, args = (ctx, 'tcp://*:5560'))
thread.start ()
``` |
{
"source": "jpbelleau/hdfs-over-ftp-slider",
"score": 2
} |
#### File: package/scripts/hdfsftp.py
```python
import sys
from resource_management import *
class HDFSFTP(Script):
def install(self, env):
self.install_packages(env)
def configure(self, env):
import os
import params
env.set_params(params)
keystore_path = params.app_root + "/" + params.keystore_file
File(format("{params.log_dir}/hdfs-over-ftp.log"),
mode=0666,
owner=params.app_user,
group=params.user_group
)
TemplateConfig(format("{app_root}/log4j.xml"), owner = params.app_user, group = params.user_group)
#TemplateConfig(format("{app_root}/hdfs-over-ftp.properties"), owner = params.app_user, group = params.user_group)
PropertiesFile(format("{app_root}/hdfs-over-ftp.properties"),
properties = params.config['configurations']['hdfsftp'],
owner = params.app_user,
group = params.user_group
)
PropertiesFile(format("{app_root}/users.properties"),
properties = params.config['configurations']['usersprops'],
owner = params.app_user,
group = params.user_group
)
if not os.path.exists(keystore_path):
Execute(format("{hdfs_bin} dfs -get {keystore_in_hdfs} {keystore_path}"),
user=params.app_user)
File(keystore_path,
                 mode=0600,
                 owner=params.app_user,
                 group=params.user_group,
replace=False)
def start(self, env):
import params
env.set_params(params)
self.configure(env)
process_cmd = format("{java64_home}/jre/bin/java {params.java_opts} -Dcom.sun.management.jmxremote.port={params.jmxport} -Dcom.sun.management.jmxremote.rmi.port={params.jmxport} -cp {params.app_root}/:{params.app_root}/lib/* org.apache.hadoop.contrib.ftp.HdfsOverFtpServer --approot {params.app_root} > {params.log_dir}/hdfsftp-output.log 2>&1")
#user=params.app_user,
Execute(process_cmd,
logoutput=True,
wait_for_finish=False,
pid_file=params.pid_file
)
def stop(self, env):
import params
env.set_params(params)
def status(self, env):
import params
env.set_params(params)
check_process_status(params.pid_file)
if __name__ == "__main__":
HDFSFTP().execute()
``` |
{
"source": "jpbelleau/openaigym_pong",
"score": 3
} |
#### File: jpbelleau/openaigym_pong/pong_runner.py
```python
import argparse
import gym
import matplotlib.pyplot as plt
from matplotlib.pyplot import draw, pause
from random import choice
from pong_player import Player
# Runs Pong training, testing, and playing
# Step returns:
# Num Description
# 0 Observation - screen (210, 160, 3)
# 1 Reward (-1 [lost point], 0, or 1 [won point])
# 2 Game over (boolean)
# 3 {'ale.lives': 0}
# Actions:
# Type: Discrete(5)
# Num Action
# 0 NOOP
# 1 FIRE
# 2 RIGHT (for Pong - up)
# 3 LEFT (for Pong - down)
# 4 RIGHTFIRE
# 5 LEFTFIRE
class PRun:
def __init__(self, screenscale=2, showtesting=False):
# Reduce screen dimensions by this scale factor
# Reduce details for processing speed
self.screenscale = screenscale
# Show screens while running
self.showtesting = showtesting
# Number of output classes
self.num_classes = 3
# Number of stacked LSTM layers
self.num_layers = 1
# Zero disables
self.dropout = 0
# How many frames to run before training / playing
# At the start of a game
self.startup_frames = 10
# Current results - correct == wins / incorrect == losses
self.results = {"correct": 0, "incorrect": 0, "perccorrect": 0, "loss": 0.0}
def reformat(self, x1, fullscreen=False):
# Screens from files are partly reformatted already
if fullscreen == True:
# Trim based on fullscreen
# Summing all the colors, shape is (210,160)
y1 = x1.sum(axis=2)
# True/False, background is False, 233 is background color
y2 = y1 != 233
y3 = y2.astype(int)
x1 = y3[
34:194,
]
y4 = x1[:, 16:144] # original shape is 160,128 - this has opp paddle
# y4 = x1[:,20:144] # shape is 160,124 - no opp paddle
r, c = y4.shape
y5 = y4.reshape(
r // self.screenscale,
self.screenscale,
c // self.screenscale,
self.screenscale,
)
y6 = y5.transpose([0, 2, 1, 3])
y7 = y6.sum(axis=(2, 3))
y8 = y7 != 0
y9 = y8.astype(float)
return y9
def startup(self, env, n, player, reset=True, lastscreen=None):
# Run a few frames (for the beginning of the point)
# Keep these for normalizing the number of sequences later
# Only reset if no previous screen (new game)
if reset == False:
x1 = lastscreen
else:
x1 = env.reset()
for _ in range(n):
act = choice([0, 2, 3])
# Associate the screen we saw with the action taken
player.store(self.reformat(x1, fullscreen=True), act)
x1, _, _, _ = env.step(act)
# Return the last screen
return x1
def setvars(
self,
subcommand,
numpoints=0,
loadpath=None,
learnrate=0.001,
savepath=None,
savebasename=None,
loadmodel=None,
randfiles=False,
numgames=0,
rendergame=False,
forcecpu=False,
):
# Bring in settings
self.subcommand = subcommand
self.numpoints = numpoints
self.loadpath = loadpath
self.learnrate = learnrate
self.savepath = savepath
self.savebasename = savebasename
self.loadmodel = loadmodel
self.randfiles = randfiles
self.numgames = numgames
self.rendergame = rendergame
self.forcecpu = forcecpu
self.numepochs = 0
if self.numpoints > 0:
self.numepochs = self.numpoints
elif self.numgames > 0:
self.numepochs = self.numgames
else:
print("Both number of points and number of games cannot be zero")
return False
return True
def run(self):
# Run the proper function based on the subcommand
# So far, no frame skip has performed better than the standard
# In test / train this is used to get the original screen size
env = gym.make("PongNoFrameskip-v0")
# Instantiate player
pplayer = Player()
numimgseq = pplayer.numimgseq
if self.startup_frames < numimgseq:
print(
"Startup frames needs to be larger than the number of sequences per batch"
)
return False
screen = self.startup(env, self.startup_frames, pplayer)
imgw = pplayer.mem[0][0].shape[0]
imgh = pplayer.mem[0][0].shape[1]
if self.subcommand != "play":
env.close()
hidden_size = int(imgw * imgh)
pplayer.initnet(
num_classes=self.num_classes,
imgw=imgw,
imgh=imgh,
hidden_size=hidden_size,
num_layers=self.num_layers,
dropout=self.dropout,
learnrate=self.learnrate,
modelfile=self.loadmodel,
forcecpu=self.forcecpu,
)
if self.showtesting == True:
im = plt.imshow(self.reformat(screen, fullscreen=True))
if self.subcommand != "play":
# Load training data from files
if pplayer.loaddata(self.loadpath, self.randfiles, self.numpoints) == False:
return False
righttot = wrongtot = 0
pointsfortot = pointsvstot = 0
if self.subcommand == "train":
print(
"Num point Right Wrong Ratio RightTotal WrongTotal RatioTotal Alpha Loss"
)
        elif self.subcommand == "test":
print(
"Num point Right Wrong Ratio RightTotal WrongTotal RatioTotal"
)
elif self.subcommand == "play":
print(
"Num game Me Opp MeTotal OppTotal RatioTotal MvHoldTotal MvUpTotal MvDownTotal"
)
# Loop per point (train / test) or game (play) depending on command
for i in range(self.numepochs):
numpoints = i + 1
if self.subcommand != "play":
# Prepare data
# Need to separate inputs (screens) and answers (actions)
meminputs = []
memanswers = []
for (img, act) in pplayer.alldata[i]:
if self.showtesting == True:
im.set_data(img)
draw()
pause(0.001)
meminputs.append(self.reformat(img))
memanswers.append(act)
# Run this point screen sequence
right, wrong = pplayer.replayscreens(
meminputs, memanswers, mode=self.subcommand
)
righttot += right
wrongtot += wrong
rat = right / (right + wrong)
rattot = righttot / (righttot + wrongtot)
if self.subcommand == "train":
print(
f"{numpoints:5d} {right:6d} {wrong:6d} {rat:3.3f} {righttot:9d} {wrongtot:6d} {rattot:5.3f} {pplayer.getalpha():6.3f} {pplayer.currloss:6.3f}"
)
self.results["loss"] = pplayer.currloss
elif self.subcommand == "test":
print(
f"{numpoints:5d} {right:6d} {wrong:6d} {rat:3.3f} {righttot:9d} {wrongtot:6d} {rattot:5.3f}"
)
self.results["correct"] = right
self.results["incorrect"] = wrong
self.results["perccorrect"] = rat
else:
# Gotta be play
pointsfor = pointsvs = 0
movedict = {"net": [0, 0, 0]}
# Point loop
while True:
screen = self.reformat(screen, fullscreen=True)
# Need to combine into a sequence
# Create as a subarray of a sequence of images
# Basically make this a batch size of 1
mvimgseq = []
mvimgseq.append([])
mvimgseq[0].append(screen)
for i in range(numimgseq - 1):
mvimgseq[0].insert(0, (pplayer.mem[i][0]))
if self.showtesting == True:
for img in mvimgseq:
im.set_data(img)
draw()
pause(0.001)
                    # Get the move from the neural net
act = pplayer.getmove(mvimgseq)
# Keeping track for how many hold, up, down actions
if act > 1:
movedict["net"][act - 1] += 1
else:
movedict["net"][act] += 1
# Store this screen to be used in future sequences
pplayer.store(screen, act)
# Render game if desired
if self.rendergame == True:
env.render()
# Send our action to the env and get the output screen
screen, x2, x3, _ = env.step(act)
# Point over?
if int(x2) != 0:
# End of the game?
if x3 == False:
if x2 < 0:
pointsvs += 1
else:
pointsfor += 1
# Clear the screen memory
pplayer.mem = []
print(
f'{numpoints:5d} {pointsfor:6d} {pointsvs:6d} ------ ------ --- {movedict["net"][0]:4d} {movedict["net"][1]:4d} {movedict["net"][2]:4d}'
)
# Prep for the next point
screen = self.startup(
env, numimgseq, pplayer, reset=False, lastscreen=screen
)
# End of the game?
if x3 == True:
pointsfortot += pointsfor
pointsvstot += pointsvs
rat = pointsfortot / (pointsfortot + pointsvstot)
print(
f'{numpoints:5d} {pointsfor:6d} {pointsvs:6d} {pointsfortot:9d} {pointsvstot:6d} {rat:3.3f} {movedict["net"][0]:4d} {movedict["net"][1]:4d} {movedict["net"][2]:4d}'
)
self.results["correct"] = pointsfortot
self.results["incorrect"] = pointsvstot
self.results["perccorrect"] = rat
break
# Prepare for next game
screen = self.startup(env, numimgseq, pplayer)
if self.savepath != None:
# Save the model
pplayer.savenet(self.savepath, self.savebasename)
if self.rendergame == True:
env.close()
return True
if __name__ == "__main__":
# Setup argument parser
parser = argparse.ArgumentParser(
description="Runs Pong training, testing, and playing"
)
subparsers = parser.add_subparsers(help="Mode subcommands", dest="subcommand")
# Subcommand train
parser_train = subparsers.add_parser("train", help="Training commands")
parser_train_r = parser_train.add_argument_group("required arguments")
parser_train_r.add_argument(
"--numpoints",
required=True,
type=int,
help="Number of point files to use for training",
)
parser_train_r.add_argument(
"--loadpath", required=True, help="Directory to load screen sequences"
)
parser_train_r.add_argument(
"--learnrate", required=True, type=float, help="Learning rate"
)
parser_train.add_argument("--savepath", help="Directory to save model")
parser_train.add_argument(
"--savebasename", help="Base file name for model and optium saves"
)
parser_train.add_argument(
"--randfiles", action="store_true", help="Randomize file selection"
)
parser_train.add_argument("--forcecpu", action="store_true", help="Only use CPU")
# Subcommand test
parser_test = subparsers.add_parser("test", help="Testing commands")
parser_test_r = parser_test.add_argument_group("required arguments")
parser_test_r.add_argument(
"--numpoints",
required=True,
type=int,
help="Number of point files to use for testing",
)
parser_test_r.add_argument(
"--loadpath", required=True, help="Directory to load screen sequences"
)
parser_test_r.add_argument(
"--loadmodel", required=True, help="Full path to model to test"
)
parser_test.add_argument(
"--randfiles", action="store_true", help="Randomize file selection"
)
parser_test.add_argument("--forcecpu", action="store_true", help="Only use CPU")
# Subcommand play
parser_play = subparsers.add_parser("play", help="Play commands")
parser_play_r = parser_play.add_argument_group("required arguments")
parser_play_r.add_argument(
"--numgames", required=True, type=int, help="Number of games to play"
)
parser_play_r.add_argument(
"--loadmodel", required=True, help="Full path to model to play"
)
parser_play.add_argument(
"--rendergame", action="store_true", help="Render the game while playing"
)
parser_play.add_argument("--forcecpu", action="store_true", help="Only use CPU")
args = parser.parse_args()
dargs = vars(args)
# Instantiate a runner
prunner = PRun(screenscale=2, showtesting=False)
# Setup variables
if prunner.setvars(**dargs) == False:
        exit(1)
# Run the command
if prunner.run() == False:
        exit(1)
``` |
{
"source": "jpbempel/spring-petclinic",
"score": 3
} |
#### File: spring-petclinic/scripts/percentiles.py
```python
import sys
output_filename = sys.argv[1]
input_files = sys.argv[2:]
def get_percentile(percentile, values):
count = len(values)
return values[int(count * percentile)]
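# Illustrative: get_percentile(0.5, sorted_values) returns sorted_values[len(sorted_values) // 2],
# e.g. get_percentile(0.5, [1, 2, 3, 4]) -> 3 (nearest-rank style; the caller must pass a pre-sorted list).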
latencies = []
percentiles = []
with open(output_filename, "w") as output_file:
headers = [input_file.replace('results_', '').replace('.csv', '') for input_file in input_files]
output_file.write(','.join(headers) + '\n')
    for input_filename in input_files:
        latencies = []  # reset per input file so each output column reflects only that file
        with open(input_filename) as input_file:
            for line in input_file:
                cols = line.split(',')
                if cols[0] == "starttransfer": # header
                    continue
                latencies.append(cols[0])
        latencies.sort(key=float)  # sort numerically; plain sort() would order the strings lexicographically
percentile_dist = [get_percentile(0.1, latencies),
get_percentile(0.2, latencies),
get_percentile(0.3, latencies),
get_percentile(0.4, latencies),
get_percentile(0.5, latencies),
get_percentile(0.6, latencies),
get_percentile(0.7, latencies),
get_percentile(0.8, latencies),
get_percentile(0.9, latencies),
get_percentile(0.95, latencies),
get_percentile(0.99, latencies)]
percentiles.append(percentile_dist)
for percentile_no in range(11): # number of percentiles
line_values = []
for input_no in range(len(percentiles)):
line_values.append(percentiles[input_no][percentile_no])
line = ','.join(line_values) + '\n'
output_file.write(line)
``` |
{
"source": "jpbernius/eat-api",
"score": 3
} |
#### File: eat-api/src/entities.py
```python
import json
import re
from typing import Dict, Optional, Sequence
class Dish:
def __init__(self, name, price, ingredients, dish_type):
self.name = name
try:
self.price = float(price)
except ValueError:
self.price = price
self.ingredients = ingredients
self.dish_type = dish_type
def __repr__(self):
if type(self.price) is not str:
return "%s %s %s: %.2f€" % (self.dish_type, self.name, str(sorted(self.ingredients)), self.price)
else:
return "%s %s %s: %s" % (self.dish_type, self.name, str(sorted(self.ingredients)), self.price)
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.name == other.name
and self.price == other.price
and self.ingredients == other.ingredients
and self.dish_type == other.dish_type)
return False
def to_json_obj(self):
return {"name": self.name,
"price": self.price,
"ingredients": sorted(self.ingredients),
"dish_type": self.dish_type}
def __hash__(self):
# http://stackoverflow.com/questions/4005318/how-to-implement-a-good-hash-function-in-python
return (hash(self.name) << 1) ^ hash(self.price) ^ hash(frozenset(self.ingredients))
class Menu:
def __init__(self, menu_date, dishes):
self.menu_date = menu_date
self.dishes = dishes
def __repr__(self):
menu_str = str(self.menu_date) + ": " + str(self.dishes)
return menu_str
def __eq__(self, other):
if isinstance(other, self.__class__):
dishes_equal = set(self.dishes) == set(other.dishes)
date_equal = self.menu_date == other.menu_date
return dishes_equal and date_equal
return False
def remove_duplicates(self):
unique = []
seen = set()
for d in self.dishes:
if d not in seen:
unique.append(d)
seen.add(d)
self.dishes = unique
class Week:
def __init__(self, calendar_week, year, days):
self.calendar_week = calendar_week
self.year = year
self.days = days
def __repr__(self):
week_str = "Week %s-%s" % (self.year, self.calendar_week)
for day in self.days:
week_str += "\n %s" % day
return week_str
def to_json_obj(self):
return {"number": self.calendar_week, "year": self.year,
"days": [{"date": str(menu.menu_date), "dishes": [dish.to_json_obj() for dish in menu.dishes]} for menu in
self.days]}
def to_json(self):
week_json = json.dumps(
self.to_json_obj(),
ensure_ascii=False, indent=4)
return week_json
@staticmethod
def to_weeks(menus):
weeks = {}
for menu_key in menus:
menu = menus[menu_key]
menu_date = menu.menu_date
# get calendar week
calendar_week = menu_date.isocalendar()[1]
# get year of the calendar week. watch out that for instance jan 01 can still be in week 52 of the
# previous year
year_of_calendar_week = menu_date.year - 1 \
if calendar_week == 52 and menu_date.month == 1 else menu_date.year
# append menus to respective week
week = weeks.get(calendar_week, Week(calendar_week, year_of_calendar_week, []))
week.days.append(menu)
weeks[calendar_week] = week
return weeks
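# Illustrative: Week.to_weeks({"2018-05-07": Menu(datetime.date(2018, 5, 7), [])}) groups menus by ISO
# calendar week and returns {19: Week(19, 2018, [Menu(...)])}; the key string here is hypothetical,
# only the Menu values are used.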
class Ingredients:
ingredient_lookup = {
"GQB" : "Certified Quality - Bavaria",
"MSC" : "Marine Stewardship Council",
"1" : "with dyestuff",
"2" : "with preservative",
"3" : "with antioxidant",
"4" : "with flavor enhancers",
"5" : "sulphured",
"6" : "blackened (olive)",
"7" : "waxed",
"8" : "with phosphate",
"9" : "with sweeteners",
"10" : "contains a source of phenylalanine",
"11" : "with sugar and sweeteners",
"13" : "with cocoa-containing grease",
"14" : "with gelatin",
"99" : "with alcohol",
"f" : "meatless dish",
"v" : "vegan dish",
"S" : "with pork",
"R" : "with beef",
"K" : "with veal",
"G" : "with poultry", # mediziner mensa
"W" : "with wild meat", # mediziner mensa
"L" : "with lamb", # mediziner mensa
"Kn" : "with garlic",
"Ei" : "with chicken egg",
"En" : "with peanut",
"Fi" : "with fish",
"Gl" : "with gluten-containing cereals",
"GlW" : "with wheat",
"GlR" : "with rye",
"GlG" : "with barley",
"GlH" : "with oats",
"GlD" : "with spelt",
"Kr" : "with crustaceans",
"Lu" : "with lupines",
"Mi" : "with milk and lactose",
"Sc" : "with shell fruits",
"ScM" : "with almonds",
"ScH" : "with hazelnuts",
"ScW" : "with Walnuts",
"ScC" : "with cashew nuts",
"ScP" : "with pistachios",
"Se" : "with sesame seeds",
"Sf" : "with mustard",
"Sl" : "with celery",
"So" : "with soy",
"Sw" : "with sulfur dioxide and sulfites",
"Wt" : "with mollusks",
}
"""A dictionary of all ingredients (from the Studentenwerk) with their description."""
fmi_ingredient_lookup = {
"Gluten" : "Gl",
"Laktose" : "Mi",
"Milcheiweiß" : "Mi",
"Milch" : "Mi",
"Ei" : "Ei",
"Hühnerei" : "Ei",
"Soja" : "So",
"Nüsse" : "Sc",
"Erdnuss" : "En",
"Sellerie" : "Sl",
"Fisch" : "Si",
"Krebstiere" : "Kr",
"Weichtiere" : "Wt",
"Sesam" : "Se",
"Senf" : "Sf",
}
mediziner_ingredient_lookup = {
"1" : "1",
"2" : "2",
"3" : "3",
"4" : "4",
"5" : "5",
"6" : "6",
"7" : "7",
"8" : "8",
"9" : "9",
"A" : "99",
"B" : "Gl",
"C" : "Kr",
"E" : "Fi",
"F" : "Fi",
"G" : "G",
"H" : "En",
"K" : "K",
"L" : "L",
"M" : "So",
"N" : "Mi",
"O" : "Sc",
"P" : "Sl",
"R" : "R",
"S" : "S",
"T" : "Sf",
"U" : "Se",
"V" : "Sw",
"W" : "W",
"X" : "Lu",
"Y" : "Ei",
"Z" : "Wt",
}
def __init__(self, location: str) -> None:
self.location = location
self.ingredient_set = set()
def _values_lookup(self, values: Sequence[str], lookup: Optional[Dict[str, str]]) -> None:
"""
Normalizes ingredients to the self.ingredient_lookup codes.
Args:
values: A sequence of ingredients codes.
lookup: If needed, a mapping from a canteen specific ingredients codes to the self.ingredient_lookup codes.
"""
for value in values:
# ignore empty values
if not value or value.isspace():
continue
if (not lookup and value not in self.ingredient_lookup) or (lookup and value not in lookup):
# sometimes the ‘,’ is missing between the ingredients (especially with IPP) and we try to split again
# with capital letters.
split_values = re.findall(r'[a-züöäA-ZÜÖÄ][^A-ZÜÖÄ]*', value)
if split_values:
self._values_lookup(split_values, lookup)
continue
else:
print("Unknown ingredient for " + self.location + " found: " + str(value))
continue
if lookup:
self.ingredient_set.add(lookup[value])
else:
self.ingredient_set.add(value)
def parse_ingredients(self, values: str) -> None:
"""
Parse and creates a normalized list of ingredients.
Args:
values: String with comma separated ingredients codes.
"""
values = values.strip()
split_values = values.split(',')
# check for special parser/ingredient translation required
if self.location == "fmi-bistro":
self._values_lookup(split_values, self.fmi_ingredient_lookup)
elif self.location == "mediziner-mensa":
self._values_lookup(split_values, self.mediziner_ingredient_lookup)
# default to the "Studentenwerk" ingredients
# "ipp-bistro" also uses the "Studentenwerk" ingredients since all
# dishes contain the same ingredients
else:
self._values_lookup(split_values, None)
def __hash__(self):
return hash(frozenset(self.ingredient_set))
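# Illustrative: Ingredients("some-studentenwerk-canteen").parse_ingredients("2,Gl,Mi") leaves
# ingredient_set == {"2", "Gl", "Mi"}; the location string is hypothetical and only selects which
# lookup table is applied.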
```
#### File: eat-api/src/util.py
```python
from datetime import datetime
date_pattern = "%d.%m.%Y"
cli_date_format = "dd.mm.yyyy"
def parse_date(date_str):
return datetime.strptime(date_str, date_pattern).date()
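# Illustrative: make_duplicates_unique(["Mensa", "Bistro", "Mensa"]) returns ["Mensa", "Bistro", "Mensa (2)"].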
def make_duplicates_unique(names_with_duplicates):
counts = [1] * len(names_with_duplicates)
checked_names = []
for i, name in enumerate(names_with_duplicates):
if name in checked_names:
counts[i] += 1
checked_names.append(name)
names_without_duplicates = names_with_duplicates
for i, count in enumerate(counts):
if count > 1:
names_without_duplicates[i] += " (%s)" % count
return names_without_duplicates
``` |
{
"source": "jpbernius/tumcsbot",
"score": 3
} |
#### File: tumcsbot/src/migrate.py
```python
import argparse
import sqlite3 as sqlite
def migrate(db_path: str, sql_script_path: str) -> None:
"""Apply the given sql_script to the given database."""
connection: sqlite.Connection = sqlite.connect(db_path)
cursor: sqlite.Cursor = connection.cursor()
with open(sql_script_path, 'r') as sql_script:
cursor.executescript(sql_script.read())
connection.commit()
connection.close()
def main() -> None:
argument_parser = argparse.ArgumentParser(description = __doc__)
argument_parser.add_argument(
'db_path', metavar = 'DB_PATH', help = 'path to the bot\'s database'
)
argument_parser.add_argument(
'script', metavar = 'SQL_SCRIPT', help = 'the migration script to execute'
)
args: argparse.Namespace = argument_parser.parse_args()
migrate(args.db_path, args.script)
print('Successfully applied migrations.')
if __name__ == '__main__':
main()
```
#### File: src/tests/test_client.py
```python
import unittest
from typing import Any, ClassVar, Dict, Optional
from tumcsbot.client import Client as TUMCSBotClient
class ClientGetUserIdsFromAttributeTest(unittest.TestCase):
class Client(TUMCSBotClient):
def __init__(self) -> None:
pass
def get_users(self, request: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
return get_users()
_client: ClassVar[Client]
@classmethod
def setUpClass(cls) -> None:
cls._client = cls.Client()
def test_get_user_ids_from_attribute(self) -> None:
self.assertEqual(
self._client.get_user_ids_from_attribute('not_existing_attribute', [1, 2, 3]), []
)
self.assertEqual(
self._client.get_user_ids_from_attribute('delivery_email', ['<EMAIL>']), [1]
)
self.assertEqual(
self._client.get_user_ids_from_attribute(
'delivery_email', ['<EMAIL>', '<EMAIL>']
),
[1, 3]
)
self.assertEqual(
self._client.get_user_ids_from_attribute(
'delivery_email', ['<EMAIL>', '<EMAIL>']
),
[1]
)
self.assertEqual(
self._client.get_user_ids_from_attribute(
'delivery_email', ['<EMAIL>', '<EMAIL>'], case_sensitive = False
),
[1, 3]
)
self.assertEqual(
self._client.get_user_ids_from_attribute('user_id', [1, 3]),
[1, 3]
)
self.assertEqual(
self._client.get_user_ids_from_attribute('user_id', [2, 3, 4], case_sensitive = False),
[2, 3]
)
self.assertEqual(
self._client.get_user_ids_from_attribute('full_name', ['abc']), [1, 2]
)
def test_get_user_ids_from_display_names(self) -> None:
self.assertEqual(
self._client.get_user_ids_from_attribute('full_name', ['abc']),
self._client.get_user_ids_from_display_names(['abc'])
)
self.assertEqual(
self._client.get_user_ids_from_attribute('full_name', ['aBc']),
self._client.get_user_ids_from_display_names(['aBc'])
)
def test_get_user_ids_from_emails(self) -> None:
self.assertEqual(
self._client.get_user_ids_from_attribute(
'delivery_email', ['<EMAIL>', '<EMAIL>'], case_sensitive = False
),
self._client.get_user_ids_from_emails(['<EMAIL>', '<EMAIL>'])
)
def get_users() -> Dict[str, Any]:
return {
'result': 'success',
'members': [
{
'delivery_email': '<EMAIL>',
'full_name': 'abc',
'user_id': 1,
},
{
'delivery_email': '<EMAIL>',
'full_name': 'abc',
'user_id': 2,
},
{
'delivery_email': '<EMAIL>',
'full_name': 'ghi',
'user_id': 3,
},
]
}
```
#### File: tumcsbot/plugins/archive_streams.py
```python
from inspect import cleandoc
from typing import Any, Dict, Iterable, List, Optional, Union
from tumcsbot.lib import split, validate_and_return_regex, Response
from tumcsbot.plugin import CommandPlugin
class ArchiveStreams(CommandPlugin):
plugin_name = 'archive_streams'
syntax = 'archive_streams <stream_regex>...'
description = cleandoc(
"""
Archive streams according to the given regular expressions, which have
to match the full stream name.
Note that only empty streams will be archived.
[administrator/moderator rights needed]
Example (note the quoting!):
```text
archive_streams "Test.*" "ABC \\d* class"
```
"""
)
def handle_message(
self,
message: Dict[str, Any],
**kwargs: Any
) -> Union[Response, Iterable[Response]]:
if not self.client.user_is_privileged(message['sender_id']):
return Response.admin_err(message)
stream_regexes: Optional[List[Any]] = split(
message['command'], converter = [validate_and_return_regex]
)
if stream_regexes is None or None in stream_regexes:
return Response.build_message(message, 'Found invalid regular expressions.')
response: List[str] = []
for stream_regex in stream_regexes:
streams: List[str] = self.client.get_streams_from_regex(stream_regex)
removed: int = 0
for stream in streams:
result: Dict[str, Any] = self.client.get_stream_id(stream)
if result['result'] != 'success':
continue
stream_id: int = result['stream_id']
# Check if stream is empty.
result = self.client.get_messages({
'anchor': 'oldest',
'num_before': 0,
'num_after': 1,
'narrow': [
{'operator': 'stream', 'operand': stream_id}
]
})
if result['result'] != 'success' or result['messages']:
continue
# Archive the stream: https://zulip.com/help/archive-a-stream
result = self.client.delete_stream(stream_id)
if result['result'] == 'success':
removed += 1
response.append('"%s" - found %d matching streams, removed %d'
% (stream_regex, len(streams), removed))
return Response.build_message(message, '\n'.join(response))
```
#### File: tumcsbot/plugins/help.py
```python
from inspect import cleandoc
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, Union
from tumcsbot.lib import Response
from tumcsbot.plugin import PluginContext, CommandPlugin
class Help(CommandPlugin):
"""Provide a help command plugin."""
plugin_name = 'help'
syntax = 'help'
description = 'Post a help message to the requesting user.'
_help_overview_template: str = cleandoc(
"""
Hi {}!
Use `help <command name>` to get more information about a \
certain command.
Please consider that my command line parsing is comparable to \
the POSIX shell. So in order to preserve arguments containing \
whitespace characters from splitting, they need to be quoted. \
Special strings such as regexes containing backslash sequences \
may require single quotes instead of double quotes.
Currently, I understand the following commands:
{}
Have a nice day! :-)
"""
)
def __init__(self, plugin_context: PluginContext) -> None:
super().__init__(plugin_context)
self.help_info: List[Tuple[str, str, str]] = self._get_help_info(
plugin_context.command_plugin_classes
)
def handle_message(
self,
message: Dict[str, Any],
**kwargs: Any
) -> Union[Response, Iterable[Response]]:
command: str = message['command'].strip()
if not command:
return self._help_overview(message)
return self._help_command(message, command)
@staticmethod
def _format_description(description: str) -> str:
"""Format the usage description of a command."""
# Remove surrounding whitespace.
        return description.strip()
@staticmethod
def _format_syntax(syntax: str) -> str:
"""Format the syntax string of a command."""
return '```text\n' + syntax.strip() + '\n```\n'
def _get_help_info(
self,
commands: List[Type[CommandPlugin]]
) -> List[Tuple[str, str, str]]:
"""Get help information from each command.
Return a list of tuples (command name, syntax, description).
"""
result: List[Tuple[str, str, str]] = []
for command in commands:
name: str = command.plugin_name
syntax_data, description_data = command.get_usage()
syntax: str = self._format_syntax(syntax_data)
description: str = self._format_description(description_data)
result.append((name, syntax, description))
# Sort by name.
return sorted(result, key = lambda tuple: tuple[0])
def _help_command(
self,
message: Dict[str, Any],
command: str
) -> Union[Response, Iterable[Response]]:
info_tuple: Optional[Tuple[str, str, str]] = None
for ituple in self.help_info:
if ituple[0] == command:
info_tuple = ituple
break
if info_tuple is None:
return Response.command_not_found(message)
help_message: str = '\n'.join(info_tuple[1:])
return Response.build_message(
message, help_message, msg_type = 'private', to = message['sender_email']
)
def _help_overview(
self,
message: Dict[str, Any]
) -> Union[Response, Iterable[Response]]:
# Get the command names.
help_message: str = '\n'.join(map(lambda tuple: '- ' + tuple[0], self.help_info))
return Response.build_message(
message,
self._help_overview_template.format(message['sender_full_name'], help_message),
msg_type = 'private',
to = message['sender_email']
)
```
#### File: tumcsbot/plugins/search.py
```python
import urllib.parse
from typing import Any, Dict, Iterable, Union
from tumcsbot.lib import Response
from tumcsbot.plugin import CommandPlugin
class Search(CommandPlugin):
plugin_name = 'search'
syntax = 'search <string>'
description = 'Get a url to a search for "string" in all public streams.'
msg_template: str = 'Hi, I hope that these search results may help you: {}'
path: str = '#narrow/streams/public/search/'
def handle_message(
self,
message: Dict[str, Any],
**kwargs: Any
) -> Union[Response, Iterable[Response]]:
# Get search string and quote it.
search: str = urllib.parse.quote(message['command'], safe = '')
# Fix strange behavior of Zulip which does not accept literal periods.
search = search.replace('.', '%2E')
# Get host url (removing trailing 'api/').
base_url: str = self.client.base_url[:-4]
# Build the full url.
url: str = base_url + self.path + search
# Remove requesting message.
self.client.delete_message(message['id'])
return Response.build_message(
message, self.msg_template.format(url)
)
```
#### File: tumcsbot/plugins/source.py
```python
from typing import Any, Dict, Iterable, Union
from tumcsbot.lib import Response
from tumcsbot.plugin import CommandPlugin
class Source(CommandPlugin):
plugin_name = 'source'
syntax = 'source'
description = 'Post the link to the repository of my source code.'
def handle_message(
self,
message: Dict[str, Any],
**kwargs: Any
) -> Union[Response, Iterable[Response]]:
return Response.build_message(
message, 'https://github.com/ro-i/tumcsbot'
)
```
#### File: tumcsbot/plugins/sql.py
```python
from inspect import cleandoc
from typing import Any, Dict, Iterable, List, Tuple, Union
from tumcsbot.lib import DB, Response
from tumcsbot.plugin import CommandPlugin, PluginContext
class Sql(CommandPlugin):
plugin_name = 'sql'
syntax = cleandoc(
"""
sql <sql_script>
or sql list
"""
)
description = cleandoc(
"""
Access the internal database of the bot read-only.
The `list` command is a shortcut to list all tables.
[administrator/moderator rights needed]
"""
)
_list_sql: str = 'select * from sqlite_master where type = "table"'
def __init__(self, plugin_context: PluginContext) -> None:
super().__init__(plugin_context)
# Get own read-only (!!!) database connection.
self._db = DB(read_only = True)
def handle_message(
self,
message: Dict[str, Any],
**kwargs: Any
) -> Union[Response, Iterable[Response]]:
result_sql: List[Tuple[Any, ...]]
if not self.client.user_is_privileged(message['sender_id']):
return Response.admin_err(message)
try:
if message['command'] == 'list':
result_sql = self._db.execute(self._list_sql)
else:
result_sql = self._db.execute(message['command'])
except Exception as e:
return Response.build_message(message, str(e))
result: str = '```text\n' + '\n'.join(map(str, result_sql)) + '\n```'
return Response.build_message(message, result)
```
#### File: tumcsbot/plugins/update.py
```python
import logging
import os
import subprocess as sp
from inspect import cleandoc
from pathlib import Path
from typing import Any, Dict, Iterable, List, Union
from tumcsbot.lib import Response
from tumcsbot.plugin import CommandPlugin
class Update(CommandPlugin):
plugin_name = 'update'
syntax = 'update'
description = cleandoc(
"""
Update the bot. You may want to restart it afterwards.
[administrator/moderator rights needed]
"""
)
_git_pull_cmd: List[str] = ['git', 'pull']
_timeout: int = 15
def handle_message(
self,
message: Dict[str, Any],
**kwargs: Any
) -> Union[Response, Iterable[Response]]:
if not self.client.user_is_privileged(message['sender_id']):
return Response.admin_err(message)
# Get the dirname of this file (which is located in the git repo).
git_dir: Path = Path(__file__).parent.absolute()
try:
os.chdir(git_dir)
except Exception as e:
logging.exception(e)
return Response.build_message(
message,
f'Cannot access the directory of my git repo {git_dir}. Please contact the admin.'
)
# Execute command and capture stdout and stderr into one stream (stdout).
try:
result: sp.CompletedProcess[Any] = sp.run(
self._git_pull_cmd, stdout = sp.PIPE, stderr = sp.STDOUT,
text = True, timeout = self._timeout,
)
except sp.TimeoutExpired:
return Response.build_message(
message, f'{self._git_pull_cmd} failed: timeout ({self._timeout} seconds) expired'
)
return Response.build_message(
message,
f'Return code: {result.returncode}\nOutput:\n```text\n{result.stdout}\n```'
)
``` |
{
"source": "jpbetz/test-infra",
"score": 3
} |
#### File: gubernator/github/classifier_test.py
```python
import json
import unittest
import classifier
class DeduperTest(unittest.TestCase):
@staticmethod
def dedup(obj):
return classifier.Deduper().dedup(obj)
def test_types(self):
a = (u'foo', 2, {'bar': ['foo', 'bar']})
self.assertEqual(self.dedup(a), a)
def test_dedupe(self):
# Python interns strings in structs, so...
a = ['foo', 'foo']
self.assertIs(a[0], a[1])
# Use json.loads to get around it
b = json.loads('["foo", "foo"]')
self.assertIsNot(b[0], b[1])
# When deduplicated, the strings are now the same object.
c = self.dedup(b)
self.assertIs(c[0], c[1])
class MergedTest(unittest.TestCase):
def test_merged(self):
self.assertEqual(classifier.get_merged(zip('abcd', [
{'issue': {'n': 1, 'a': 2}},
{'pull_request': {'n': 2, 'b': 3}},
{'c': 4},
{'issue': {'n': 3, 'd': 4},
'pull_request': {'n': 4, 'e': 5}}
], [0] * 4)), {'n': 4, 'a': 2, 'b': 3, 'd': 4, 'e': 5})
def diffs_to_events(*diffs):
events = []
for diff in diffs:
label = {'name': diff[1:], 'color': '#fff'}
if diff[0] == '+':
action = 'labeled'
elif diff[0] == '-':
action = 'unlabeled'
events.append(('pull_request',
{'action': action,
'label': label}, 0))
return events
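# Illustrative: diffs_to_events('+lgtm', '-lgtm') yields a 'labeled' then an 'unlabeled'
# pull_request event for the 'lgtm' label, each with timestamp 0.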
class LabelsTest(unittest.TestCase):
def expect_labels(self, events, names):
labels = classifier.get_labels(events)
self.assertEqual(sorted(labels.keys()), sorted(names))
def test_empty(self):
self.expect_labels([('comment', {'body': 'no labels here'}, 0)], [])
def test_colors(self):
self.assertEqual(classifier.get_labels(
[('c', {'issue':
{'labels': [{'name': 'foo', 'color': '#abc'}]}
}, 0)]),
{'foo': '#abc'})
def test_labeled_action(self):
self.expect_labels(diffs_to_events('+a'), ['a'])
self.expect_labels(diffs_to_events('+a', '+a'), ['a'])
self.expect_labels(diffs_to_events('+a', '-a'), [])
self.expect_labels(diffs_to_events('+a', '+b', '-c', '-b'), ['a'])
def test_issue_overrides_action(self):
labels = [{'name': 'x', 'color': 'y'}]
self.expect_labels(diffs_to_events('+a') +
[('other_event', {'issue': {'labels': labels}}, 0)], ['x'])
def test_labeled_action_missing_label(self):
self.expect_labels([('pull_request', {'action': 'labeled'}, 0)], [])
def make_comment_event(num, name, msg='', event='issue_comment',
action='created', ts=None):
return event, {
'action': action,
'sender': {'login': name},
'comment': {
'id': num,
'user': {'login': name},
'body': msg,
'created_at': ts,
}
}, ts
class CalculateTest(unittest.TestCase):
def test_classify(self):
# A quick integration test to ensure that all the sub-parts are included.
# If this test fails, a smaller unit test SHOULD fail as well.
self.assertEqual(classifier.classify([
('pull_request', {
'pull_request': {
'state': 'open',
'user': {'login': 'a'},
'assignees': [{'login': 'b'}],
'title': 'some fix',
'head': {'sha': 'abcdef'},
'additions': 1,
'deletions': 1,
}
}, 1),
make_comment_event(1, 'k8s-bot',
'failure in https://k8s-gubernator.appspot.com/build/bucket/job/123/', ts=2),
('pull_request', {
'action': 'labeled',
'label': {'name': 'release-note-none', 'color': 'orange'},
}, 3),
make_comment_event(2, 'k8s-merge-robot', '<!-- META={"approvers":["o"]} -->', ts=4),
], {'e2e': ['failure', None, 'stuff is broken']}
),
(True, True, ['a', 'b', 'o'],
{
'author': 'a',
'approvers': ['o'],
'assignees': ['b'],
'additions': 1,
'deletions': 1,
'attn': {'a': 'fix tests', 'b': 'needs review#0#0', 'o': 'needs approval'},
'title': 'some fix',
'labels': {'release-note-none': 'orange'},
'head': 'abcdef',
'needs_rebase': False,
'status': {'e2e': ['failure', None, 'stuff is broken']},
'xrefs': ['/bucket/job/123'],
}))
def test_distill(self):
self.assertEqual(classifier.distill_events([
make_comment_event(1, 'a', ts=1),
make_comment_event(2, 'b', ts=2),
make_comment_event(1, 'a', action='deleted', ts=3),
make_comment_event(3, 'c', event='pull_request_review_comment', ts=4),
make_comment_event(4, 'k8s-bot', ts=4),
('pull_request', {'action': 'synchronize', 'sender': {'login': 'auth'}}, 5),
('pull_request', {'action': 'labeled', 'sender': {'login': 'rev'},
'label': {'name': 'lgtm'}}, 6),
]),
[
('comment', 'b', 2),
('comment', 'c', 4),
('push', 'auth', 5),
('label lgtm', 'rev', 6),
])
def test_calculate_attention(self):
def expect(payload, events, expected_attn):
self.assertEqual(classifier.calculate_attention(events, payload),
expected_attn)
def make_payload(author, assignees=None, labels=None, **kwargs):
ret = {'author': author, 'assignees': assignees or [], 'labels': labels or []}
ret.update(kwargs)
return ret
expect(make_payload('alpha', needs_rebase=True), [],
{'alpha': 'needs rebase'})
expect(make_payload('beta', labels={'release-note-label-needed'}), [],
{'beta': 'needs release-note label'})
expect(make_payload('gamma', status={'ci': ['failure', '', '']}), [],
{'gamma': 'fix tests'})
expect(make_payload('gamma', status={'ci': ['failure', '', '']}),
[('comment', 'other', 1)],
{'gamma': 'address comments#1#1'})
expect(make_payload('delta', ['epsilon']), [],
{'epsilon': 'needs review#0#0'})
expect(make_payload('alpha', ['alpha']), [('comment', 'other', 1)],
{'alpha': 'address comments#1#1'})
expect(make_payload('alpha', approvers=['owner']), [],
{'owner': 'needs approval'})
def test_author_state(self):
def expect(events, result):
self.assertEqual(classifier.get_author_state('author', events),
result)
expect([], ('waiting', 0, 0))
expect([('comment', 'author', 1)], ('waiting', 0, 0))
expect([('comment', 'other', 1)], ('address comments', 1, 1))
expect([('comment', 'other', 1), ('push', 'author', 2)], ('waiting', 2, 2))
expect([('comment', 'other', 1), ('comment', 'author', 2)], ('waiting', 2, 2))
expect([('comment', 'other', 1), ('comment', 'other', 2)], ('address comments', 1, 2))
def test_assignee_state(self):
def expect(events, result):
self.assertEqual(classifier.get_assignee_state('me', 'author', events),
result)
expect([], ('needs review', 0, 0))
expect([('comment', 'other', 1)], ('needs review', 0, 0))
expect([('comment', 'me', 1)], ('waiting', 1, 1))
expect([('label lgtm', 'other', 1)], ('needs review', 0, 0))
expect([('label lgtm', 'me', 1)], ('waiting', 1, 1))
expect([('comment', 'me', 1), ('push', 'author', 2)], ('needs review', 2, 2))
expect([('comment', 'me', 1), ('comment', 'author', 2)], ('needs review', 2, 2))
expect([('comment', 'me', 1), ('comment', 'author', 2), ('comment', 'author', 3)],
('needs review', 2, 3))
def test_xrefs(self):
def expect(body, comments, result):
self.assertEqual(result, classifier.get_xrefs(
[{'comment': c} for c in comments], {'body': body}))
def fail(path):
return 'foobar https://k8s-gubernator.appspot.com/build%s asdf' % path
expect(None, [], [])
expect('something', [], [])
expect(fail('/a/b/34/'), [], ['/a/b/34'])
expect(None, [fail('/a/b/34/')], ['/a/b/34'])
expect(fail('/a/b/34/'), [fail('/a/b/34]')], ['/a/b/34'])
expect(fail('/a/b/34/)'), [fail('/a/b/35]')], ['/a/b/34', '/a/b/35'])
def test_reviewers(self):
def expect(events, result):
self.assertEqual(result, classifier.get_reviewers(events))
def mk(*specs):
out = []
for event, action, body in specs:
body = dict(body) # copy
body['action'] = action
out.append((event, body, 0))
return out
expect([], set())
user_a = {'requested_reviewer': {'login': 'a'}}
expect(mk(('pull_request', 'review_requested', user_a)), {'a'})
expect(mk(('pull_request', 'review_request_removed', user_a)), set())
expect(mk(('pull_request', 'review_requested', user_a),
('pull_request', 'review_request_removed', user_a)), set())
def test_approvers(self):
def expect(comment, result):
self.assertEqual(result, classifier.get_approvers([{
'author': 'k8s-merge-robot', 'comment': comment}]))
expect('nothing', [])
expect('before\n<!-- META={approvers:[someone]} -->', ['someone'])
expect('<!-- META={approvers:[someone,else]} -->', ['someone', 'else'])
expect('<!-- META={approvers:[someone,else]} -->', ['someone', 'else'])
# The META format is *supposed* to be JSON, but a recent change broke it.
# Support both formats so it can be fixed in the future.
expect('<!-- META={"approvers":["username"]} -->\n', ['username'])
class CommentsTest(unittest.TestCase):
def test_basic(self):
self.assertEqual(classifier.get_comments([make_comment_event(1, 'aaa', 'msg', ts=2016)]),
[{'author': 'aaa', 'comment': 'msg', 'timestamp': 2016}])
def test_deleted(self):
self.assertEqual(classifier.get_comments([
make_comment_event(1, 'aaa', 'msg', 2016),
make_comment_event(1, None, None, None, action='deleted'),
make_comment_event(2, '', '', '', action='deleted')]),
[])
def test_edited(self):
self.assertEqual(classifier.get_comments([
make_comment_event(1, 'aaa', 'msg', ts=2016),
make_comment_event(1, 'aaa', 'redacted', ts=2016.1, action='edited')]),
[{'author': 'aaa', 'comment': 'redacted', 'timestamp': 2016.1}])
if __name__ == '__main__':
unittest.main()
```
#### File: test-infra/jenkins/attach_agent.py
```python
import ConfigParser
import sys
# Todo(krzyzacy): fix the import error
# sudo pip install jenkinsapi
from jenkinsapi import jenkins # pylint: disable=import-error
EXCLUSIVE = True
SHARED = False
# TODO: Add 'scalability' label to heavy to not abuse 'build' label.
# TODO(fejta): add light/heavy/pr to tags and replace nodes
INFO = {
'heavy': ('build unittest', 1, EXCLUSIVE),
'light': ('node e2e', 10, EXCLUSIVE),
'pr': ('pull', 1, SHARED),
}
def info(host, kind):
"""Get host info."""
labels, executors, exclusive = INFO[kind]
return {
'credential_description': 'Jenkins GCE ssh key',
'exclusive': exclusive,
'host': host,
'java_path': '',
'jvm_options': '',
'labels': labels,
'max_num_retries': 0,
'node_description': '',
'num_executors': executors,
'port': 22,
'prefix_start_slave_cmd': '',
'remote_fs': '/var/lib/jenkins',
'retry_wait_time': 0,
'suffix_start_slave_cmd': '',
}
def create(api, host, config):
"""Create agent."""
delete(api, host)
print 'Creating %s...' % host,
print api.nodes.create_node(host, config)
def delete(api, host):
"""Delete agent."""
if host in api.nodes:
print 'Deleting %s...' % host,
print api.delete_node(host)
def creds(path, section):
"""An ini file with a section per master.
Should look something like this:
[master-a]
user=foo
key=7deadbeef1234098
[master-b]
user=bar
key=7deadbeef9999999
"""
config = ConfigParser.SafeConfigParser()
config.read(path)
return config.get(section, 'user'), config.get(section, 'key')
if __name__ == '__main__':
CMD, HOST, KIND, INI, AGENT = sys.argv[1:] # pylint: disable=unbalanced-tuple-unpacking
USER, KEY = creds(INI, AGENT)
J = jenkins.Jenkins('http://localhost:8080', USER, KEY)
if sys.argv[1] == 'delete':
delete(J, HOST)
else:
create(J, HOST, info(HOST, KIND))
```
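The script above is driven from the command line as `attach_agent.py <cmd> <host> <kind> <ini> <agent>` (see the `sys.argv` unpacking at the bottom). A minimal usage sketch — not part of the original file, with a made-up host name, and assuming the file is importable as `attach_agent` with the `jenkinsapi` dependency installed — that only builds the node-configuration dict without contacting a Jenkins master:
```python
# Usage sketch (hypothetical): inspect the node configuration produced by info()
# for a 'heavy' agent; no Jenkins master is contacted.
from pprint import pprint

from attach_agent import info  # assumes the file above is importable as attach_agent

# 'heavy' maps to ('build unittest', 1, EXCLUSIVE) in the INFO table above;
# the host name is invented for illustration.
pprint(info('agent-heavy-001', 'heavy'))
```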
#### File: test-infra/jobs/move_extract.py
```python
import json
import os
import re
import sys
ORIG_CWD = os.getcwd() # Checkout changes cwd
def test_infra(*paths):
"""Return path relative to root of test-infra repo."""
return os.path.join(ORIG_CWD, os.path.dirname(__file__), '..', *paths)
def sort():
"""Sort config.json alphabetically."""
# pylint: disable=too-many-branches,too-many-statements,too-many-locals
with open(test_infra('jobs/config.json'), 'r+') as fp:
configs = json.loads(fp.read())
regexp = re.compile('|'.join([
r'^GINKGO_TEST_ARGS=(.*)$|^SKEW_KUBECTL=(y)$'
]))
problems = []
for job, values in configs.items():
if values.get('scenario') != 'kubernetes_e2e':
continue
if 'args' not in values:
continue
args = values['args']
new_args = [a for a in args if a != '--test_args=None']
if new_args != args:
args = new_args
values['args'] = args
if any('None' in a for a in args):
problems.append('Bad flag with None: %s' % job)
continue
if any(a.startswith('--test_args=') for a in args):
continue
with open(test_infra('jobs/%s.env' % job)) as fp:
env = fp.read()
tests = None
skew = False
lines = []
for line in env.split('\n'):
mat = regexp.search(line)
if not mat:
lines.append(line)
continue
group, now_skew = mat.groups()
if group:
if tests:
problems.append('Duplicate %s' % job)
break
tests = group
continue
if now_skew:
if skew:
problems.append('Duplicate skew %s' % job)
skew = now_skew
else:
new_args = []
stop = False
for arg in args:
these = None
add = True
if (
arg == '--env-file=jobs/pull-kubernetes-federation-e2e-gce.env'
and not job == 'pull-kubernetes-federation-e2e-gce'):
these = r'--ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]' # pylint: disable=line-too-long
elif (
arg == '--env-file=jobs/pull-kubernetes-e2e.env'
and not job.startswith('pull-kubernetes-federation-e2e-gce')):
these = r'--ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]' # pylint: disable=line-too-long
elif arg == '--env-file=jobs/suite/slow.env':
these = r'--ginkgo.focus=\[Slow\] --ginkgo.skip=\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]' # pylint: disable=line-too-long
elif arg == '--env-file=jobs/suite/serial.env':
these = r'--ginkgo.focus=\[Serial\]|\[Disruptive\] --ginkgo.skip=\[Flaky\]|\[Feature:.+\]' # pylint: disable=line-too-long
add = False
elif arg == '--env-file=jobs/suite/default.env':
these = r'--ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]' # pylint: disable=line-too-long
if add:
new_args.append(arg)
if not these:
continue
if tests:
problems.append('Duplicate end %s' % job)
stop = True
break
tests = these
if stop:
continue
args = new_args
testing = '--test=false' not in args
if not testing:
if skew:
problems.append('Cannot skew kubectl without tests %s' % job)
if tests:
problems.append('Cannot --test_args when --test=false %s' % job)
continue
if skew:
path = '--kubectl-path=../kubernetes_skew/cluster/kubectl.sh'
if tests:
tests = '%s %s' % (tests, path)
else:
tests = path
if tests:
args.append('--test_args=%s' % tests)
values['args'] = args
with open(test_infra('jobs/%s.env' % job), 'w') as fp:
fp.write('\n'.join(lines))
with open(test_infra('jobs/config.json'), 'w') as fp:
fp.write(json.dumps(configs, sort_keys=True, indent=2, separators=(',', ': ')))
fp.write('\n')
if not problems:
sys.exit(0)
print >>sys.stderr, '%d problems' % len(problems)
print '\n'.join(problems)
if __name__ == '__main__':
sort()
```
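To make the combined regexp in `sort()` easier to follow, here is a small standalone demonstration (not part of the original script, with made-up env values) of how its two capture groups separate `GINKGO_TEST_ARGS` lines from `SKEW_KUBECTL=y` lines:
```python
# Standalone illustration of the env-line regexp used by sort(); the sample
# values below are invented for demonstration.
import re

regexp = re.compile('|'.join([r'^GINKGO_TEST_ARGS=(.*)$|^SKEW_KUBECTL=(y)$']))

print(regexp.search('GINKGO_TEST_ARGS=--ginkgo.focus=Conformance').groups())
# -> ('--ginkgo.focus=Conformance', None): the first group carries the test args
print(regexp.search('SKEW_KUBECTL=y').groups())
# -> (None, 'y'): the second group flags kubectl skew
print(regexp.search('SOME_OTHER_VAR=1'))
# -> None: unrelated lines are kept in the env file untouched
```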
#### File: mungegithub/issue-labeler/simple_app.py
```python
import os
import logging
from logging.handlers import RotatingFileHandler
import numpy as np
from flask import Flask, request
from sklearn.feature_extraction import FeatureHasher
from sklearn.externals import joblib
from sklearn.linear_model import SGDClassifier
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
app = Flask(__name__)
#Parameters
team_fn = './models/trained_teams_model.pkl'
component_fn = './models/trained_components_model.pkl'
logFile = '/tmp/issue-labeler.log'
logSize = 1024*1024*100
numFeatures = 262144
myLoss = 'hinge'
myAlpha = .1
myPenalty = 'l2'
myHasher = FeatureHasher(input_type='string', n_features=numFeatures, non_negative=True)
myStemmer = PorterStemmer()
tokenizer = RegexpTokenizer(r'\w+')
stopwords = []
try:
if not stopwords:
stop_fn = './stopwords.txt'
with open(stop_fn, 'r') as fp:
stopwords = list([word.strip() for word in fp])
except: # pylint:disable=bare-except
#don't remove any stopwords
stopwords = []
@app.errorhandler(500)
def internal_error(exception):
return str(exception), 500
@app.route("/", methods=['POST'])
def get_labels():
"""
The request should contain 2 form-urlencoded parameters
1) title : title of the issue
2) body: body of the issue
It returns a team/<label> and a component/<label>
"""
title = request.form.get('title', '')
body = request.form.get('body', '')
tokens = tokenize_stem_stop(" ".join([title, body]))
team_mod = joblib.load(team_fn)
comp_mod = joblib.load(component_fn)
vec = myHasher.transform([tokens])
tlabel = team_mod.predict(vec)[0]
clabel = comp_mod.predict(vec)[0]
return ",".join([tlabel, clabel])
def tokenize_stem_stop(inputString):
inputString = inputString.encode('utf-8')
curTitleBody = tokenizer.tokenize(inputString.decode('utf-8').lower())
return map(myStemmer.stem, filter(lambda x: x not in stopwords, curTitleBody))
@app.route("/update_models", methods=['PUT'])
def update_model():
"""
data should contain three fields
titles: list of titles
bodies: list of bodies
labels: list of list of labels
"""
data = request.json
titles = data.get('titles')
bodies = data.get('bodies')
labels = data.get('labels')
tTokens = []
cTokens = []
team_labels = []
component_labels = []
for (title, body, label_list) in zip(titles, bodies, labels):
tLabel = filter(lambda x: x.startswith('team'), label_list)
cLabel = filter(lambda x: x.startswith('component'), label_list)
tokens = tokenize_stem_stop(" ".join([title, body]))
if tLabel:
team_labels += tLabel
tTokens += [tokens]
if cLabel:
component_labels += cLabel
cTokens += [tokens]
tVec = myHasher.transform(tTokens)
cVec = myHasher.transform(cTokens)
if team_labels:
if os.path.isfile(team_fn):
team_model = joblib.load(team_fn)
team_model.partial_fit(tVec, np.array(team_labels))
else:
#no team model stored so build a new one
team_model = SGDClassifier(loss=myLoss, penalty=myPenalty, alpha=myAlpha)
team_model.fit(tVec, np.array(team_labels))
if component_labels:
if os.path.isfile(component_fn):
component_model = joblib.load(component_fn)
component_model.partial_fit(cVec, np.array(component_labels))
else:
#no comp model stored so build a new one
component_model = SGDClassifier(loss=myLoss, penalty=myPenalty, alpha=myAlpha)
component_model.fit(cVec, np.array(component_labels))
joblib.dump(team_model, team_fn)
joblib.dump(component_model, component_fn)
return ""
def configure_logger():
FORMAT = '%(asctime)-20s %(levelname)-10s %(message)s'
file_handler = RotatingFileHandler(logFile, maxBytes=logSize, backupCount=3)
formatter = logging.Formatter(FORMAT)
file_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
if __name__ == "__main__":
configure_logger()
app.run(host="0.0.0.0")
``` |
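A hedged client-side sketch of how the labeler service above could be queried once it is running locally. The issue title, body, and the printed labels are invented for illustration, and the `requests` dependency is an assumption (any HTTP client that can send form-urlencoded data would do):
```python
# Client sketch (not part of the service): POST an issue title/body to the
# running labeler and print the predicted "team/...,component/..." string.
import requests  # assumed available

resp = requests.post(
    'http://localhost:5000/',                      # Flask's default host/port
    data={'title': 'kubelet crashes on startup',   # made-up issue title
          'body': 'panic observed in the logs'})   # made-up issue body
print(resp.text)  # e.g. "team/node,component/kubelet" (illustrative output only)
```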
{
"source": "jpbirdy/Detectron",
"score": 2
} |
#### File: detectron/datasets/cityscapes_json_dataset_evaluator.py
```python
import cv2
import logging
import os
import uuid
import pycocotools.mask as mask_util
from detectron.core.config import cfg
from detectron.datasets.dataset_catalog import get_raw_dir
logger = logging.getLogger(__name__)
def evaluate_masks(
json_dataset,
all_boxes,
all_segms,
output_dir,
use_salt=True,
cleanup=False
):
if cfg.CLUSTER.ON_CLUSTER:
# On the cluster avoid saving these files in the job directory
output_dir = '/tmp'
res_file = os.path.join(
output_dir, 'segmentations_' + json_dataset.name + '_results')
if use_salt:
res_file += '_{}'.format(str(uuid.uuid4()))
res_file += '.json'
results_dir = os.path.join(output_dir, 'results')
if not os.path.exists(results_dir):
os.mkdir(results_dir)
os.environ['CITYSCAPES_DATASET'] = get_raw_dir(json_dataset.name)
os.environ['CITYSCAPES_RESULTS'] = output_dir
# Load the Cityscapes eval script *after* setting the required env vars,
# since the script reads their values into global variables (at load time).
import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling \
as cityscapes_eval
roidb = json_dataset.get_roidb()
for i, entry in enumerate(roidb):
im_name = entry['image']
basename = os.path.splitext(os.path.basename(im_name))[0]
txtname = os.path.join(output_dir, basename + 'pred.txt')
with open(txtname, 'w') as fid_txt:
if i % 10 == 0:
logger.info('i: {}: {}'.format(i, basename))
for j in range(1, len(all_segms)):
clss = json_dataset.classes[j]
clss_id = cityscapes_eval.name2label[clss].id
segms = all_segms[j][i]
boxes = all_boxes[j][i]
if segms == []:
continue
masks = mask_util.decode(segms)
for k in range(boxes.shape[0]):
score = boxes[k, -1]
mask = masks[:, :, k]
pngname = os.path.join(
'results',
basename + '_' + clss + '_{}.png'.format(k))
# write txt
fid_txt.write('{} {} {}\n'.format(pngname, clss_id, score))
# save mask
cv2.imwrite(os.path.join(output_dir, pngname), mask * 255)
logger.info('Evaluating...')
cityscapes_eval.main([])
return None
```
#### File: detectron/utils/keypoints.py
```python
import cv2
import numpy as np
from detectron.core.config import cfg
import detectron.utils.blob as blob_utils
def get_keypoints():
"""Get the COCO keypoints and their left/right flip coorespondence map."""
# Keypoints are not available in the COCO json for the test split, so we
# provide them here.
keypoints = [
'nose',
'left_eye',
'right_eye',
'left_ear',
'right_ear',
'left_shoulder',
'right_shoulder',
'left_elbow',
'right_elbow',
'left_wrist',
'right_wrist',
'left_hip',
'right_hip',
'left_knee',
'right_knee',
'left_ankle',
'right_ankle'
]
keypoint_flip_map = {
'left_eye': 'right_eye',
'left_ear': 'right_ear',
'left_shoulder': 'right_shoulder',
'left_elbow': 'right_elbow',
'left_wrist': 'right_wrist',
'left_hip': 'right_hip',
'left_knee': 'right_knee',
'left_ankle': 'right_ankle'
}
return keypoints, keypoint_flip_map
def get_person_class_index():
"""Index of the person class in COCO."""
return 1
def flip_keypoints(keypoints, keypoint_flip_map, keypoint_coords, width):
"""Left/right flip keypoint_coords. keypoints and keypoint_flip_map are
accessible from get_keypoints().
"""
flipped_kps = keypoint_coords.copy()
for lkp, rkp in list(keypoint_flip_map.items()):
lid = keypoints.index(lkp)
rid = keypoints.index(rkp)
flipped_kps[:, :, lid] = keypoint_coords[:, :, rid]
flipped_kps[:, :, rid] = keypoint_coords[:, :, lid]
# Flip x coordinates
flipped_kps[:, 0, :] = width - flipped_kps[:, 0, :] - 1
# Maintain COCO convention that if visibility == 0, then x, y = 0
inds = np.where(flipped_kps[:, 2, :] == 0)
flipped_kps[inds[0], 0, inds[1]] = 0
return flipped_kps
def flip_heatmaps(heatmaps):
"""Flip heatmaps horizontally."""
keypoints, flip_map = get_keypoints()
heatmaps_flipped = heatmaps.copy()
for lkp, rkp in list(flip_map.items()):
lid = keypoints.index(lkp)
rid = keypoints.index(rkp)
heatmaps_flipped[:, rid, :, :] = heatmaps[:, lid, :, :]
heatmaps_flipped[:, lid, :, :] = heatmaps[:, rid, :, :]
heatmaps_flipped = heatmaps_flipped[:, :, :, ::-1]
return heatmaps_flipped
def heatmaps_to_keypoints(maps, rois):
"""Extract predicted keypoint locations from heatmaps. Output has shape
(#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob)
for each keypoint.
"""
# This function converts a discrete image coordinate in a HEATMAP_SIZE x
# HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain
# consistency with keypoints_to_heatmap_labels by using the conversion from
# Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a
# continuous coordinate.
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = rois[:, 2] - rois[:, 0]
heights = rois[:, 3] - rois[:, 1]
widths = np.maximum(widths, 1)
heights = np.maximum(heights, 1)
widths_ceil = np.ceil(widths)
heights_ceil = np.ceil(heights)
# NCHW to NHWC for use with OpenCV
maps = np.transpose(maps, [0, 2, 3, 1])
min_size = cfg.KRCNN.INFERENCE_MIN_SIZE
xy_preds = np.zeros(
(len(rois), 4, cfg.KRCNN.NUM_KEYPOINTS), dtype=np.float32)
for i in range(len(rois)):
if min_size > 0:
roi_map_width = int(np.maximum(widths_ceil[i], min_size))
roi_map_height = int(np.maximum(heights_ceil[i], min_size))
else:
roi_map_width = widths_ceil[i]
roi_map_height = heights_ceil[i]
width_correction = widths[i] / roi_map_width
height_correction = heights[i] / roi_map_height
roi_map = cv2.resize(
maps[i], (roi_map_width, roi_map_height),
interpolation=cv2.INTER_CUBIC)
# Bring back to CHW
roi_map = np.transpose(roi_map, [2, 0, 1])
roi_map_probs = scores_to_probs(roi_map.copy())
w = roi_map.shape[2]
for k in range(cfg.KRCNN.NUM_KEYPOINTS):
pos = roi_map[k, :, :].argmax()
x_int = pos % w
y_int = (pos - x_int) // w
assert (roi_map_probs[k, y_int, x_int] ==
roi_map_probs[k, :, :].max())
x = (x_int + 0.5) * width_correction
y = (y_int + 0.5) * height_correction
xy_preds[i, 0, k] = x + offset_x[i]
xy_preds[i, 1, k] = y + offset_y[i]
xy_preds[i, 2, k] = roi_map[k, y_int, x_int]
xy_preds[i, 3, k] = roi_map_probs[k, y_int, x_int]
return xy_preds
def keypoints_to_heatmap_labels(keypoints, rois):
"""Encode keypoint location in the target heatmap for use in
SoftmaxWithLoss.
"""
# Maps keypoints from the half-open interval [x1, x2) on continuous image
# coordinates to the closed interval [0, HEATMAP_SIZE - 1] on discrete image
# coordinates. We use the continuous <-> discrete conversion from Heckbert
# 1990 ("What is the coordinate of a pixel?"): d = floor(c) and c = d + 0.5,
# where d is a discrete coordinate and c is a continuous coordinate.
assert keypoints.shape[2] == cfg.KRCNN.NUM_KEYPOINTS
shape = (len(rois), cfg.KRCNN.NUM_KEYPOINTS)
heatmaps = blob_utils.zeros(shape)
weights = blob_utils.zeros(shape)
offset_x = rois[:, 0]
offset_y = rois[:, 1]
scale_x = cfg.KRCNN.HEATMAP_SIZE / (rois[:, 2] - rois[:, 0])
scale_y = cfg.KRCNN.HEATMAP_SIZE / (rois[:, 3] - rois[:, 1])
for kp in range(keypoints.shape[2]):
vis = keypoints[:, 2, kp] > 0
x = keypoints[:, 0, kp].astype(np.float32)
y = keypoints[:, 1, kp].astype(np.float32)
# Since we use floor below, if a keypoint is exactly on the roi's right
# or bottom boundary, we shift it in by eps (conceptually) to keep it in
# the ground truth heatmap.
x_boundary_inds = np.where(x == rois[:, 2])[0]
y_boundary_inds = np.where(y == rois[:, 3])[0]
x = (x - offset_x) * scale_x
x = np.floor(x)
if len(x_boundary_inds) > 0:
x[x_boundary_inds] = cfg.KRCNN.HEATMAP_SIZE - 1
y = (y - offset_y) * scale_y
y = np.floor(y)
if len(y_boundary_inds) > 0:
y[y_boundary_inds] = cfg.KRCNN.HEATMAP_SIZE - 1
valid_loc = np.logical_and(
np.logical_and(x >= 0, y >= 0),
np.logical_and(
x < cfg.KRCNN.HEATMAP_SIZE, y < cfg.KRCNN.HEATMAP_SIZE))
valid = np.logical_and(valid_loc, vis)
valid = valid.astype(np.int32)
lin_ind = y * cfg.KRCNN.HEATMAP_SIZE + x
heatmaps[:, kp] = lin_ind * valid
weights[:, kp] = valid
return heatmaps, weights
def scores_to_probs(scores):
"""Transforms CxHxW of scores to probabilities spatially."""
channels = scores.shape[0]
for c in range(channels):
temp = scores[c, :, :]
max_score = temp.max()
temp = np.exp(temp - max_score) / np.sum(np.exp(temp - max_score))
scores[c, :, :] = temp
return scores
def nms_oks(kp_predictions, rois, thresh):
"""Nms based on kp predictions."""
scores = np.mean(kp_predictions[:, 2, :], axis=1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
ovr = compute_oks(
kp_predictions[i], rois[i], kp_predictions[order[1:]],
rois[order[1:]])
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
def compute_oks(src_keypoints, src_roi, dst_keypoints, dst_roi):
"""Compute OKS for predicted keypoints wrt gt_keypoints.
src_keypoints: 4xK
src_roi: 4x1
dst_keypoints: Nx4xK
dst_roi: Nx4
"""
sigmas = np.array([
.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87,
.87, .89, .89]) / 10.0
vars = (sigmas * 2)**2
# area
src_area = (src_roi[2] - src_roi[0] + 1) * (src_roi[3] - src_roi[1] + 1)
# measure the per-keypoint distance if keypoints visible
dx = dst_keypoints[:, 0, :] - src_keypoints[0, :]
dy = dst_keypoints[:, 1, :] - src_keypoints[1, :]
e = (dx**2 + dy**2) / vars / (src_area + np.spacing(1)) / 2
e = np.sum(np.exp(-e), axis=1) / e.shape[1]
return e
``` |
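As a quick, self-contained illustration of the per-channel spatial softmax performed by `scores_to_probs` above (independent of the Detectron config; the 56x56 heatmap size below is only a typical value):
```python
# Standalone check (not part of Detectron): scores_to_probs applies a softmax
# over the H*W locations of each channel, so every channel sums to 1.
import numpy as np

def spatial_softmax(scores):
    """Same per-channel logic as scores_to_probs above, applied to a CxHxW array."""
    for c in range(scores.shape[0]):
        temp = scores[c, :, :]
        scores[c, :, :] = np.exp(temp - temp.max()) / np.sum(np.exp(temp - temp.max()))
    return scores

probs = spatial_softmax(np.random.randn(17, 56, 56))  # 17 COCO keypoints
print(np.allclose(probs.sum(axis=(1, 2)), 1.0))       # -> True
```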
{
"source": "jpbitt/Scripts-Python",
"score": 4
} |
#### File: Scripts-Python/Python/case_pedrasEpotes.py
```python
import random as rd
import math as mt
import numpy as np
tabela = [] # table where the generated distributions are stored
#---------------------------
# READS THE INPUT VALUES FROM THE USER
#---------------------------
def recebe_valores():
print("Joãozinho deseja distruibuir um numero N de pedras em um numero M de potes")
pedra = int(input("Forneça o número de PEDRAS: "))
pote = int(input("Forneça o número de POTES: "))
print("------------------------------")
print("O número de {} pedras e {} potes".format(pedra,pote))
return pedra,pote
#---------------------------
# VALIDATES THE USER INPUT; IT MUST SATISFY THE RULE "THE NUMBER OF STONES MUST BE GREATER THAN THE NUMBER OF POTS"
#---------------------------
def valida_dados(pedra,pote):
while True:
print("------------------------------")
print("VALIDANDO OS NUMEROS INSERIDOS")
print("------------------------------")
if pote <= 0:
print("ERRO DE VALIDAÇÃO")
print("O NUMERO DE POTES DEVE SER MAIOR QUE 0")
pote = int(input("Por favor, forneça o número de POTES correto: "))
elif pedra <= 1:
print("ERRO DE VALIDAÇÃO")
print("O NUMERO DE PEDRAS DEVE SER MAIOR QUE 1")
pedra = int(input("Por favor, forneça o número de PEDRAS correto: "))
elif pote >= pedra:
print("ERRO DE VALIDAÇÃO")
print("O NUMERO DE POTES DEVE SER MENOR QUE O NUMERO DE PEDRAS")
pote = int(input("Por favor, forneça o número de POTES correto: "))
else:
print("VALIDAÇÃO CONCLUIDA COM SUCESSO")
print("------------------------------")
print("Joãozinho tem {} pedras e {} potes".format(pedra,pote))
print("------------------------------")
break
return pedra,pote
#---------------------------
# DISTRIBUTES THE STONES AMONG THE POTS
#---------------------------
def gera_tabela(tabela,pedra,pote):
lista = list(range(1,pedra-pote+2))
for i in range(mt.factorial(pedra)):
        linha = [] # create a new row
        for j in range(pote):
            valor = rd.choice(lista) # draw a random value for this slot of the row
linha.append(valor)
tabela.append(linha)
return tabela,pedra,pote
#---------------------------
# MAIN FUNCTION
#---------------------------
def main():
for w in range(1):
pedra,pote = recebe_valores()
pedra,pote = valida_dados(pedra,pote)
gera_tabela(tabela,pedra,pote)
        tabela_organizada = [] # table that stores the valid (filtered) distributions
        for k in range(len(tabela)):
            if sum(tabela[k]) == pedra: # keep only rows whose sum equals the number of stones
tabela_organizada.append(tabela[k])
a = np.array(tabela_organizada)
print("TABELA COM AS MANEIRAS QUE JOÃOZINHO PODE ORGANIZAR SUAS PEDRAS:")
print(np.unique(a, axis=0))
print("Quantidade de maneiras possiveis: {}".format(len(np.unique(a, axis=0))))
if __name__ == "__main__":
main()
``` |
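The brute-force sampling above enumerates the ways to split `pedra` stones over `pote` pots with at least one stone per pot (each pot draws a value from 1 to pedra-pote+1, and only rows summing to `pedra` are kept). The count it converges to can be cross-checked in closed form with the standard stars-and-bars result C(pedra-1, pote-1); a small independent sketch:
```python
# Closed-form cross-check (not part of the original script): the number of ways
# to place N stones into M distinct pots, each holding at least one stone,
# is the binomial coefficient C(N-1, M-1).
from math import comb  # Python 3.8+

def ways(pedra, pote):
    return comb(pedra - 1, pote - 1)

print(ways(5, 3))  # -> 6: (3,1,1), (1,3,1), (1,1,3), (2,2,1), (2,1,2), (1,2,2)
```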
{
"source": "jpbm/probabilism",
"score": 3
} |
#### File: probabilism/src/resourcelims.py
```python
import resource
import platform
import sys
def memory_limit(percentage: float):
"""
    Only works on the Linux operating system.
"""
if platform.system() != "Linux":
print('Only works on linux!')
return
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
    resource.setrlimit(resource.RLIMIT_AS, (int(get_memory() * 1024 * percentage), hard))  # setrlimit expects integer limits
def get_memory():
with open('/proc/meminfo', 'r') as mem:
free_memory = 0
for i in mem:
sline = i.split()
if str(sline[0]) in ('MemFree:', 'Buffers:', 'Cached:'):
free_memory += int(sline[1])
return free_memory
def memory(percentage=0.8):
def decorator(function):
def wrapper(*args, **kwargs):
memory_limit(percentage)
try:
function(*args, **kwargs)
except MemoryError:
mem = get_memory() / 1024 /1024
print('Remain: %.2f GB' % mem)
sys.stderr.write('\n\nERROR: Memory Exception\n')
sys.exit(1)
return wrapper
return decorator
@memory(percentage=0.7)
def main():
print('Memory usage is limited to 70%.')
``` |
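A usage sketch of the `@memory` decorator above (not part of the module; it assumes it lives in the same file as the decorator, the allocation size is arbitrary and only meant to trip the limit, and on non-Linux systems `memory_limit` is a no-op):
```python
# Usage sketch: cap a memory-hungry function at 50% of the currently free memory.
@memory(percentage=0.5)
def allocate_a_lot():
    data = bytearray(64 * 1024 * 1024 * 1024)  # try to grab ~64 GB (arbitrary size)
    return len(data)

if __name__ == '__main__':
    allocate_a_lot()  # on Linux, prints the remaining memory and exits if the limit is hit
```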
{
"source": "jpbonson/APIAuthenticationCourse",
"score": 3
} |
#### File: jpbonson/APIAuthenticationCourse/main.py
```python
from app.basic_auth import requires_basic_auth
from app.simple_token_auth import generate_simple_token, requires_simple_token_auth
from app.jwt_token_auth import generate_jwt_token, requires_jwt_token_auth
from flask import Flask
app = Flask(__name__)
@app.route('/basic_access', methods=['GET'])
@requires_basic_auth
def secret_page_basic():
return "Accessed using Basic authentication!\n"
@app.route('/simple_token', methods=['POST'])
def get_simple_token():
return generate_simple_token()
@app.route('/simple_token_access', methods=['GET'])
@requires_simple_token_auth
def secret_page_simple_token():
return "Accessed using simple token authentication!\n"
@app.route('/token', methods=['POST'])
def get_jwt_token():
return generate_jwt_token()
@app.route('/token_access', methods=['GET'])
@requires_jwt_token_auth
def secret_page_jwt_token():
return "Accessed using OAuth + JWT token authentication!\n"
@app.route("/", methods=['GET'])
def hello():
return "Welcome to the public route!\n"
``` |
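A client-side sketch for the routes above, using `requests` (an assumption; any HTTP client works). The username/password pair is a placeholder — the accepted credentials are defined inside `app/basic_auth.py`, which is not shown here:
```python
# Client sketch (not part of the app): hit the public route and the Basic-auth route.
import requests  # assumed available

BASE = 'http://localhost:5000'  # Flask's default host/port

print(requests.get(BASE + '/').text)  # -> "Welcome to the public route!"

resp = requests.get(BASE + '/basic_access', auth=('user', 'password'))  # placeholder credentials
print(resp.status_code, resp.text)    # 200 when the credentials are accepted by requires_basic_auth
```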
{
"source": "jpbonson/SBBReinforcementLearning",
"score": 3
} |
#### File: SBB/core/pareto_dominance_for_teams.py
```python
from diversity_maintenance import DiversityMaintenance
from ..utils.helpers import is_nearly_equal_to
from ..config import Config
class ParetoDominanceForTeams():
"""
Pareto dominance: Given a set of objectives, a solution is said to Pareto dominate another if the
first is not inferior to the second in all objectives, and, additionally, there is at least one
objective where it is better.
    This code is based on Stephen's version of the C++ SBB, which focused on using Pareto dominance
    for the multi-objective trade-off between fitness and novelty for the teams.
"""
@staticmethod
def run(teams_population, novelty, teams_to_keep):
front, dominateds = ParetoDominanceForTeams._pareto_front(teams_population, novelty)
pareto_front = list(front)
keep_solutions = list(front)
remove_solutions = list(dominateds)
if len(keep_solutions) < teams_to_keep: # must include some teams from dominateds
keep_solutions, remove_solutions = ParetoDominanceForTeams._balance_pareto_front_to_up(dominateds, keep_solutions, remove_solutions, teams_to_keep)
if len(keep_solutions) > teams_to_keep: # must discard some teams from front
keep_solutions, remove_solutions = ParetoDominanceForTeams._balance_pareto_front_to_down(front, keep_solutions, remove_solutions, teams_to_keep)
return keep_solutions, remove_solutions, pareto_front
@staticmethod
def _pareto_front(teams_population, novelty):
"""
Finds the pareto front, i.e. the pareto dominant solutions.
"""
for team in teams_population:
team.dom_by_ = 0
team.dom_of_ = 0
front = []
dominateds = []
for teamA in teams_population:
for teamB in teams_population:
# check if there are teams that have a better or equal [fitness, novelty] and that are better in at least
# one of the dimensions. If yes, then teamA is dominated by these teams.
if ParetoDominanceForTeams._is_dominated(teamA, teamB, novelty):
teamA.dom_by_ += 1
teamB.dom_of_ += 1
if teamA not in dominateds:
dominateds.append(teamA)
if teamA.dom_by_ == 0:
front.append(teamA)
# use this score to balance the teams between remove and keep
for team in teams_population:
team.submission_score_ = team.dom_by_/float(len(teams_population)) # use it to add teams to the front (the lower, the better)
team.dominance_score_ = team.dom_of_/float(len(teams_population)) # use it to remove teams from the front (the higher, the better)
return front, dominateds
@staticmethod
def _is_dominated(teamA, teamB, novelty):
"""
        Check if a solution is dominated by or equal to another, assuming that higher results are better than lower ones.
"""
if (teamB.fitness_ >= teamA.fitness_ and teamB.diversity_[novelty] >= teamA.diversity_[novelty] and
((teamB.fitness_ > teamA.fitness_ and not is_nearly_equal_to(teamA.fitness_, teamB.fitness_)) or
(teamB.diversity_[novelty] > teamA.diversity_[novelty] and
not is_nearly_equal_to(teamA.diversity_[novelty], teamB.diversity_[novelty])))):
return True
return False
@staticmethod
def _balance_pareto_front_to_up(dominateds, keep_solutions, remove_solutions, teams_to_keep):
available = [team for team in dominateds if team.fitness_ > 0.0]
if len(available) < teams_to_keep:
not_available = [team for team in dominateds if team.fitness_ == 0.0]
available += not_available[:teams_to_keep-len(available)]
sorted_solutions = sorted(available, key=lambda solution: solution.submission_score_, reverse = False) # worse ones first
for solution in sorted_solutions:
if solution not in keep_solutions:
keep_solutions.append(solution)
remove_solutions.remove(solution)
if len(keep_solutions) == teams_to_keep:
break
return keep_solutions, remove_solutions
@staticmethod
def _balance_pareto_front_to_down(front, keep_solutions, remove_solutions, teams_to_keep):
sorted_solutions = sorted(front, key=lambda solution: solution.dominance_score_, reverse = True) # better ones first
for solution in sorted_solutions:
keep_solutions.remove(solution)
remove_solutions.append(solution)
if len(keep_solutions) == teams_to_keep:
break
return keep_solutions, remove_solutions
```
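To make the dominance test above concrete, here is a self-contained sketch that computes a Pareto front over plain (fitness, novelty) pairs. It mirrors the comparison in `_is_dominated` but works on tuples instead of team objects and omits the `is_nearly_equal_to` tolerance:
```python
# Standalone sketch (not the SBB classes): Pareto front over (fitness, novelty)
# pairs, where higher is better in both objectives.
def dominates(b, a):
    """True if b is at least as good as a in both objectives and strictly better in one."""
    return b[0] >= a[0] and b[1] >= a[1] and (b[0] > a[0] or b[1] > a[1])

def pareto_front(solutions):
    return [a for a in solutions
            if not any(dominates(b, a) for b in solutions if b is not a)]

points = [(0.9, 0.1), (0.5, 0.5), (0.1, 0.9), (0.4, 0.4), (0.2, 0.2)]
print(pareto_front(points))  # -> [(0.9, 0.1), (0.5, 0.5), (0.1, 0.9)]
```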
#### File: SBB/core/team.py
```python
import random
import numpy
import copy
import json
from collections import Counter, defaultdict
from program import Program
from ..environments.reinforcement.default_opponent import DefaultOpponent
from ..utils.helpers import round_value, round_array, flatten
from ..config import Config
def reset_teams_ids():
global next_team_id
next_team_id = 0
def get_team_id():
global next_team_id
next_team_id += 1
return next_team_id
class Team(DefaultOpponent):
OPPONENT_ID = "sbb"
def __init__(self, generation, programs, environment, team_id = None):
if team_id is None:
self.team_id_ = get_team_id()
else:
self.team_id_ = team_id
self.generation = generation
super(Team, self).__init__(self.__repr__())
self.programs = []
for program in programs:
self._add_program(program)
self.environment = environment
self.fitness_ = -1
self.score_validation_ = -1
self.score_champion_ = -1
self.extra_metrics_ = {}
self.active_programs_ = [] # only for training, used for genotype diversity
self.validation_active_programs_ = [] # for training and validation
self.memory_actions_per_points_ = {}
self.results_per_points_ = {}
self.results_per_points_for_validation_ = {}
self.diversity_ = {}
self.encodings_ = {} # only used by reinforcement learning
self.last_selected_program_ = None
def _add_program(self, program):
self.programs.append(program)
program.add_team(self)
def initialize(self, seed):
"""
This method is called by the reinforcement learning environments to set
the opponent configurations before a match. This class implements this
method only because it inherits DefaultOpponent.
"""
pass
def reset_registers(self):
for program in self.programs:
program.reset_registers()
def execute(self, point_id, inputs, valid_actions, is_training, update_profile = True, force_reset = False):
if not self._actions_are_available(valid_actions):
return None
        # if there is at least one program that can produce a valid action, execute the programs
if is_training:
# run the programs
if Config.RESTRICTIONS['use_memmory_for_actions'] and point_id in self.memory_actions_per_points_:
return self.memory_actions_per_points_[point_id]
else:
selected_program = self._select_program(inputs, valid_actions, force_reset)
output_class = selected_program.get_action_result(point_id, inputs, valid_actions, is_training)
if Config.RESTRICTIONS['use_memmory_for_actions']:
self.memory_actions_per_points_[point_id] = output_class
if selected_program not in self.active_programs_:
self.active_programs_.append(selected_program)
return output_class
        else: # just run the code without changing the attributes or using memory
selected_program = self._select_program(inputs, valid_actions, force_reset)
self.last_selected_program_ = selected_program.program_id_
if selected_program not in self.validation_active_programs_:
self.validation_active_programs_.append(selected_program)
return selected_program.get_action_result(point_id, inputs, valid_actions, is_training)
def _actions_are_available(self, valid_actions):
"""
        Test if there is at least one program in the team that is able to provide a valid action.
        If there is no such program, the caller returns None, so that the environment will use a default action.
"""
actions = flatten([p.get_raw_actions() for p in self.programs])
possible_action = set(actions).intersection(valid_actions)
if len(possible_action) == 0:
return False
return True
def _select_program(self, inputs, valid_actions, force_reset):
"""
Generates the outputs for all programs and order them. The team checks if the first
action is valid before submitting it to the environment. If it is not valid, then
the second best action will be tried, and so on until a valid action is obtained.
"""
partial_outputs = []
valid_programs = []
for program in self.programs:
actions = program.get_raw_actions()
possible_action = set(actions).intersection(valid_actions)
if len(possible_action) > 0:
partial_outputs.append(program.execute(inputs, force_reset))
valid_programs.append(program)
selected_program = valid_programs[partial_outputs.index(max(partial_outputs))]
return selected_program
def mutate(self, programs_population):
"""
Generates mutation chances and mutate the team if it is a valid mutation.
"""
if Config.USER['advanced_training_parameters']['use_agressive_mutations']:
mutation_chance = 1
while (mutation_chance > random.random()
and len(self.programs) > Config.USER['training_parameters']['team_size']['min']):
self._randomly_remove_program()
mutation_chance = mutation_chance * Config.USER['training_parameters']['mutation']['team']['remove_program']
mutation_chance = 1
while (mutation_chance > random.random()
and len(self.programs) < Config.USER['training_parameters']['team_size']['max']):
self._randomly_add_program(programs_population)
mutation_chance = mutation_chance * Config.USER['training_parameters']['mutation']['team']['add_program']
else:
if len(self.programs) > Config.USER['training_parameters']['team_size']['min']:
mutation_chance = random.random()
if mutation_chance <= Config.USER['training_parameters']['mutation']['team']['remove_program']:
self._randomly_remove_program()
if len(self.programs) < Config.USER['training_parameters']['team_size']['max']:
mutation_chance = random.random()
if mutation_chance <= Config.USER['training_parameters']['mutation']['team']['add_program']:
self._randomly_add_program(programs_population)
to_mutate = []
while len(to_mutate) == 0:
for program in self.programs:
mutation_chance = random.random()
if mutation_chance <= Config.USER['training_parameters']['mutation']['team']['mutate_program']:
to_mutate.append(program)
for program in to_mutate:
clone = Program(self.generation, copy.deepcopy(program.instructions), program.action)
clone.mutate()
self._add_program(clone)
programs_population.append(clone)
if self._is_ok_to_remove(program):
self.remove_program(program)
return programs_population
def _randomly_remove_program(self):
"""
Remove a program from the team. A program can be removed only if removing it will
maintain ['team_size']['min'] distinct actions in the team.
"""
while True:
candidate_to_remove = random.choice(self.programs)
if self._is_ok_to_remove(candidate_to_remove):
self.remove_program(candidate_to_remove)
return
def _is_ok_to_remove(self, program_to_remove):
actions = [p.action for p in self.programs]
actions.remove(program_to_remove.action)
if len(set(actions)) >= Config.USER['training_parameters']['team_size']['min']:
return True
return False
def _randomly_add_program(self, programs_population):
candidate_program = random.choice(programs_population)
if candidate_program not in self.programs:
self._add_program(candidate_program)
def remove_program(self, program):
program.remove_team(self)
self.programs.remove(program)
if program in self.active_programs_:
self.active_programs_.remove(program)
if program in self.validation_active_programs_:
self.validation_active_programs_.remove(program)
def remove_references(self):
"""
Remove all references from this object to other objects, so it can be safely deleted.
"""
for p in self.programs:
p.remove_team(self)
def prune_partial(self):
inactive_programs = list(set(self.programs) - set(self.active_programs_))
while len(inactive_programs) > 0:
candidate_to_remove = random.choice(inactive_programs)
if self._is_ok_to_remove(candidate_to_remove):
self.remove_program(candidate_to_remove)
return
else:
inactive_programs.remove(candidate_to_remove)
def prune_total(self):
inactive_programs = list(set(self.programs) - set(self.active_programs_))
for program in inactive_programs:
self.remove_program(program)
def quick_metrics(self):
validation_active_teams_members_ids = [p.__repr__() for p in self.validation_active_programs_]
training_active_teams_members_ids = [p.__repr__() for p in self.active_programs_]
msg = self.__repr__()
all_programs_training_info = ["A" if p in self.active_programs_ else "I" for p in self.programs]
all_programs_validation_info = ["A" if p in self.validation_active_programs_ else "I" for p in self.programs]
all_programs_info = []
for p,t,v in zip(self.programs, all_programs_training_info, all_programs_validation_info):
all_programs_info.append(p.__repr__()+"-"+t+v)
msg += "\n\nteam members ("+str(len(self.programs))+"): "+str(all_programs_info)
msg += "\n(Obs.: (0:0, 0)-AI means that this program was Active in training and Inactive in validation)"
if Config.USER['task'] == 'classification':
msg += ("\n\nfitness: "+str(round_value(self.fitness_))+", "
"champion score: "+str(round_value(self.score_champion_)))
else:
msg += ("\n\nfitness: "+str(round_value(self.fitness_))+", "
"validation score: "+str(round_value(self.score_validation_))+", "
"champion score: "+str(round_value(self.score_champion_)))
msg += "\n\ninputs distribution: "+str(self.inputs_distribution())
msg += "\n"
msg += self.environment.metrics_.metrics_for_team(self)
return msg
def inputs_distribution(self):
inputs = []
if len(self.active_programs_) > 0:
for program in self.active_programs_:
if len(program.inputs_list_) > 0:
inputs += program.inputs_list_
else:
for program in self.programs:
if len(program.inputs_list_) > 0:
inputs += program.inputs_list_
inputs_dist = Counter(inputs)
return inputs_dist
def dict(self):
info = {}
info['team_id'] = self.team_id_
info['generation'] = self.generation
if not Config.USER['advanced_training_parameters']['second_layer']['enabled']:
info['programs_type'] = 'atomic'
else:
info['programs_type'] = 'meta'
programs_json = []
for program in self.programs:
programs_json.append(program.dict())
info['programs'] = programs_json
return info
def json(self):
return json.dumps(self.dict())
def __repr__(self):
return "("+str(self.team_id_)+"-"+str(self.generation)+")"
def __str__(self):
text = "TEAM "+self.__repr__()
text += "\n\n\n######## METRICS\n"
text += self.quick_metrics()
text += "\n\n\n######## PROGRAMS (ACTIVE)"
for p in self.active_programs_:
text += "\n"+str(p)
text += "\n\n\n######## PROGRAMS (INACTIVE)"
inactive_programs = list(set(self.programs) - set(self.active_programs_))
if inactive_programs:
for p in inactive_programs:
text += "\n"+str(p)
else:
text += "\n[No inactive programs]"
return text
```
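The bid-based arbitration in `_select_program` above — every program whose action set intersects the valid actions produces an output, and the highest output wins — can be sketched independently of the SBB classes. The bid functions below are toy stand-ins for evolved programs, and the action names only echo the poker environment for flavor:
```python
# Toy sketch (not the actual Program/Team classes) of bid-based action selection.
def select_action(programs, inputs, valid_actions):
    """programs: list of (action, bid_function); the highest bid among valid actions wins."""
    candidates = [(bid(inputs), action) for action, bid in programs if action in valid_actions]
    if not candidates:
        return None  # no program can act, so the environment falls back to a default action
    return max(candidates)[1]

programs = [
    ('fold',  lambda obs: 0.2),
    ('call',  lambda obs: obs['pot']),  # bids higher when the pot is larger
    ('raise', lambda obs: 0.5),
]
print(select_action(programs, {'pot': 0.9}, {'call', 'raise'}))  # -> 'call'
```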
#### File: environments/classification/classification_environment.py
```python
import random
import numpy
from collections import Counter
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score
from classification_point import ClassificationPoint
from classification_metrics import ClassificationMetrics
from ..default_environment import DefaultEnvironment
from ..default_point import reset_points_ids
from ...utils.helpers import round_array, flatten
from ...config import Config
class ClassificationEnvironment(DefaultEnvironment):
"""
This environment encapsulates all methods to deal with a classification task.
"""
def __init__(self):
reset_points_ids()
self.point_population_ = None
train, test = self._initialize_datasets()
self.train_population_ = self._dataset_to_points(train)
self.test_population_ = self._dataset_to_points(test)
self.trainset_class_distribution_ = Counter([p.output for p in self.train_population_])
self.testset_class_distribution_ = Counter([p.output for p in self.test_population_])
self.total_actions_ = len(self.testset_class_distribution_)
self.total_inputs_ = len(self.train_population_[0].inputs)
self.trainset_per_action_ = self._get_data_per_action(self.train_population_)
Config.RESTRICTIONS['total_actions'] = self.total_actions_
Config.RESTRICTIONS['total_raw_actions'] = self.total_actions_
Config.RESTRICTIONS['total_inputs'] = self.total_inputs_
Config.RESTRICTIONS['use_memmory_for_actions'] = True # since for the same input, the output label is always the same
# ensures that the point population will be balanced:
total_samples_per_criteria = (Config.USER['training_parameters']['populations']['points']
/self.total_actions_)
Config.USER['training_parameters']['populations']['points'] = (total_samples_per_criteria
*self.total_actions_)
self.metrics_ = ClassificationMetrics(self)
def _initialize_datasets(self):
"""
        Read from file and normalize the train and test sets.
"""
dataset_filename = Config.USER['classification_parameters']['dataset']
print("\nReading inputs from data: "+dataset_filename)
train = self._read_space_separated_file(Config.USER['classification_parameters']['working_path']
+dataset_filename+".train")
test = self._read_space_separated_file(Config.USER['classification_parameters']['working_path']
+dataset_filename+".test")
normalization_params = self._get_normalization_params(train, test)
train = self._normalize(normalization_params, train)
test = self._normalize(normalization_params, test)
return train, test
def _read_space_separated_file(self, file_path):
"""
Read files separated by space (example: 0.015 0.12 0.082 0.146 3)
"""
with open(file_path) as f:
content = f.readlines()
content = [x.strip('\n').strip() for x in content]
content = [x.split(' ') for x in content]
X = [x[:-1] for x in content]
Y = [x[-1:] for x in content]
self.action_mapping_ = self._create_action_mapping(Y)
Y = self._apply_action_mapping(Y)
content = numpy.append(X, Y, axis = 1)
content = [[float(y) for y in x]for x in content]
return content
def _create_action_mapping(self, Y):
action_mapping_ = {}
labels = sorted(set(flatten(Y)))
for i, label in enumerate(labels):
action_mapping_[label] = i
return action_mapping_
def _apply_action_mapping(self, Y):
return [[self.action_mapping_[y] for y in x]for x in Y]
def _get_normalization_params(self, train, test):
"""
        Get the min and range for each column from the total dataset (train+test), excluding the labels column.
"""
normalization_params = []
data = numpy.append(train, test, axis = 0)
attributes_len = len(data[0])
for index in range(attributes_len-1): # dont get normalization parameters for the labels column
column = data[:,index]
normalization_params.append({'min':min(column), 'range':max(column)-min(column)})
return normalization_params
def _normalize(self, normalization_params, data):
"""
Normalize all columns, except the labels, using the normalization parameters.
"""
normalized_data = []
for line in data:
new_line = []
for i, cell in enumerate(line):
if not i == len(line)-1: # dont normalize the labels column
if normalization_params[i]['range'] == 0.0:
cell = 0.0
else:
cell = ((cell-normalization_params[i]['min'])
/float(normalization_params[i]['range'])
*Config.RESTRICTIONS['multiply_normalization_by'])
new_line.append(cell)
normalized_data.append(new_line)
return normalized_data
def _dataset_to_points(self, data):
"""
Use dataset to create point population.
"""
population = []
for index, item in enumerate(data):
population.append(ClassificationPoint(numpy.array(item[:-1]), item[-1]))
return population
def _get_data_per_action(self, point_population):
subsets_per_class = []
for class_index in range(self.total_actions_):
values = [point for point in point_population if point.output == class_index]
subsets_per_class.append(values)
return subsets_per_class
def reset(self):
self.point_population_ = None
def setup(self, teams_population):
"""
        Get a sample of the training dataset to create the point population. If it is the first generation
        of the run, it just gets random samples for each action of the dataset. For the next generations, it
        replaces some of the points in the sample with new points.
"""
total_samples_per_class = Config.USER['training_parameters']['populations']['points']/self.total_actions_
if not self.point_population_: # first sampling of the run
# get random samples per class
samples_per_class = []
for subset in self.trainset_per_action_:
samples_per_class.append(self._sample_subset(subset, total_samples_per_class))
else: # uses attributes defined in evaluate_point_population()
self._remove_points(flatten(self.samples_per_class_to_remove_), teams_population)
samples_per_class = self.samples_per_class_to_keep_
# ensure that the sampling is balanced for all classes, using oversampling for the ones with less than the minimum samples
for sample in samples_per_class:
while len(sample) < total_samples_per_class:
sample += self._sample_subset(sample, total_samples_per_class-len(sample))
sample = flatten(samples_per_class) # join samples per class
random.shuffle(sample)
self.point_population_ = sample
self._check_for_bugs()
def _sample_subset(self, subset, sample_size):
if len(subset) <= sample_size:
sample = subset
else:
sample = random.sample(subset, sample_size)
return sample
def _remove_points(self, points_to_remove, teams_population):
"""
Remove the points to remove from the teams, in order to save memory.
"""
for team in teams_population:
for point in points_to_remove:
if point.point_id_ in team.results_per_points_:
team.results_per_points_.pop(point.point_id_)
if point.point_id_ in team.memory_actions_per_points_:
team.memory_actions_per_points_.pop(point.point_id_)
def _check_for_bugs(self):
if len(self.point_population_) != Config.USER['training_parameters']['populations']['points']:
raise ValueError("The size of the points population changed during selection! "
"You got a bug! (it is: "+str(len(self.point_population_))+", "
"should be: "+str(Config.USER['training_parameters']['populations']['points'])+")")
def evaluate_point_population(self, teams_population):
current_subsets_per_class = self._get_data_per_action(self.point_population_)
total_samples_per_class = Config.USER['training_parameters']['populations']['points']/self.total_actions_
samples_per_class_to_keep = int(round(total_samples_per_class
*(1.0-Config.USER['training_parameters']['replacement_rate']['points'])))
kept_subsets_per_class = []
removed_subsets_per_class = []
# obtain the data points that will be kept and that will be removed for each subset using uniform probability
total_samples_per_class_to_add = total_samples_per_class - samples_per_class_to_keep
for i, subset in enumerate(current_subsets_per_class):
kept_subsets = random.sample(subset, samples_per_class_to_keep) # get points that will be kept
kept_subsets += self._sample_subset(self.trainset_per_action_[i], total_samples_per_class_to_add) # add new points
kept_subsets_per_class.append(kept_subsets)
            removed_subsets_per_class.append(list(set(subset) - set(kept_subsets))) # find the removed points
self.samples_per_class_to_keep_ = kept_subsets_per_class
self.samples_per_class_to_remove_ = removed_subsets_per_class
def evaluate_teams_population_for_training(self, teams_population):
for team in teams_population:
self.evaluate_team(team, Config.RESTRICTIONS['mode']['training'])
def evaluate_team(self, team, mode):
"""
Evaluate the team using the environment inputs.
"""
if mode == Config.RESTRICTIONS['mode']['training']:
population = self.point_population_
is_training = True
else:
population = self.test_population_
is_training = False
outputs = []
for point in population:
output = team.execute(point.point_id_, point.inputs, range(Config.RESTRICTIONS['total_raw_actions']),
is_training)
outputs.append(output)
if is_training:
if output == point.output:
result = 1 # correct
else:
result = 0 # incorrect
team.results_per_points_[point.point_id_] = result
Y = [p.output for p in population]
score, extra_metrics = self._calculate_team_metrics(outputs, Y, is_training)
if is_training:
team.fitness_ = score
else:
team.score_champion_ = score
team.extra_metrics_ = extra_metrics
def _calculate_team_metrics(self, predicted_outputs, desired_outputs, is_training = False):
recall = recall_score(desired_outputs, predicted_outputs, average = None)
macro_recall = numpy.mean(recall)
extra_metrics = {}
if not is_training: # to avoid wasting time processing metrics when they are not necessary
extra_metrics['recall_per_action'] = round_array(recall)
extra_metrics['accuracy'] = accuracy_score(desired_outputs, predicted_outputs)
extra_metrics['confusion_matrix'] = confusion_matrix(desired_outputs, predicted_outputs)
return macro_recall, extra_metrics
def validate(self, current_generation, teams_population):
fitness = [p.fitness_ for p in teams_population]
best_team = teams_population[fitness.index(max(fitness))]
self.evaluate_team(best_team, Config.RESTRICTIONS['mode']['champion'])
return best_team
```
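The `_get_normalization_params` / `_normalize` pair above performs a per-column min-max rescaling, with the label kept in the last column. A stripped-down sketch with a toy dataset, assuming a normalization multiplier of 1.0:
```python
# Standalone sketch (not the SBB environment) of the min-max normalization used above.
import numpy as np

def min_max_params(data):
    params = []
    for col in data[:, :-1].T:  # skip the label column
        params.append({'min': col.min(), 'range': col.max() - col.min()})
    return params

def normalize(params, data):
    out = data.astype(float).copy()
    for i, p in enumerate(params):
        out[:, i] = 0.0 if p['range'] == 0 else (out[:, i] - p['min']) / p['range']
    return out

toy = np.array([[1.0, 10.0, 0.0], [3.0, 10.0, 1.0], [5.0, 20.0, 0.0]])  # two features + label
print(normalize(min_max_params(toy), toy))  # feature columns land in [0, 1]; label column untouched
```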
#### File: reinforcement/poker/poker_match.py
```python
import os
from match_state import MatchState
from poker_config import PokerConfig
from opponent_model import OpponentModel
from ....core.diversity_maintenance import DiversityMaintenance
from ....utils.helpers import round_value
from ....config import Config
class PokerMatch():
def __init__(self, team, opponent, point, mode, match_id):
self.team = team
self.opponent = opponent
self.point = point
self.mode = mode
if mode == Config.RESTRICTIONS['mode']['training']:
self.is_training = True
else:
self.is_training = False
self.match_id = match_id
self.opponent_indeces = {
0: 1,
1: 0,
}
self.players_info = {
0: {
'player': None,
'match_state': None,
'key': None,
'chips': 0.0,
'folded': False,
},
1: { # dealer/button
'player': None,
'match_state': None,
'key': None,
'chips': 0.0,
'folded': False,
}
}
self.pot = 0.0
self._setup_debug_files()
def _setup_debug_files(self):
if Config.USER['debug']['output_path'] is None:
Config.USER['debug']['output_path'] = 'SBB/environments/poker/logs/'
self.debug_file = None
if Config.USER['debug']['enabled']:
path = Config.USER['debug']['output_path']+'matches_output/'
if not os.path.exists(path):
os.makedirs(path)
filename = self.mode+"_"+str(self.match_id)+"_"+str(self.team.__repr__())
self.debug_file = open(path+filename+'.log','w')
def run(self):
### Setup helpers
if not self.is_training:
self.team.extra_metrics_['played_last_hand'] = True
self.team.encodings_['encoding_custom_info_per_match'].append(str(self.point.seed_))
self.team.encodings_['encoding_custom_info_per_match'].append(str(self.point.players['team']['position']))
self.opponent.initialize(self.point.seed_)
### Setup match
if self.point.players['team']['position'] == 0:
self.players_info[0]['player'] = self.team
self.players_info[0]['match_state'] = MatchState(self.point, player_key = 'team')
self.players_info[0]['id'] = self.team.__repr__()
self.players_info[0]['key'] = 'team'
self.players_info[1]['player'] = self.opponent
self.players_info[1]['match_state'] = MatchState(self.point, player_key = 'opponent')
self.players_info[1]['id'] = self.opponent.opponent_id
self.players_info[1]['key'] = 'opponent'
sbb_position = 0
opponent_position = 1
else:
self.players_info[1]['player'] = self.team
self.players_info[1]['match_state'] = MatchState(self.point, player_key = 'team')
self.players_info[1]['id'] = self.team.__repr__()
self.players_info[1]['key'] = 'team'
self.players_info[0]['player'] = self.opponent
self.players_info[0]['match_state'] = MatchState(self.point, player_key = 'opponent')
self.players_info[0]['id'] = self.opponent.opponent_id
self.players_info[0]['key'] = 'opponent'
sbb_position = 1
opponent_position = 0
if Config.USER['debug']['enabled']:
self.debug_file.write("PokerSBB Game: Hold'em Limit\n")
self.debug_file.write("Table '"+str(self.match_id)+"' 2-max Seat #2 is the button\n")
m = "Seat 1: "+self.players_info[0]['id']+" ("+str(MatchState.maximum_winning())+" chips)"
if sbb_position == 0:
m += " [SBB]"
self.debug_file.write(m+"\n")
m = "Seat 2: "+self.players_info[1]['id']+" ("+str(MatchState.maximum_winning())+" chips)"
if sbb_position == 1:
m += " [SBB]"
self.debug_file.write(m+"\n")
### Apply blinds (forced bets made before the cards are dealt)
# since it is a heads-up, the dealer posts the small blind, and the non-dealer places the big blind
# The small blind is usually equal to half of the big blind.
# The big blind is equal to the minimum bet.
big_blind = PokerConfig.CONFIG['small_bet']
small_blind = big_blind/2.0
self.players_info[0]['chips'] -= big_blind
self.pot += big_blind
self.players_info[1]['chips'] -= small_blind # dealer/button
self.pot += small_blind
if Config.USER['debug']['enabled']:
self.debug_file.write(self.players_info[1]['id']+": posts small blind "+str(small_blind)+"\n")
self.debug_file.write(self.players_info[0]['id']+": posts big blind "+str(big_blind)+"\n")
### Starting match
self.rounds = [[], [], [], []]
if Config.USER['debug']['enabled']:
self.debug_file.write("*** HOLE CARDS ***\n")
self.round_id = 0 # preflop
result = self._run_poker_round(starter_player_index = 1, initial_bet = small_blind,
default_bet = PokerConfig.CONFIG['small_bet'])
if result == "next_round":
if Config.USER['debug']['enabled']:
self.debug_file.write("*** FLOP *** "+str(self.point.board_cards_[:3])+"\n")
self.round_id = 1 # flop
result = self._run_poker_round(starter_player_index = 0, initial_bet = 0.0,
default_bet = PokerConfig.CONFIG['small_bet'])
if result == "next_round":
if Config.USER['debug']['enabled']:
self.debug_file.write("*** TURN *** "+str(self.point.board_cards_[:4])+"\n")
self.round_id = 2 # turn
result = self._run_poker_round(starter_player_index = 0, initial_bet = 0.0,
default_bet = PokerConfig.CONFIG['big_bet'])
river_bet = 0.0
river_default_bet = PokerConfig.CONFIG['big_bet']
river_starter_player_index = 0
if result == "next_round":
if Config.USER['debug']['enabled']:
self.debug_file.write("*** RIVER *** "+str(self.point.board_cards_)+"\n")
self.round_id = 3 # river
result = self._run_poker_round(starter_player_index = river_starter_player_index,
initial_bet = river_bet, default_bet = river_default_bet)
showdown_happened = False
if result == "next_round": # showdown
showdown_happened = True
if Config.USER['debug']['enabled']:
self.debug_file.write("*** SHOW DOWN ***\n")
self.debug_file.write(self.players_info[0]['id']+": "
"shows "+str(self.players_info[0]['match_state'].hole_cards)+" "
"(HS: "+str(self.players_info[0]['match_state'].hand_strength[3])+")\n")
self.debug_file.write(self.players_info[1]['id']+": "
"shows "+str(self.players_info[1]['match_state'].hole_cards)+" "
"(HS: "+str(self.players_info[1]['match_state'].hand_strength[3])+")\n")
player0_hs = self.players_info[0]['match_state'].hand_strength[3]
player1_hs = self.players_info[1]['match_state'].hand_strength[3]
if player0_hs > player1_hs:
self.players_info[0]['chips'] += self.pot
if Config.USER['debug']['enabled']:
self.debug_file.write(self.players_info[0]['id']+" collected"
" "+str(self.pot)+" from main pot\n")
showdown_winner = 0
elif player0_hs < player1_hs:
self.players_info[1]['chips'] += self.pot
if Config.USER['debug']['enabled']:
self.debug_file.write(self.players_info[1]['id']+" collected"
" "+str(self.pot)+" from main pot\n")
showdown_winner = 1
else:
self.players_info[0]['chips'] += self.pot/2.0
self.players_info[1]['chips'] += self.pot/2.0
if Config.USER['debug']['enabled']:
self.debug_file.write("Draw! The players shared "+str(self.pot)+" from main pot\n")
showdown_winner = -1
if result == "player_folded":
if Config.USER['debug']['enabled']:
if self.players_info[0]['folded']:
last_player = self.players_info[1]['id']
else:
last_player = self.players_info[0]['id']
self.debug_file.write(last_player+" collected "+str(self.pot)+" from pot\n")
self.debug_file.write(last_player+": doesn't show hand\n")
if Config.USER['debug']['enabled']:
self.debug_file.write("*** SUMMARY ***\n")
self.debug_file.write("Total pot "+str(self.pot)+" | Rake 0\n")
self.debug_file.write("Board "+str(self.point.board_cards_)+"\n")
if self.players_info[0]['folded']:
status0 = "folded"
status1 = "collected "+str(self.pot)
elif self.players_info[1]['folded']:
status0 = "collected "+str(self.pot)
status1 = "folded"
elif showdown_winner == 0:
status0 = "showed and won "+str(self.pot)
status1 = "showed and lost"
elif showdown_winner == 1:
status0 = "showed and lost"
status1 = "showed and won "+str(self.pot)
elif showdown_winner == -1:
status0 = "showed and won "+str(self.pot/2.0)
status1 = "showed and won "+str(self.pot/2.0)
else:
raise ValueError("Unrecognized game final status.")
self.debug_file.write("Seat 1: "+self.players_info[0]['id']+" "+status0+"\n")
self.debug_file.write("Seat 2: "+self.players_info[1]['id']+" "+status1+"\n")
self.debug_file.write("\n\n### Point Information: "+str(self.point)+"\n")
player_actions = self.players_info[sbb_position]['match_state'].actions
opponent_actions = self.players_info[opponent_position]['match_state'].actions
self._get_opponent_model_for_team().update_overall_agressiveness(self.round_id, player_actions,
opponent_actions, self.point.label_, showdown_happened)
if self.opponent.opponent_id == 'hall_of_fame':
self._get_opponent_model_for_hall_of_fame().update_overall_agressiveness(self.round_id,
opponent_actions, player_actions, self.point.label_, showdown_happened)
if self.team.opponent_id == 'bayesian_opponent' or self.team.opponent_id == 'sbb_bayesian_opponent':
self.team.update_opponent_actions(opponent_actions)
if self.opponent.opponent_id == 'bayesian_opponent' or self.opponent.opponent_id == 'sbb_bayesian_opponent':
self.opponent.update_opponent_actions(player_actions)
if self.is_training:
original_player_actions = [PokerConfig.CONFIG['inverted_action_mapping'][a] for a in player_actions]
if Config.USER['reinforcement_parameters']['environment_parameters']['weights_per_action']:
bin_label = DiversityMaintenance.define_bin_for_actions(original_player_actions)
self.team.encodings_['encoding_for_pattern_of_actions_per_match'].append(bin_label)
sbb_chips = self.players_info[sbb_position]['chips']
opponent_chips = self.players_info[opponent_position]['chips']
normalized_value = self._normalize_winning(float(sbb_chips))
if Config.USER['debug']['enabled']:
self.debug_file.write("\n\n### Result Information: ")
self.debug_file.write("\nmatch: "+str(self.match_id))
self.debug_file.write("\nsbb_chips: "+str(sbb_chips))
self.debug_file.write("\nopponent_chips: "+str(opponent_chips))
self.debug_file.write("\nnormalized_value: "+str(normalized_value))
self.point.teams_results_.append(normalized_value)
self._get_chips_for_team().append(normalized_value)
if self.opponent.opponent_id == "hall_of_fame":
self._get_chips_for_hall_of_fame().append(self._normalize_winning(float(opponent_chips)))
if Config.USER['debug']['enabled']:
self.debug_file.close()
return normalized_value
def _run_poker_round(self, starter_player_index, initial_bet, default_bet):
last_action = None
current_index = starter_player_index
bet = initial_bet
last_action_was_a_bet = False
while True:
opponent_actions = self.players_info[self.opponent_indeces[current_index]]['match_state'].actions
action = self._execute_player(self.players_info[current_index]['player'],
self.players_info[current_index]['match_state'], bet, opponent_actions, current_index)
self.rounds[self.round_id].append(action)
if action == 'f':
if self.players_info[current_index]['key'] == 'team' and not self.is_training and self.round_id == 0:
self.players_info[current_index]['player'].extra_metrics_['played_last_hand'] = False
if Config.USER['debug']['enabled']:
self.debug_file.write(self.players_info[current_index]['id']+": "
"folds (pot: "+str(self.pot)+")\n")
self.players_info[self.opponent_indeces[current_index]]['chips'] += self.pot
self.players_info[current_index]['folded'] = True
return "player_folded"
elif action == 'c':
self.players_info[current_index]['chips'] -= bet
self.pot += bet
if Config.USER['debug']['enabled']:
self.debug_file.write(self.players_info[current_index]['id']+": "
"calls "+str(bet)+" (pot: "+str(self.pot)+")\n")
bet = 0.0
if last_action_was_a_bet:
return "next_round"
else:
last_action_was_a_bet = True
elif action == 'r':
self.players_info[current_index]['chips'] -= bet
self.pot += bet
bet = default_bet
self.players_info[current_index]['chips'] -= bet
self.pot += bet
if Config.USER['debug']['enabled']:
self.debug_file.write(self.players_info[current_index]['id']+": "
"raises "+str(default_bet)+" (pot: "+str(self.pot)+")\n")
last_action_was_a_bet = False
else:
raise ValueError("Invalid action.")
current_index = self.opponent_indeces[current_index]
def _valid_actions(self):
valid = [0, 1]
max_raises_overall = MatchState.MAX_BETS
# check if can raise
if self.round_id == 0:
max_raises = max_raises_overall-1
else:
max_raises = max_raises_overall
raises = 0
for action in self.rounds[self.round_id]:
if action == 'r':
raises += 1
if raises < max_raises:
valid.append(2)
return valid
def _execute_player(self, player, match_state, bet, opponent_actions, current_index):
if (match_state.player_key == 'team' and not player.opponent_id == 'bayesian_opponent'
and not player.opponent_id == 'bayesian_tester'):
inputs = match_state.inputs_for_team(self.pot, bet, self._get_chips_for_team(), self.round_id)
inputs += self._get_opponent_model_for_team().inputs(match_state.actions, opponent_actions)
else:
if player.opponent_id == 'hall_of_fame':
inputs = match_state.inputs_for_team(self.pot, bet, self._get_chips_for_hall_of_fame(),
self.round_id)
inputs += self._get_opponent_model_for_hall_of_fame().inputs(match_state.actions,
opponent_actions)
else:
inputs = match_state.inputs_for_rule_based_opponents(bet, self.round_id)
if Config.USER['debug']['enabled']:
if match_state.player_key == 'team' or self.opponent.opponent_id == 'hall_of_fame':
self.debug_file.write(" >> registers:"
" "+str([(p.program_id_, [round_value(r, 2) for r in p.general_registers_]) for p in player.programs])+"\n")
self.debug_file.write(" >> inputs: "+str(inputs)+"\n")
action = player.execute(self.point.point_id_, inputs, self._valid_actions(), self.is_training)
if Config.USER['debug']['enabled']:
if match_state.player_key == 'team' or self.opponent.opponent_id == 'hall_of_fame':
self.debug_file.write(" << program: "+str(player.last_selected_program_)+"\n")
if action is None:
action = 1
if match_state.player_key == 'team' and self.is_training:
player.encodings_['encoding_for_actions_per_match'].append(str(action))
action = PokerConfig.CONFIG['action_mapping'][action]
if match_state.player_key == 'team' and self.is_training:
player.encodings_['encoding_custom_info_per_match'].append(str(DiversityMaintenance.define_bin_for_value(match_state.hand_strength[self.round_id], is_normalized = True)))
player.encodings_['encoding_custom_info_per_match'].append(str(DiversityMaintenance.define_bin_for_value(match_state.effective_potential[self.round_id], is_normalized = True)))
player.encodings_['encoding_custom_info_per_match'].append(str(action))
match_state.actions.append(action)
return action
def _get_opponent_model_for_team(self):
opponent_id = self.opponent.opponent_id
if opponent_id not in self.team.opponent_model:
self.team.opponent_model[opponent_id] = OpponentModel()
return self.team.opponent_model[opponent_id]
def _get_chips_for_team(self):
opponent_id = self.opponent.opponent_id
if opponent_id not in self.team.chips:
self.team.chips[opponent_id] = []
return self.team.chips[opponent_id]
def _get_opponent_model_for_hall_of_fame(self):
opponent_id = self.team.team_id_
if opponent_id not in self.opponent.opponent_model:
self.opponent.opponent_model[opponent_id] = OpponentModel()
return self.opponent.opponent_model[opponent_id]
def _get_chips_for_hall_of_fame(self):
opponent_id = self.team.team_id_
if opponent_id not in self.opponent.chips:
self.opponent.chips[opponent_id] = []
return self.opponent.chips[opponent_id]
def _normalize_winning(self, value):
max_winning = MatchState.maximum_winning()
max_losing = -max_winning
return (value - max_losing)/float(max_winning - max_losing)
```
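The `_normalize_winning` helper above rescales a final chip count from the symmetric range [-maximum_winning, +maximum_winning] into [0, 1], so 0.5 means breaking even. A minimal standalone sketch of that arithmetic (the `max_winning` value of 10.0 is an illustrative assumption, not the real `MatchState.maximum_winning()`):

```python
def normalize_winning(value, max_winning=10.0):
    # map a result in [-max_winning, +max_winning] onto [0.0, 1.0]
    max_losing = -max_winning
    return (value - max_losing) / float(max_winning - max_losing)

# losing everything -> 0.0, breaking even -> 0.5, winning everything -> 1.0
assert normalize_winning(-10.0) == 0.0
assert normalize_winning(0.0) == 0.5
assert normalize_winning(10.0) == 1.0
```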
#### File: environments/reinforcement/reinforcement_metrics.py
```python
import numpy
from collections import defaultdict
from ..default_metrics import DefaultMetrics
from ...utils.helpers import round_value, round_array, flatten, accumulative_performances, rank_teams_by_accumulative_score
from ...config import Config
class ReinforcementMetrics(DefaultMetrics):
def __init__(self, environment):
self.environment_ = environment
def metrics_for_team(self, team):
msg = ""
if team.extra_metrics_:
msg += "\n\n### Reinforcement Learning-specific metrics for the best team:"
if 'champion_score' in team.extra_metrics_:
msg += ("\n\nscore per opponent (except hall of fame) (champion): "
""+str(team.score_champion_))
total_opponents = Config.USER['reinforcement_parameters']['environment_parameters']['validation_opponents_labels']+['hall_of_fame']
for key in team.extra_metrics_['opponents']:
if key in total_opponents:
msg += "\n"+key+": "+str(team.extra_metrics_['champion_opponents'][key])
if 'validation_score' in team.extra_metrics_:
msg += "\n\nscore per opponent (validation): "+str(team.score_validation_)
for key in team.extra_metrics_['validation_opponents']:
msg += "\n"+key+": "+str(team.extra_metrics_['validation_opponents'][key])
if 'training_opponents' in team.extra_metrics_:
msg += "\n\nscore per opponent (training): "+str(round_value(team.fitness_))
total_opponents = Config.USER['reinforcement_parameters']['environment_parameters']['training_opponents_labels']+['hall_of_fame']
for key in team.extra_metrics_['training_opponents']:
if key in total_opponents:
msg += "\n"+key+": "+str(team.extra_metrics_['training_opponents'][key])
return msg
def initialize_attributes_for_run_info(self, run_info):
run_info.global_mean_validation_score_per_validation_ = []
run_info.global_max_validation_score_per_validation_ = []
run_info.global_opponent_results_per_validation_ = []
run_info.hall_of_fame_per_validation_ = []
run_info.global_fitness_per_opponent_per_generation_ = defaultdict(list)
run_info.final_teams_validations_ = []
run_info.final_teams_validations_ids_ = []
run_info.individual_performance_in_last_generation_ = defaultdict(list)
run_info.accumulative_performance_in_last_generation_ = defaultdict(list)
run_info.ids_for_acc_performance_in_last_generation_ = defaultdict(list)
run_info.accumulative_performance_summary_ = {}
def generate_output_for_attributes_for_run_info(self, run_info):
msg = ""
msg += "\n\n\n\n#################### Reinforcement Learning-specific Metrics:"
msg += "\n\n\n##### GLOBAL METRICS PER VALIDATION"
msg += "\n\nGlobal Mean Validation Score per Validation: "+str(run_info.global_mean_validation_score_per_validation_)
msg += "\nGlobal Max. Validation Score per Validation: "+str(run_info.global_max_validation_score_per_validation_)
msg += "\n\nGlobal Opponent Results per Validation"
for key in run_info.global_opponent_results_per_validation_[-1]:
msg += "\n - "+str(key)+": "+str([item[key] if key in item else 0.0 for item in run_info.global_opponent_results_per_validation_])
if Config.USER['reinforcement_parameters']['hall_of_fame']['enabled']:
msg += "\n\nHall of Fame per Validation: "+str(run_info.hall_of_fame_per_validation_)
msg += "\n\n\n##### GLOBAL METRICS PER TRAINING"
msg += "\n\nGlobal Fitness Score per Training (per opponent):"
msg += "\n - predefined:"
for opponent in self.environment_.opponent_names_for_training_:
msg += "\n - "+str(opponent)+": "+str(run_info.global_fitness_per_opponent_per_generation_[opponent])
if Config.USER['reinforcement_parameters']['hall_of_fame']['enabled']:
msg += "\n - hall of fame:"
hall_of_fame = [x for x in run_info.global_fitness_per_opponent_per_generation_ if x not in self.environment_.opponent_names_for_training_]
for key in hall_of_fame:
msg += "\n - "+str(key)+": "+str(run_info.global_fitness_per_opponent_per_generation_[key])
msg += "\n\n\n##### FINAL TEAMS METRICS"
msg += "\n\nFinal Teams Validations: "+str(run_info.final_teams_validations_)
msg += "\nFinal Teams Ids: "+str(run_info.final_teams_validations_ids_)
msg += "\n\n\n##### ACCUMULATIVE PERFORMANCES"
for metric in run_info.individual_performance_in_last_generation_:
msg += "\n\nOverall Accumulative Results ("+str(metric)+"):"
msg += "\n- Individual Team Performance: "+str(run_info.individual_performance_in_last_generation_[metric])
msg += "\n- Accumulative Team Performance: "+str(run_info.accumulative_performance_in_last_generation_[metric])
msg += "\n- Team ids: "+str(run_info.ids_for_acc_performance_in_last_generation_[metric])
msg += "\n\n\n##### TEAMS RANKED BY ACCUMULATIVE PERFORMANCE"
msg += "\n\nTeams Ranked by Accumulative Score per Metric"
for metric in run_info.accumulative_performance_summary_:
msg += "\n - metric: "+str(metric)+" (len: "+str(len(run_info.accumulative_performance_summary_[metric]['overall']['ids_only']))+"):"
msg += "\n - Rank: "+str(run_info.accumulative_performance_summary_[metric]['overall']['rank'])
msg += "\n - Team ids: "+str(run_info.accumulative_performance_summary_[metric]['overall']['ids_only'])
return msg
def quick_metrics(self):
msg = ""
msg += "\n### Environment Info:"
msg += "\ntotal inputs: "+str(self.environment_.total_inputs_)
msg += "\ntotal actions: "+str(self.environment_.total_actions_)
msg += "\ntraining opponents: "+str(self.environment_.opponent_names_for_training_)
return msg
def store_per_generation_metrics(self, run_info, teams_population, current_generation, previous_diversity):
super(ReinforcementMetrics, self).store_per_generation_metrics(run_info, teams_population, current_generation, previous_diversity)
older_teams = [team for team in teams_population if team.generation != current_generation]
opponents = older_teams[0].extra_metrics_['training_opponents'].keys()
for opponent in opponents:
mean_fitness_per_opponent = round_value(numpy.mean([team.extra_metrics_['training_opponents'][opponent] for team in older_teams]), 3)
run_info.global_fitness_per_opponent_per_generation_[opponent].append(mean_fitness_per_opponent)
def store_per_validation_metrics(self, run_info, best_team, teams_population, programs_population, current_generation):
super(ReinforcementMetrics, self).store_per_validation_metrics(run_info, best_team, teams_population, programs_population, current_generation)
older_teams = [team for team in teams_population if team.generation != current_generation]
validation_score_mean = round_value(numpy.mean([team.score_validation_ for team in older_teams]))
run_info.temp_info_['validation_score_mean'] = validation_score_mean
opponent_means = {}
for key in older_teams[0].extra_metrics_['validation_opponents']:
opponent_means[key] = round_value(numpy.mean([t.extra_metrics_['validation_opponents'][key] for t in older_teams]))
if 'hall_of_fame' in best_team.extra_metrics_['champion_opponents']:
opponent_means['hall_of_fame(champion)'] = best_team.extra_metrics_['champion_opponents']['hall_of_fame']
run_info.global_mean_validation_score_per_validation_.append(validation_score_mean)
run_info.global_max_validation_score_per_validation_.append(round_value(max([team.score_validation_ for team in older_teams])))
run_info.global_opponent_results_per_validation_.append(opponent_means)
run_info.final_teams_validations_ = [team.score_validation_ for team in older_teams]
if Config.USER['reinforcement_parameters']['hall_of_fame']['enabled']:
run_info.hall_of_fame_per_validation_.append([p.__repr__() for p in self.environment_.hall_of_fame()])
def print_per_validation_metrics(self, run_info, best_team):
super(ReinforcementMetrics, self).print_per_validation_metrics(run_info, best_team)
print "\n\nglobal validation score (mean): "+str(run_info.temp_info_['validation_score_mean'])
if Config.USER['reinforcement_parameters']['hall_of_fame']['enabled']:
print "\nHall of Fame: "+str(run_info.hall_of_fame_per_validation_[-1])
def store_per_run_metrics(self, run_info, best_team, teams_population, pareto_front, current_generation):
super(ReinforcementMetrics, self).store_per_run_metrics(run_info, best_team, teams_population, pareto_front, current_generation)
self._calculate_accumulative_performances(run_info, teams_population, current_generation)
self._summarize_accumulative_performances(run_info)
self._generate_second_layer_files(run_info, teams_population)
older_teams = [team for team in teams_population if team.generation != current_generation]
run_info.final_teams_validations_ids_ = [team.__repr__() for team in older_teams]
# to ensure validation metrics exist for all teams in the hall of fame
if Config.USER['reinforcement_parameters']['hall_of_fame']['enabled']:
print "Validating hall of fame..."
self.environment_.validate(current_generation, self.environment_.hall_of_fame())
def _calculate_accumulative_performances(self, run_info, teams_population, current_generation):
older_teams = [team for team in teams_population if team.generation != current_generation]
metric = 'score'
sorting_criteria = lambda x: x.score_validation_
get_results_per_points = lambda x: x.results_per_points_for_validation_
point_ids = [point.point_id_ for point in self.environment_.validation_point_population_]
individual_performance, accumulative_performance, teams_ids = accumulative_performances(older_teams, point_ids, sorting_criteria, get_results_per_points)
run_info.individual_performance_in_last_generation_[metric] = individual_performance
run_info.accumulative_performance_in_last_generation_[metric] = accumulative_performance
run_info.ids_for_acc_performance_in_last_generation_[metric] = teams_ids
def _summarize_accumulative_performances(self, run_info):
metric = 'score'
run_info.accumulative_performance_summary_[metric] = {}
ind_score = run_info.individual_performance_in_last_generation_[metric]
acc_score = run_info.accumulative_performance_in_last_generation_[metric]
ids = run_info.ids_for_acc_performance_in_last_generation_[metric]
rank = rank_teams_by_accumulative_score(ind_score, acc_score, ids)
run_info.accumulative_performance_summary_[metric]['overall'] = {}
run_info.accumulative_performance_summary_[metric]['overall']['rank'] = rank
run_info.accumulative_performance_summary_[metric]['overall']['ids_only'] = sorted([r[0] for r in rank])
def _generate_second_layer_files(self, run_info, teams_population):
top5_overall_ids = [r[0] for r in run_info.accumulative_performance_summary_['score']['overall']['rank'][:5]]
top10_overall_ids = [r[0] for r in run_info.accumulative_performance_summary_['score']['overall']['rank'][:10]]
top15_overall_ids = [r[0] for r in run_info.accumulative_performance_summary_['score']['overall']['rank'][:15]]
if len(top5_overall_ids) == 5:
run_info.second_layer_files_['top5_overall'] = [t for t in teams_population if t.__repr__() in top5_overall_ids]
        if len(top10_overall_ids) == 10:
run_info.second_layer_files_['top10_overall'] = [t for t in teams_population if t.__repr__() in top10_overall_ids]
        if len(top15_overall_ids) == 15:
run_info.second_layer_files_['top15_overall'] = [t for t in teams_population if t.__repr__() in top15_overall_ids]
run_info.second_layer_files_['all'] = teams_population
def generate_overall_metrics_output(self, run_infos):
msg = super(ReinforcementMetrics, self).generate_overall_metrics_output(run_infos)
msg += "\n\n\n###### Reinforcement Learning-specific Metrics:"
score_means, score_stds = self._process_scores([run.global_mean_validation_score_per_validation_ for run in run_infos])
msg += "\n\nGlobal Mean Validation Score per Validation:"
msg += "\nmean: "+str(score_means)
if not Config.USER['verbose']['dont_show_std_deviation_in_reports']:
msg += "\nstd. deviation: "+str(score_stds)
score_means, score_stds = self._process_scores([run.global_max_validation_score_per_validation_ for run in run_infos])
msg += "\n\nGlobal Max. Validation Score per Validation:"
msg += "\nmean: "+str(score_means)
if not Config.USER['verbose']['dont_show_std_deviation_in_reports']:
msg += "\nstd. deviation: "+str(score_stds)
msg += "\n\nGlobal Fitness per Opponent per Training:"
for key in self.environment_.opponent_names_for_training_:
score_means, score_stds = self._process_scores([run.global_fitness_per_opponent_per_generation_[key] for run in run_infos])
msg += "\n- "+str(key)+":"
msg += "\n- mean: "+str(round_array(score_means, 2))
if not Config.USER['verbose']['dont_show_std_deviation_in_reports']:
msg += "\n- std. deviation: "+str(round_array(score_stds, 2))
for run_id, run in enumerate(run_infos):
valid_names = [t.__repr__() for t in run.hall_of_fame_in_last_generation_]
for key in run.global_fitness_per_opponent_per_generation_.keys():
if key in valid_names:
msg += "\n- run "+str(run_id+1)+", "+str(key)+": "+str(run.global_fitness_per_opponent_per_generation_[key])
msg += "\n\nFinal Teams Validations: "+str(flatten([round_array(run.final_teams_validations_, 3) for run in run_infos]))
msg += "\n"
msg += self._generate_overall_metrics_output_for_acc_curves(run_infos)
msg += self._generate_summary(run_infos)
return msg
def _generate_summary(self, run_infos): # Obs.: Everything here is duplicated code
msg = "\n\n\n\n######### SUMMARY:"
score_means, score_stds = self._process_scores([run.global_mean_fitness_per_generation_ for run in run_infos])
msg += "\n\nGlobal Mean Fitness Score per Training:"
msg += "\nmean: "+str(round_array(score_means, 3))
score_means, score_stds = self._process_scores([run.global_max_fitness_per_generation_ for run in run_infos])
msg += "\n\nGlobal Max. Fitness Score per Training:"
msg += "\nmean: "+str(round_array(score_means, 3))
score_means, score_stds = self._process_scores([run.global_mean_validation_score_per_validation_ for run in run_infos])
msg += "\n\nGlobal Mean Validation Score per Validation:"
msg += "\nmean: "+str(score_means)
score_means, score_stds = self._process_scores([run.global_max_validation_score_per_validation_ for run in run_infos])
msg += "\n\nGlobal Max. Validation Score per Validation:"
msg += "\nmean: "+str(score_means)
score_means, score_stds = self._process_scores([run.champion_score_per_validation_ for run in run_infos])
msg += "\n\nChampion Score per Validation:"
msg += "\nmean: "+str(score_means)
msg += "\n\n\n### Summary Per Run"
best_scores = [run.global_mean_fitness_per_generation_[-1] for run in run_infos]
msg += "\n\nGlobal Mean Fitness Score per Training per Run:"
msg += "\n"+str(best_scores)
msg += "\nmean: "+str(round_value(numpy.mean(best_scores)))
best_scores = [run.global_max_fitness_per_generation_[-1] for run in run_infos]
msg += "\n\nGlobal Max. Fitness Score per Training per Run:"
msg += "\n"+str(best_scores)
msg += "\nmean: "+str(round_value(numpy.mean(best_scores)))
best_scores = [run.global_mean_validation_score_per_validation_[-1] for run in run_infos]
msg += "\n\nGlobal Mean Validation Score per Validation per Run:"
msg += "\n"+str(best_scores)
msg += "\nmean: "+str(round_value(numpy.mean(best_scores)))
best_scores = [run.global_max_validation_score_per_validation_[-1] for run in run_infos]
msg += "\n\nGlobal Max. Validation Score per Validation per Run:"
msg += "\n"+str(best_scores)
msg += "\nmean: "+str(round_value(numpy.mean(best_scores)))
best_scores = [round_value(run.best_team_.score_champion_) for run in run_infos]
msg += "\n\nChampion Score per Validation per Run:"
msg += "\n"+str(best_scores)
msg += "\nmean: "+str(round_value(numpy.mean(best_scores)))
return msg
```
#### File: SBB/utils/run_info.py
```python
import time
from collections import defaultdict
from helpers import round_array, round_value
from ..config import Config
class RunInfo:
"""
Stores metrics for the runs.
"""
def __init__(self, run_id, environment, seed):
self.run_id = run_id
self.environment = environment
self.seed = seed
self.elapsed_time_ = None
self.best_team_ = None
self.teams_in_last_generation_ = []
self.hall_of_fame_in_last_generation_ = []
self.pareto_front_in_last_generation_ = []
self.second_layer_files_ = {}
self.start_time_ = time.time()
self.temp_info_ = {}
self.train_score_per_validation_ = []
self.champion_score_per_validation_ = []
self.global_diversity_per_validation_ = defaultdict(list)
self.global_mean_fitness_per_generation_ = []
self.global_max_fitness_per_generation_ = []
self.global_fitness_per_diversity_per_generation_ = defaultdict(list)
self.global_diversity_per_generation_ = defaultdict(list)
self.novelty_type_per_generation_ = []
self.actions_distribution_per_validation_ = []
self.inputs_distribution_per_instruction_per_validation_ = []
self.inputs_distribution_per_team_per_validation_ = []
self.mean_team_size_per_validation_ = []
self.mean_program_size_with_introns_per_validation_ = []
self.mean_program_size_without_introns_per_validation_ = []
self.environment.metrics_.initialize_attributes_for_run_info(self)
def end(self):
self.elapsed_time_ = round_value((time.time() - self.start_time_)/60.0)
def __str__(self):
msg = "RUN "+str(self.run_id)+"\n"
msg += "seed: "+str(self.seed)
msg += "\n\n\n\n#################### General Metrics:"
msg += "\n\n\n##### GLOBAL METRICS PER VALIDATION"
msg += "\n\nChampion Fitness per Validation: "+str(round_array(self.train_score_per_validation_))
msg += "\nChampion Score per Validation: "+str(round_array(self.champion_score_per_validation_))
if len(Config.USER['advanced_training_parameters']['diversity']['metrics']) > 0:
msg += "\n\nGlobal Diversities per Validation"
for key in self.global_diversity_per_validation_:
msg += "\n - "+str(key)+": "+str(self.global_diversity_per_validation_[key])
msg += "\n\n\n##### GLOBAL METRICS PER TRAINING"
msg += "\n\nGlobal Mean Fitness Score per Training: "+str(self.global_mean_fitness_per_generation_)
msg += "\nGlobal Max. Fitness Score per Training: "+str(self.global_max_fitness_per_generation_)
if len(Config.USER['advanced_training_parameters']['diversity']['metrics']) > 1:
msg += "\n\n\nGlobal Fitness Score per Training (per diversity):"
for key in self.global_fitness_per_diversity_per_generation_:
msg += "\n - "+str(key)+": "+str(self.global_fitness_per_diversity_per_generation_[key])
if len(Config.USER['advanced_training_parameters']['diversity']['metrics']) > 0:
msg += "\n\nGlobal Diversities per Training"
for key in self.global_diversity_per_generation_:
msg += "\n - "+str(key)+": "+str(self.global_diversity_per_generation_[key])
if len(Config.USER['advanced_training_parameters']['diversity']['metrics']) > 1:
msg += "\n\nDiversity Type per Training: "+str(self.novelty_type_per_generation_)
msg += "\n\n\n##### DISTRIBUTION METRICS PER VALIDATION"
msg += "\n\nDistribution of Actions"
msg += "\n - last validation: "+str(self.actions_distribution_per_validation_[-1])
msg += "\n - per validation: "+str(self.actions_distribution_per_validation_)
msg += "\n\nDistribution of Inputs (per program)"
msg += "\n - last validation: "+str(self.inputs_distribution_per_instruction_per_validation_[-1])
msg += "\n - per validation: "+str(self.inputs_distribution_per_instruction_per_validation_)
msg += "\n\nDistribution of Inputs (per team)"
msg += "\n - last validation: "+str(self.inputs_distribution_per_team_per_validation_[-1])
msg += "\n - per validation: "+str(self.inputs_distribution_per_team_per_validation_)
msg += "\n\n\n##### SIZE METRICS PER VALIDATION"
msg += "\n\nMean Team Sizes"
msg += "\n - last validation: "+str(self.mean_team_size_per_validation_[-1])
msg += "\n - per validation: "+str(self.mean_team_size_per_validation_)
msg += "\n\nMean Program Sizes (with introns)"
msg += "\n - last validation: "+str(self.mean_program_size_with_introns_per_validation_[-1])
msg += "\n - per validation: "+str(self.mean_program_size_with_introns_per_validation_)
msg += "\n\nMean Program Sizes (without introns)"
msg += "\n - last validation: "+str(self.mean_program_size_without_introns_per_validation_[-1])
msg += "\n - per validation: "+str(self.mean_program_size_without_introns_per_validation_)
msg += self.environment.metrics_.generate_output_for_attributes_for_run_info(self)
return msg
``` |
{
"source": "jpbonson/WebScrapingTool",
"score": 3
} |
#### File: webscrapingtool/tests/test_outlets_views.py
```python
import json
from django.urls import reverse
from restapi.models import Outlet
from rest_framework import status
from rest_framework.test import APITestCase
class OutletTests(APITestCase):
def setUp(self):
Outlet.objects.create(name="WolfNews", website="wolf.com", description="dark")
Outlet.objects.create(name="Culture", website="culture.com", description="interesting")
def test_create_outlet(self):
"""
Ensure we can create a new outlet object.
"""
url = reverse('v1:outlet-list')
data = {'name': 'NiceNews', 'website': 'news.com', 'description': 'cool website'}
response = self.client.post(url, data, format='json')
result = json.loads(response.content.decode('utf-8'))
result.pop('id')
self.assertEqual(result, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_list_outlets(self):
"""
Ensure we can list outlet objects.
"""
url = reverse('v1:outlet-list')
response = self.client.get(url)
result = map(lambda x: x['name'], json.loads(response.content.decode('utf-8')))
expected = map(lambda x: x.name, list(Outlet.objects.all()))
self.assertEqual(list(result), list(expected))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_outlet(self):
"""
Ensure we can get an outlet object.
"""
sample_id = 1
url = reverse('v1:outlet-detail', kwargs={'outlet_id': sample_id})
response = self.client.get(url)
result = json.loads(response.content.decode('utf-8'))
expected = Outlet.objects.get(id=sample_id)
self.assertEqual(result['name'], expected.name)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_update_outlet(self):
"""
Ensure we can update an outlet object.
"""
sample_id = 1
url = reverse('v1:outlet-detail', kwargs={'outlet_id': sample_id})
data = {'name': 'NewNews', 'website': 'news2.com', 'description': ''}
response = self.client.put(url, data, format='json')
result = json.loads(response.content.decode('utf-8'))
expected = Outlet.objects.get(id=sample_id)
self.assertEqual(result['name'], expected.name)
self.assertEqual(result['website'], expected.website)
self.assertEqual(result['description'], expected.description)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_partial_update_outlet(self):
"""
Ensure we can partially update an outlet object.
"""
sample_id = 1
url = reverse('v1:outlet-detail', kwargs={'outlet_id': sample_id})
data = {'name': 'NewNews'}
response = self.client.patch(url, data, format='json')
result = json.loads(response.content.decode('utf-8'))
expected = Outlet.objects.get(id=sample_id)
self.assertEqual(result['name'], expected.name)
self.assertEqual(result['website'], expected.website)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete_outlet(self):
"""
Ensure we can delete an outlet object.
"""
sample_id = 1
url = reverse('v1:outlet-detail', kwargs={'outlet_id': sample_id})
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(response.content.decode('utf-8'), '')
with self.assertRaises(Exception) as context:
Outlet.objects.get(id=sample_id)
self.assertEqual('Outlet matching query does not exist.', str(context.exception))
``` |
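The `reverse()` calls in these tests assume a `v1` URL namespace with route names `outlet-list` and `outlet-detail`, the latter keyed by `outlet_id`. A minimal sketch of a URLconf that would satisfy those lookups; the view classes are hypothetical placeholders, not necessarily the project's actual views:

```python
# hypothetical restapi/urls.py consistent with the reverse() names used in the tests
from django.urls import path
from restapi import views  # assumed views module

app_name = 'v1'

urlpatterns = [
    path('outlets/', views.OutletList.as_view(), name='outlet-list'),
    path('outlets/<int:outlet_id>/', views.OutletDetail.as_view(), name='outlet-detail'),
]
```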
{
"source": "jpbottaro/anna",
"score": 3
} |
#### File: data/dataset/glove.py
```python
import os
import numpy as np
import anna.data.utils as utils
DESTINATION = "glove"
NAME = "glove.840B.300d"
TXT_NAME = NAME + ".txt"
ZIP_NAME = NAME + ".zip"
URL = "http://nlp.stanford.edu/data/" + ZIP_NAME
def fetch_and_parse(data_dir, voc_size=None):
"""
Fetches and parses the GloVe word embeddings dataset. The dataset is
also cached as a pickle for further calls.
Args:
data_dir (str): absolute path to the dir where datasets are stored
voc_size (int): maximum size of the vocabulary, None for no limit
Returns:
voc (list[str]): list of words, matching the index in `emb`
emb (numpy.array): array of embeddings for each word in `voc`
"""
return parse(fetch(data_dir), voc_size)
def parse(glove_dir, voc_size):
"""
Parses the glove word embeddings.
Args:
glove_dir (str): absolute path to the extracted word embeddings
voc_size (int): maximum size of the vocabulary, None for no limit
Returns:
voc (list[str]): list of words, matching the index in `emb`
emb (numpy.array): array of embeddings for each word in `voc`
"""
voc = []
emb = []
words = set()
glove_path = os.path.join(glove_dir, TXT_NAME)
with open(glove_path) as f:
for line in f:
parts = line.split(" ")
word = parts[0]
if word not in words:
words.add(word)
voc.append(word)
emb.append([float(n) for n in parts[1:]])
            if voc_size is not None and len(words) >= voc_size:
break
return utils.add_special_tokens(voc, np.array(emb))
def fetch(data_dir):
"""
Fetches and extracts pre-trained GloVe word vectors.
Args:
data_dir (str): absolute path to the folder where datasets are stored
Returns:
glove_dir (str): absolute path to the folder where glove is stored
"""
file_path = os.path.join(data_dir, DESTINATION, ZIP_NAME)
txt_path = os.path.join(data_dir, DESTINATION, TXT_NAME)
return utils.fetch(URL, file_path, txt_path)
```
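A minimal usage sketch for the loader above, assuming the package layout implied by the file headers; the data directory and vocabulary size are arbitrary, and the first call downloads and unpacks the full GloVe archive before parsing:

```python
# hypothetical driver script
from anna.data.dataset import glove

voc, emb = glove.fetch_and_parse("/tmp/data", voc_size=50000)
# voc is a list of tokens (including any special tokens added by add_special_tokens)
# emb is the matching matrix of 300-dimensional GloVe vectors
print(len(voc), emb.shape)
```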
#### File: data/dataset/wmt.py
```python
import os
import anna.data.utils as utils
CORPORA = {
"europarl-parallel.tgz":
"http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
"europarl-monolingual.tgz":
"http://www.statmt.org/wmt13/training-monolingual-europarl-v7.tgz",
"commoncrawl.tgz":
"http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
"un.tgz":
"http://www.statmt.org/wmt13/training-parallel-un.tgz",
"nc-parallel.tgz":
"http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz",
"nc-monolingual.tgz":
"http://www.statmt.org/wmt14/training-monolingual-nc-v9.tgz",
"giga-fren.tar":
"http://www.statmt.org/wmt10/training-giga-fren.tar",
"dev.tgz": "http://www.statmt.org/wmt14/dev.tgz",
"test.tgz": "http://www.statmt.org/wmt14/test-full.tgz"
}
def fetch(data_dir, dest="wmt14"):
"""
Fetches most data from the WMT14 shared task.
Creates the `dest` if it doesn't exist.
Args:
data_dir (str): absolute path to the dir where datasets are stored
dest (str): name for dir where WMT14 datasets will be extracted
Returns:
final_dir (str): absolute path where WMT14 datasets were extracted
"""
# Create folder
wmt_dir = os.path.join(data_dir, dest)
utils.create_folder(wmt_dir)
# Download all datasets
for f, url in CORPORA.items():
utils.urlretrieve(url, os.path.join(wmt_dir, f))
return wmt_dir
```
#### File: anna/model/metrics.py
```python
import tensorflow as tf
import tensorflow.compat.v1 as tf1
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.eager import context
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
def create(labels, predictions, vocab, text_samples=False, histograms=False):
num_classes = len(vocab)
with tf.name_scope("metrics"):
expected_labels_idx, expected_labels_str = _label_hot_to_idx(
"expected", labels, vocab)
predicted_labels_idx, predicted_labels_str = _label_hot_to_idx(
"predicted", predictions, vocab)
n_expected_labels = tf1.metrics.mean(tf.reduce_sum(labels, 1))
n_predicted_labels = tf1.metrics.mean(tf.reduce_sum(predictions, 1))
f_micro = f_measure(labels, predictions, num_classes)
f_macro = f_measure(labels, predictions, num_classes, micro=False)
f_ex = f_example(labels, predictions)
hamming = tf1.metrics.accuracy(labels, predictions)
accuracy = tf1.metrics.mean(
tf.reduce_all(tf.equal(labels, predictions), 1))
metrics = {
"out/n_expected_labels": n_expected_labels,
"out/n_predicted_labels": n_predicted_labels,
"perf/miF1": f_micro,
"perf/maF1": f_macro,
"perf/ebF1": f_ex,
"perf/hamming": hamming,
"perf/accuracy": accuracy,
}
for name, value in metrics.items():
tf1.summary.scalar(name, value[1])
if text_samples:
tf.summary.text("out/expected_labels_examples", expected_labels_str)
tf.summary.text("out/predicted_labels_examples", predicted_labels_str)
if histograms:
tf.summary.histogram("out/expected_labels_dist", expected_labels_idx)
tf.summary.histogram("out/predicted_labels_dist", predicted_labels_idx)
return metrics
def display(name, metrics):
message = "\t{}".format(name)
message += "\tloss: {:.6f}".format(metrics["loss"])
message += "\t" + "\t".join(["{}: {:.4f}".format(k[5:], v)
for k, v in metrics.items() if "perf" in k])
print(message)
def _label_hot_to_idx(name, labels, vocab):
# Find all positive labels (ignoring which document they come from)
idx = tf.cast(tf.where(tf.equal(labels, 1.)), tf.int64)
idx = idx[:, 1]
# Fetch string labels for the first document
first_idx = tf.cast(tf.where(tf.equal(labels[0], 1.)), tf.int64)
names = tf.contrib.lookup.index_to_string_table_from_tensor(
vocab,
default_value="_UNK_",
name="{}_output".format(name)).lookup(first_idx)
return idx, names
def f_measure(labels,
predictions,
num_classes,
micro=True,
name=None):
"""Computes the label-based f1 score of the predictions with respect to
the labels.
The `f_measure` function creates three local variables,
`true_positives`, `false_positives` and `false_negatives`, that are used to
compute the f measure. This value is ultimately returned as `f_measure`, an
idempotent operation.
For estimation of the metric over a stream of data, the function creates an
`update_op` that updates these variables and returns the `f_measure`.
Args:
labels (tf.Tensor): the ground truth values, a `Tensor` whose dimensions
must match `predictions`.
predictions (tf.Tensor): the predicted values, a `Tensor` of arbitrary
dimensions.
num_classes (int): the possible number of labels the prediction task can
have.
micro (bool, optional): Whether the f measure should be taken globally
(i.e. micro), or averaged per class (i.e. macro).
name: An optional variable_scope name.
Returns:
f_measure: Scalar float `Tensor` with the f measure.
update_op: `Operation` that increments `true_positives`,
`false_positives` and `false_negatives` variables appropriately and
whose value matches `f_measure`.
Raises:
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('f_measure is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'f_measure',
(predictions, labels)):
def count_hits(expected, predicted):
hits = math_ops.logical_and(expected, predicted)
hits = math_ops.cast(hits, dtypes.float32)
return math_ops.reduce_sum(hits, axis=0)
is_true_positive = count_hits(
math_ops.equal(labels, 1.),
math_ops.equal(predictions, 1.))
is_false_positive = count_hits(
math_ops.equal(labels, 0.),
math_ops.equal(predictions, 1.))
is_false_negative = count_hits(
math_ops.equal(labels, 1.),
math_ops.equal(predictions, 0.))
tp_var = metric_variable([num_classes], dtypes.float32)
fp_var = metric_variable([num_classes], dtypes.float32)
fn_var = metric_variable([num_classes], dtypes.float32)
tp_up = state_ops.assign_add(tp_var, is_true_positive)
fp_up = state_ops.assign_add(fp_var, is_false_positive)
fn_up = state_ops.assign_add(fn_var, is_false_negative)
def compute_f_measure(tp, fp, fn, micro, name):
if micro:
tp = math_ops.reduce_sum(tp)
fp = math_ops.reduce_sum(fp)
fn = math_ops.reduce_sum(fn)
value = 2 * tp
den = 2 * tp + fp + fn
res = array_ops.where(math_ops.greater(den, 0),
math_ops.divide(value, den),
array_ops.ones_like(value))
return math_ops.reduce_mean(res, name=name)
f = compute_f_measure(tp_var, fp_var, fn_var, micro, 'value')
update_op = compute_f_measure(tp_up, fp_up, fn_up, micro, 'update_op')
return f, update_op
def f_example(labels,
predictions,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the example-based f1 score of the predictions with respect to
the labels.
The `f_measure` uses the `tf.compat.v1.metrics.mean` to store the streaming
counts.
For estimation of the metric over a stream of data, the function creates an
`update_op` that updates these variables and returns the `f_measure`.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions.
Will be cast to `bool`.
metrics_collections: An optional list of collections that `f_measure`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
f_measure: Scalar float `Tensor` with the f measure.
update_op: `Operation` that increments `true_positives`,
`false_positives` and `false_negatives` variables appropriately and
whose value matches `f_measure`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('f_example is not '
'supported when eager execution is enabled.')
# Calculate the double of the true positives
value = 2. * math_ops.reduce_sum(labels * predictions, 1)
# Calculate denominator as sum of non-zero values of both matrices
den = math_ops.count_nonzero(labels, 1) + \
math_ops.count_nonzero(predictions, 1)
den = tf.cast(den, dtypes.float32)
# Avoid division by zero
res = array_ops.where(math_ops.greater(den, 0),
math_ops.divide(value, den),
array_ops.ones_like(value), name)
return tf1.metrics.mean(res,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def metric_variable(shape, dtype, validate_shape=True, name=None):
"""Create variable in `GraphKeys.(LOCAL|METRIC_VARIABLES`) collections.
Taken from tf.compat.v1.metrics directly, as the function is not exposed."""
return variable_scope.variable(
lambda: array_ops.zeros(shape, dtype),
trainable=False,
collections=[
ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.METRIC_VARIABLES
],
validate_shape=validate_shape,
name=name)
```
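As a sanity check on `f_example` above: per document it computes 2·|true ∩ predicted| divided by (|true| + |predicted|), falling back to 1.0 when both sets are empty, then averages over documents. A minimal NumPy sketch of the same arithmetic outside the TensorFlow graph, with purely illustrative values:

```python
import numpy as np

labels      = np.array([[1., 0., 1.], [0., 1., 0.]])
predictions = np.array([[1., 0., 0.], [0., 1., 1.]])

value = 2.0 * (labels * predictions).sum(axis=1)                                 # [2., 2.]
den = np.count_nonzero(labels, axis=1) + np.count_nonzero(predictions, axis=1)   # [3, 3]
per_example_f1 = np.where(den > 0, value / den, 1.0)                             # [0.667, 0.667]
print(per_example_f1.mean())  # ~0.667, the value f_example would stream
```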
#### File: anna/tests/test_encdec.py
```python
import tensorflow as tf
from anna.model.decode import DecoderRNN
class EncoderDecoderTest(tf.test.TestCase):
def test_encode(self):
labels = ["one", "two", "three"]
dec = DecoderRNN("no/value", labels, 128, max_steps=len(labels))
with self.test_session():
x = tf.constant([[0, 1, 0], [1, 0, 1]])
x, x_len, x_max = dec.encode_labels(x)
self.assertAllEqual(x.eval(), [[4, 2, 0], [3, 5, 2]])
self.assertAllEqual(x_len.eval(), [2, 3])
self.assertAllEqual(x_max.eval(), 3)
def test_decode(self):
labels = ["one", "two", "three"]
dec = DecoderRNN("no/value", labels, 128, max_steps=len(labels))
with self.test_session():
x = tf.constant([[3, 2, 0], [2, 0, 1], [5, 4, 2]])
x = tf.one_hot(x, len(dec.voc))
x = dec.decode_labels(x)
self.assertAllEqual(x.eval(), [[1, 0, 0], [0, 0, 0], [0, 1, 1]])
def test_encode_decode(self):
labels = ["one", "two", "three"]
dec = DecoderRNN("no/value", labels, 128, max_steps=len(labels))
with self.test_session():
orig_x = tf.constant([[0, 1, 0], [1, 0, 1]], dtype=tf.float32)
x, x_len, x_max = dec.encode_labels(orig_x)
x = tf.one_hot(x, len(dec.voc))
x = dec.decode_labels(x)
self.assertAllEqual(orig_x.eval(), x.eval())
def test_repeat_decode(self):
labels = ["one", "two", "three"]
dec = DecoderRNN("no/value", labels, 128, max_steps=len(labels))
with self.test_session():
x = tf.constant([[3, 3, 2, 0]])
x = tf.one_hot(x, len(dec.voc))
x = dec.decode_labels(x)
self.assertAllEqual(x.eval(), [[1, 0, 0]])
``` |