{
"source": "jireh-father/Real-Time-Voice-Cloning",
"score": 2
} |
#### File: encoder/data_objects/speaker_verification_dataset.py
```python
from encoder.data_objects.random_cycler import RandomCycler
from encoder.data_objects.speaker_batch import SpeakerBatch
from encoder.data_objects.speaker import Speaker
from encoder.params_data import partials_n_frames
from torch.utils.data import Dataset, DataLoader
from pathlib import Path
import glob
import os
# TODO: improve with a pool of speakers for data efficiency
class SpeakerVerificationDataset(Dataset):
def __init__(self, speaker_dirs):
print("speaker_dirs", len(speaker_dirs))
if len(speaker_dirs) == 0:
raise Exception("No speakers found. Make sure you are pointing to the directory "
"containing all preprocessed speaker directories.")
self.total_cnt = 0
for speaker_dir in speaker_dirs:
self.total_cnt += len(glob.glob(os.path.join(speaker_dir, "*.npy")))
print("total items", self.total_cnt)
        self.speakers = [Speaker(speaker_dir) for speaker_dir in speaker_dirs]
        self.speaker_cycler = RandomCycler(self.speakers)
        # get_logs() reads *.txt log files from the dataset root; since the root is
        # not passed in explicitly, infer it from the first speaker directory.
        self.root = Path(speaker_dirs[0]).parent
def __len__(self):
        return self.total_cnt  # previously int(1e10), i.e. an effectively unbounded length
def __getitem__(self, index):
return next(self.speaker_cycler)
def get_logs(self):
log_string = ""
for log_fpath in self.root.glob("*.txt"):
with log_fpath.open("r") as log_file:
log_string += "".join(log_file.readlines())
return log_string
class SpeakerVerificationDataLoader(DataLoader):
def __init__(self, dataset, speakers_per_batch, utterances_per_speaker, sampler=None,
batch_sampler=None, num_workers=0, pin_memory=False, timeout=0,
worker_init_fn=None):
self.utterances_per_speaker = utterances_per_speaker
super().__init__(
dataset=dataset,
batch_size=speakers_per_batch,
shuffle=False,
sampler=sampler,
batch_sampler=batch_sampler,
num_workers=num_workers,
collate_fn=self.collate,
pin_memory=pin_memory,
drop_last=False,
timeout=timeout,
worker_init_fn=worker_init_fn
)
def collate(self, speakers):
return SpeakerBatch(speakers, self.utterances_per_speaker, partials_n_frames)
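# --- Usage sketch (not part of the original file) ---
# A minimal, hypothetical example of wiring the dataset and loader together.
# The directory layout and the hyperparameter values below are assumptions.
if __name__ == "__main__":
    example_root = Path("datasets_root/SV2TTS/encoder")  # hypothetical preprocessed root
    example_speaker_dirs = [d for d in example_root.glob("*") if d.is_dir()]
    example_dataset = SpeakerVerificationDataset(example_speaker_dirs)
    example_loader = SpeakerVerificationDataLoader(example_dataset,
                                                   speakers_per_batch=64,
                                                   utterances_per_speaker=10,
                                                   num_workers=4)
    for speaker_batch in example_loader:
        # each item is a SpeakerBatch of 64 speakers x 10 partial utterances
        break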
``` |
{
"source": "jireh-father/tensorflow-triplet-loss",
"score": 2
} |
#### File: jireh-father/tensorflow-triplet-loss/evaluator.py
```python
import tensorflow as tf
F = tf.app.flags.FLAGS
def evaluate():
pass
if __name__ == '__main__':
fl = tf.app.flags
fl.DEFINE_string('config', "config.json", "config file path")
fl.DEFINE_string('param', 'default', '')
fl.DEFINE_boolean('parallel_exec', True, '')
fl.DEFINE_string('save_dir', 'experiments/base_model', '')
fl.DEFINE_string('data_dirs', './data/mnist|./data/', '')
fl.DEFINE_string('data_files', None, '')
fl.DEFINE_string('data_name', 'deepfashion', '')
fl.DEFINE_string('data_mid_name', 'val', '')
evaluate()
```
#### File: tensorflow-triplet-loss/model/model_fn.py
```python
import tensorflow as tf
from model.triplet_loss import batch_all_triplet_loss
from model.triplet_loss import batch_hard_triplet_loss
from model import nets_factory
slim = tf.contrib.slim
def _configure_learning_rate(num_samples_per_epoch, global_step, cf):
"""Configures the learning rate.
Args:
num_samples_per_epoch: The number of samples in each epoch of training.
global_step: The global_step tensor.
Returns:
A `Tensor` representing the learning rate.
Raises:
        ValueError: if cf.learning_rate_decay_type is not recognized.
"""
    # Note: when num_clones > 1, this causes each clone to go over each epoch
    # cf.num_epochs_per_decay times. This is different behavior from sync
    # replicas and is expected to produce different results.
decay_steps = int(num_samples_per_epoch * cf.num_epochs_per_decay /
cf.batch_size)
if cf.learning_rate_decay_type == 'exponential':
return tf.train.exponential_decay(cf.learning_rate,
global_step,
decay_steps,
cf.learning_rate_decay_factor,
staircase=True,
name='exponential_decay_learning_rate')
elif cf.learning_rate_decay_type == 'fixed':
return tf.constant(cf.learning_rate, name='fixed_learning_rate')
elif cf.learning_rate_decay_type == 'polynomial':
return tf.train.polynomial_decay(cf.learning_rate,
global_step,
decay_steps,
cf.end_learning_rate,
power=1.0,
cycle=False,
name='polynomial_decay_learning_rate')
else:
raise ValueError('learning_rate_decay_type [%s] was not recognized' %
cf.learning_rate_decay_type)
def _configure_optimizer(learning_rate, cf):
"""Configures the optimizer used for training.
Args:
learning_rate: A scalar or `Tensor` learning rate.
Returns:
An instance of an optimizer.
Raises:
ValueError: if cf.optimizer is not recognized.
"""
if cf.optimizer == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate,
rho=cf.adadelta_rho,
epsilon=cf.opt_epsilon)
elif cf.optimizer == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate,
initial_accumulator_value=cf.adagrad_initial_accumulator_value)
elif cf.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate,
beta1=cf.adam_beta1,
beta2=cf.adam_beta2,
epsilon=cf.opt_epsilon)
elif cf.optimizer == 'ftrl':
optimizer = tf.train.FtrlOptimizer(
learning_rate,
learning_rate_power=cf.ftrl_learning_rate_power,
initial_accumulator_value=cf.ftrl_initial_accumulator_value,
l1_regularization_strength=cf.ftrl_l1,
l2_regularization_strength=cf.ftrl_l2)
elif cf.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=cf.momentum,
name='Momentum')
elif cf.optimizer == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=cf.rmsprop_decay,
momentum=cf.rmsprop_momentum,
epsilon=cf.opt_epsilon)
elif cf.optimizer == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
else:
raise ValueError('Optimizer [%s] was not recognized' % cf.optimizer)
return optimizer
def build_model_default(is_training, images, params):
"""Compute outputs of the model (embeddings for triplet loss).
Args:
is_training: (bool) whether we are training or not
        images: (tf.Tensor) batch of input images; this can be a
            `tf.placeholder` or the output of `tf.data`
params: (Params) hyperparameters
Returns:
output: (tf.Tensor) output of the model
"""
out = images
# Define the number of channels of each convolution
# For each block, we do: 3x3 conv -> batch norm -> relu -> 2x2 maxpool
num_channels = params.num_channels
bn_momentum = params.bn_momentum
channels = [num_channels, num_channels * 2]
for i, c in enumerate(channels):
with tf.variable_scope('block_{}'.format(i + 1)):
out = tf.layers.conv2d(out, c, 3, padding='same')
if params.use_batch_norm:
out = tf.layers.batch_normalization(out, momentum=bn_momentum, training=is_training)
out = tf.nn.relu(out)
out = tf.layers.max_pooling2d(out, 2, 2)
assert out.shape[1:] == [7, 7, num_channels * 2]
out = tf.reshape(out, [-1, 7 * 7 * num_channels * 2])
with tf.variable_scope('fc_1'):
out = tf.layers.dense(out, params.embedding_size)
return out
def build_slim_model(is_training, images, params):
"""Compute outputs of the model (embeddings for triplet loss).
Args:
is_training: (bool) whether we are training or not
        images: (tf.Tensor) batch of input images; this can be a
            `tf.placeholder` or the output of `tf.data`
params: (Params) hyperparameters
Returns:
output: (tf.Tensor) output of the model
"""
wd = 0.
if hasattr(params, "weight_decay"):
wd = params.weight_decay
model_f = nets_factory.get_network_fn(params.model_name, int(params.embedding_size), wd,
is_training=is_training)
out, end_points = model_f(images)
return out, end_points
def model_fn(features, labels, mode, params):
"""Model function for tf.estimator
Args:
features: input batch of images
labels: labels of the images
mode: can be one of tf.estimator.ModeKeys.{TRAIN, EVAL, PREDICT}
params: contains hyperparameters of the model (ex: `params.learning_rate`)
Returns:
model_spec: tf.estimator.EstimatorSpec object
"""
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
images = features
if len(images.get_shape()) == 2:
images = tf.reshape(images, [-1, params.image_size, params.image_size, 1])
assert images.shape[1:] == [params.image_size, params.image_size, 1], "{}".format(images.shape)
# -----------------------------------------------------------
# MODEL: define the layers of the model
with tf.variable_scope('model'):
# Compute the embeddings with the model
if params.model_name == "base_model":
embeddings = build_model_default(is_training, images, params)
else:
embeddings, _ = build_slim_model(is_training, images, params)
embedding_mean_norm = tf.reduce_mean(tf.norm(embeddings, axis=1))
tf.summary.scalar("embedding_mean_norm", embedding_mean_norm)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {'embeddings': embeddings}
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
labels = tf.cast(labels, tf.int64)
# Define triplet loss
if params.triplet_strategy == "batch_all":
loss, fraction = batch_all_triplet_loss(labels, embeddings, margin=params.margin,
squared=params.squared)
elif params.triplet_strategy == "batch_hard":
loss = batch_hard_triplet_loss(labels, embeddings, margin=params.margin,
squared=params.squared)
else:
raise ValueError("Triplet strategy not recognized: {}".format(params.triplet_strategy))
# -----------------------------------------------------------
# METRICS AND SUMMARIES
# Metrics for evaluation using tf.metrics (average over whole dataset)
# TODO: some other metrics like rank-1 accuracy?
with tf.variable_scope("metrics"):
eval_metric_ops = {"embedding_mean_norm": tf.metrics.mean(embedding_mean_norm)}
if params.triplet_strategy == "batch_all":
eval_metric_ops['fraction_positive_triplets'] = tf.metrics.mean(fraction)
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=eval_metric_ops)
# Summaries for training
tf.summary.scalar('loss', loss)
if params.triplet_strategy == "batch_all":
tf.summary.scalar('fraction_positive_triplets', fraction)
tf.summary.image('train_image', images, max_outputs=10)
# Define training step that minimizes the loss with the Adam optimizer
optimizer = tf.train.AdamOptimizer(params.learning_rate)
global_step = tf.train.get_global_step()
if params.use_batch_norm:
# Add a dependency to update the moving mean and variance for batch normalization
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_op = optimizer.minimize(loss, global_step=global_step)
else:
train_op = optimizer.minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
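# --- Usage sketch (not part of the original file) ---
# A hedged example of how `model_fn` is typically wired into tf.estimator;
# the model directory and the commented-out input function are illustrative assumptions.
def _example_estimator(params, model_dir="experiments/base_model"):
    estimator = tf.estimator.Estimator(model_fn, model_dir=model_dir, params=params)
    # estimator.train(lambda: some_input_fn(data_dir, params))  # hypothetical input_fn
    return estimator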
def _get_variables_to_train(cf):
"""Returns a list of variables to train.
Returns:
A list of variables to train by the optimizer.
"""
if cf.trainable_scopes is None:
return tf.trainable_variables()
else:
scopes = [scope.strip() for scope in cf.trainable_scopes.split(',')]
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
return variables_to_train
def train_op_fun(total_loss, global_step, num_examples, cf):
"""Train model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if cf.moving_average_decay:
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(
cf.moving_average_decay, global_step)
update_ops.append(variable_averages.apply(moving_average_variables))
lr = _configure_learning_rate(num_examples, global_step, cf)
tf.summary.scalar('learning_rate', lr)
opt = _configure_optimizer(lr, cf)
variables_to_train = _get_variables_to_train(cf)
grads = opt.compute_gradients(total_loss, variables_to_train)
grad_updates = opt.apply_gradients(grads, global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_op = tf.identity(total_loss, name='train_op')
return train_op
def build_model(features, labels, cf, attrs=None, is_training=True, use_attr_net=False, num_hidden_attr_net=1,
num_examples=None, global_step=None, use_old_model=False):
images = features
# -----------------------------------------------------------
# MODEL: define the layers of the model
# Compute the embeddings with the model
if use_old_model:
with tf.variable_scope('model'):
embeddings, end_points = build_slim_model(is_training, images, cf)
if attrs is not None and use_attr_net:
hidden_step = int((cf.attr_dim - cf.embedding_size) / (num_hidden_attr_net + 1))
for i in range(num_hidden_attr_net):
print(cf.attr_dim - (hidden_step * (i + 1)))
attr_net = tf.layers.dense(attrs, cf.attr_dim - (hidden_step * (i + 1)), tf.nn.relu,
trainable=is_training)
attr_net = tf.layers.dropout(attr_net, training=is_training)
attrs = tf.layers.dense(attr_net, cf.embedding_size, tf.nn.relu, trainable=is_training)
else:
embeddings, end_points = build_slim_model(is_training, images, cf)
if attrs is not None and use_attr_net:
hidden_step = int((cf.attr_dim - cf.embedding_size) / (num_hidden_attr_net + 1))
for i in range(num_hidden_attr_net):
print(cf.attr_dim - (hidden_step * (i + 1)))
attr_net = tf.layers.dense(attrs, cf.attr_dim - (hidden_step * (i + 1)), tf.nn.relu,
trainable=is_training)
attr_net = tf.layers.dropout(attr_net, training=is_training)
attrs = tf.layers.dense(attr_net, cf.embedding_size, tf.nn.relu, trainable=is_training)
if not is_training:
if attrs is not None:
return embeddings, attrs
return embeddings
if cf.l2norm:
embeddings = tf.nn.l2_normalize(embeddings, axis=1)
if attrs is not None:
attrs = tf.nn.l2_normalize(attrs, axis=1)
embedding_mean_norm = tf.reduce_mean(tf.norm(embeddings, axis=1))
tf.summary.scalar("embedding_mean_norm", embedding_mean_norm)
labels = tf.cast(labels, tf.int64)
# Define triplet loss
if cf.triplet_strategy == "batch_all":
loss, fraction = batch_all_triplet_loss(labels, embeddings, margin=cf.margin, attrs=attrs,
attr_weight=cf.attr_loss_weight, squared=cf.squared)
elif cf.triplet_strategy == "batch_hard":
loss = batch_hard_triplet_loss(labels, embeddings, margin=cf.margin, attrs=attrs,
attr_weight=cf.attr_loss_weight,
squared=cf.squared)
elif cf.triplet_strategy == "semihard":
loss = tf.contrib.losses.metric_learning.triplet_semihard_loss(labels, embeddings, margin=cf.margin)
elif cf.triplet_strategy == "cluster":
loss = tf.contrib.losses.metric_learning.cluster_loss(
labels,
embeddings,
1.0
)
elif cf.triplet_strategy == "contrastive":
pass
elif cf.triplet_strategy == "lifted_struct":
loss = tf.contrib.losses.metric_learning.lifted_struct_loss(
labels,
embeddings,
margin=cf.margin
)
elif cf.triplet_strategy == "npairs":
pass
elif cf.triplet_strategy == "npairs_multilabel":
pass
else:
raise ValueError("Triplet strategy not recognized: {}".format(cf.triplet_strategy))
vars = tf.trainable_variables()
loss += tf.add_n([tf.nn.l2_loss(v) for v in vars if 'bias' not in v.name]) * cf.weight_decay
# -----------------------------------------------------------
# METRICS AND SUMMARIES
# Metrics for evaluation using tf.metrics (average over whole dataset)
# TODO: some other metrics like rank-1 accuracy?
with tf.variable_scope("metrics"):
eval_metric_ops = {"embedding_mean_norm": tf.metrics.mean(embedding_mean_norm)}
if cf.triplet_strategy == "batch_all":
eval_metric_ops['fraction_positive_triplets'] = tf.metrics.mean(fraction)
# Summaries for training
tf.summary.scalar('loss', loss)
if cf.triplet_strategy == "batch_all":
tf.summary.scalar('fraction_positive_triplets', fraction)
tf.summary.image('train_image', images, max_outputs=1)
train_op = train_op_fun(loss, global_step, num_examples, cf)
return loss, end_points, train_op
```
#### File: tensorflow-triplet-loss/model/tfrecord_input_fn.py
```python
from model import tfrecords_dataset as td
import tensorflow as tf
def train_input_fn(data_dir, params):
"""Train input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset = td.train(data_dir)
# if hasattr(params, "shuffle_rand_seed"):
# shuffle_rand_seed = params.shuffle_rand_seed
# else:
# shuffle_rand_seed = 1
# import tensorflow as tf
# shuffle_rand_seed_ph = tf.placeholder(tf.int64, ())
dataset = dataset.shuffle(1000) # whole dataset into the buffer
    dataset = dataset.repeat(params.num_epochs)  # repeat for multiple epochs
dataset = dataset.batch(params.batch_size)
dataset = dataset.prefetch(params.batch_size) # make sure you always have one batch ready to serve
return dataset # , shuffle_rand_seed_ph
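# Usage sketch (not part of the original file): these input functions are intended
# to be passed lazily to tf.estimator, e.g. (the path and `params` are assumptions):
#   estimator.train(lambda: train_input_fn("data/tfrecords", params))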
def train_input_fn_once(data_dir, params):
"""Train input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset = td.train(data_dir)
dataset = dataset.batch(params.batch_size)
return dataset
def test_input_fn(data_dir, params):
"""Test input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset = td.test(data_dir)
dataset = dataset.batch(params.batch_size)
dataset = dataset.prefetch(params.batch_size) # make sure you always have one batch ready to serve
return dataset
def query_input_fn(data_dir, params):
"""Test input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset = td.query(data_dir)
dataset = dataset.batch(params.batch_size)
# dataset = dataset.prefetch(params.batch_size) # make sure you always have one batch ready to serve
return dataset
def index_input_fn(data_dir, params):
"""Test input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset = td.index(data_dir)
dataset = dataset.batch(params.batch_size)
# dataset = dataset.prefetch(params.batch_size) # make sure you always have one batch ready to serve
return dataset
def train_label_fn(data_dir, params):
"""Test input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset = td.train_label(data_dir)
dataset = dataset.batch(params.train_size)
# dataset = dataset.prefetch(params.batch_size) # make sure you always have one batch ready to serve
return dataset
def test_label_fn(data_dir, params):
"""Test input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset = td.test_label(data_dir)
dataset = dataset.batch(params.eval_size)
# dataset = dataset.prefetch(params.batch_size) # make sure you always have one batch ready to serve
return dataset
def count_records(tfrecord_filenames):
c = 0
for fn in tfrecord_filenames:
for _ in tf.python_io.tf_record_iterator(fn):
c += 1
return c
def query_label_fn(data_dir, params):
"""Test input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset, files = td.query_label(data_dir)
cnt = count_records(files)
dataset = dataset.batch(cnt)
# dataset = dataset.prefetch(params.batch_size) # make sure you always have one batch ready to serve
return dataset, cnt
def index_label_fn(data_dir, params):
"""Test input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset, files = td.index_label(data_dir)
cnt = count_records(files)
dataset = dataset.batch(cnt)
# dataset = dataset.prefetch(params.batch_size) # make sure you always have one batch ready to serve
return dataset, cnt
```
#### File: tensorflow-triplet-loss/model/tfrecords_dataset.py
```python
import os, glob
import tensorflow as tf
def train_pre_process(example_proto):
features = {"image/encoded": tf.FixedLenFeature((), tf.string, default_value=""),
"image/class/label": tf.FixedLenFeature((), tf.int64, default_value=0),
'image/height': tf.FixedLenFeature((), tf.int64, default_value=0),
'image/width': tf.FixedLenFeature((), tf.int64, default_value=0)
}
parsed_features = tf.parse_single_example(example_proto, features)
image = tf.image.decode_jpeg(parsed_features["image/encoded"], 3)
image = tf.cast(image, tf.float32)
image = tf.expand_dims(image, 0)
image = tf.image.resize_image_with_pad(image, 224, 224)
# image = tf.image.resize_bilinear(image, [224, 224], align_corners=False)
image = tf.squeeze(image, [0])
    # normalize pixel values from [0, 255] to [-1, 1]
    image = tf.divide(image, 255.0)
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
label = parsed_features["image/class/label"]
return image, label
def test_pre_process(example_proto):
features = {"image/encoded": tf.FixedLenFeature((), tf.string, default_value=""),
"image/class/label": tf.FixedLenFeature((), tf.int64, default_value=0),
'image/height': tf.FixedLenFeature((), tf.int64, default_value=0),
'image/width': tf.FixedLenFeature((), tf.int64, default_value=0)
}
parsed_features = tf.parse_single_example(example_proto, features)
image = tf.image.decode_jpeg(parsed_features["image/encoded"], 3)
image = tf.cast(image, tf.float32)
image = tf.expand_dims(image, 0)
image = tf.image.resize_image_with_pad(image, 224, 224)
# image = tf.image.resize_bilinear(image, [224, 224], align_corners=False)
image = tf.squeeze(image, [0])
image = tf.divide(image, 255.0)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
label = parsed_features["image/class/label"]
return image, label
def only_label(example_proto):
features = {
"image/class/label": tf.FixedLenFeature((), tf.int64, default_value=0),
}
parsed_features = tf.parse_single_example(example_proto, features)
label = parsed_features["image/class/label"]
return label
def dataset(tfrecord_files, preprocess_fn):
dataset = tf.data.TFRecordDataset(tfrecord_files)
return dataset.map(preprocess_fn)
def train(directory):
files = glob.glob(os.path.join(directory, "*_train_*tfrecord"))
files.sort()
assert len(files) > 0
return dataset(files, train_pre_process)
def test(directory):
files = glob.glob(os.path.join(directory, "*_test_*tfrecord"))
files.sort()
assert len(files) > 0
return dataset(files, test_pre_process)
def query(directory):
files = glob.glob(os.path.join(directory, "*_query_*tfrecord"))
files.sort()
assert len(files) > 0
return dataset(files, test_pre_process)
def index(directory):
files = glob.glob(os.path.join(directory, "*_index_*tfrecord"))
files.sort()
assert len(files) > 0
return dataset(files, test_pre_process)
def query_label(directory):
files = glob.glob(os.path.join(directory, "*_query_*tfrecord"))
files.sort()
assert len(files) > 0
return dataset(files, only_label), files
def index_label(directory):
files = glob.glob(os.path.join(directory, "*_index_*tfrecord"))
files.sort()
assert len(files) > 0
return dataset(files, only_label), files
def train_label(directory):
files = glob.glob(os.path.join(directory, "*_train_*tfrecord"))
files.sort()
assert len(files) > 0
return dataset(files, only_label)
def test_label(directory):
files = glob.glob(os.path.join(directory, "*_test_*tfrecord"))
files.sort()
assert len(files) > 0
return dataset(files, only_label)
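# --- Usage sketch (not part of the original file) ---
# Pulling one batch from the training split with a TF1-style one-shot iterator;
# the directory path and batch size are illustrative assumptions.
def _example_iterate_train(directory="data/tfrecords", batch_size=32):
    ds = train(directory).batch(batch_size)
    images, labels = ds.make_one_shot_iterator().get_next()
    with tf.Session() as sess:
        return sess.run([images, labels])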
```
#### File: jireh-father/tensorflow-triplet-loss/test.py
```python
import util
import glob
files = glob.glob("E:/data/adience_kaggle/test/*.tfrecord")
files += glob.glob("E:\data/adience_kaggle/faces/*.tfrecord")
print(files)
ma = util.create_label_map(files)
print(ma)
def ct(s, a):
    """Count rows in `a` whose second column starts with prefix `s`."""
    t = 0
    for r in a:
        if r[1].startswith(s):
            t += 1
    return t
def gr(a, pc):
    """Group the second-column values of `a` by their first `pc` characters."""
    d = {}
    for r in a:
        if r[1][:pc] not in d:
            d[r[1][:pc]] = []
        d[r[1][:pc]].append(r[1])
    return d
def gr2(a, pc):
    """Group whole rows of `a` by the first `pc` characters of the second column."""
    d = {}
    for r in a:
        if r[1][:pc] not in d:
            d[r[1][:pc]] = []
        d[r[1][:pc]].append([r])
    return d
def grc(a, pc):
    """Count rows of `a` per prefix (first `pc` characters of the second column)."""
    d = {}
    for r in a:
        if r[1][:pc] not in d:
            d[r[1][:pc]] = 0
        d[r[1][:pc]] += 1
    return d
def grc2(a, pc):
    """Like `grc`, but each row is a raw CSV line that is split first."""
    d = {}
    for r in a:
        r = r.rstrip('\n').split(",")
        if r[1][:pc] not in d:
            d[r[1][:pc]] = 0
        d[r[1][:pc]] += 1
    return d
```
#### File: jireh-father/tensorflow-triplet-loss/tfrecord_image_view.py
```python
import argparse
import os
import pathlib
import shutil
import sys
import numpy as np
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
from model import mnist_dataset
from model import tfrecords_dataset
from model.utils import Params
from model.model_fn import model_fn
from model import input_fn
from model import tfrecord_input_fn
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', default='experiments/alexnet',
help="Experiment directory containing params.json")
args = parser.parse_args()
json_path = os.path.join(args.model_dir, 'params.json')
assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
params = Params(json_path)
import glob
def train_pre_process(example_proto):
features = {"image/encoded": tf.FixedLenFeature((), tf.string, default_value=""),
"image/class/label": tf.FixedLenFeature((), tf.int64, default_value=0),
'image/height': tf.FixedLenFeature((), tf.int64, default_value=0),
'image/width': tf.FixedLenFeature((), tf.int64, default_value=0)
}
parsed_features = tf.parse_single_example(example_proto, features)
image = tf.image.decode_jpeg(parsed_features["image/encoded"], 3)
image = tf.cast(image, tf.float32)
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [224, 224], align_corners=False)
image = tf.squeeze(image, [0])
image = tf.divide(image, 255.0)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
label = parsed_features["image/class/label"]
# return parsed_features["image/encoded"], label
return image, label
def aa(image, label):
image = tf.image.decode_jpeg(image, 3)
image = tf.cast(image, tf.float32)
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [224, 224], align_corners=False)
image = tf.squeeze(image, [0])
image = tf.divide(image, 255.0)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image, label
# sampling_p = tf.random_uniform([4], minval=0, maxval=9, dtype=tf.int64)
# sampling_p = tf.placeholder(tf.int64, [4], name="sampling")
data_dir = "c:\source/tensorflow-image-classification-framework/mnist"
files = glob.glob(os.path.join(data_dir, "*_train_*tfrecord"))
aaa = 4
dataset = tf.data.TFRecordDataset(files)
dataset = dataset.map(train_pre_process)
# dataset = dataset.filter(
# lambda im, lb: tf.reduce_any(tf.equal(sampling_p, lb))
# )
# dataset = dataset.map(aa)
dataset = dataset.shuffle(100) # whole dataset into the buffer
dataset = dataset.repeat(3)
dataset = dataset.batch(512)
dataset = dataset.prefetch(32)
# index_iterator = dataset.make_initializable_iterator()
index_iterator = dataset.make_one_shot_iterator()
img, index_labels = index_iterator.get_next()
# index_labels = index_iterator.get_next()
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
print(11)
sess = tf.Session(config=tf_config)
print(22)
import time
start = time.time()
# sess.run(index_iterator.initializer, feed_dict={sampling_p: np.array([1, 2, 3, 4], np.int64)})
print(time.time() - start)
ii, ll = sess.run([img, index_labels])
print(ii, ll)
sys.exit()
# from PIL import Image
#
# im = Image.fromarray(ii[0].astype('uint8'))
# im.show()
print(ll)
# ll = sess.run(index_labels)
# print(ll)
# ll = sess.run(index_labels)
# print(ll)
sys.exit()
index_label_ds, shuffle_rand_seed_ph = tfrecord_input_fn.train_input_fn(data_dir, params)
# index_iterator = index_label_ds.make_one_shot_iterator()
index_iterator = index_label_ds.make_initializable_iterator()
img, index_labels = index_iterator.get_next()
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
sess = tf.Session()
from PIL import Image
for i in range(2):
sess.run(index_iterator.initializer, feed_dict={shuffle_rand_seed_ph: i})
j = 0
while True:
try:
images = sess.run(img)
im = Image.fromarray(images[0].astype('uint8'))
# im.show()
# if j == 0:
im.save("%d.jpg" % i)
break
# j += 1
# if i == 1:
# break
except tf.errors.OutOfRangeError:
break
sys.exit()
sess.run(index_iterator.initializer)
images = sess.run(img)
sess.close()
print(images[0].shape)
print(images[0].max())
print(images[0].min())
im = Image.fromarray(images[0].astype('uint8'))
im.show()
```
#### File: jireh-father/tensorflow-triplet-loss/trainer.py
```python
import os
import tensorflow as tf
from tensorflow.python.client import device_lib
from model import model_fn
import glob
import util
from datetime import datetime
import time
import numpy as np
from numba import cuda
from preprocessing import preprocessing_factory
import socket
import traceback
slim = tf.contrib.slim
notify_params = [
"gpu_no",
"data_dir",
"save_dir",
"model_name",
"preprocessing_name",
"batch_size",
"learning_rate",
"max_number_of_epochs",
"max_number_of_steps",
"save_interval_epochs",
"save_interval_steps",
"checkpoint_path",
"checkpoint_exclude_scopes",
"sampling_buffer_size",
"shuffle_buffer_size",
"train_image_size",
"shutdown_after_train",
"keep_checkpoint_max",
"embedding_size",
"triplet_strategy",
"margin",
"l2norm",
"use_attr",
"use_attr_net",
"num_hidden_attr_net",
"attr_dim",
]
server_map = {"ip-172-31-12-89": "p3.2xlarge", "ip-172-31-29-214": "p3.8xlarge"}
def main(cf, hyper_param_txt, hostname):
tf.logging.set_verbosity(tf.logging.INFO)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = F.gpu_no
print("CUDA Visible device", device_lib.list_local_devices())
start_time = datetime.now().strftime('%Y%m%d%H%M%S')
start_time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if not os.path.isdir(cf.save_dir):
os.makedirs(cf.save_dir)
f = open(os.path.join(cf.save_dir, "train_parameters_%s.txt" % start_time), mode="w+")
f.write(hyper_param_txt)
# inputs_ph = tf.placeholder(tf.float32, [None, cf.train_image_size, cf.train_image_size, cf.train_image_channel],
# name="inputs")
# labels_ph = tf.placeholder(tf.int32, [None], name="labels")
tf.set_random_seed(123)
files = glob.glob(os.path.join(cf.data_dir, "*_train*tfrecord"))
files.sort()
assert len(files) > 0
num_examples = util.count_records(files)
global_step = tf.Variable(0, trainable=False)
image_preprocessing_fn = None
if cf.preprocessing_name:
image_preprocessing_fn = preprocessing_factory.get_preprocessing(cf.preprocessing_name, is_training=True)
def train_pre_process(example_proto):
features = {"image/encoded": tf.FixedLenFeature((), tf.string, default_value=""),
"image/class/label": tf.FixedLenFeature((), tf.int64, default_value=0),
'image/height': tf.FixedLenFeature((), tf.int64, default_value=0),
'image/width': tf.FixedLenFeature((), tf.int64, default_value=0)
}
if cf.use_attr:
features["image/attr"] = tf.VarLenFeature(dtype=tf.int64)
parsed_features = tf.parse_single_example(example_proto, features)
image = tf.image.decode_jpeg(parsed_features["image/encoded"], cf.train_image_channel)
if image_preprocessing_fn is not None:
image = image_preprocessing_fn(image, cf.train_image_size, cf.train_image_size)
else:
image = tf.cast(image, tf.float32)
image = tf.expand_dims(image, 0)
image = tf.image.resize_image_with_pad(image, cf.train_image_size, cf.train_image_size)
# image = tf.image.resize_bilinear(image, [224, 224], align_corners=False)
image = tf.squeeze(image, [0])
image = tf.divide(image, 255.0)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
label = parsed_features["image/class/label"]
if cf.use_attr:
return image, label, parsed_features["image/attr"]
else:
return image, label
steps_each_epoch = int(num_examples / cf.batch_size)
if num_examples % cf.batch_size > 0:
steps_each_epoch += 1
dataset = tf.data.TFRecordDataset(files)
dataset = dataset.map(train_pre_process, num_parallel_calls=cf.num_preprocessing_threads)
dataset = dataset.shuffle(cf.shuffle_buffer_size)
dataset = dataset.repeat()
dataset = dataset.batch(cf.sampling_buffer_size)
dataset = dataset.prefetch(cf.sampling_buffer_size)
iterator = dataset.make_one_shot_iterator()
# iterator = dataset.make_initializable_iterator()
if cf.use_attr:
images, labels, attrs = iterator.get_next()
else:
images, labels = iterator.get_next()
images_ph = tf.placeholder(tf.float32,
[cf.batch_size, cf.train_image_size, cf.train_image_size, cf.train_image_channel],
name="inputs")
labels_ph = tf.placeholder(tf.int32, [cf.batch_size], name="labels")
if cf.use_attr:
attrs_ph = tf.placeholder(tf.float32, [cf.batch_size, cf.attr_dim], name="attrs")
if not cf.use_attr_net:
cf.embedding_size = cf.attr_dim
else:
attrs_ph = None
# seed_ph = tf.placeholder(tf.int64, (), name="shuffle_seed")
loss_op, end_points, train_op = model_fn.build_model(images_ph, labels_ph, cf, attrs_ph, True, cf.use_attr_net,
cf.num_hidden_attr_net, num_examples, global_step,
use_old_model=cf.use_old_model)
vars = tf.trainable_variables()
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
# Add summaries for end_points.
for end_point in end_points:
x = end_points[end_point]
summaries.add(tf.summary.histogram('activations/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity/' + end_point,
tf.nn.zero_fraction(x)))
# Add summaries for losses.
for loss in tf.get_collection(tf.GraphKeys.LOSSES):
summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
# Add summaries for variables.
for variable in slim.get_model_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
summary_op = tf.summary.merge(list(summaries), name='summary_op')
if cf.quantize_delay >= 0:
tf.contrib.quantize.create_training_graph(quant_delay=cf.quantize_delay)
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
sess = tf.Session(config=tf_config)
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter(cf.save_dir, sess.graph)
epoch = 1
steps = 1
latest_epoch = 0
if cf.checkpoint_path is not None and (os.path.isfile(cf.checkpoint_path) or (
os.path.isdir(cf.checkpoint_path) and tf.train.latest_checkpoint(cf.checkpoint_path) is not None)):
latest_checkpoint = tf.train.latest_checkpoint(cf.checkpoint_path)
exclusions = []
if cf.checkpoint_exclude_scopes:
exclusions = [scope.strip()
for scope in cf.checkpoint_exclude_scopes.split(',')]
variables_to_restore = []
for var in slim.get_model_variables():
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
break
else:
variables_to_restore.append(var)
saver_for_restore = tf.train.Saver(var_list=variables_to_restore, max_to_keep=cf.keep_checkpoint_max)
if os.path.isdir(cf.checkpoint_path) and tf.train.latest_checkpoint(cf.checkpoint_path) is not None:
cp = tf.train.latest_checkpoint(cf.checkpoint_path)
else:
cp = cf.checkpoint_path
saver_for_restore.restore(sess, cp)
if os.path.isdir(cf.checkpoint_path) and tf.train.latest_checkpoint(cf.checkpoint_path) is not None:
latest_epoch = int(os.path.basename(latest_checkpoint).split("-")[1])
epoch = latest_epoch + 1
cf.max_number_of_epochs += latest_epoch
f.write("%s:%s\n" % ("restore_checkpoint", latest_checkpoint))
saver = tf.train.Saver(tf.global_variables(), max_to_keep=cf.keep_checkpoint_max)
f.close()
num_trained_images = 0
last_saved_epoch = None
last_saved_step = None
start_avg_loss_steps = 10
start_total_loss = 0.
while True:
# sess.run(iterator.initializer, feed_dict={seed_ph: steps})
try:
start = time.time()
if cf.use_attr:
tmp_images, tmp_labels, tmp_attrs = sess.run([images, labels, attrs])
tmp_attrs = np.reshape(tmp_attrs.values, [cf.sampling_buffer_size, cf.attr_dim])
tmp_attrs = tmp_attrs.astype(np.float64)
else:
tmp_images, tmp_labels = sess.run([images, labels])
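            # From the buffer of `sampling_buffer_size` examples, prefer indices whose
            # label occurs at least twice so every training batch contains positive
            # pairs for the triplet loss; singleton labels are only used as padding
            # when there are not enough paired indices to fill the batch.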
pair_indices = set()
single_index_map = {}
label_buffer = {}
for i, tmp_label in enumerate(tmp_labels):
if tmp_label in label_buffer:
pair_indices.add(i)
pair_indices.add(label_buffer[tmp_label])
if tmp_label in single_index_map:
del single_index_map[tmp_label]
else:
label_buffer[tmp_label] = i
single_index_map[tmp_label] = i
pair_indices = list(pair_indices)
# print(len(pair_indices))
# continue
if len(pair_indices) > cf.batch_size:
pair_indices = pair_indices[:cf.batch_size]
elif len(pair_indices) < cf.batch_size:
pair_indices += list(single_index_map.values())[:cf.batch_size - len(pair_indices)]
# print(pair_indices)
batch_images = tmp_images[pair_indices]
batch_labels = tmp_labels[pair_indices]
if cf.use_attr:
batch_attrs = tmp_attrs[pair_indices]
sampling_time = time.time() - start
tmp_images = None
tmp_labels = None
start = time.time()
feed_dict = {images_ph: batch_images, labels_ph: batch_labels}
if cf.use_attr:
feed_dict[attrs_ph] = batch_attrs
if steps % cf.save_summaries_steps == 0:
loss, _, summary = sess.run([loss_op, train_op, summary_op], feed_dict=feed_dict)
summary_writer.add_summary(summary, steps)
else:
loss, _ = sess.run([loss_op, train_op], feed_dict=feed_dict)
if steps <= start_avg_loss_steps:
start_total_loss += loss
train_time = time.time() - start
if steps % cf.log_every_n_steps == 0:
now = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
print("[%s: %d epoch(%d/%d), %d steps] sampling time: %f, train time: %f, loss: %f" % (
now, epoch, steps % steps_each_epoch, steps_each_epoch, steps, sampling_time, train_time, loss))
num_trained_images += cf.batch_size
if cf.use_save_steps:
if steps % cf.save_interval_steps == 0:
saver.save(sess, cf.save_dir + "/model.ckpt", steps)
last_saved_step = steps
if cf.max_number_of_steps is not None and steps >= cf.max_number_of_steps:
break
steps += 1
if num_trained_images >= num_examples:
if not cf.use_save_steps and cf.save_interval_epochs >= 1 and (
epoch - latest_epoch) % cf.save_interval_epochs == 0:
saver.save(sess, cf.save_dir + "/model.ckpt", epoch)
last_saved_epoch = epoch
if epoch >= cf.max_number_of_epochs:
break
epoch += 1
num_trained_images = 0
except tf.errors.OutOfRangeError:
break
if cf.use_save_steps:
if last_saved_step is None or last_saved_step < steps:
saver.save(sess, cf.save_dir + "/model.ckpt", steps)
else:
if last_saved_epoch is None or last_saved_epoch < epoch:
saver.save(sess, cf.save_dir + "/model.ckpt", epoch)
summary_writer.add_summary(sess.run(summary_op, feed_dict=feed_dict), steps)
sess.close()
tf.reset_default_graph()
if cf.notify_after_training:
txt = "%s[%s]\n\n" % (hostname, socket.gethostbyname(socket.gethostname()))
txt += "start avg loss : %f" % (start_total_loss / start_avg_loss_steps)
txt += "last loss : %f" % loss
txt += "start time: %s\n" % start_time_str
txt += "end time: %s\n" % datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if cf.eval_after_training:
txt += "going to evaluate"
else:
txt += "not going to evaluate"
txt += "\n[params]\n"
txt += hyper_param_txt
util.send_msg_to_slack("\n\n==================================\nTraining is Done\n" + txt)
if cf.eval_after_training:
cuda.select_device(0)
cuda.close()
eval_cmd = 'python -u multiple_search_models.py --model_dir="%s" --embedding_size=%d --data_dir="%s" --model_name=%s --max_top_k=%d --shutdown_after_train=%d --gpu_no=%s --step_type=%s --image_size=%s --eval_batch_size=%d --preprocessing_name="%s" --notify_after_training=%d --use_old_model=%d --save_static_data=%d --num_preprocessing_threads=%d' % (
cf.save_dir, cf.embedding_size, cf.data_dir, cf.model_name, cf.eval_max_top_k,
1 if cf.shutdown_after_train else 0, cf.gpu_no, "step" if cf.use_save_steps else "epoch",
cf.train_image_size, cf.eval_batch_size, cf.preprocessing_name, 1 if cf.notify_after_training else 0,
1 if cf.use_old_model else 0, 1 if cf.save_static_data else 0, cf.num_preprocessing_threads)
print(eval_cmd)
os.system(eval_cmd)
else:
if cf.shutdown_after_train:
os.system("sudo shutdown now")
if __name__ == '__main__':
fl = tf.app.flags
fl.DEFINE_string('save_dir', 'experiments/test', '')
fl.DEFINE_integer('num_preprocessing_threads', 4, '')
fl.DEFINE_integer('log_every_n_steps', 1, 'The frequency with which logs are print.')
fl.DEFINE_integer('save_summaries_steps', 100, '')
fl.DEFINE_boolean('use_save_steps', False, '')
fl.DEFINE_integer('save_interval_steps', 10000, '')
fl.DEFINE_integer('save_interval_epochs', 2, '')
fl.DEFINE_boolean('shutdown_after_train', False, '')
fl.DEFINE_boolean('eval_after_training', True, '')
fl.DEFINE_integer('eval_max_top_k', 20, '')
fl.DEFINE_integer('eval_batch_size', 128, '')
fl.DEFINE_boolean('notify_after_training', True, '')
fl.DEFINE_boolean('save_static_data', True, '')
fl.DEFINE_string('gpu_no', "0", '')
#######################
# Dataset Flags #
#######################
    fl.DEFINE_string('data_dir',
                     r'D:\data\fashion\image_retrieval\deep_fashion\consumer-to-shop\tfrecord',
                     '')
# fl.DEFINE_string('data_dir',
# "D:\data\\fashion\image_retrieval\cafe24product\\tfrecord_with_attr",
# '')
fl.DEFINE_string('model_name', 'inception_resnet_v2', '')
fl.DEFINE_string('preprocessing_name', "inception", '')
fl.DEFINE_integer('batch_size', 8, '')
fl.DEFINE_integer('sampling_buffer_size', 400, '')
fl.DEFINE_integer('shuffle_buffer_size', 800, '')
fl.DEFINE_integer('train_image_channel', 3, '')
fl.DEFINE_integer('train_image_size', 299, '')
fl.DEFINE_integer('max_number_of_steps', None, '')
fl.DEFINE_integer('max_number_of_epochs', 10, '')
fl.DEFINE_integer('keep_checkpoint_max', 5, '')
#######################
# Triplet #
#######################
fl.DEFINE_integer('embedding_size', 128, '')
fl.DEFINE_string('triplet_strategy', 'cluster', '')
fl.DEFINE_float('margin', 0.5, '')
fl.DEFINE_boolean('squared', False, '')
fl.DEFINE_boolean('l2norm', False, '')
#######################
# Attribute data #
#######################
fl.DEFINE_boolean('use_attr', False, '')
fl.DEFINE_boolean('use_attr_net', False, '')
fl.DEFINE_integer('num_hidden_attr_net', 1, '')
fl.DEFINE_integer('attr_dim', 463, '')
fl.DEFINE_float('attr_loss_weight', 1.0, '')
fl.DEFINE_boolean('use_old_model', False, '')
######################
# Optimization Flags #
######################
fl.DEFINE_float('weight_decay', 0.00004, '')
    fl.DEFINE_string('optimizer', 'rmsprop',
                     'One of "adadelta", "adagrad", "adam", "ftrl", "momentum", "sgd" or "rmsprop".')
fl.DEFINE_float('adadelta_rho', 0.95, 'The decay rate for adadelta.')
fl.DEFINE_float('adagrad_initial_accumulator_value', 0.1, 'Starting value for the AdaGrad accumulators.')
fl.DEFINE_float('adam_beta1', 0.9, 'The exponential decay rate for the 1st moment estimates.')
fl.DEFINE_float('adam_beta2', 0.999, 'The exponential decay rate for the 2nd moment estimates.')
fl.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
fl.DEFINE_float('ftrl_learning_rate_power', -0.5, 'The learning rate power.')
fl.DEFINE_float('ftrl_initial_accumulator_value', 0.1, 'Starting value for the FTRL accumulators.')
fl.DEFINE_float('ftrl_l1', 0.0, 'The FTRL l1 regularization strength.')
fl.DEFINE_float('ftrl_l2', 0.0, 'The FTRL l2 regularization strength.')
fl.DEFINE_float('momentum', 0.9, 'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
fl.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')
fl.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
    fl.DEFINE_integer('quantize_delay', -1, 'Number of steps after which to start quantized training. Set to -1 to disable.')
#######################
# Learning Rate Flags #
#######################
    fl.DEFINE_string('learning_rate_decay_type', 'exponential', 'One of "fixed", "exponential" or "polynomial".')
fl.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
fl.DEFINE_float('end_learning_rate', 0.0001, 'The minimal end learning rate used by a polynomial decay.')
fl.DEFINE_float('learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')
fl.DEFINE_float('num_epochs_per_decay', 2.0, 'Number of epochs after which learning rate decays.')
fl.DEFINE_float('moving_average_decay', None, 'The decay to use for the moving average.')
#####################
# Fine-Tuning Flags #
#####################
# fl.DEFINE_string('checkpoint_path', "D:/pretrained/inception_resnet_v2_2016_08_30.ckpt", '')
fl.DEFINE_string('checkpoint_path', None, '')
fl.DEFINE_string('checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring '
'from a checkpoint.')
# fl.DEFINE_string('checkpoint_exclude_scopes', "InceptionResnetV2/Logits,InceptionResnetV2/AuxLogits",
# 'Comma-separated list of scopes of variables to exclude when restoring '
# 'from a checkpoint.')
fl.DEFINE_string('trainable_scopes', None, 'Comma-separated list of scopes to filter the set of variables to train.'
'By default, None would train all the variables.')
F = fl.FLAGS
param_iterator = iter(F)
hyper_param_txt = ""
for key in notify_params:
hyper_param_txt += "%s:%s\n" % (key, str(getattr(F, key)))
hostname = socket.gethostname()
if hostname in server_map:
hostname = server_map[hostname] + "_" + hostname
try:
start_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
txt = "%s[%s]\n\n" % (hostname, socket.gethostbyname(socket.gethostname()))
txt += "start time: %s\n" % start_time
txt += "\n[params]\n"
txt += hyper_param_txt
util.send_msg_to_slack("\n\n==================================\nStarted to train !!!\n\n" + txt)
main(F, hyper_param_txt, hostname)
if F.eval_after_training:
cuda.select_device(0)
cuda.close()
eval_cmd = 'python -u multiple_search_models.py --model_dir="%s" --embedding_size=%d --data_dir="%s" --model_name=%s --max_top_k=%d --shutdown_after_train=%d --gpu_no=%s --step_type=%s --image_size=%s --eval_batch_size=%d --preprocessing_name=%s --notify_after_training=%d' % (
F.save_dir, F.embedding_size, F.data_dir, F.model_name, F.eval_max_top_k,
1 if F.shutdown_after_train else 0, F.gpu_no, "step" if F.use_save_steps else "epoch",
F.train_image_size, F.eval_batch_size, F.preprocessing_name, 1 if F.notify_after_training else 0)
print(eval_cmd)
os.system(eval_cmd)
except:
txt = "%s[%s]\n\n" % (hostname, socket.gethostbyname(socket.gethostname()))
txt += "start time: %s\n" % start_time
txt += "end time: %s\n" % datetime.now().strftime('%Y-%m-%d %H:%M:%S')
txt += "\n[stack trace]\n"
txt += traceback.format_exc()
txt += "\n[params]\n"
txt += hyper_param_txt
util.send_msg_to_slack("\n\n==================================\nTraining Exception!!!\n\n" + txt)
traceback.print_exc()
if not F.eval_after_training and F.shutdown_after_train:
os.system("sudo shutdown now")
``` |
{
"source": "jireh-father/transformers",
"score": 3
} |
#### File: examples/seq2seq/submit.py
```python
import argparse
import random
import jsonlines
import os
import csv
def main(args):
for arg in vars(args):
print(arg, getattr(args, arg))
os.makedirs(args.output_dir, exist_ok=True)
if args.replace_special_chars:
summaries = [s.replace('\n', '').replace('<unk>', '').replace('</s>', '').strip() for s in
open(args.generated_file).readlines()]
else:
summaries = [s.replace('\n', '').strip() for s in open(args.generated_file).readlines()]
ids = []
with jsonlines.open(args.test_file) as f:
for i, line in enumerate(f.iter()):
ids.append(line['id'])
rows = zip(ids, summaries)
    with open(os.path.join(args.output_dir, "submission.csv"), "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(["id", "summary"])
for row in rows:
writer.writerow(row)
print("done")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--generated_file', type=str,
default='/media/irelin/data_disk/dataset/dacon_summury/abstractive/abstractive_test_v2.jsonl')
parser.add_argument('--test_file', type=str,
default='/media/irelin/data_disk/dataset/dacon_summury/abstractive/abstractive_test_v2.jsonl')
parser.add_argument('--output_dir', type=str,
default='/media/irelin/data_disk/dataset/dacon_summury/abstractive/preprocessed')
parser.add_argument('--replace_special_chars', action='store_true', default=False)
main(parser.parse_args())
``` |
{
"source": "Jireh-Jam/fingerprint_denoising",
"score": 2
} |
#### File: fingerprint_denoising/code/baseline_aug.py
```python
from glob import glob
import os
import argparse
import numpy as np
import pandas as pd
from PIL import Image
from keras import backend as K
from keras import losses
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras.layers import Input, MaxPooling2D
from keras.layers import concatenate, Conv2D, Conv2DTranspose, Dropout, LeakyReLU, PReLU, ReLU
from keras.models import Model
from keras.activations import relu
from keras.optimizers import Adam
from numpy import random
from sklearn.model_selection import KFold
from skimage.transform import resize
from sklearn.model_selection import train_test_split
input_shape = (400, 288)
def custom_activation(x):
return K.relu(x, alpha=0.0, max_value=1)
smooth = 1.
def get_unet(do=0, activation=ReLU):
inputs = Input(input_shape+(3,))
conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(inputs)))
conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv1)))
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(pool1)))
conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv2)))
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(pool2)))
conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv3)))
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(pool3)))
conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv4)))
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(pool4)))
conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(conv5)))
up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(up6)))
conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv6)))
up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(up7)))
conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv7)))
up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(up8)))
conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv8)))
up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(up9)))
conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv9)))
conv10 = Dropout(do)(Conv2D(1, (1, 1), activation='sigmoid')(conv9))
model = Model(inputs=[inputs], outputs=[conv10])
model.compile(optimizer=Adam(lr=1e-3), loss=losses.mse)
model.summary()
return model
#masks_tr_tr = masks_tr_tr[... ,np.newaxis]
batch_size = 8
from aug_utils import random_augmentation
import cv2
def read_input(path):
x = resize(cv2.imread(path)/255., input_shape)
return np.asarray(x)
def read_gt(path):
y = resize(cv2.imread(path, 0)/255., input_shape)
return np.asarray(y)[..., np.newaxis]
def gen(data):
while True:
# choose random index in features
# try:
        index = random.choice(list(range(len(data))), batch_size)
index = list(map(int, index))
list_images_base = [read_input(data[i][0]) for i in index]
list_gt_base = [read_gt(data[i][1]) for i in index]
list_images_aug = []
list_gt_aug = []
for image_, gt in zip(list_images_base, list_gt_base):
image_aug, gt = random_augmentation(image_, gt) #image_, gt
list_images_aug.append(image_aug)
list_gt_aug.append(gt)
yield np.array(list_images_aug), np.array(list_gt_aug)
# except Exception as e:
# print(e)
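# Note (not part of the original file): `gen` is an infinite generator that yields
# batches of (augmented image, ground-truth mask) pairs, which is what
# `fit_generator` below consumes for both the training and validation data.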
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dropout", required=False,
help="dropout", type=float, default=0)
ap.add_argument("-a", "--activation", required=False,
help="activation", default="ReLu")
args = vars(ap.parse_args())
# if "dropout" not in args:
# args['dropout'] = 0
#
# if "activation" not in args:
# args['activation'] = "ReLu"
activation = globals()[args['activation']]
model_name = "baseline_unet_aug_do_%s_activation_%s_"%(args['dropout'], args['activation'])
print("Model : %s"%model_name)
train_data = list(zip(sorted(glob('../input/training_input/*.jpg')), sorted(glob('../input/training_ground-truth/*.jpg'))))
val_data = list(zip(sorted(glob('../input/validation_input/*.jpg')), sorted(glob('../input/validation_ground-truth/*.jpg'))))
print(len(val_data)//batch_size, len(val_data), batch_size)
model = get_unet(do=args['dropout'], activation=activation)
file_path = model_name + "weights.best.hdf5"
try:
model.load_weights(file_path, by_name=True)
except:
pass
checkpoint = ModelCheckpoint(file_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
early = EarlyStopping(monitor="val_loss", mode="min", patience=5, verbose=1)
redonplat = ReduceLROnPlateau(monitor="val_loss", mode="min", patience=3, verbose=1)
callbacks_list = [checkpoint, early, redonplat] # early
history = model.fit_generator(gen(train_data), validation_data=gen(val_data), epochs=1000, verbose=2,
callbacks=callbacks_list, steps_per_epoch= len(train_data)//batch_size,
validation_steps=len(val_data)//batch_size, use_multiprocessing=False, workers=16)
``` |
{
"source": "JirenJin/data-structures-and-algorithms-in-python",
"score": 4
} |
#### File: JirenJin/data-structures-and-algorithms-in-python/binary_search.py
```python
def bisect_right(array, x, low=0, high=None):
if low < 0:
raise ValueError('low should be a non-negative integer.')
if high is None:
high = len(array) - 1
else:
if high > len(array) - 1:
raise ValueError(f'high should be less than len(array): {len(array)}.')
while low <= high:
mid = low + (high - low) // 2
if array[mid] <= x:
low = mid + 1
else:
high = mid - 1
return low
def bisect_left(array, x, low=0, high=None):
if low < 0:
        raise ValueError('low should be a non-negative integer.')
if high is None:
high = len(array) - 1
else:
if high > len(array) - 1:
raise ValueError(f'high should be less than len(array): {len(array)}.')
while low <= high:
mid = low + (high - low) // 2
if array[mid] < x:
low = mid + 1
else:
high = mid - 1
return low
def binary_search_left(array, x):
    """Return the index of the leftmost occurrence of x; raise ValueError if absent."""
    index = bisect_left(array, x)
    if index != len(array) and array[index] == x:
        return index
    raise ValueError
def binary_search_right(array, x):
    """Return the index one past the rightmost occurrence of x; raise ValueError if absent."""
    index = bisect_right(array, x)
    if index != 0 and array[index - 1] == x:
        return index
    raise ValueError
if __name__ == "__main__":
import bisect
import random
for _ in range(1000):
a = [random.randint(0, 10) for _ in range(100)]
a.sort()
x = bisect_right(a, 5)
y = bisect.bisect_right(a, 5)
assert x == y
for _ in range(1000):
a = [random.randint(0, 10) for _ in range(100)]
a.sort()
x = bisect_left(a, 5)
y = bisect.bisect_left(a, 5)
assert x == y
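    # Additional quick checks (not in the original file) for the wrapper functions;
    # the array below is an illustrative example.
    a = [1, 2, 2, 2, 3]
    assert binary_search_left(a, 2) == 1   # index of the first 2
    assert binary_search_right(a, 2) == 4  # index one past the last 2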
```
#### File: JirenJin/data-structures-and-algorithms-in-python/heap.py
```python
class Heap:
"""A customized heap implementation supporting update of keys.
The main motivation to implement a customized heap instead of using the
builtin `heapq` module is due to the need for efficiently updating and
deleting an arbitrary node in the heap.
Attributes:
_array: an array actually storing the keys and nodes.
_node_to_index: a hashmap from node to its index in the heap (`_array`)
last: the index of the last node in the heap, also representing the
current size of the heap.
"""
def __init__(self):
"""Inits the hidden array and the mapping from node to index.
Note that `_array` stores actual elements from index `1`, i.e., the
value at `0` index does not matter.
"""
self._array = [None]
self._node_to_index = {}
def __repr__(self):
"""Returns the array representation of the heap."""
return repr(self._array[1:])
@property
def last(self):
"""Index of the last element/node in the heap."""
return len(self._array) - 1
def is_empty(self):
"""Returns True if the heap is empty, otherwise False."""
# the actual heap elements starts from `1` index
return len(self._array) == 1
def top(self):
"""Returns the (key, node) tuple of the minimum / top of the heap.
If the heap is empty, returns None.
"""
if self.is_empty():
return None
return self._array[1]
def pop(self):
"""Returns and deletes the (key, node) tuple from the top of the heap.
If the heap is empty, does nothing and returns None.
"""
if self.is_empty():
return None
key, node = self._array[1]
del self._node_to_index[node]
self._array[1] = self._array[self.last]
del self._array[self.last]
# no need to sift down if the heap is already empty
if not self.is_empty():
self._sift_down(1)
return key, node
def update_key(self, node, key):
"""Updates the key for the given node in the heap.
After updating the key, this function ensures that the heap property is
maintained.
"""
index = self._node_to_index[node]
curr_key, _ = self._array[index]
self._array[index] = (key, node)
if key < curr_key:
self._sift_up(index)
elif key > curr_key:
self._sift_down(index)
def insert(self, node, key):
"""Inserts a new node with the given key to the heap."""
self._array.append((key, node))
self._node_to_index[node] = self.last
self._sift_up(self.last)
def _get_parent(self, index):
"""Returns the parent (key, node) tuple for the given index.
Returns None if the index is 1, i.e., the index of the top of the heap.
"""
if index == 1:
return None
return self._array[index // 2]
def _get_left(self, index):
"""Returns the left child of the given index.
Returns None if the left child does not exist.
"""
if 2 * index > self.last:
return None
return self._array[2 * index]
def _get_right(self, index):
"""Returns the right child of the given index.
Returns None if the right child does not exist.
"""
if 2 * index + 1 > self.last:
return None
return self._array[2 * index + 1]
def _get_smaller_child_index(self, index, curr_key):
"""Returns the index of the child that has a smaller key than curr_key.
The key of this child should also be smaller than or equal to the other
child of the node with `index`.
If there is no such child, returns None.
"""
left = self._get_left(index)
right = self._get_right(index)
if left is None:
return None
if right is None or left[0] <= right[0]:
return 2 * index if left[0] < curr_key else None
else:
return 2 * index + 1 if right[0] < curr_key else None
def _sift_up(self, index):
"""Sift up a node until the parent's key is not larger than its key.
Sifting up means swapping the parent and the current node when necessary.
Note that the `_node_to_index` hashmap should also be updated during
the sifting procedure.
"""
if index < 1:
raise IndexError("index should be larger than 0.")
curr_key, curr_node = self._array[index]
while index > 1 and curr_key < self._get_parent(index)[0]:
self._array[index] = self._get_parent(index)
self._node_to_index[self._get_parent(index)[1]] = index
index = index // 2
self._array[index] = curr_key, curr_node
self._node_to_index[curr_node] = index
def _sift_down(self, index):
"""Sift down a node until the children's keys are not larger than its.
Siftting down means swapping the current node and one of its children
with the smaller key when necessary. Note that the `_node_to_index`
hashmap should also be updated during the sifting procedure.
"""
if index > self.last:
raise IndexError("index is out of boundary")
curr_key, curr_node = self._array[index]
smaller_child_index = self._get_smaller_child_index(index, curr_key)
while smaller_child_index is not None:
smaller_child = self._array[smaller_child_index]
self._array[index] = smaller_child
self._node_to_index[smaller_child[1]] = index
index = smaller_child_index
smaller_child_index = self._get_smaller_child_index(index, curr_key)
self._array[index] = curr_key, curr_node
self._node_to_index[curr_node] = index
```
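A short hedged usage sketch of the `Heap` class above, assuming the file is importable as `heap`; the node labels and keys are made up and only exercise `insert`, `update_key`, `top`, and `pop`.
```python
# Hedged usage sketch for the min-heap above (assumes the module is saved as heap.py).
from heap import Heap

h = Heap()
h.insert("a", 5)
h.insert("b", 2)
h.insert("c", 9)
h.update_key("c", 1)   # decrease-key: "c" sifts up to the top
print(h.top())         # (1, 'c')
print(h.pop())         # (1, 'c')
print(h.pop())         # (2, 'b')
print(h.pop())         # (5, 'a')
print(h.is_empty())    # True
```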
#### File: JirenJin/data-structures-and-algorithms-in-python/sorting.py
```python
def insertion_sort(array):
for i in range(1, len(array)):
to_sort = array[i]
# for j in range(i, -1, -1):
# if to_sort < array[j-1]:
# array[j] = array[j-1]
# else:
# break
# array[j] = to_sort
j = i - 1
while j >= 0 and to_sort < array[j]:
array[j+1] = array[j]
j -= 1
array[j+1] = to_sort
return array
if __name__ == "__main__":
import random
testcases = [[random.randint(0, 100) for _ in range(10)] for _ in range(100)]
for i, testcase in enumerate(testcases):
if sorted(testcase[:]) != insertion_sort(testcase):
print(testcases[i], testcase, sorted(testcases[i]))
break
print("All test cases passed!")
``` |
{
"source": "jirenmaa/djraft",
"score": 2
} |
#### File: djraft/stories/apps.py
```python
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class StoriesConfig(AppConfig):
name = 'djraft.stories'
verbose_name = _("Stories")
def ready(self):
import djraft.stories.signals
```
#### File: djraft/stories/signals.py
```python
from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Story, Like, Dislike
@receiver(post_save, sender=Story)
def create_story(sender, instance, created, **kwargs):
if created:
Like.objects.create(story=instance)
Dislike.objects.create(story=instance)
```
#### File: stories/templatetags/ctx_length.py
```python
from django.template.defaultfilters import slice_filter
from django.template.library import Library
register = Library()
@register.simple_tag
def context_length(value, _slice):
if len(value) > _slice:
return slice_filter(value, _slice) + " ..."
return value
```
#### File: djraft/stories/utils.py
```python
from django.utils import timezone as djtimezone
from django.utils.text import slugify
from unidecode import unidecode
from datetime import datetime, timedelta, timezone
def generate_slug(title: str, username: str):
# decode all unknown string
# then replace all `spaces` with `-`
title = unidecode(title).replace(" ", "-").lower()
# append a hex-encoded timestamp fragment and a username fragment
# so that the generated slug stays unique
d = str(str(djtimezone.now()).encode("utf-8").hex()[30:42])
n = str(username.encode("utf-8").hex()[:5])
unique = d + n
return slugify("%s-%s" % (title, unique))
def convert_timedelta(duration):
days, seconds = abs(duration.days), duration.seconds
hours = days * 24 + seconds // 3600
minutes = (seconds % 3600) // 60
seconds = (seconds % 60)
# return hours, minutes, seconds
return '{} hours, {} minutes, {} seconds'.format(hours, minutes, seconds)
```
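A quick hedged example of `convert_timedelta`; the timedelta value is invented, and `generate_slug` is left out here because it needs a configured Django settings module and the `unidecode` package.
```python
# Illustrative only: convert_timedelta depends solely on datetime.
from datetime import timedelta
from djraft.stories.utils import convert_timedelta  # module path assumed from the file header above

print(convert_timedelta(timedelta(days=1, hours=2, minutes=3, seconds=4)))
# -> '26 hours, 3 minutes, 4 seconds'
```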
#### File: djraft/djraft/views.py
```python
from django.views.generic import ListView
from django.conf import settings
from djraft.stories.models import Story
class Home(ListView):
model = Story
paginate_by = 10
template_name = "pages/home.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["trendings"] = Story.objects.filter(likes__users__gte=1).distinct()[:4]
return context
landing_home_view = Home.as_view()
``` |
{
"source": "jirenmaa/twitter-clone",
"score": 2
} |
#### File: twitter-clone/celeryapp/tasks.py
```python
from celeryapp.artisan import app as artisan
from celeryapp.workers import mailer
@artisan.task(name="send_email_activation")
def send_email_activation(recepients: str, hashkey: str, **kwargs):
"""Send activation link to user email"""
mailer.email_activation_link(recepients, hashkey, **kwargs)
@artisan.task(name="send_email_resetpassword")
def send_email_resetpassword(recepients: str, hashkey: str, **kwargs):
"""Send resetpassword link to user email"""
mailer.email_resetpassword_link(recepients, hashkey, **kwargs)
```
#### File: celeryapp/workers/mailer.py
```python
from django.conf import settings
from django.core.mail import EmailMessage
from django.template.loader import get_template
def url_constructor(path: str, parameters: list, **kwargs) -> str:
"""return url"""
scheme = kwargs.get("scheme", "http")
host = kwargs.get("host", settings.WEBSITE_URL)
# construct url
general = "{0}://{1}/{2}".format(scheme, host, path)
for parameter in parameters:
general += "/{0}".format(parameter)
return general
def generate_email_from_template(recipient: str, template: str, **kwargs) -> str:
"""return email render template"""
signature = "?key={0}".format(kwargs.get("signature"))
# context for template
context = {
"recepient": recipient,
"url": url_constructor(path="activation", parameters=[signature], **kwargs),
}
return get_template("{0}.html".format(template)).render(context)
def email_activation_link(recipient: str, hash: str, **kwargs) -> None:
"""send email activation to recipeint email"""
content = generate_email_from_template(
recipient, "activation", signature=hash, **kwargs
)
# construct email message
mailing = EmailMessage(
subject="Account Activation",
body=content,
from_email=settings.DEFAULT_FROM_EMAIL,
to=[recipient],
)
mailing.content_subtype = "html"
mailing.send()
def email_resetpassword_link(recipient: str, hash: str, **kwargs) -> None:
"""send email resetpassword to recipeint email"""
content = generate_email_from_template(
recipient, "resetpassword", signature=hash, **kwargs
)
# construct email message
mailing = EmailMessage(
subject="Reset Password",
body=content,
from_email=settings.DEFAULT_FROM_EMAIL,
to=[recipient],
)
mailing.content_subtype = "html"
mailing.send()
```
#### File: modules/tweets/apps.py
```python
from django.apps import AppConfig
class TweetsConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "modules.tweets"
def ready(self):
from . import signals
signals.trigger()
return super().ready()
``` |
{
"source": "jirentianxiang/YinBlogDownloader",
"score": 3
} |
#### File: jirentianxiang/YinBlogDownloader/crawler.py
```python
import pdfkit
import os
import requests
from bs4 import BeautifulSoup
import time
# Get the list of post titles
def get_title_list():
soup = requests.get('http://www.yinwang.org')
content = BeautifulSoup(soup.text,'html.parser')
titles = []
for text in content.find_all('li', 'list-group-item'):
titles.append(text.a.string)
return titles
# Get the URLs of all posts
def get_url_list():
soup = requests.get('http://www.yinwang.org')
content = BeautifulSoup(soup.text, 'html.parser')
urls = []
for li in content.find_all(class_='list-group-item'):
urls.append("http://www.yinwang.org" + li.a.get('href'))
return urls
# Save the HTML page locally
def saveHtml(file_name, file_content):
fp = open(file_name, "w+b")
fp.write(file_content)
fp.close()
# Convert a blog post to a PDF file
def savePDF(url, file_name):
options = {
'page-size': 'A4',
'zoom':'2.5'
}
pdfkit.from_url(url, file_name, options = options)
# Save all current article URLs to a file
def saveCurrUrList(urls, filename, mode = 'a'):
file = open(filename,mode)
for i in range(len(urls)):
file.write(str(urls[i] + '\n'))
file.close()
if __name__ == '__main__':
urls = get_url_list()
titles = get_title_list()
start = 0
end = len(urls)
for i in range(start, end):
soup = requests.get(urls[i])
content = BeautifulSoup(soup.text, 'html.parser')
saveHtml(os.getcwd() + '/html/' + titles[i] + '.html', content.encode())
savePDF(urls[i], os.getcwd() + '/pdf/' + titles[i] + ".pdf")
print("第", i, "篇博客《", titles[i], "》成功保存!")
``` |
{
"source": "jirenz/CS229_Project",
"score": 3
} |
#### File: CS229_Project/hearthbreaker/powers.py
```python
import hearthbreaker.targeting
from copy import copy
class Power:
def __init__(self):
self.hero = None
self.used = False
def can_use(self):
return not self.used and self.hero.player.mana >= 2
def use(self):
if self.can_use():
self.hero.player.trigger("used_power")
self.hero.player.mana -= 2
self.used = True
def allowed_targets(self):
return None
def power_targets(self):
return hearthbreaker.targeting.find_spell_target(self.hero.player.game, lambda t: t.spell_targetable())
class DruidPower(Power):
def use(self):
super().use()
self.hero.change_temp_attack(1)
self.hero.increase_armor(1)
class HunterPower(Power):
def use(self):
if self.hero.power_targets_minions:
target = self.hero.find_power_target()
super().use()
target.damage(2 * self.hero.player.spell_multiplier, None)
self.hero.player.game.check_delayed()
else:
super().use()
self.hero.player.game.other_player.hero.damage(2 * self.hero.player.spell_multiplier, None)
def allowed_targets(self):
if self.hero.power_targets_minions:
return self.power_targets()
else:
return [None]
class MagePower(Power):
def use(self):
target = self.hero.find_power_target()
super().use()
target.damage(1 * self.hero.player.spell_multiplier, None)
self.hero.player.game.check_delayed()
def allowed_targets(self):
return self.power_targets()
class PriestPower(Power):
def use(self):
target = self.hero.find_power_target()
super().use()
if self.hero.player.heal_does_damage:
target.damage(2 * self.hero.player.spell_multiplier, None)
else:
target.heal(2 * self.hero.player.heal_multiplier, None)
def __str__(self):
return "Lesser Heal"
def allowed_targets(self):
return self.power_targets()
# Special power the priest can obtain via the card Shadowform
class MindSpike(Power):
def use(self):
super().use()
target = self.hero.find_power_target()
target.damage(2 * self.hero.player.spell_multiplier, None)
def __str__(self):
return "Mind Spike"
def allowed_targets(self):
return self.power_targets()
# Special power the priest can obtain via the card Shadowform
class MindShatter(Power):
def use(self):
super().use()
target = self.hero.find_power_target()
target.damage(3 * self.hero.player.spell_multiplier, None)
def __str__(self):
return "Mind Shatter"
def allowed_targets(self):
return self.power_targets()
class PaladinPower(Power):
def use(self):
super().use()
from hearthbreaker.cards.minions.paladin import SilverHandRecruit
recruit_card = SilverHandRecruit()
recruit_card.summon(self.hero.player, self.hero.player.game, len(self.hero.player.minions))
class RoguePower(Power):
def use(self):
super().use()
from hearthbreaker.cards.weapons.rogue import WickedKnife
wicked_knife = WickedKnife()
knife = wicked_knife.create_weapon(self.hero.player)
knife.card = wicked_knife
knife.equip(self.hero.player)
class ShamanPower(Power):
def __init__(self):
self.healing_totem = False
self.searing_totem = False
self.stoneclaw_totem = False
self.wrath_of_air_totem = False
super().__init__()
def can_use(self):
self.healing_totem = False
self.searing_totem = False
self.stoneclaw_totem = False
self.wrath_of_air_totem = False
for minion in self.hero.player.minions:
if minion.card.name == "Healing Totem":
self.healing_totem = True
elif minion.card.name == "Searing Totem":
self.searing_totem = True
elif minion.card.name == "Stoneclaw Totem":
self.stoneclaw_totem = True
elif minion.card.name == "Wrath of Air Totem":
self.wrath_of_air_totem = True
if self.healing_totem and self.searing_totem and self.stoneclaw_totem and self.wrath_of_air_totem:
return False
return super().can_use()
def use(self):
super().use()
from hearthbreaker.cards.minions.shaman import HealingTotem, SearingTotem, StoneclawTotem, WrathOfAirTotem
totems = []
if not self.healing_totem:
totems.append(HealingTotem())
if not self.searing_totem:
totems.append(SearingTotem())
if not self.stoneclaw_totem:
totems.append(StoneclawTotem())
if not self.wrath_of_air_totem:
totems.append(WrathOfAirTotem())
random_totem = self.hero.player.game.random_choice(totems)
random_totem.summon(self.hero.player, self.hero.player.game, len(self.hero.player.minions))
class WarlockPower(Power):
def use(self):
super().use()
self.hero.player.game.current_player.hero.damage(2 * self.hero.player.spell_multiplier, None)
self.hero.player.game.current_player.draw()
class JaraxxusPower(Power):
def use(self):
super().use()
from hearthbreaker.cards.minions.warlock import Infernal
infernal_card = Infernal()
infernal_card.summon(self.hero.player, self.hero.player.game, len(self.hero.player.minions))
class DieInsect(Power):
def use(self):
super().use()
targets = copy(self.hero.player.opponent.minions)
targets.append(self.hero.player.opponent.hero)
target = self.hero.player.game.random_choice(targets)
target.damage(2 * self.hero.player.spell_multiplier, None)
class WarriorPower(Power):
def use(self):
super().use()
self.hero.increase_armor(2)
```
#### File: CS229_Project/learning/function_approximator.py
```python
from hearthbreaker.engine import *
from projectfiles.feature_extract import *
import numpy as np
import random
from projectfiles.game_history_generator import *
from sklearn import linear_model
from sknn.mlp import Regressor, Layer
# from projectfiles.pear_extractor import *
#print("function_approximator.py is deprecated")
class BasicFunctionApproximator:
def __init__(self):
pass
def eval(self, state_1, state_2):
def score(player):
score = 0
for i in player.minions:
score += i.calculate_attack()
score += i.health
score += len(player.hand) * 2
score += player.hero.health + player.hero.armor
return score
return score(state_2.current_player) - score(state_2.other_player)
class SimpleExtractor:
def extract(player):
attack = 0;
health = 0;
for i in player.minions:
attack += i.calculate_attack()
health += i.health
feat = [attack, health]
feat.append(len(player.hand))
feat.append(player.hero.health + player.hero.armor)
feat.append(player.game.other_player.hero.health)
return np.array(feat, dtype=np.float64)
def initial():
return np.zeros((5, ))
class LinearFunctionApproximator():
def __init__(self, initial_weights = None):
self.extractor = PearExtractor()
self.train()
#if initial_weights is None:
# self.weights = self.extractor.get_initial()
#else:
# self.weights = initial_weights
def __call__(self, state):
#print(len(self.extractor(state)), len(self.weights))
return self.eval(state)
def eval(self, state):
if state.current_player_win(): return 100000000
if state.current_player_lose(): return -10000000
return np.dot(self.extractor(state), self.weights)
def train(self):
Data = open("data.txt", "r")
Tmp = Data.read().splitlines()
training_set = []
for i in Tmp:
c = i.split(" ")
for j in range(0, len(c)):
c[j] = float(c[j])
training_set.append(c)
clf = linear_model.LinearRegression()
X = []
y = []
for data_point in training_set:
X.append(data_point[0:-1])
y.append(data_point[-1])
for i in X:
if (len(i) != 38):
print(i)
clf.fit(X, y)
self.weights = clf.coef_
print("Learning from data size: " + str(len(y)))
Data.close()
class BasicNeuroApproximator():
def __init__(self, initial_weights = None, nn = None):
self.extractor = PearExtractor()
if nn is None:
self.nn = self.regressor()
else:
self.nn = nn
self.train()
#if initial_weights is None:
# self.weights = self.extractor.get_initial()
#else:
# self.weights = initial_weights
def regressor(self):
return Regressor(
layers=[
Layer("Rectifier", units=100),
# Layer("Sigmoid", units = 200),
# Layer("Tanh", units = 100)
Layer("Linear")],
learning_rate=0.001,
n_iter=10,
f_stable = 0.1)
def __call__(self, state):
#print(len(self.extractor(state)), len(self.weights))
return self.eval(state)
def eval(self, state):
if state.current_player_win(): return 100000000
if state.current_player_lose(): return -10000000
vec = np.array(self.extractor(state))
return self.nn.predict(np.ndarray(shape = (1, len(vec)), buffer = vec))
def train(self):
Data = open("data.txt", "r")
Tmp = Data.read().splitlines()
training_set = []
for i in Tmp:
c = i.split(" ")
for j in range(0, len(c)):
c[j] = float(c[j])
training_set.append(c)
clf = linear_model.LinearRegression()
X = []
y = []
for data_point in training_set:
X.append(data_point[0:-1])
y.append(data_point[-1])
for i in X:
if (len(i) != 38):
print(i)
X = np.ndarray(shape = (len(y), len(X[0])), buffer = np.array(X))
y = np.ndarray(shape = (len(y), 1), buffer = np.array(y))
self.nn.fit(X, y)
print("Learning from data size: " + str(len(y)))
Data.close()
class DeepNeuroApproximator(BasicNeuroApproximator):
def regressor(self):
return Regressor(
layers=[
Layer("Rectifier", units=100),
Layer("Sigmoid", units = 200),
Layer("Tanh", units = 100),
Layer("Linear")],
learning_rate=0.001,
n_iter=10,
f_stable = 0.1)
```
#### File: CS229_Project/learning/learning.py
```python
import random
import json
from projectfiles.util import spark_weights
from learning.model import *
from projectfiles.game_history_generator import *
import numpy as np
import pickle
class QLearningAlgorithm:
def __init__(self, mdp, eta, explore_prob, function_approximator):
self.mdp = mdp
self.eta = eta
self.explore_prob = explore_prob
self.F = function_approximator
def getQ(self, state, action):
next_state = action.copy()
next_state._end_turn()
return self.F(state, next_state)
def getV(self, state):
return self.getQ(state, self.mdp.getBestAction(state, self.getQ))
# return max(self.getQ(state, action) for action in self.mdp.getActions(state))
def epsilon_greedy(self, state):
if random.random() < self.explore_prob:
# print("epsilon_greedy: get random action")
# next_action = random.choice(self.mdp.getActions(state))
return self.mdp.getRandomAction(state)
else:
# print("epsilon_greedy: get best action")
return self.mdp.getBestAction(state, self.getQ)
def simulate_game(self, callback):
state = self.mdp.start_state()
turns = 0
while not self.mdp.is_end_state(state):
state._start_turn()
# print("simulate_game: turn", turns, "current player", state.current_player.name)
# print(state.current_player.hero.__to_json__())
action = self.epsilon_greedy(state)
next_state, reward = self.mdp.getSuccAndReward(state, action)
assert(action.current_player.name == state.current_player.name)
callback(state, action, reward, next_state)
next_state._end_turn()
state = next_state
turns += 1
return state
def train(self, epochs = 10):
def qlearning_update(state, action, reward, next_state):
self.F.update(state, action, \
self.eta * (reward + self.mdp.getDiscount() * self.getV(next_state) - self.getQ(state, action)))
spark_weights(self.F.weights)
for epoch in range(epochs):
done = False
while not done:
old_weights = self.F.weights
try:
self.simulate_game(qlearning_update)
done = True
except:
self.F.weights = old_weights
class ExperienceReplayQ(QLearningAlgorithm):
def __init__(self, mdp, eta, explore_prob, function_approximator,
experience_size = 500,
replays_per_epoch = 20):
super().__init__(mdp, eta, explore_prob, function_approximator)
self.experience_size = experience_size
self.replays_per_epoch = replays_per_epoch
# list of (state, next_state, reward) tuples
self.experience = []
def train(self, epochs = 10):
for epoch in range(epochs):
history = []
def save_history(state, action, reward, next_state):
assert(state.current_player.name == action.current_player.name)
history.append((state.copy(), action.copy()))
done = False
while not done:
try:
history = []
state = self.simulate_game(save_history)
done = True
except:
pass
# ... s1 a1 (p0), s2 a2 (p1), END : last action by p1, p1 either won or lost
# the last state tells you the winner. suppose p0 won, then we should get
# (s1, a1, win_reward), (s2, a2, lose_reward)
# train once in reverse:
# last states get a special case reward:
s1, a1 = history[-2]
s2, a2 = history[-1]
game_experience = []
print("RESULT", s1.current_player.name, s2.current_player.name, state.winner.name if state.winner is not None else "tie")
if state.winner is None:
game_experience.append((s1, a1, self.mdp.getReward("tie")))
game_experience.append((s2, a2, self.mdp.getReward("tie")))
else:
if state.winner.name == s1.current_player.name:
game_experience.append((s1, a1, self.mdp.getReward("win")))
game_experience.append((s2, a2, self.mdp.getReward("lose")))
else:
game_experience.append((s1, a1, self.mdp.getReward("lose")))
game_experience.append((s2, a2, self.mdp.getReward("win")))
# for all other states, give a zero reward
for s, a in history[-3::-1]:
game_experience.append((s, a, 0))
# train on the current game
for r_state, action, reward in game_experience:
print("Epoch", epoch, "instant replay", "reward", reward)
next_state, _ = self.mdp.getSuccAndReward(r_state, action)
assert(r_state.current_player.name == next_state.current_player.name)
# print(r_state.current_player.name, action.current_player.name, next_state.current_player.name)
self.F.update(r_state, action, \
self.eta * (reward + self.mdp.getDiscount() * self.getV(next_state) - self.getQ(r_state, action)))
spark_weights(self.F.weights)
# truncate experience
self.experience += game_experience
if len(self.experience) > self.experience_size:
self.experience = random.sample(self.experience, self.experience_size)
for episode in range(self.replays_per_epoch):
print("Epoch", epoch, "experience replay", episode, "reward", reward)
r_state, action, reward = random.choice(self.experience)
next_state, _ = self.mdp.getSuccAndReward(r_state, action)
self.F.update(r_state, action, \
self.eta * (reward + self.mdp.getDiscount() * self.getV(next_state) - self.getQ(r_state, action)))
spark_weights(self.F.weights)
self.F.feature_extractor.debug(self.F.weights)
```
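The epsilon-greedy choice made in `QLearningAlgorithm.epsilon_greedy` can be illustrated without the game engine. The sketch below is standalone, uses made-up Q-values, and is not part of the project's API.
```python
import random

def epsilon_greedy_pick(actions, q_values, explore_prob=0.1):
    """Pick a random action with probability explore_prob, otherwise the highest-Q action."""
    if random.random() < explore_prob:
        return random.choice(actions)
    return max(actions, key=lambda a: q_values[a])

# Toy values for illustration only.
q = {"attack": 1.2, "defend": 0.4, "pass_turn": -0.1}
print(epsilon_greedy_pick(list(q), q, explore_prob=0.2))
```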
#### File: CS229_Project/learning/mdp.py
```python
import random
class MDP:
"""Abstract interface for modelling MDPs"""
def start_state(self):
"""Return the initial state of the MDP"""
raise NotImplementedError("")
def is_end_state(self, state):
"""Return true if the given state is an end state"""
raise NotImplementedError("")
def getActions(self, state):
"""Propose a set of actions doable at state"""
raise NotImplementedError("")
def getRandomAction(self, state):
"""Propose a random action"""
return random.choice(self.getActions(state))
def getBestActions(self, state, heuristic, max_actions=1):
"""Return the top max_actions (score, action) pairs, highest heuristic score first."""
scoredActions = [(heuristic(state, action), action) for action in self.getActions(state)]
scoredActions.sort(key=lambda q: q[0], reverse=True)
return scoredActions[:max_actions]
def getReward(self, state, next_state):
"""Calculate the reward from the next_state"""
raise NotImplementedError("")
def getDiscount(self):
return 1.0
```
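To make the abstract interface concrete, here is a toy subclass sketch (a counting game invented purely for illustration, not part of the project); it assumes the `MDP` class above is importable as `learning.mdp`.
```python
from learning.mdp import MDP  # module path assumed from the file header above

class CountToTenMDP(MDP):
    """Toy MDP: from state n, move to n + 1 or n + 2; reaching 10 or more ends the episode."""
    def start_state(self):
        return 0

    def is_end_state(self, state):
        return state >= 10

    def getActions(self, state):
        # here an action is simply the successor state it leads to
        return [state + 1, state + 2]

    def getReward(self, state, next_state):
        return 1.0 if next_state >= 10 else 0.0

mdp = CountToTenMDP()
state = mdp.start_state()
while not mdp.is_end_state(state):
    state = mdp.getRandomAction(state)
print("episode ended at", state)
```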
#### File: jirenz/CS229_Project/production.py
```python
from hearthbreaker.agents import registry
from hearthbreaker.cards.heroes import hero_for_class
from hearthbreaker.constants import CHARACTER_CLASS
from hearthbreaker.engine import Deck, card_lookup, Game
from hearthbreaker.cards import *
from hearthbreaker.agents import *
from projectfiles.random_deck_generator import RandomDeckGenerator
from projectfiles.agent import *
import sys
# import shelve
from projectfiles.deck_loader import DeckLoader
from projectfiles.hearthlogger import Hearthlogger
from projectfiles.agent import *
from projectfiles.feature_extract import *
from projectfiles.strategy_agent import *
from learning.function_approximator import *
# from learning.model import *
import numpy as np
# import pickle
# from projectfiles.util import spark_weights
def test_agent_once(one, other):
generator = RandomDeckGenerator()
deck1 = generator.generate()
deck2 = deck1.copy()
if other is None:
other = TradeAgent()
#other = RandomAgent()
game = Game([deck1, deck2], [one, other])
new_game = game.copy()
try:
new_game.start()
except Exception as e:
print("Game error: " + str(e))
raise e
# raise
#print(json.dumps(new_game.__to_json__(), default=lambda o: o.__to_json__(), indent=1))
# new_game
return False
print("Game lasted: " + str(new_game._turns_passed))
print("winning agent: " + new_game.winner.agent.name)
# spark_weights(ql.weights)
return new_game.winner.agent.name
def test_agent(one, other, number = 20):
i = 0
err = 0
winning_count = {one.name : 0, other.name : 0}
while i < number:
winner = test_agent_once(one, other)
if winner:
i += 1
winning_count[winner] += 1
pass
# print("Error")
# err += 1
#if err > 100:
# print("Aborting after 5 errors.")
# break
print(winning_count)
print("Winning_rate: " + one.name + ": " + str(winning_count[one.name]/winning_count[other.name]))
if __name__ == "__main__":
function_approximator = None
agent_1 = StrategyAgent(LinearFunctionApproximator(), "Learner")
agent_2 = TradeAgent()
#while True:
test_agent(agent_1, agent_2, 5)
```
#### File: projectfiles/LR-statevalue/datagate-shelve.py
```python
import shelve
def getdata():
data = []
s = shelve.open("gamefiles.dat")
for i in s:
print(s[i])
collect = s["gamelogger"]
print(collect)
for i in collect:
tmp = []
print(i)
data.append(tmp)
return data
getdata()
```
#### File: projectfiles/LR-statevalue/learner.py
```python
from sklearn import linear_model
# each minion information: [attack,health,can_attack?]
# state information f=[(minion1),...,(minion7),hero_health,hero_armor,
# (oppo_minion1),...,(oppo_minion7),oppo_hero_health,oppo_hero_armor,
# state_value]
def learner(array_f, NumFeat):
# this function receives a vector of state information (NumFeat-dimension, not including state_value)
# this function returns a vector of coefficient terms (NumFeat+1 terms; first term is intercept)
clf = linear_model.LinearRegression(fit_intercept=False)  # the prepended column of 1s serves as the intercept term
X = []
y = []
for i in array_f:
y.append(i[NumFeat])
X.append([1]+i[0:NumFeat])
clf.fit(X, y)
return clf.coef_
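# Hedged usage sketch (hypothetical data, not real game states): three samples with
# NumFeat = 2 features each, followed by their state value in the last position.
if __name__ == "__main__":
    samples = [[1.0, 2.0, 5.0],
               [2.0, 0.0, 3.0],
               [0.0, 1.0, 2.0]]
    print(learner(samples, 2))  # NumFeat + 1 coefficients: [intercept, w_1, w_2]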
``` |
{
"source": "jirheee/CS492-Team-Project",
"score": 2
} |
#### File: ml/AlphaZero_Gomoku/train.py
```python
import json
import random
import datetime
import numpy as np
from tqdm import tqdm
from collections import defaultdict, deque
from game import Board, Game
from mcts_pure import MCTSPlayer as MCTS_Pure
from mcts_alphaZero import MCTSPlayer
from nn_architecture import PolicyValueNet
import os
import argparse
import time
import re
import threading
import copy
class Eval_Thread(threading.Thread):
def __init__(self, train_pipeline, curr_mcts, pure_mcts, round_num, winner_cnt):
threading.Thread.__init__(self)
self.train_pipeline = train_pipeline
self.game = copy.deepcopy(train_pipeline.game)
self.curr_mcts = copy.deepcopy(curr_mcts)
self.pure_mcts = copy.deepcopy(pure_mcts)
self.round = round_num
self.winner_cnt = winner_cnt
self.daemon=True
def run(self):
try:
winner = self.game.start_play(self.curr_mcts,
self.pure_mcts,
start_player=self.round % 2,
is_shown=0)
with winner_cnt_lock:
self.winner_cnt[winner]+=1
except KeyboardInterrupt:
print(f"Terminating round{self.round}",flush=True)
from torch.utils.tensorboard import SummaryWriter
class TrainPipeline():
def __init__(self, uuid = "0000", resume = False, force_cpu = False):
# load data from json file
self.uuid = uuid
self.io_dir = f"../models/{str(uuid)}/"
self.output_json_path = self.io_dir+f"output.json"
output_num=0
while os.path.exists(self.output_json_path):
output_num = output_num+1
self.output_json_path = self.io_dir+f"output{output_num}.json"
model_config = self.io_dir + f"model.json"
train_config = self.io_dir + f"train.json"
with open(model_config, encoding='utf-8') as f:
model_config = json.loads(f.read())
with open(train_config, encoding='utf-8') as f:
train_config = json.loads(f.read())
# params of the board and the game
self.board_width = model_config["board"]["board_width"]
self.board_height = model_config["board"]["board_height"]
self.n_in_row = model_config["board"]["n_in_row"]
self.board = Board(width=self.board_width,
height=self.board_height,
n_in_row=self.n_in_row)
self.game = Game(self.board)
# # training params
# self.lr = train_config["hyperparameters"]["lr"]
# self.buffer_size = train_config["hyperparameters"]["buffer_size"]
# self.batch_size = train_config["hyperparameters"]["batch_size"]
# self.epochs = train_config["hyperparameters"]["epochs"]
# self.eval_rounds = train_config["testparameters"]["eval_rounds"]
# self.model_playout = train_config["testparameters"]["model_playout"] # num of simulations for each move
# self.best_win_ratio = train_config["testparameters"]["best_win_ratio"] # Critical when resuming. All previous progress will be lost when not set.
# # num of simulations used for the pure mcts, which is used as
# # the opponent to evaluate the trained policy
# self.pure_mcts_playout_num = train_config["testparameters"]["mcts_playout"]
# self.check_freq = train_config["testparameters"]["check_freq"]
# training params
self.lr = train_config["lr"]
self.buffer_size = train_config["buffer_size"]
self.batch_size = train_config["batch_size"]
self.epochs = train_config["epochs"]
self.eval_rounds = 10
self.model_playout = 400 # num of simulations for each move
try:
self.best_win_ratio = train_config["testparameters"]["best_win_ratio"]
except KeyError:
self.best_win_ratio = 0.0
# num of simulations used for the pure mcts, which is used as
# the opponent to evaluate the trained policy
self.pure_mcts_playout_num = 1000
self.check_freq = int((self.epochs **0.5)*3)
self.data_buffer = deque(maxlen=self.buffer_size)
self.lr_multiplier = 1.0 # adaptively adjust the learning rate based on KL
self.temp = 1.0 # the temperature param
self.c_puct = 5
self.play_batch_size = 1
self.kl_targ = 0.02
model_file_path = f"../models/{str(self.uuid)}/curr.model"
if resume and os.path.exists(model_file_path):
print(f"Loading checkpoint from: {str(self.uuid)}",flush=True)
else:
print("Training new checkpoints.", end = " ")
if os.path.exists(model_file_path):
print("Overriding "+model_file_path, end = "",flush=True)
model_file_path = None
print(flush=True)
if force_cpu:
print("Forced to use CPU only",flush=True)
self.policy_value_net = PolicyValueNet(self.board_width, self.board_height, model_config["nn_type"], model_config["layers"], model_file = model_file_path,force_cpu=force_cpu)
self.mcts_player = MCTSPlayer(self.policy_value_net.policy_value_fn,
c_puct=self.c_puct,
n_playout=self.model_playout,
is_selfplay=1)
self.writer = SummaryWriter()
# {"train_progression":[
# [0epoch, 1time, 2loss, 3entropy, 4D_kl],
# ... ,
# ],
# "win_rates":[
# [epoch, win_rate]
# ]
# }
# initialize records
self.records = {"start":"","train_progression":[],"win_rates":[],"end":""}
json.dump(self.records,open(self.output_json_path,"w"))
self.step = 0
def get_equi_data(self, play_data):
"""augment the data set by rotation and flipping
play_data: [(state, mcts_prob, winner_z), ..., ...]
"""
extend_data = []
for state, mcts_porb, winner in play_data:
for i in [1, 2, 3, 4]:
# rotate counterclockwise
equi_state = np.array([np.rot90(s, i) for s in state])
equi_mcts_prob = np.rot90(np.flipud(
mcts_porb.reshape(self.board_height, self.board_width)), i)
extend_data.append((equi_state,
np.flipud(equi_mcts_prob).flatten(),
winner))
# flip horizontally
equi_state = np.array([np.fliplr(s) for s in equi_state])
equi_mcts_prob = np.fliplr(equi_mcts_prob)
extend_data.append((equi_state,
np.flipud(equi_mcts_prob).flatten(),
winner))
return extend_data
def collect_selfplay_data(self, n_games=1):
"""collect self-play data for training"""
for i in range(n_games):
winner, play_data = self.game.start_self_play(self.mcts_player,
temp=self.temp)
play_data = list(play_data)[:]
self.episode_len = len(play_data)
# augment the data
play_data = self.get_equi_data(play_data)
self.data_buffer.extend(play_data)
def policy_update(self, epoch):
"""update the policy-value net"""
mini_batch = random.sample(self.data_buffer, self.batch_size)
state_batch = np.array([data[0] for data in mini_batch])
mcts_probs_batch = np.array([data[1] for data in mini_batch])
winner_batch = np.array([data[2] for data in mini_batch])
old_probs, old_v = self.policy_value_net.policy_value(state_batch)
for i in range(5):
loss, entropy = self.policy_value_net.train_step(
state_batch,
mcts_probs_batch,
winner_batch,
self.lr*self.lr_multiplier)
new_probs, new_v = self.policy_value_net.policy_value(state_batch)
kl = np.mean(np.sum(old_probs * (
np.log(old_probs + 1e-10) - np.log(new_probs + 1e-10)),
axis=1)
)
if kl > self.kl_targ * 4: # early stopping if D_KL diverges badly
break
# adaptively adjust the learning rate
if kl > self.kl_targ * 2 and self.lr_multiplier > 0.1:
self.lr_multiplier /= 1.5
elif kl < self.kl_targ / 2 and self.lr_multiplier < 10:
self.lr_multiplier *= 1.5
explained_var_old = (1 -
np.var(np.array(winner_batch) - old_v.flatten()) /
np.var(np.array(winner_batch)))
explained_var_new = (1 -
np.var(np.array(winner_batch) - new_v.flatten()) /
np.var(np.array(winner_batch)))
# print(("kl:{:.5f},"
# "lr_multiplier:{:.3f},"
# "loss:{},"
# "entropy:{},"
# "explained_var_old:{:.3f},"
# "explained_var_new:{:.3f}"
# ).format(kl,
# self.lr_multiplier,
# loss,
# entropy,
# explained_var_old,
# explained_var_new))
self.writer.add_scalar("KL Divergence", kl, self.step)
self.writer.add_scalar("Loss", loss, self.step)
self.writer.add_scalar("Entropy", entropy, self.step)
self.step += 1
return loss, entropy, kl
def policy_evaluate(self, n_games=10):
"""
Evaluate the trained policy by playing against the pure MCTS player
Note: this is only for monitoring the progress of training
"""
current_mcts_player = MCTSPlayer(self.policy_value_net.policy_value_fn,
c_puct=self.c_puct,
n_playout=self.model_playout)
pure_mcts_player = MCTS_Pure(c_puct=5,
n_playout=self.pure_mcts_playout_num)
win_cnt = defaultdict(int)
threads = []
for ii in range(n_games):
new_thread = Eval_Thread(self, current_mcts_player, pure_mcts_player, ii, win_cnt)
threads.append(new_thread)
new_thread.start()
for t in threads:
try:
t.join()
except KeyboardInterrupt:
print("Ignoring a thread",flush=True)
raise KeyboardInterrupt
win_ratio = 1.0*(win_cnt[1] + 0.5*win_cnt[-1]) / n_games
# print("num_playouts:{}, win: {}, lose: {}, tie:{}".format(
# n_games, win_cnt[1], win_cnt[2], win_cnt[-1]))
return win_ratio
def run(self):
"""run the training pipeline"""
try:
timestamp = re.sub(r'[^\w\-_\. ]', '_', datetime.datetime.now().__str__()[2:-7])
self.records["start"]=timestamp
json.dump(self.records,open(self.output_json_path,"w"))
start = time.time()
# Save at the start of training
self.policy_value_net.save_model(f"../models/"
f"{self.uuid}/"
f"curr.model")
self.policy_value_net.save_model(f"../models/"
f"{self.uuid}/"
f"best.model")
for ii in range(self.epochs):
self.collect_selfplay_data(self.play_batch_size)
if len(self.data_buffer) > self.batch_size:
loss, entropy, kl = self.policy_update(ii)
elapsed_time = float(round(time.time()-start,2))
dump = {"epoch": ii, "elapsed_time": float(elapsed_time), "loss": float(loss), "entropy": float(entropy), "kl": float(kl)}
print(json.dumps(dump),flush=True)
self.records["train_progression"].append([int(ii), # epoch
elapsed_time, # elapsed time
float(round(loss,5)),
float(round(entropy,5)),
float(round(float(kl),5))])
json.dump(self.records,open(self.output_json_path,"w"))
# check the performance of the current model,
# and save the model params
if (ii+1) % self.check_freq == 0:
win_ratio = self.policy_evaluate(self.eval_rounds)
d = {"epoch": ii, "win_ratio": float(win_ratio)}
print(json.dumps(d),flush=True)
self.records["win_rates"].append([ii,float(round(win_ratio,2))])
self.policy_value_net.save_model(f"../models/"
f"{self.uuid}/"
f"curr.model")
if win_ratio > self.best_win_ratio:
self.best_win_ratio = win_ratio
self.policy_value_net.save_model(f"../models/"
f"{self.uuid}/"
f"best.model")
# write best_win_ratio
train_config_path = self.io_dir + f"train.json"
with open(train_config_path, encoding='utf-8') as f:
train_config = json.loads(f.read())
train_config["testparameters"]={}
train_config["testparameters"]["best_win_ratio"]=win_ratio
json.dump(train_config,open(train_config_path, "w",encoding='utf-8'))
if (self.best_win_ratio == 1.0 and
self.pure_mcts_playout_num < 5000):
self.pure_mcts_playout_num += 1000
self.best_win_ratio = 0.0
json.dump(self.records,open(self.output_json_path,"w"))
self.policy_evaluate(self.eval_rounds)
# Save at the end of training
self.policy_value_net.save_model(f"../models/"
f"{self.uuid}/"
f"curr.model")
self.writer.close()
except KeyboardInterrupt:
print('\n\rquit',flush=True)
# Save at the end of training
self.policy_value_net.save_model(f"../models/"
f"{self.uuid}/"
f"curr.model")
timestamp = re.sub(r'[^\w\-_\. ]', '_', datetime.datetime.now().__str__()[2:-7])
self.records["end"] = timestamp
json.dump(self.records,open(self.output_json_path,"w"))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-u","--uuid", help="UUID is used for reading model parameters and saving, loading models")
parser.add_argument("-r","--resume", action = "store_true" , help="Resume from saved checkpoint", default=False)
parser.add_argument("-c","--cpu", action="store_true",help="Force to run on CPU, without cuda", default=False)
args = parser.parse_args()
winner_cnt_lock = threading.Lock()
test_uuid = args.uuid
test_resume = args.resume
test_force_cpu = args.cpu
training_pipeline = TrainPipeline(test_uuid, test_resume, test_force_cpu)
training_pipeline.run()
exit()
``` |
{
"source": "jirheee/Khan-Academy-Comment-Crawler",
"score": 2
} |
#### File: Khan-Academy-Comment-Crawler/src/crawl.py
```python
import os
import re
import json
from time import sleep
from typing import Dict, List, Set, TypedDict
from glob import glob
from selenium import webdriver  # type: ignore
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import util
ka_base_url = "https://www.khanacademy.org/"
def get_first_order_topics(driver: webdriver.Chrome)->Set[str]:
first_order_topic_links = set()
driver.find_element_by_xpath('//button[@data-test-id="learn-menu-dropdown"]').click()
topic_elements = driver.find_elements_by_xpath('//ul[@data-test-id="learn-menu"]//a')
p = re.compile(ka_base_url+'([a-z]+|-{1})+')
for topic in topic_elements:
href = topic.get_attribute("href")
m = p.match(href)
if m and m.group() == href:
first_order_topic_links.add(href)
return list(first_order_topic_links.difference({
"https://www.khanacademy.org/kids",
"https://www.khanacademy.org/sat",
"https://www.khanacademy.org/college-careers-more",
}))
class SuborderTopicDict(TypedDict):
href: str
unit_hrefs: List[str]
## TODO: Refactor variable names
def get_suborder_topic_dict(driver: webdriver.Chrome, first_order_topic: str) -> Dict[str, SuborderTopicDict]:
suborder_topic_dict: Dict[str, SuborderTopicDict] = {}
print(first_order_topic)
driver.get(first_order_topic)
lecture_tags = driver.find_elements_by_xpath('//div[@data-slug]')
lesson_href_re = re.compile(first_order_topic+"(/([a-z0-9]+|-|:)+){2}")
for lecture_tag in lecture_tags:
lecture_title_element = lecture_tag.find_element_by_xpath('.//h2//a')
lesson_elements = lecture_tag.find_elements_by_xpath('.//a')
unit_hrefs = []
for lesson_element in lesson_elements:
lesson_href = lesson_element.get_attribute("href")
lesson_href_match = lesson_href_re.match(lesson_href)
if lesson_href_match:
unit_hrefs.append(lesson_href_match.group())
suborder_topic_dict[lecture_title_element.text] = {"href": lecture_title_element.get_attribute("href"), "unit_hrefs": unit_hrefs}
return suborder_topic_dict
def get_lecture_links(driver: webdriver.Chrome, suborder_topic_dict: SuborderTopicDict)->List[Dict[str, str]]:
units: List[Dict[str, str]] = []
for unit_href in suborder_topic_dict["unit_hrefs"]:
driver.get(unit_href)
lecture_links = {}
lesson_cards = driver.find_elements_by_xpath('//div[@data-test-id="lesson-card"]')
for lesson_card in lesson_cards:
lesson_card_link = lesson_card.find_element_by_xpath('.//a[@data-test-id="lesson-card-link"]')
lecture_links[lesson_card_link.text] = lesson_card_link.get_attribute("href")
units.append(lecture_links)
return units
def get_lectures(driver: webdriver.Chrome, first_order_links: Set[str]):
for first_order_link in first_order_links:
topic = first_order_link.split("/")[-1]
first_order_path = f"./data/lectures/{topic}"
os.makedirs(first_order_path)
suborder_topic_dict = get_suborder_topic_dict(driver, first_order_link)
for suborder_topic in suborder_topic_dict:
print("suborder topic", suborder_topic)
suborder_path = f"{first_order_path}/{util.string_to_snake_case_filename(suborder_topic)}.json"
units = get_lecture_links(driver, suborder_topic_dict[suborder_topic])
util.write_file(suborder_path, json.dumps(units, indent=4))
def get_content_links(driver: webdriver.Chrome, lecture_link: str):
articles = []
videos = []
driver.get(lecture_link)
content_elements = WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.XPATH, '//div[@aria-label="lesson table of contents"]//li[@role="presentation"]/div/a[@aria-current]')))
href_set = set()
for content_element in content_elements:
href = content_element.get_attribute("href")
if href in href_set:
continue
href_set.add(href)
type_element = content_element.find_element_by_xpath('.//span[@aria-label]')
content_type = type_element.get_attribute("aria-label")
title_element = content_element.find_element_by_xpath('.//div[@title]')
content_title = title_element.get_attribute("title")
content_dict = {"title": content_title, "href": href}
if content_type == "Video":
videos.append(content_dict)
elif content_type == "Article":
articles.append(content_dict)
else:
print(f"this type of content has no comment! {content_title} {content_type}")
return articles, videos
def get_article_video_links(driver: webdriver.Chrome):
topic_directories = glob("./data/lectures/*")
for i, topic_directory in enumerate(topic_directories):
print(f"Topic {i+1}/{len(topic_directories)}")
lecture_jsons = glob(f"{topic_directory}/*.json")
content_topic_dir_path = topic_directory.replace("lectures", "contents")
os.makedirs(content_topic_dir_path, exist_ok=True)
for j, lecture_json in enumerate(lecture_jsons):
print(f"|--Lecture {j+1}/{len(lecture_jsons)}")
lecture_content_dict = []
json_name = lecture_json.split("/")[-1]
lecture_json_path = f"{content_topic_dir_path}/{json_name}"
if os.path.isfile(lecture_json_path):
print("** This lecture is already crawled **")
continue
with open(lecture_json, "r") as f:
loaded_json = "".join([line.strip() for line in f.readlines()])
lessons = json.loads(loaded_json)
for k, lesson_dict in enumerate(lessons):
print(f"|----Lesson {k+1}/{len(lessons)}")
for lesson_name in lesson_dict:
articles, videos = get_content_links(driver, lesson_dict[lesson_name])
lecture_content_dict.append({"articles": articles, "videos": videos})
util.write_file(lecture_json_path, json.dumps(lecture_content_dict,indent=4))
class LessonContentDict():
def __init__(self, lecture_path, lesson_dict, lesson_index) -> None:
self.articles = lesson_dict["articles"]
self.videos = lesson_dict["videos"]
self.lesson_path = f"{lecture_path.replace('/contents/', '/comments/')}/{lesson_index}.json"
def crawl_article_comments(self):
comment_dicts = []
num_comments = 0
try:
for article_dict in self.articles:
article_title = article_dict["title"]
article_href = article_dict["href"]
self.driver.get(article_href)
is_show_more_exist = True
while is_show_more_exist:
try:
show_more_button = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.XPATH, '//div[@id="ka-uid-discussiontabbedpanel-0--tabbedpanel-content"]//button[@class="_1f0fvyce"]')))
show_more_button.click()
except:
print("All comments are revealed")
is_show_more_exist = False
comment_elements = self.driver.find_elements_by_xpath('//div[@data-test-id="discussion-post"]//span[@class="_1glfes6x"]/span')
comments = [comment_element.text for comment_element in comment_elements]
num_comments += len(comments)
comment_dicts.append({"title": video_title, "comments": comments})
except:
return comment_dicts, num_comments
return comment_dicts, num_comments
def crawl_video_comments(self):
comment_dicts = []
num_comments = 0
try:
for video_dict in self.videos:
video_title = video_dict["title"]
video_href = video_dict["href"]
self.driver.get(video_href)
is_show_more_exist = True
while is_show_more_exist:
try:
show_more_button = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.XPATH, '//div[@id="ka-uid-discussiontabbedpanel-0--tabbedpanel-content"]//button[@class="_1f0fvyce"]')))
show_more_button.click()
except:
print("All comments are revealed")
is_show_more_exist = False
comment_elements = self.driver.find_elements_by_xpath('//div[@data-test-id="discussion-post"]//span[@class="_1glfes6x"]/span')
comments = [comment_element.text for comment_element in comment_elements]
num_comments += len(comments)
comment_dicts.append({"title": video_title, "comments": comments})
except:
return comment_dicts, num_comments
return comment_dicts, num_comments
def crawl_comments(self):
print(f"####### Start Crawling...: {self.lesson_path} #######")
if os.path.isfile(self.lesson_path):
print(f"{self.lesson_path} is already crawled")
return 0
self.driver = webdriver.Chrome(executable_path=chromedriver_path)
print(f"{len(self.videos)} Videos / {len(self.articles)} Articles")
print(f"Lesson Path: {self.lesson_path}")
os.makedirs("/".join(self.lesson_path.split("/")[:-1]), exist_ok=True)
article_comments, article_comments_num = self.crawl_article_comments()
video_comments, video_comments_num = self.crawl_video_comments()
util.write_file(self.lesson_path, json.dumps({"articles": article_comments, "videos":video_comments}, indent=2))
print(f"### Crawled {article_comments_num} article comments {video_comments_num} video comments \ntotal {article_comments_num+video_comments_num} comments")
try:
self.driver.quit()
except:
pass
return article_comments_num+video_comments_num
from multiprocessing import Pool
chromedriver_path = "../chromedriver" if "src" == os.getcwd().split("/")[-1] else "./chromedriver"
def get_lesson_dict_comment(arg):
lesson_dict, print_str, lesson_index, path = arg
print(print_str)
crawled_comments = LessonContentDict(path, lesson_dict, lesson_index).crawl_comments()
print(f"|----Crawled {crawled_comments} comments")
return crawled_comments
def get_comments():
topic_dirs = glob("./data/contents/*")
total_comment_num = 0
for topic_index, topic_dir in enumerate(topic_dirs):
lecture_jsons = glob(f"{topic_dir}/*")
for lecture_index, lecture_json in enumerate(lecture_jsons):
with open(lecture_json, "r") as f:
lesson_array = json.loads("".join([line.strip() for line in f.readlines()]))
pool = Pool(processes=5)
def get_print_str(lesson_index):
return f"|--Topic {topic_index+1}/{len(topic_dirs)} Lecture {lecture_index+1}/{len(lecture_jsons)} Lesson {lesson_index+1}/{len(lesson_array)}"
args = [(lesson_dict, get_print_str(lesson_index), lesson_index, lecture_json.replace(".json", ""),) for lesson_index, lesson_dict in enumerate(lesson_array)]
results = pool.map_async(get_lesson_dict_comment, args)
results = results.get()
total_comment_num += sum(results)
print(f"Finished Lesture json {lecture_json} Total comments: {total_comment_num}")
def main():
chromedriver_path = "../chromedriver" if "src" == os.getcwd().split("/")[-1] else "./chromedriver"
# driver = webdriver.Chrome(executable_path=chromedriver_path)
# driver.maximize_window()
# driver.get(ka_base_url)
sleep(1)
"""
{
'https://www.khanacademy.org/test-prep',
'https://www.khanacademy.org/humanities',
'https://www.khanacademy.org/economics-finance-domain',
'https://www.khanacademy.org/science',
'https://www.khanacademy.org/college-careers-more',
'https://www.khanacademy.org/computing',
'https://www.khanacademy.org/math',
'https://www.khanacademy.org/ela'
}
"""
# first_order_links = get_first_order_topics(driver)
# get_lectures(driver, first_order_links)
# get_article_video_links(driver)
get_comments()
sleep(1)
# driver.quit()
if __name__ == "__main__":
main()
``` |
{
"source": "jiria/akri",
"score": 2
} |
#### File: akri/test/run-end-to-end.py
```python
import shared_test_code
import json, os, time, yaml
from kubernetes import client, config
from kubernetes.client.rest import ApiException
def main():
print("End-to-end test main start")
# If this is a PUSH, the test needs to wait for the new containers to be
# built/pushed. In this case, the workflow will set /tmp/sleep_duration.txt to
# the number of seconds to sleep.
# If this is a MANUALLY triggered run or a PULL-REQUEST, no new containers will
# be built/pushed, the workflows will not set /tmp/sleep_duration.txt and
# this test will execute immediately.
shared_test_code.initial_sleep()
# Update Helm and install this version's chart
os.system("helm repo update")
# Get version of akri to test
test_version = shared_test_code.get_test_version()
print("Testing version: {}".format(test_version))
shared_test_code.major_version = "v" + test_version.split(".")[0]
print("Testing major version: {}".format(shared_test_code.major_version))
print("Installing Akri Helm chart: {}".format(test_version))
helm_chart_name = shared_test_code.get_helm_chart_name()
print("Get Akri Helm chart: {}".format(helm_chart_name))
cri_args = shared_test_code.get_cri_args()
print("Providing Akri Helm chart with CRI args: {}".format(cri_args))
helm_install_command = "helm install akri akri-helm-charts/{} --version {} --set debugEcho.enabled=true --set debugEcho.name={} --set debugEcho.shared=false --set agent.allowDebugEcho=true {}".format(helm_chart_name, test_version, shared_test_code.DEBUG_ECHO_NAME, cri_args)
print("Helm command: {}".format(helm_install_command))
os.system(helm_install_command)
try:
res = do_test()
except Exception as e:
print(e)
res = False
finally:
# Best effort cleanup work
try:
# Save Agent and controller logs
shared_test_code.save_agent_and_controller_logs()
finally:
# Delete akri and check that controller and Agent pods deleted
os.system("helm delete akri")
if res:
# Only test cleanup if the test has succeeded up to now
if not shared_test_code.check_akri_state(0, 0, 0, 0, 0, 0):
print("Akri not running in expected state after helm delete")
raise RuntimeError("Scenario Failed")
if not res:
raise RuntimeError("Scenario Failed")
def do_test():
kubeconfig_path = shared_test_code.get_kubeconfig_path()
print("Loading k8s config: {}".format(kubeconfig_path))
config.load_kube_config(config_file=kubeconfig_path)
# Get kubectl command
kubectl_cmd = shared_test_code.get_kubectl_command()
# Ensure Helm Akri installation applied CRDs and set up agent and controller
print("Checking for CRDs")
if not shared_test_code.crds_applied():
print("CRDs not applied by helm chart")
return False
print("Checking for initial Akri state")
if not shared_test_code.check_akri_state(1, 1, 2, 2, 1, 2):
print("Akri not running in expected state")
os.system('sudo {} get pods,services,akric,akrii --show-labels'.format(kubectl_cmd))
return False
# Do offline scenario
print("Writing to Agent pod {} that device offline".format(shared_test_code.agent_pod_name))
os.system('sudo {} exec -i {} -- /bin/bash -c "echo "OFFLINE" > /tmp/debug-echo-availability.txt"'.format(kubectl_cmd, shared_test_code.agent_pod_name))
print("Checking Akri state after taking device offline")
if not shared_test_code.check_akri_state(1, 1, 0, 0, 0, 0):
print("Akri not running in expected state after taking device offline")
os.system('sudo {} get pods,services,akric,akrii --show-labels'.format(kubectl_cmd))
return False
# Do back online scenario
print("Writing to Agent pod {} that device online".format(shared_test_code.agent_pod_name))
os.system('sudo {} exec -i {} -- /bin/bash -c "echo "ONLINE" > /tmp/debug-echo-availability.txt"'.format(kubectl_cmd, shared_test_code.agent_pod_name))
print("Checking Akri state after bringing device back online")
if not shared_test_code.check_akri_state(1, 1, 2, 2, 1, 2):
print("Akri not running in expected state after bringing device back online")
os.system('sudo {} get pods,services,akric,akrii --show-labels'.format(kubectl_cmd))
return False
# Check Akri slot reconciliation logs for success
print("Check logs for Agent slot-reconciliation for pod {}".format(shared_test_code.agent_pod_name))
result = os.system('sudo {} logs {} | grep "get_node_slots - crictl called successfully" | wc -l | grep -v 0'.format(kubectl_cmd, shared_test_code.agent_pod_name))
if result != 0:
print("Akri failed to successfully connect to crictl via the CRI socket")
return False
# Do cleanup scenario
print("Deleting Akri configuration: {}".format(shared_test_code.DEBUG_ECHO_NAME))
os.system("sudo {} delete akric {}".format(kubectl_cmd, shared_test_code.DEBUG_ECHO_NAME))
print("Checking Akri state after deleting configuration")
if not shared_test_code.check_akri_state(1, 1, 0, 0, 0, 0):
print("Akri not running in expected state after deleting configuration")
os.system('sudo {} get pods,services,akric,akrii --show-labels'.format(kubectl_cmd))
return False
return True
main()
``` |
{
"source": "jirian/text_summarizer_czech",
"score": 3
} |
#### File: jirian/text_summarizer_czech/app.py
```python
from flask import Flask, request, render_template, jsonify
import summarizer_machovec_modified
app = Flask(__name__)
app.config['DEBUG'] = True
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
text = request.form.get('text') or ''
summary = ''
if text:
print('Summarizing...')
summary = summarizer_machovec_modified.summarize(text)
print(f'\n======summary======\n{summary}')
return jsonify(summary)
if __name__ == '__main__':
app.run()
```
#### File: RDRPOSTagger_python_3/pSCRDRtagger/RDRPOSTagger.py
```python
import os
import sys
os.chdir("RDRPOSTagger_python_3")
sys.setrecursionlimit(100000)
sys.path.append(os.path.abspath(""))
os.chdir("./pSCRDRtagger")
from multiprocessing import Pool
from InitialTagger.InitialTagger import initializeCorpus, initializeSentence
from SCRDRlearner.Object import FWObject
from SCRDRlearner.SCRDRTree import SCRDRTree
from SCRDRlearner.SCRDRTreeLearner import SCRDRTreeLearner
from Utility.Config import NUMBER_OF_PROCESSES, THRESHOLD
from Utility.Utils import getWordTag, getRawText, readDictionary
from Utility.LexiconCreator import createLexicon
def unwrap_self_RDRPOSTagger(arg, **kwarg):
return RDRPOSTagger.tagRawSentence(*arg, **kwarg)
class RDRPOSTagger(SCRDRTree):
"""
RDRPOSTagger for a particular language
"""
def __init__(self):
self.root = None
# added by Joe, 2018-02-25
def get_word_tag(self, word):
return getWordTag(word)
def tagRawSentence(self, DICT, rawLine):
line = initializeSentence(DICT, rawLine)
sen = []
wordTags = line.split()
for i in range(len(wordTags)):
fwObject = FWObject.getFWObject(wordTags, i)
word, tag = getWordTag(wordTags[i])
node = self.findFiredNode(fwObject)
if node.depth > 0:
sen.append(word + "/" + node.conclusion)
else: # Fired at root, return initialized tag
sen.append(word + "/" + tag)
return " ".join(sen)
def tagRawCorpus(self, DICT, rawCorpusPath):
lines = open(rawCorpusPath, "r").readlines()
#Change the value of NUMBER_OF_PROCESSES to obtain faster tagging process!
pool = Pool(processes = NUMBER_OF_PROCESSES)
taggedLines = pool.map(unwrap_self_RDRPOSTagger, zip([self] * len(lines), [DICT] * len(lines), lines))
outW = open(rawCorpusPath + ".TAGGED", "w")
for line in taggedLines:
outW.write(line + "\n")
outW.close()
print("\nOutput file:", rawCorpusPath + ".TAGGED")
def printHelp():
print (("\n===== Usage =====" ))
print ('\n#1: To train RDRPOSTagger on a gold standard training corpus:')
print ('\npython RDRPOSTagger.py train PATH-TO-GOLD-STANDARD-TRAINING-CORPUS')
print ('\nExample: python RDRPOSTagger.py train ../data/goldTrain')
print ('\n#2: To use the trained model for POS tagging on a raw text corpus:')
print ('\npython RDRPOSTagger.py tag PATH-TO-TRAINED-MODEL PATH-TO-LEXICON PATH-TO-RAW-TEXT-CORPUS')
print ('\nExample: python RDRPOSTagger.py tag ../data/goldTrain.RDR ../data/goldTrain.DICT ../data/rawTest')
print ('\n#3: Find the full usage at http://rdrpostagger.sourceforge.net !')
def run(args = sys.argv[1:]):
if (len(args) == 0):
printHelp()
elif args[0].lower() == "train":
try:
print ("\n====== Start ======" )
print ("\nGenerate from the gold standard training corpus a lexicon", args[1] + ".DICT")
createLexicon(args[1], 'full')
createLexicon(args[1], 'short')
print ("\nExtract from the gold standard training corpus a raw text corpus", args[1] + ".RAW")
getRawText(args[1], args[1] + ".RAW")
print ("\nPerform initially POS tagging on the raw text corpus, to generate", args[1] + ".INIT")
DICT = readDictionary(args[1] + ".sDict")
initializeCorpus(DICT, args[1] + ".RAW", args[1] + ".INIT")
print ('\nLearn a tree model of rules for POS tagging from %s and %s' % (args[1], args[1] + ".INIT"))
rdrTree = SCRDRTreeLearner(THRESHOLD[0], THRESHOLD[1])
rdrTree.learnRDRTree(args[1] + ".INIT", args[1])
print ("\nWrite the learned tree model to file ", args[1] + ".RDR")
rdrTree.writeToFile(args[1] + ".RDR")
print ('\nDone!')
os.remove(args[1] + ".INIT")
os.remove(args[1] + ".RAW")
os.remove(args[1] + ".sDict")
except Exception as e:
print ("\nERROR ==> ", e)
printHelp()
raise e
elif args[0].lower() == "tag":
try:
r = RDRPOSTagger()
print ("\n=> Read a POS tagging model from", args[1])
r.constructSCRDRtreeFromRDRfile(args[1])
print ("\n=> Read a lexicon from", args[2])
DICT = readDictionary(args[2])
print ("\n=> Perform POS tagging on", args[3])
r.tagRawCorpus(DICT, args[3])
except Exception as e:
print ("\nERROR ==> ", e)
printHelp()
raise e
else:
printHelp()
if __name__ == "__main__":
run()
pass
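# Programmatic usage sketch (the model/lexicon paths are the example paths shown by
# printHelp() above and are not guaranteed to exist on disk):
#
#   r = RDRPOSTagger()
#   r.constructSCRDRtreeFromRDRfile("../data/goldTrain.RDR")
#   DICT = readDictionary("../data/goldTrain.DICT")
#   print(r.tagRawSentence(DICT, "This is a raw sentence ."))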
```
#### File: jirian/text_summarizer_czech/separator.py
```python
import os
def separate(input_string):
file_not_found = False
os.chdir(os.path.dirname(os.path.realpath(__file__)))
message = ""
# abbreviations set - common czech abbreviations:
try:
with open("separator_data/abbreviations.txt", 'r') as abbreviations_file:
abbreviations = frozenset(line.strip() for line in abbreviations_file)
except IOError:
message += "Soubor abbreviations.txt nenalezen"
file_not_found = True
# separators set - symbols that separate sentences:
try:
with open("separator_data/separators.txt", 'r') as separators_file:
separators = frozenset(line.strip() for line in separators_file)
except IOError:
message += "; Soubor separators.txt nenalezen"
file_not_found = True
# starters set - symbols that can appear at the beginning of a sentence:
try:
with open("separator_data/starters.txt", 'r') as starters_file:
starters = frozenset(line.strip() for line in starters_file)
except IOError:
message += "; Soubor starters.txt nenalezen"
file_not_found = True
# terminators set - symbols that can appear at the end of a sentence (after a separator)
try:
with open("separator_data/terminators.txt", 'r') as terminators_file:
terminators = frozenset(line.strip() for line in terminators_file)
except IOError:
message += "; Soubor terminators.txt nenalezen"
file_not_found = True
if file_not_found:
message = message.strip(";").strip()
raise IOError(message)
input_string = input_string.strip()
sentences = list()
begin = 0
end = 0
help_begin = 0
help_end = 0
sep_pos = 0
help_string = ""
make_sentence = False
upper = False
# Big while-cycle reading the whole input_string char after char and performing all the magic
while end < len(input_string):
# New line - end of a paragraph
if input_string[end] == '\n':
sentence = input_string[begin:end].strip()
if len(sentence) > 0:
sentences.append(sentence)
begin = end+1
# The last word of the paragraph can be a sign/signature (a single word starting with a lowercase letter);
# this must be checked, but only if the sentence was really added (i.e. if its length is greater than 0)
if len(sentence) > 0:
help_begin = end-1
help_end = end-1
# Moving help_end to the end of the paragraph text
while input_string[help_end].isspace():
help_end -= 1
# Text of the paragraph is not finished by a separator, there can be a sign
if not input_string[help_end] in separators:
help_begin = help_end
# Moving help_begin before the beginning of the last word before the new line (possible sign)
while help_begin >= 0 and not input_string[help_begin].isspace():
help_begin -= 1
sign = input_string[help_begin+1:help_end+1] #Last word of the paragraph - possible sign
if sign[0].islower(): #First char of the possible sign is lower - it was not separated as a sentence before
while (input_string[help_begin].isspace()): #Moving help_begin to the end of the text before the possible sign
help_begin -= 1
if input_string[help_begin] in separators: #There is a separator before the possible sign - it really is a sign and must be separated
sentences.pop()
sentence = sentence[0:(len(sentence)-len(sign))].strip()
sentences.append(sentence)
sentences.append(sign)
elif input_string[end] in separators: #Sentence separating char (separator) was detected, it depends what follows in the text
sep_pos = end
while (end < len(input_string)-1 and
input_string[end+1] in terminators): #Skipping terminators
end += 1
help_end = end+1
make_sentence = False
while (help_end < len(input_string) and
(input_string[help_end].isspace() or (input_string[help_end] in starters)) and
input_string[help_end] != '\n'):
help_end += 1 #Moves help_end to the first 'sentence-begin-deciding' char behind the separator (starters act like whitespaces, but they are not trimmed when at the beginning of a sentence)
if help_end >= len(input_string): #There are only whitespaces or starters mesh after the separator - end of the text
sentence = input_string[begin:end+1].strip()
if len(sentence) > 0:
sentences.append(sentence)
end = help_end-1
elif input_string[help_end] == '\n': #There is a new line after the separator - will be solved in next round
end = help_end-1
elif (input_string[help_end].isupper() or
input_string[help_end].isdigit()): #There is an upper char or digit after the separator
upper = input_string[help_end].isupper()
if input_string[end] != '.': #The separator is not a dot, it is the end of the sentence
make_sentence = True
else: #The separator is a dot, it can be the end of an abbreviation or a part of an order number
help_begin = sep_pos-1
help_end = sep_pos-1
while (input_string[help_end].isspace()): #Skipping whitespaces before the dot
help_begin -= 1
help_end -=1
while (help_begin >= 0 and
not input_string[help_begin].isspace() and
input_string[help_begin] != '.'): #Moving help_begin to the beginning of the word before the dot
help_begin -= 1
help_begin += 1
#The word before the dot is to be extracted, it can start with any of the starters and these must be omitted
while (help_begin < help_end and input_string[help_begin] in starters):
help_begin += 1
help_string = input_string[help_begin:help_end+1] #The word before the dot
if ((len(help_string) != 1 or help_string.isdigit() or help_string in terminators) and
not help_string.lower() in abbreviations): #The word before the dot is not an abbreviation
if upper: #There is an upper char after the dot, all prerequisities to make a sentence are satisfied
make_sentence = True
elif (len(help_string) > 0 and
not help_string[len(help_string)-1].isdigit()): #There is a digit after the dot; the word before the dot must not end with a digit to make a sentence (avoids splitting order numbers)
make_sentence = True
if make_sentence:
sentence = input_string[begin:end+1].strip()
if len(sentence) > 0:
sentences.append(sentence)
begin = end+1
end += 1 #End of the big while-cycle
help_end = end-1 #When the whole text is not ended by a separator, last sentence is not included. This must be solved separately.
while (help_end >= 0 and
(input_string[help_end] in terminators or input_string[help_end].isspace())):
help_end -= 1
if help_end >= 0 and not input_string[help_end] in separators:
sentence = input_string[begin:end].strip()
if len(sentence) > 0:
sentences.append(sentence)
return sentences
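# Usage sketch (illustrative sample text only; assumes the separator_data/*.txt resource
# files are present next to this module):
#
#   for sentence in separate("Dobrý den. Toto je např. krátký test. Funguje to?"):
#       print(sentence)
#
# The abbreviation check should keep "např." from ending a sentence (provided it is
# listed in abbreviations.txt), while the remaining full stops and the question mark
# act as separators.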
``` |
{
"source": "jiribrejcha/BakeBit",
"score": 2
} |
#### File: Software/Python/bakebit_nanohat_oled.py
```python
import bakebit_128_64_oled as oled
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import time
import sys
import subprocess
import signal
import os
import socket
import types
import re
from textwrap import wrap
__version__ = "0.32 (beta)"
__author__ = "<EMAIL>"
############################
# Set display size
############################
width=128
height=64
############################
# Set page sleep control
############################
pageSleep=300
pageSleepCountdown=pageSleep
####################################
# Initialize the SEEED OLED display
####################################
oled.init()
#Set display to normal mode (i.e non-inverse mode)
oled.setNormalDisplay()
oled.setHorizontalMode()
#######################################
# Initialize drawing & fonts variables
#######################################
# This variable is shared between activities and is set to True if a
# drawing action in already if progress (e.g. by another activity). An activity
# happens during each cycle of the main while loop or when a button is pressed
# (This does not appear to be threading or process spawning)
drawing_in_progress = False
#####################################
# Create global draw/image objects
#####################################
image = Image.new('1', (width, height))
draw = ImageDraw.Draw(image)
reboot_image = Image.open('reboot.png').convert('1')
#######################
# Define display fonts
#######################
smartFont = ImageFont.truetype('DejaVuSansMono-Bold.ttf', 10);
font11 = ImageFont.truetype('DejaVuSansMono.ttf', 11);
font12 = ImageFont.truetype('DejaVuSansMono.ttf', 12);
fontb12 = ImageFont.truetype('DejaVuSansMono-Bold.ttf', 12);
font14 = ImageFont.truetype('DejaVuSansMono.ttf', 14);
fontb14 = ImageFont.truetype('DejaVuSansMono-Bold.ttf', 14);
fontb24 = ImageFont.truetype('DejaVuSansMono-Bold.ttf', 24);
#######################################
# Initialize various global variables
#######################################
shutdown_in_progress = False # True when shutdown or reboot started
screen_cleared = False # True when display cleared (e.g. screen save)
current_menu_location = [0] # Pointer to current location in menu structure
option_selected = 0 # Content of currently selected menu level
sig_fired = False # Set to True when button handler fired
home_page_name = "Home" # Display name for top level menu
current_mode = "classic" # Currently selected mode (e.g. wconsole/classic)
nav_bar_top = 55 # top pixel of nav bar
current_scroll_selection = 0 # where we currently are in scrolling table
table_list_length = 0 # Total length of currently displayed table
result_cache = False # used to cache results when paging info
display_state = 'page' # current display state: 'page' or 'menu'
start_up = True # True if in initial (home page) start-up state
#######################################
# Initialize file variables
#######################################
# Mode changer scripts
wconsole_mode_file = '/etc/wconsole/wconsole.on'
hotspot_mode_file = '/etc/wlanpihotspot/hotspot.on'
wiperf_mode_file = '/home/wlanpi/wiperf/wiperf.on'
wconsole_switcher_file = '/etc/wconsole/wconsole_switcher'
hotspot_switcher_file = '/etc/wlanpihotspot/hotspot_switcher'
wiperf_switcher_file = '/home/wlanpi/wiperf/wiperf_switcher'
# helper scripts to launch misc processes
kismet_ctl_file = '/home/wlanpi/NanoHatOLED/BakeBit/Software/Python/scripts/kismet_ctl'
bettercap_ctl_file = '/home/wlanpi/NanoHatOLED/BakeBit/Software/Python/scripts/bettercap_ctl'
profiler_ctl_file = '/home/wlanpi/NanoHatOLED/BakeBit/Software/Python/scripts/profiler_ctl'
# cdp and lldp networkinfo data file names
lldpneigh_file = '/tmp/lldpneigh.txt'
cdpneigh_file = '/tmp/cdpneigh.txt'
ipconfig_file = '/home/wlanpi/NanoHatOLED/BakeBit/Software/Python/scripts/networkinfo/ipconfig.sh 2>/dev/null'
reachability_file = '/home/wlanpi/NanoHatOLED/BakeBit/Software/Python/scripts/networkinfo/reachability.sh'
publicip_cmd = '/home/wlanpi/NanoHatOLED/BakeBit/Software/Python/scripts/networkinfo/publicip.sh'
# Linux programs
ifconfig_file = '/sbin/ifconfig'
iw_file = '/usr/sbin/iw'
ufw_file = '/usr/sbin/ufw'
ethtool_file = '/sbin/ethtool'
# check our current mode
if os.path.isfile(wconsole_mode_file):
current_mode = 'wconsole'
if os.path.isfile(hotspot_mode_file):
current_mode = 'hotspot'
if os.path.isfile(wiperf_mode_file):
current_mode = 'wiperf'
# get & the current version of WLANPi image
ver_cmd = "grep \"WLAN Pi v\" /var/www/html/index.html | sed \"s/<[^>]\+>//g\""
try:
wlanpi_ver = subprocess.check_output(ver_cmd, shell = True ).strip()
except:
wlanpi_ver = "unknown"
# get hostname
try:
hostname = subprocess.check_output('hostname', shell = True)
except:
hostname = "unknown"
#############################
# Get current IP for display
#############################
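# Note: the UDP connect() below never sends any packets; it only makes the kernel pick
# the outbound interface for that destination, so getsockname() returns the local IP
# address that would be used for the default route.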
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
##########################
# Draw navigation buttons
##########################
def nav_button(label, position):
global draw
global nav_bar_top
draw.text((position,nav_bar_top),label,font=smartFont,fill=255)
return
def back_button(label="Back"):
nav_button(label, 100)
return
def next_button(label="Next"):
nav_button(label,50)
return
def down_button(label="Down"):
nav_button(label, 0)
return
##############################################
# Page & menu functions
##############################################
def clear_display():
'''
Paint display black prior to painting new page
'''
global width
global height
global draw
# Draw a black filled box to clear the display.
draw.rectangle((0,0,width,height), outline=0, fill=0)
def display_simple_table(item_list, back_button_req=0, title='', font="small"):
'''
This function takes a list and paints each entry as a line on a
page. It also displays appropriate up/down scroll buttons if the
entries passed exceed a page length (one line at a time)
'''
global drawing_in_progress
global draw
global oled
global current_scroll_selection
global table_list_length
global display_state
drawing_in_progress = True
display_state = 'page'
# Clear display prior to painting new item
clear_display()
y = 0
x = 0
font_offset = 0
if font == "small":
font_type = smartFont
font_size = 11
item_length_max = 20
table_display_max = 5
elif font == "medium":
font_type = font11
font_size = 11
item_length_max = 17
table_display_max = 4
# write title if present
if title != '':
draw.text((x, y + font_offset), title.center(item_length_max, " "), font=font_type, fill=255)
font_offset += font_size
table_display_max -=1
previous_table_list_length = table_list_length
table_list_length = len(item_list)
# if table length changes, reset current scroll selection
# e.g. when showing lldp table info and eth cable
# pulled so list size changes
if table_list_length != previous_table_list_length:
current_scroll_selection = 0
# if we're going to scroll of the end of the list, adjust pointer
if current_scroll_selection + table_display_max > table_list_length:
current_scroll_selection -=1
# modify list to display if scrolling required
if table_list_length > table_display_max:
table_bottom_entry = current_scroll_selection + table_display_max
item_list = item_list[current_scroll_selection: table_bottom_entry]
# show down if not at end of list in display window
if table_bottom_entry < table_list_length:
down_button()
# show an up button if not at start of list
if current_scroll_selection > 0:
next_button(label="Up")
for item in item_list:
if len(item) > item_length_max:
item = item[0:item_length_max]
draw.text((x, y + font_offset), item, font=font_type, fill=255)
font_offset += font_size
# Back button
if back_button_req:
back_button(label="Exit")
oled.drawImage(image)
display_state = 'page'
drawing_in_progress = False
return
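# Usage sketch (illustrative): paint a short scrollable list with a title and an Exit button
#
#   display_simple_table(["Line 1", "Line 2", "Line 3"], back_button_req=1, title='--Demo--')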
def display_dialog_msg(msg, back_button_req=0, wrap_limit=17, font="medium"):
'''
display informational dialog box
'''
global draw
global oled
global drawing_in_progress
global display_state
msg_list = wrap(msg, wrap_limit)
display_simple_table(msg_list, back_button_req, title='Info:', font=font)
def display_paged_table(table_data, back_button_req=0):
'''
This function takes several pages of information and displays on the
display with appropriate pg up/pg down buttons
table data is in format:
data = {
'title' = 'page title',
'pages' = [
['Page 1 line 1', Page 1 line 2, 'Page 1 line 3', 'Page 1 line 4'],
['Page 2 line 1', Page 2 line 2, 'Page 2 line 3', 'Page 2 line 4'],
['Page 3 line 1', Page 3 line 2, 'Page 3 line 3', 'Page 3 line 4'],
...etc.
]
}
'''
global drawing_in_progress
global draw
global oled
global current_scroll_selection
global table_list_length
global display_state
drawing_in_progress = True
display_state = 'page'
# Clear display prior to painting new item
clear_display()
y = 0
x = 0
font_offset = 0
font_size = 11
item_length_max = 20
table_display_max = 4
# write title
title = table_data['title']
total_pages = len(table_data['pages'])
if total_pages > 1:
title += " ({}/{})".format(current_scroll_selection + 1, total_pages)
draw.text((x, y + font_offset), title.center(item_length_max, " "), font=smartFont, fill=255)
font_offset += font_size
# Extract pages data
table_pages = table_data['pages']
page_count = len(table_pages)
# Display the page selected - correct over-shoot of page down
if current_scroll_selection == page_count:
current_scroll_selection -=1
# Correct over-shoot of page up
if current_scroll_selection == -1:
current_scroll_selection = 0
page = table_pages[current_scroll_selection]
# If the page has greater than table_display_max entries, slice it
if len(page) > table_display_max:
page = page[0:table_display_max]
for item in page:
if len(item) > item_length_max:
item = item[0:item_length_max]
draw.text((x, y + font_offset), item, font=smartFont, fill=255)
font_offset += font_size
# if we're going need to scroll through pages, create buttons
if (page_count > 1):
#if (current_scroll_selection < page_count) and (current_scroll_selection < page_count-1):
if current_scroll_selection < page_count-1:
down_button(label="PgDn")
if (current_scroll_selection > 0) and (current_scroll_selection <= page_count -1):
next_button(label="PgUp")
# Back button
if back_button_req:
back_button(label="Exit")
oled.drawImage(image)
display_state = 'page'
drawing_in_progress = False
return
def display_list_as_paged_table(item_list, back_button_req=0, title=''):
'''
This function builds on display_paged_table() and creates a paged display
from a simple list of results. This provides a better experience than the
simple line-by-line scrolling provided in display_simple_table()
See display_paged_table() for required data structure
'''
data = {}
data['title'] = title
data['pages'] = []
# slice up list in to pages
table_display_max = 4
counter=0
while item_list:
slice = item_list[counter: counter+table_display_max]
data['pages'].append(slice)
item_list = item_list[counter+table_display_max:]
display_paged_table(data, back_button_req)
return
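# Usage sketch (illustrative): a flat list of six entries is sliced into pages of four,
# i.e. two pages of 4 + 2 lines, before being handed to display_paged_table():
#
#   display_list_as_paged_table(["a", "b", "c", "d", "e", "f"], back_button_req=1, title='--Demo--')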
##############################################
# Main function to draw menu navigation pages
##############################################
def draw_page():
global drawing_in_progress
global image
global draw
global oled
global font11
global fontb12
global font14
global smartFont
global width
global height
global pageSleepCountdown
global current_menu_location
global option_selected
global option_number_selected
global menu
global home_page_name
global display_state
# Drawing already in progress - return
if drawing_in_progress:
return
# signal we are drawing
drawing_in_progress = True
################################################
# show menu list based on current menu position
################################################
#FIXME: This feels clunky. Would be best to access menu locations
# via evaluated location rather than crawling over menu
menu_structure = menu
location_search = []
depth = 0
section_name = [home_page_name]
# Crawl the menu structure until we hit the current specified location
while current_menu_location != location_search:
# List that will be used to build menu items to display
menu_list = []
# Current menu location choice specified in list format:
# current_menu_location = [2,1]
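# (with the classic menu defined further below, [2,1] would select the 3rd
#  top-level item "Modes" and, within it, the 2nd entry "Hotspot")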
#
# As we move through menu depths, inspect the next level of
# menu structure
node = current_menu_location[depth]
# figure out the number of menu options at this menu level
number_menu_choices = len(menu_structure)
if node == number_menu_choices:
# we've fallen off the end of menu choices, fix item by zeroing
node = 0
current_menu_location[depth] = 0
location_search.append(node)
item_counter = 0
for menu_item in menu_structure:
item_name = menu_item['name']
# this is the currently selected item, pre-pend name with '*'
if (item_counter == node):
section_name.append(item_name)
item_name = "*" + item_name
menu_list.append((item_name))
item_counter = item_counter + 1
depth = depth + 1
# move down to next level of menu structure & repeat for new level
menu_structure = menu_structure[node]['action']
option_number_selected = node
option_selected = menu_structure
# if we're at the top of the menu tree, show the home page title
if depth == 1:
page_name = home_page_name
else:
# otherwise show the name of the parent menu item
page_name = section_name[-2]
page_title = ("[ " + page_name + " ]").center(17, " ")
# Clear display prior to painting new item
clear_display()
# paint the page title
draw.text((1, 1), page_title, font=fontb12, fill=255)
# vertical starting point for menu (under title) & incremental offset for
# subsequent items
y=15
y_offset=13
# define display window limit for menu table
table_window = 3
# determine the menu list to show based on current selection and window limits
if (len(menu_list) > table_window):
# We've got more items than we can fit in our window, need to slice to fit
if (option_number_selected >= table_window):
menu_list = menu_list[(option_number_selected - (table_window - 1)): option_number_selected + 1]
else:
# We have enough space for the menu items, so no special treatment required
menu_list = menu_list[0 : table_window]
# paint the menu items, highlighting selected menu item
for menu_item in menu_list:
rect_fill=0
text_fill=255
# this is selected menu item: highlight it and remove * character
if (menu_item[0] == '*'):
rect_fill=255
text_fill=0
menu_item = menu_item[1:len(menu_item)]
# convert menu item to std width format with nav indicator
menu_item = "{:<17}>".format(menu_item)
draw.rectangle((0, y, 127, y+y_offset), outline=0, fill=rect_fill)
draw.text((1, y+1), menu_item, font=font11, fill=text_fill)
y += y_offset
# add nav buttons
down_button()
next_button()
# Don't show back button at top level of menu
if depth != 1:
back_button()
else:
back_button(label="Exit")
oled.drawImage(image)
drawing_in_progress = False
####################################
# dispatcher (menu) functions here
####################################
def show_summary():
'''
Summary page - taken from original bakebit script
'''
global width
global height
global draw
global oled
global display_state
global drawing_in_progress
# The commands here take quite a while to execute, so lock screen early
# (normally done by page drawing function)
drawing_in_progress = True
IPAddress = get_ip()
# determine CPU load
cmd = "top -bn1 | grep load | awk '{printf \"CPU Load: %.2f\", $(NF-2)}'"
try:
CPU = subprocess.check_output(cmd, shell = True )
except:
CPU = "unknown"
# determine mem usage
cmd = "free -m | awk 'NR==2{printf \"Mem: %s/%sMB %.2f%%\", $3,$2,$3*100/$2 }'"
try:
MemUsage = subprocess.check_output(cmd, shell = True )
except:
MemUsage = "unknown"
# determine disk util
cmd = "df -h | awk '$NF==\"/\"{printf \"Disk: %d/%dGB %s\", $3,$2,$5}'"
try:
Disk = subprocess.check_output(cmd, shell = True )
except:
Disk = "unknown"
# determine temp
try:
tempI = int(open('/sys/class/thermal/thermal_zone0/temp').read())
except:
tempI = "unknown"
if tempI>1000:
tempI = tempI/1000
tempStr = "CPU TEMP: %sC" % str(tempI)
results = [
"IP: " + str(IPAddress),
str(CPU),
str(MemUsage),
str(Disk),
tempStr
]
# final check no-one pressed a button before we render page
if display_state == 'menu':
return
display_simple_table(results, back_button_req=1)
return
def show_date():
'''
Date page - taken from original bakebit script & modified to add TZ
'''
global width
global height
global draw
global oled
global display_state
global drawing_in_progress
drawing_in_progress = True
# Clear display prior to painting new item
clear_display()
text = time.strftime("%A")
draw.text((1,0),text,font=font12,fill=255)
text = time.strftime("%e %b %Y")
draw.text((1,13),text,font=font12,fill=255)
text = time.strftime("%X")
draw.text((1,26),text,font=fontb14,fill=255)
text = time.strftime("%Z")
draw.text((1,41),"TZ: " + text,font=font12,fill=255)
# Back button
back_button()
oled.drawImage(image)
display_state = 'page'
drawing_in_progress = False
def show_interfaces():
'''
Return a list of network interfaces found to be up, with IP address if available
'''
global ifconfig_file
global iw_file
global display_state
try:
ifconfig_info = subprocess.check_output(ifconfig_file, shell=True)
except Exception as ex:
interfaces= [ "Err: ifconfig error" ]
display_simple_table(interfaces, back_button_req=1)
return
# Extract interface info with a bit of regex magic
interface_re = re.findall('^(\w+?)\: flags(.*?)RX packets', ifconfig_info, re.DOTALL|re.MULTILINE)
if not interface_re:
# Something broke in our regex - report an issue
interfaces = [ "Error: match error"]
else:
interfaces = []
for result in interface_re:
# save the interface name
interface_name = result[0]
# look at the rest of the interface info & extract IP if available
interface_info = result[1]
inet_search = re.search("inet (.+?) ", interface_info, re.MULTILINE)
if inet_search is None:
ip_address = "No IP address"
# do check if this is an interface in monitor mode
if (re.search("wlan\d", interface_name, re.MULTILINE)):
# fire up 'iw' for this interface (hmmm.. is this a bit of an unnecessary overhead?)
try:
iw_info = subprocess.check_output('{} {} info'.format(iw_file, interface_name), shell=True)
if re.search("type monitor", iw_info, re.MULTILINE):
ip_address = "(Monitor)"
except:
ip_address = "unknown"
else:
ip_address = inet_search.group(1)
interfaces.append( '{}: {}'.format(interface_name, ip_address))
# final check no-one pressed a button before we render page
if display_state == 'menu':
return
display_list_as_paged_table(interfaces, back_button_req=1, title="--Interfaces--")
def show_wlan_interfaces():
'''
Create pages to summarise WLAN interface info
'''
global ifconfig_file
global iw_file
global display_state
try:
ifconfig_info = subprocess.check_output('{} -s'.format(ifconfig_file), shell=True)
except Exception as ex:
interfaces= [ "Err: ifconfig error" ]
display_simple_table(interfaces, back_button_req=1)
return
# Extract interface info
interface_re = re.findall('^(wlan\d) ', ifconfig_info, re.DOTALL|re.MULTILINE)
if not interface_re:
interfaces = [ "Error: match error"]
else:
interfaces = []
for interface_name in interface_re:
interface_info = []
# use iw to find further info for each wlan interface
try:
iw_info = subprocess.check_output("{} {} info".format(iw_file, interface_name), shell=True)
except:
iw_info = "Err: iw cmd failed"
# split the output in to an array
iw_list = iw_info.split('\n')
interface_details = {}
for iw_item in iw_list:
iw_item = iw_item.strip()
fields = iw_item.split()
# skip empty lines
if not fields:
continue
interface_details[fields[0]] = fields[1:]
# construct our page data - start with name
interface_info.append("Interface: " + interface_name)
# SSID (if applicable)
if 'ssid' in interface_details.keys():
interface_info.append("SSID: " + str(interface_details['ssid'][0]))
else:
interface_info.append("SSID: N/A")
# Mode
if 'type' in interface_details.keys():
interface_info.append("Mode: " + str(interface_details['type'][0]))
else:
interface_info.append("Mode: N/A")
# Channel
if 'channel' in interface_details.keys():
interface_info.append("Ch: {} ({}Mhz)".format( str(interface_details['channel'][0]), str(interface_details['channel'][4]) ) )
else:
interface_info.append("Ch: unknown")
# MAC
if 'addr' in interface_details.keys():
interface_info.append("Addr: " + str(interface_details['addr']))
else:
interface_info.append("Addr: unknown")
interfaces.append(interface_info)
# if we had no WLAN interfaces, insert message
if len(interfaces) == 0:
interfaces.append(['No Wlan Interfaces'])
data = {
'title': '--WLAN I/F--',
'pages': interfaces
}
# final check no-one pressed a button before we render page
if display_state == 'menu':
return
display_paged_table(data, back_button_req=1)
def show_usb():
'''
Return a list of non-Linux USB interfaces found with the lsusb command
'''
global display_state
lsusb = '/usr/bin/lsusb | /bin/grep -v Linux | /usr/bin/cut -d\ -f7-'
lsusb_info = []
try:
lsusb_output = subprocess.check_output(lsusb, shell=True)
lsusb_info = lsusb_output.split('\n')
except Exception as ex:
error_descr = "Issue getting usb info using lsusb command"
interfaces= [ "Err: lsusb error" ]
display_simple_table(interfaces, back_button_req=1)
return
interfaces = []
for result in lsusb_info:
# chop down the string to fit the display
result = result[0:19]
interfaces.append(result)
if len(interfaces) == 0:
interfaces.append("No devices detected")
# final check no-one pressed a button before we render page
if display_state == 'menu':
return
display_simple_table(interfaces, back_button_req=1, title='--USB Interfaces--')
return
def show_ufw():
'''
Return a list ufw ports
'''
global ufw_file
global result_cache
global display_state
ufw_info = []
# check ufw is available
if not os.path.isfile(ufw_file):
display_dialog_msg('UFW not installed', back_button_req=1)
display_state = 'page'
return
# If no cached ufw data from previous screen paint, run ufw status
if result_cache == False:
try:
ufw_output = subprocess.check_output("sudo {} status".format(ufw_file), shell=True)
ufw_info = ufw_output.split('\n')
result_cache = ufw_info # cache results
except Exception as ex:
error_descr = "Issue getting ufw info using ufw command"
interfaces= [ "Err: ufw error" ]
display_simple_table(interfaces, back_button_req=1)
return
else:
# we must have cached results from last time
ufw_info = result_cache
port_entries = []
# Add in status line
port_entries.append(ufw_info[0])
# lose top 4 & last 2 lines of output
ufw_info = ufw_info[4:-2]
for result in ufw_info:
# tidy/compress the output
result = result.strip()
result_list = result.split()
final_result = ' '.join(result_list)
port_entries.append(final_result)
if len(port_entries) == 0:
port_entries.append("No ufw info detected")
# final check no-one pressed a button before we render page
if display_state == 'menu':
return
display_list_as_paged_table(port_entries, back_button_req=1, title='--UFW Summary--')
return
def show_eth0_ipconfig():
'''
Return IP configuration of eth0 including IP, default gateway, DNS servers
'''
global display_state
eth0_ipconfig_info = []
try:
ipconfig_output = subprocess.check_output(ipconfig_file, shell=True)
ipconfig_info = ipconfig_output.split('\n')
except Exception as ex:
error_descr = "Issue getting ipconfig"
ipconfigerror= [ "Err: ipconfig command error" ]
display_simple_table(ipconfigerror, back_button_req=1)
return
if len(ipconfig_info) == 0:
eth0_ipconfig_info.append("Nothing to display")
for n in ipconfig_info:
eth0_ipconfig_info.append(n)
# chop down output to fit up to 2 lines on display
choppedoutput = []
for n in eth0_ipconfig_info:
choppedoutput.append(n[0:20])
if len(n) > 20:
choppedoutput.append(n[20:40])
# final check no-one pressed a button before we render page
if display_state == 'menu':
return
display_simple_table(choppedoutput, back_button_req=1, title='--Eth0 IP Config--')
return
def show_lldp_neighbour():
'''
Display LLDP neighbour on eth0
'''
global display_state
neighbour_info = []
neighbour_cmd = "sudo cat " + lldpneigh_file
if os.path.exists(lldpneigh_file):
try:
neighbour_output = subprocess.check_output(neighbour_cmd, shell=True)
neighbour_info = neighbour_output.split('\n')
except Exception as ex:
error_descr = "Issue getting LLDP neighbour"
error= [ "Err: Neighbour command error" ]
display_simple_table(error, back_button_req=1)
return
if len(neighbour_info) == 0:
neighbour_info.append("No neighbour")
# chop down output to fit up to 2 lines on display
choppedoutput = []
for n in neighbour_info:
choppedoutput.append(n[0:20])
if len(n) > 20:
choppedoutput.append(n[20:40])
# final check no-one pressed a button before we render page
if display_state == 'menu':
return
display_simple_table(choppedoutput, back_button_req=1, title='--LLDP Neighbour--')
def show_cdp_neighbour():
'''
Display CDP neighbour on eth0
'''
global display_state
neighbour_info = []
neighbour_cmd = "sudo cat " + cdpneigh_file
if os.path.exists(cdpneigh_file):
try:
neighbour_output = subprocess.check_output(neighbour_cmd, shell=True)
neighbour_info = neighbour_output.split('\n')
except Exception as ex:
error_descr = "Issue getting LLDP neighbour"
error= [ "Err: Neighbour command error" ]
display_simple_table(error, back_button_req=1)
return
if len(neighbour_info) == 0:
neighbour_info.append("No neighbour")
# chop down output to fit up to 2 lines on display
choppedoutput = []
for n in neighbour_info:
choppedoutput.append(n[0:20])
if len(n) > 20:
choppedoutput.append(n[20:40])
# final check no-one pressed a button before we render page
if display_state == 'menu':
return
display_simple_table(choppedoutput, back_button_req=1, title='--CDP Neighbour--')
def show_reachability():
'''
Check if default gateway, internet and DNS are reachable and working
'''
global display_state
reachability_info = []
reachability_cmd = "sudo " + reachability_file
try:
reachability_output = subprocess.check_output(reachability_cmd, shell=True)
reachability_info = reachability_output.split('\n')
except Exception as ex:
error_descr = "Issue getting reachability info"
error= [ "Err: Reachability command error" ]
display_simple_table(error, back_button_req=1)
return
if len(reachability_info) == 0:
reachability_info.append("No output sorry")
# chop down output to fit up to 2 lines on display
choppedoutput = []
for n in reachability_info:
choppedoutput.append(n[0:20])
if len(n) > 20:
choppedoutput.append(n[20:40])
# final check no-one pressed a button before we render page
if display_state == 'menu':
return
display_simple_table(choppedoutput, back_button_req=1, title='--Reachability--')
def show_vlan():
'''
Display untagged VLAN number on eth0
Todo: Add tagged VLAN info
'''
global display_state
vlan_info = []
vlan_cmd = "sudo grep -a VLAN " + lldpneigh_file + " || grep -a VLAN " + cdpneigh_file
if os.path.exists(lldpneigh_file):
try:
vlan_output = subprocess.check_output(vlan_cmd, shell=True)
vlan_info = vlan_output.split('\n')
except Exception as ex:
error_descr = "Issue getting VLAN info"
error= [ "No VLAN found" ]
display_simple_table(error, back_button_req=1)
return
if len(vlan_info) == 0:
vlan_info.append("No VLAN found")
# final chop down of the string to fit the display
vlan_info = [n[0:19] for n in vlan_info]
# final check no-one pressed a button before we render page
if display_state == 'menu':
return
display_simple_table(vlan_info, back_button_req=1, title='--Eth0 VLAN--')
def show_wpa_passphrase():
'''
Show WPA passphrase
'''
global display_state
swpc = "sudo grep 'wpa_passphrase' /etc/hostapd.conf | cut -d '=' -f2"
try:
wpa_passphrase = []
wpa_passphrase_output = subprocess.check_output(swpc, shell=True)
wpa_passphrase.append(wpa_passphrase_output)
except Exception as ex:
error_descr = "Issue getting WPA passphrase"
swperror= [ "Err: WPA passphrase" ]
display_simple_table(swperror, back_button_req=1)
return
# final check no-one pressed a button before we render page
if display_state == 'menu':
return
# chop down output to fit up to 2 lines on display
choppedoutput = []
for n in wpa_passphrase:
choppedoutput.append(n[0:20])
if len(n) > 20:
choppedoutput.append(n[20:40])
display_simple_table(choppedoutput, back_button_req=1, title='--WPA passphrase--')
def show_speedtest():
'''
Run speedtest.net speed test and format output to fit the OLED screen
'''
global display_state
display_dialog_msg('Running Speedtest...', back_button_req=1)
speedtest_info = []
speedtest_cmd = "speedtest | egrep -w \"Testing from|Download|Upload\" | sed 's/Testing from /My IP: /g; s/\.\.\.//g; s/Download/D/g; s/Upload/U/g; s/(//g; s/)//g; s/bit\/s/bps/g'"
try:
speedtest_output = subprocess.check_output(speedtest_cmd, shell=True)
speedtest_info = speedtest_output.split('\n')
except Exception as ex:
error_descr = "Speedtest error"
error= [ "Err: Speedtest error" ]
display_simple_table(error, back_button_req=1)
return
if len(speedtest_info) == 0:
speedtest_info.append("No output sorry")
# chop down output to fit up to 2 lines on display
choppedoutput = []
for n in speedtest_info:
choppedoutput.append(n[0:20])
if len(n) > 20:
choppedoutput.append(n[20:40])
# final check no-one pressed a button before we render page
if display_state == 'menu':
return
display_simple_table(choppedoutput, back_button_req=1, title='--Speedtest--')
time.sleep(300)
def show_publicip():
'''
Shows public IP address and related details, works with any interface with internet connectivity
'''
global display_state
publicip_info = []
try:
publicip_output = subprocess.check_output(publicip_cmd, shell=True)
publicip_info = publicip_output.split('\n')
except Exception as ex:
error_descr = "Public IP Error"
error= [ "Err: Public IP" ]
display_simple_table(error, back_button_req=1)
return
if len(publicip_info) == 0:
publicip_info.append("No output sorry")
# chop down output to fit up to 2 lines on display
choppedoutput = []
for n in publicip_info:
choppedoutput.append(n[0:20])
if len(n) > 20:
choppedoutput.append(n[20:40])
# final check no-one pressed a button before we render page
if display_state == 'menu':
return
display_simple_table(choppedoutput, back_button_req=1, title='--Public IP Address--')
time.sleep(10)
def show_menu_ver():
global __version__
display_simple_table(["Menu version:", __version__], back_button_req=1, font="medium")
def shutdown():
global oled
global shutdown_in_progress
global screen_cleared
display_dialog_msg('Shutting down...', back_button_req=0)
time.sleep(1)
oled.clearDisplay()
screen_cleared = True
os.system('systemctl poweroff')
shutdown_in_progress = True
return
def reboot():
global oled
global shutdown_in_progress
global screen_cleared
global reboot_image
display_dialog_msg('Rebooting...', back_button_req=0)
time.sleep(1)
oled.drawImage(reboot_image)
screen_cleared = True
os.system('systemctl reboot')
shutdown_in_progress = True
return
def switcher(resource_title, resource_switcher_file, mode_name):
'''
Function to perform generic set of operations to switch wlanpi mode
'''
global oled
global shutdown_in_progress
global screen_cleared
global current_mode
global display_state
global reboot_image
# check resource is available
if not os.path.isfile(resource_switcher_file):
display_dialog_msg('{} not available'.format(resource_title), back_button_req=1)
display_state = 'page'
return
# Resource switcher was detected, so assume it's installed
back_button_req=0
if current_mode == "classic":
# if in classic mode, switch to the resource
dialog_msg = 'Switching to {} mode (rebooting...)'.format(resource_title)
switch = "on"
elif current_mode == mode_name:
dialog_msg = 'Switching to Classic mode (rebooting...)'
switch = "off"
else:
display_dialog_msg('Unknown mode: {}'.format(current_mode), back_button_req=1)
display_state = 'page'
return False
# Flip the mode
display_dialog_msg(dialog_msg, back_button_req)
shutdown_in_progress = True
time.sleep(2)
oled.drawImage(reboot_image)
screen_cleared = True
try:
dialog_msg = subprocess.check_output("{} {}".format(resource_switcher_file, switch), shell=True) # reboots
except Exception as ex:
dialog_msg = mode_name
# We only get to this point if the switch has failed for some reason
# (Note that the switcher script reboots the WLANPi)
shutdown_in_progress = False
screen_cleared = False
display_dialog_msg("Switch failed: {}".format(dialog_msg), back_button_req=0)
display_state = 'menu'
# allow 3 secs to view failure msg
time.sleep(3)
# move back up to menu branch
global current_menu_location
current_menu_location.pop()
return False
def wconsole_switcher():
global wconsole_switcher_file
resource_title = "Wi-Fi Console"
mode_name = "wconsole"
resource_switcher_file = wconsole_switcher_file
# switch
switcher(resource_title, resource_switcher_file, mode_name)
return True
def hotspot_switcher():
global hotspot_switcher_file
resource_title = "Hotspot"
mode_name = "hotspot"
resource_switcher_file = hotspot_switcher_file
switcher(resource_title, resource_switcher_file, mode_name)
return True
def wiperf_switcher():
global wiperf_switcher_file
resource_title = "Wiperf"
mode_name = "wiperf"
resource_switcher_file = wiperf_switcher_file
switcher(resource_title, resource_switcher_file, mode_name)
return True
def kismet_ctl(action="status"):
'''
Function to start/stop and get status of Kismet processes
'''
global kismet_ctl_file
global display_state
# check resource is available
if not os.path.isfile(kismet_ctl_file):
display_dialog_msg('{} not available'.format(kismet_ctl_file), back_button_req=1)
display_state = 'page'
return
if action=="status":
# check kismet status & return text
try:
dialog_msg = subprocess.check_output("{} {}".format(kismet_ctl_file, action), shell=True)
except Exception as ex:
dialog_msg = 'Status failed! {}'.format(ex)
elif action=="start":
try:
dialog_msg = subprocess.check_output("{} {}".format(kismet_ctl_file, action), shell=True)
except Exception as ex:
dialog_msg = 'Start failed! {}'.format(ex)
elif action=="stop":
try:
dialog_msg = subprocess.check_output("{} {}".format(kismet_ctl_file, action), shell=True)
except Exception as ex:
dialog_msg = 'Stop failed! {}'.format(ex)
display_dialog_msg(dialog_msg, back_button_req=1)
display_state = 'page'
return True
def kismet_status():
kismet_ctl(action="status")
return
def kismet_stop():
kismet_ctl(action="stop")
return
def kismet_start():
kismet_ctl(action="start")
return
def bettercap_ctl(action="status"):
'''
Function to start/stop and get status of Bettercap processes
'''
global bettercap_ctl_file
global display_state
# check resource is available
if not os.path.isfile(bettercap_ctl_file):
display_dialog_msg('{} not available'.format(bettercap_ctl_file), back_button_req=1)
display_state = 'page'
return
if action=="status":
# check bettercap status & return text
try:
dialog_msg = subprocess.check_output("{} {}".format(bettercap_ctl_file, action), shell=True)
except Exception as ex:
dialog_msg = 'Status failed! {}'.format(ex)
elif action=="start":
try:
dialog_msg = subprocess.check_output("{} {}".format(bettercap_ctl_file, action), shell=True)
except Exception as ex:
dialog_msg = 'Start failed! {}'.format(ex)
elif action=="stop":
try:
dialog_msg = subprocess.check_output("{} {}".format(bettercap_ctl_file, action), shell=True)
except Exception as ex:
dialog_msg = 'Stop failed! {}'.format(ex)
display_dialog_msg(dialog_msg, back_button_req=1)
display_state = 'page'
return True
def bettercap_status():
bettercap_ctl(action="status")
return
def bettercap_stop():
bettercap_ctl(action="stop")
return
def bettercap_start():
bettercap_ctl(action="start")
return
def profiler_ctl(action="status"):
'''
Function to start/stop and get status of Profiler processes
'''
global profiler_ctl_file
global display_state
# check resource is available
if not os.path.isfile(profiler_ctl_file):
display_dialog_msg('{} not available'.format(profiler_ctl_file), back_button_req=1)
display_state = 'page'
return
if action=="status":
# check profiler status & return text
try:
status_file_content = subprocess.check_output("{} {}".format(profiler_ctl_file, action), shell=True)
item_list = status_file_content.splitlines()
except Exception as ex:
item_list = ['Status failed!', str(ex)]
display_simple_table(item_list, back_button_req=1, title='Profiler Status')
display_state = 'page'
return True
elif action=="start":
try:
dialog_msg = subprocess.check_output("{} {}".format(profiler_ctl_file, action), shell=True)
except Exception as ex:
dialog_msg = 'Start failed! {}'.format(ex)
elif action=="start_no11r":
try:
dialog_msg = subprocess.check_output("{} {}".format(profiler_ctl_file, action), shell=True)
except Exception as ex:
dialog_msg = 'Start failed! {}'.format(ex)
elif action=="stop":
try:
dialog_msg = subprocess.check_output("{} {}".format(profiler_ctl_file, action), shell=True)
except Exception as ex:
dialog_msg = 'Stop failed! {}'.format(ex)
elif action=="purge":
try:
dialog_msg = subprocess.check_output("{} {}".format(profiler_ctl_file, action), shell=True)
except Exception as ex:
dialog_msg = 'Report purge failed! {}'.format(ex)
display_dialog_msg(dialog_msg, back_button_req=1)
display_state = 'page'
return True
def profiler_status():
profiler_ctl(action="status")
return
def profiler_stop():
profiler_ctl(action="stop")
return
def profiler_start():
profiler_ctl(action="start")
return
def profiler_start_no11r():
profiler_ctl(action="start_no11r")
return
def profiler_purge():
profiler_ctl(action="purge")
return
def check_wiperf_status():
status_file = '/tmp/wiperf_status.txt'
if os.path.exists(status_file):
try:
statusf = open(status_file, 'r')
msg = statusf.readline()
except:
# not much we can do, fail silently
return ''
# return extracted line
return " ({})".format(msg)
else:
return ''
def home_page():
global draw
global oled
global wlanpi_ver
global current_mode
global hostname
global drawing_in_progress
global display_state
global ethtool_file
drawing_in_progress = True
display_state = 'page'
if current_mode == "wconsole":
# get wlan0 IP
if_name = "wlan0"
mode_name = "Wi-Fi Console"
elif current_mode == "hotspot":
# get wlan0 IP
if_name = "wlan0"
mode_name = "Hotspot " + wifi_client_count() + " clients"
elif current_mode == "wiperf":
# get wlan0 IP
if_name = "wlan0"
mode_name = "Wiperf" + check_wiperf_status()
else:
# get eth0 IP
if_name = "eth0"
mode_name = ""
# get Ethernet port info (...for Jerry)
try:
#eth_speed_info = subprocess.check_output("{} eth0 | grep -i speed | cut -d' ' -f2".format(ethtool_file), shell=True)
eth_info = subprocess.check_output('{} eth0 2>/dev/null'.format(ethtool_file), shell=True)
speed_re = re.findall('Speed\: (.*\/s)', eth_info, re.MULTILINE)
duplex_re = re.findall('Duplex\: (.*)', eth_info, re.MULTILINE)
link_re = re.findall('Link detected\: (.*)', eth_info, re.MULTILINE)
if (speed_re is None) or (duplex_re is None) or (link_re is None):
# Our pattern matching failed...silently fail....we must set up logging at some stage
mode_name = ""
elif (link_re[0] == "no"):
# Ethernet link is down, report msg instead of speed & duplex
mode_name = "Link down"
else:
# Report the speed & duplex messages from ethtool
mode_name = "{} {}".format(speed_re[0], duplex_re[0])
except Exception as ex:
# Something went wrong...show nothing
mode_name = ""
# If eth0 is down, let's show the usb0 IP address
# in case anyone uses an OTG connection & is confused
if mode_name == "Link down":
if_name = "usb0"
mode_name = ""
ip_addr_cmd = "ip addr show {} 2>/dev/null | grep -Po \'inet \K[\d.]+\' | head -n 1".format(if_name)
try:
ip_addr = subprocess.check_output(ip_addr_cmd, shell=True)
except Exception as ex:
ip_addr = "No IP Addr"
clear_display()
draw.text((0,1),str(wlanpi_ver),font=smartFont,fill=255)
draw.text((0,11),str(hostname),font=font11,fill=255)
draw.text((95,20),if_name,font=smartFont,fill=255)
draw.text((0,29),str(ip_addr),font=font14,fill=255)
draw.text((0,43),str(mode_name),font=smartFont,fill=255)
back_button('Menu')
oled.drawImage(image)
drawing_in_progress = False
return
#######################
# other functions here
#######################
def wifi_client_count():
wccc = "sudo /sbin/iw dev wlan0 station dump | grep 'Station' | wc -l"
try:
client_count = subprocess.check_output(wccc, shell=True)
except Exception as ex:
error_descr = "Issue getting number of Wi-Fi clients"
wccerror= [ "Err: Wi-Fi client count" ]
display_simple_table(wccerror, back_button_req=1)
return
return client_count.strip()
def menu_down():
global current_menu_location
global menu
global current_scroll_selection
global display_state
# If we are in a table, scroll down (unless at bottom of list)
if display_state == 'page':
current_scroll_selection +=1
return
# Menu not currently shown, do nothing
if display_state != 'menu':
return
# pop the last menu list item, increment & push back on
current_selection = current_menu_location.pop()
current_selection = current_selection +1
current_menu_location.append(current_selection)
draw_page()
def menu_right():
global current_menu_location
global menu
global option_number_selected
global option_selected
global current_scroll_selection
global display_state
# If we are in a table, scroll up (unless at top of list)
if display_state == 'page':
if current_scroll_selection == 0:
return
else:
current_scroll_selection -=1
return
# Check if the "action" field at the current location is an
# array or a function.
# if we have an array, append the current selection and re-draw menu
if (type(option_selected) is list):
current_menu_location.append(0)
draw_page()
elif (isinstance(option_selected, types.FunctionType)):
# if we have a function (dispatcher), execute it
display_state = 'page'
option_selected()
def menu_left():
global current_menu_location
global menu
global option_number_selected
global option_selected
global current_scroll_selection
global table_list_length
global result_cache
global display_state
global start_up
# If we're in a table we need to exit, reset table scroll counters, reset
# result cache and draw the menu for our current level
if display_state == 'page':
current_scroll_selection = 0
table_list_length = 0
display_state = 'menu'
draw_page()
result_cache = False
return
if display_state == 'menu':
# check to make sure we aren't at top of menu structure
if len(current_menu_location) == 1:
# If we're at the top and hit exit (back) button, revert to start-up state
start_up = True
home_page()
else:
current_menu_location.pop()
draw_page()
else:
display_state = 'menu'
draw_page()
def go_up():
# executed when the back navigation item is selected
global current_menu_location
global display_state
display_state = 'menu'
if len(current_menu_location) == 1:
# we must be at top level, do nothing
return
else:
# Take off last level of menu structure to go up
# Set index to 0 so top menu item selected
current_menu_location.pop()
current_menu_location[-1] = 0
draw_page()
#######################
# menu structure here
#######################
# assume classic mode menu initially...
menu = [
{ "name": "Network", "action": [
{ "name": "Interfaces", "action": show_interfaces},
{ "name": "WLAN Interfaces", "action": show_wlan_interfaces},
{ "name": "Eth0 IP Config", "action": show_eth0_ipconfig},
{ "name": "Eth0 VLAN", "action": show_vlan},
{ "name": "LLDP Neighbour", "action": show_lldp_neighbour},
{ "name": "CDP Neighbour", "action": show_cdp_neighbour},
{ "name": "Public IP Address", "action": show_publicip},
]
},
{ "name": "Utils", "action": [
{ "name": "Reachability", "action": show_reachability},
{ "name": "Speedtest", "action": show_speedtest},
{ "name": "WPA Passphrase", "action": show_wpa_passphrase},
{ "name": "USB Devices", "action": show_usb},
{ "name": "UFW Ports", "action": show_ufw},
]
},
{ "name": "Modes", "action": [
{ "name": "Wi-Fi Console", "action": [
{ "name": "Cancel", "action": go_up},
{ "name": "Confirm", "action": wconsole_switcher},
]
},
{ "name": "Hotspot", "action": [
{ "name": "Cancel", "action": go_up},
{ "name": "Confirm", "action": hotspot_switcher},
]
},
{ "name": "Wiperf", "action": [
{ "name": "Cancel", "action": go_up},
{ "name": "Confirm", "action": wiperf_switcher},
]
},
]
},
{ "name": "Apps", "action": [
{ "name": "Kismet", "action": [
{ "name": "Status", "action": kismet_status},
{ "name": "Stop", "action": kismet_stop},
{ "name": "Start", "action": kismet_start},
]
},
{ "name": "Bettercap", "action": [
{ "name": "Status", "action": bettercap_status},
{ "name": "Stop", "action": bettercap_stop},
{ "name": "Start", "action": bettercap_start},
]
},
{ "name": "Profiler", "action": [
{ "name": "Status", "action": profiler_status},
{ "name": "Stop", "action": profiler_stop},
{ "name": "Start", "action": profiler_start},
{ "name": "Start (no 11r)", "action": profiler_start_no11r},
{ "name": "Purge Reports", "action": profiler_purge},
]
},
]
},
{ "name": "System", "action": [
{ "name": "Shutdown", "action": [
{ "name": "Cancel", "action": go_up},
{ "name": "Confirm", "action": shutdown},
]
},
{ "name": "Reboot", "action": [
{ "name": "Cancel", "action": go_up},
{ "name": "Confirm", "action": reboot},
]
},
{ "name": "Summary", "action": show_summary},
{ "name": "Date/Time", "action": show_date},
{ "name": "Version", "action": show_menu_ver},
]
},
]
'''
Old menu structure...just in case
menu = [
{ "name": "1.Network", "action": [
{ "name": "1.Interfaces", "action": show_interfaces},
{ "name": "2.WLAN Interfaces", "action": show_wlan_interfaces},
{ "name": "3.USB Devices", "action": show_usb},
{ "name": "4.UFW Ports", "action": show_ufw},
{ "name": "5.Eth0 IP Config", "action": show_eth0_ipconfig},
{ "name": "6.Eth0 VLAN", "action": show_vlan},
{ "name": "7.LLDP Neighbour", "action": show_lldp_neighbour},
{ "name": "8.CDP Neighbour", "action": show_cdp_neighbour},
{ "name": "9.WPA Passphrase", "action": show_wpa_passphrase},
{ "name": "10.Reachability", "action": show_reachability},
]
},
{ "name": "2.Status", "action": [
{ "name": "1.Summary", "action": show_summary},
{ "name": "2.Date/Time", "action": show_date},
{ "name": "3.Version", "action": show_menu_ver},
]
},
{ "name": "3.Apps", "action": [
{ "name": "1.Kismet", "action": [
{ "name": "Status", "action": kismet_status},
{ "name": "Stop", "action": kismet_stop},
{ "name": "Start", "action": kismet_start},
]
},
{ "name": "2.Bettercap", "action": [
{ "name": "Status", "action": bettercap_status},
{ "name": "Stop", "action": bettercap_stop},
{ "name": "Start", "action": bettercap_start},
]
},
{ "name": "3.Profiler", "action": [
{ "name": "Status", "action": profiler_status},
{ "name": "Stop", "action": profiler_stop},
{ "name": "Start", "action": profiler_start},
{ "name": "Start (no 11r)", "action": profiler_start_no11r},
{ "name": "Purge Reports", "action": profiler_purge},
]
},
]
},
{ "name": "4.Actions", "action": [
{ "name": "1.W-Console", "action": [
{ "name": "Cancel", "action": go_up},
{ "name": "Confirm", "action": wconsole_switcher},
]
},
{ "name": "2.Hotspot", "action": [
{ "name": "Cancel", "action": go_up},
{ "name": "Confirm", "action": hotspot_switcher},
]
},
{ "name": "3.Wiperf", "action": [
{ "name": "Cancel", "action": go_up},
{ "name": "Confirm", "action": wiperf_switcher},
]
},
{ "name": "4.Reboot", "action": [
{ "name": "Cancel", "action": go_up},
{ "name": "Confirm", "action": reboot},
]
},
{ "name": "5.Shutdown", "action": [
{ "name": "Cancel", "action": go_up},
{ "name": "Confirm", "action": shutdown},
]
},
]
}
]
'''
# update menu options data structure if we're in non-classic mode
if current_mode == "wconsole":
switcher_dispatcher = wconsole_switcher
home_page_name = "Wi-Fi Console"
if current_mode == "hotspot":
switcher_dispatcher = hotspot_switcher
home_page_name = "Hotspot"
if current_mode == "wiperf":
switcher_dispatcher = wiperf_switcher
home_page_name = "Wiperf"
if current_mode != "classic":
menu[2] = { "name": "Mode", "action": [
{ "name": "Classic Mode", "action": [
{ "name": "Cancel", "action": go_up},
{ "name": "Confirm", "action": switcher_dispatcher},
]
},
]
}
menu.pop(3)
'''
Old menu
if current_mode != "classic":
menu[2] = { "name": "3.Actions", "action": [
{ "name": "1.Classic Mode", "action": [
{ "name": "Cancel", "action": go_up},
{ "name": "Confirm", "action": switcher_dispatcher},
]
},
{ "name": "2.Reboot", "action": [
{ "name": "Cancel", "action": go_up},
{ "name": "Confirm", "action": reboot},
]
},
]
}
menu.pop(3)
'''
# Set up handlers to process key presses
def receive_signal(signum, stack):
global pageSleepCountdown
global pageSleep
global current_menu_location
global shutdown_in_progress
global screen_cleared
global sig_fired
global start_up
if (sig_fired):
# signal handler already in progress, ignore this one
return
#user pressed a button, reset the sleep counter
pageSleepCountdown = pageSleep
start_up = False
if drawing_in_progress or shutdown_in_progress:
return
# if display has been switched off to save screen, power back on and show home menu
if screen_cleared:
screen_cleared = False
pageSleepCountdown = pageSleep
return
# Key 1 pressed - Down key
if signum == signal.SIGUSR1:
sig_fired = True
menu_down()
sig_fired = False
return
# Key 2 pressed - Right/Selection key
if signum == signal.SIGUSR2:
sig_fired = True
menu_right()
sig_fired = False
return
# Key 3 pressed - Left/Back key
if signum == signal.SIGALRM:
sig_fired = True
menu_left()
sig_fired = False
return
###############################################################################
#
# ****** MAIN *******
#
###############################################################################
# First time around (power-up), draw logo on display
image0 = Image.open('wlanprologo.png').convert('1')
oled.drawImage(image0)
time.sleep(2)
# Set signal handlers for button presses - these fire every time a button
# is pressed
signal.signal(signal.SIGUSR1, receive_signal)
signal.signal(signal.SIGUSR2, receive_signal)
signal.signal(signal.SIGALRM, receive_signal)
##############################################################################
# Constant 'while' loop to paint images on display or execute actions in
# response to selections made with buttons. When any of the 3 WLANPi buttons
# are pressed, I believe the signal handler takes over the Python interpreter
# and executes the code associated with the button. The original flow continues
# once the button press action has been completed.
#
# The current sleep period of the while loop is ignored when a button is
# pressed.
#
# All global variables defined outside of the while loop are preserved and may
# read/set as required. The same variables are available for read/write even
# when a button is pressed and an interrupt occurs: no additional thread or
# interpreter with its own set of vars appears to be launched. For this reason,
# vars may be used to signal between the main while loop and any button press
# activity to indicate that processes such as screen paints are in progress.
#
# Despite the sample code suggesting threading is used I do not believe this
# is the case, based on testing with variable scopes and checking for process
# IDs when different parts of the script are executing.
##############################################################################
while True:
try:
if shutdown_in_progress or screen_cleared or drawing_in_progress:
# we don't really want to do anything at the moment, lets
# nap and loop around
time.sleep(1)
continue
# Draw a menu or execute current action (dispatcher)
if display_state != 'menu':
# no menu shown, so must be executing action.
# if we've just booted up, show home page
if start_up == True:
option_selected = home_page
# Re-run current action to refresh screen
option_selected()
else:
# lets try drawing our page (or refresh if already painted)
draw_page()
# if screen timeout is zero, clear it if not already done (blank the
# display to reduce screenburn)
if pageSleepCountdown == 0 and screen_cleared == False:
oled.clearDisplay()
screen_cleared = True
pageSleepCountdown = pageSleepCountdown - 1
# have a nap before we start our next loop
time.sleep(1)
except KeyboardInterrupt:
break
except IOError as ex:
print ("Error " + str(ex))
'''
Discounted ideas
1. Vary sleep timer for main while loop (e.g. longer for less frequently
updating data) - doesn't work, as the main while loop may be in the middle of a
long sleep when the button action is taken, so the screen refresh can take very long.
'''
``` |
{
"source": "jiriburant/azure-sdk-for-python",
"score": 2
} |
#### File: azure-ai-metricsadvisor/samples/sample_credential_entities.py
```python
import os
def sample_create_credential_entity():
# [START create_credential_entity]
from azure.ai.metricsadvisor import MetricsAdvisorKeyCredential, MetricsAdvisorAdministrationClient
from azure.ai.metricsadvisor.models import SqlConnectionStringCredentialEntity
service_endpoint = os.getenv("METRICS_ADVISOR_ENDPOINT")
subscription_key = os.getenv("METRICS_ADVISOR_SUBSCRIPTION_KEY")
api_key = os.getenv("METRICS_ADVISOR_API_KEY")
connection_string = os.getenv("SQL_SERVER_CONNECTION_STRING")
client = MetricsAdvisorAdministrationClient(service_endpoint,
MetricsAdvisorKeyCredential(subscription_key, api_key))
credential_entity = client.create_credential_entity(
credential_entity=SqlConnectionStringCredentialEntity(
name="sql credential entity",
connection_string=connection_string,
description="my credential entity",
)
)
return credential_entity
# [END create_credential_entity]
def sample_get_credential_entity(credential_entity_id):
# [START get_credential_entity]
from azure.ai.metricsadvisor import MetricsAdvisorKeyCredential, MetricsAdvisorAdministrationClient
service_endpoint = os.getenv("METRICS_ADVISOR_ENDPOINT")
subscription_key = os.getenv("METRICS_ADVISOR_SUBSCRIPTION_KEY")
api_key = os.getenv("METRICS_ADVISOR_API_KEY")
client = MetricsAdvisorAdministrationClient(service_endpoint,
MetricsAdvisorKeyCredential(subscription_key, api_key))
credential_entity = client.get_credential_entity(credential_entity_id)
print("Type: {}".format(credential_entity.type))
print("Name: {}".format(credential_entity.name))
print("Description: {}".format(credential_entity.description))
# [END get_credential_entity]
def sample_list_credential_entities():
# [START list_credential_entities]
from azure.ai.metricsadvisor import MetricsAdvisorKeyCredential, MetricsAdvisorAdministrationClient
service_endpoint = os.getenv("METRICS_ADVISOR_ENDPOINT")
subscription_key = os.getenv("METRICS_ADVISOR_SUBSCRIPTION_KEY")
api_key = os.getenv("METRICS_ADVISOR_API_KEY")
client = MetricsAdvisorAdministrationClient(service_endpoint,
MetricsAdvisorKeyCredential(subscription_key, api_key))
credential_entities = client.list_credential_entities()
for credential_entity in credential_entities:
print("Type: {}".format(credential_entity.type))
print("Name: {}".format(credential_entity.name))
print("Description: {}\n".format(credential_entity.description))
# [END list_credential_entities]
def sample_update_credential_entity(credential_entity):
# [START update_credential_entity]
from azure.ai.metricsadvisor import MetricsAdvisorKeyCredential, MetricsAdvisorAdministrationClient
service_endpoint = os.getenv("METRICS_ADVISOR_ENDPOINT")
subscription_key = os.getenv("METRICS_ADVISOR_SUBSCRIPTION_KEY")
api_key = os.getenv("METRICS_ADVISOR_API_KEY")
client = MetricsAdvisorAdministrationClient(service_endpoint,
MetricsAdvisorKeyCredential(subscription_key, api_key))
credential_entity.description = "updated description"
updated = client.update_credential_entity(credential_entity)
print("Type: {}".format(updated.type))
print("Name: {}".format(updated.name))
print("Description: {}\n".format(updated.description))
# [END update_credential_entity]
def sample_delete_credential_entity(credential_entity_id):
# [START delete_credential_entity]
from azure.core.exceptions import ResourceNotFoundError
from azure.ai.metricsadvisor import MetricsAdvisorKeyCredential, MetricsAdvisorAdministrationClient
service_endpoint = os.getenv("METRICS_ADVISOR_ENDPOINT")
subscription_key = os.getenv("METRICS_ADVISOR_SUBSCRIPTION_KEY")
api_key = os.getenv("METRICS_ADVISOR_API_KEY")
client = MetricsAdvisorAdministrationClient(service_endpoint,
MetricsAdvisorKeyCredential(subscription_key, api_key))
client.delete_credential_entity(credential_entity_id)
try:
client.get_credential_entity(credential_entity_id)
except ResourceNotFoundError:
print("Credential entity successfully deleted.")
# [END delete_credential_entity]
if __name__ == '__main__':
print("---Creating credential entity...")
credential_entity = sample_create_credential_entity()
print("Credential_entity successfully created...")
print("\n---Get a credential entity...")
sample_get_credential_entity(credential_entity.id)
print("\n---List credential entities...")
sample_list_credential_entities()
print("\n---Update a credential entity...")
sample_update_credential_entity(credential_entity)
print("\n---Delete a credential entity...")
sample_delete_credential_entity(credential_entity.id)
``` |
{
"source": "jiricejchan/AnonymniAnalytici",
"score": 3
} |
#### File: algorithms/shared/btc-1500966539226.py
```python
from catalyst.api import order_target_percent, record, symbol, set_benchmark
def initialize(context):
context.ASSET_NAME = 'USDT_BTC'
context.asset = symbol(context.ASSET_NAME)
set_benchmark(context.asset)
# For all trading pairs in the poloniex bundle, the default denomination
# currently supported by Catalyst is 1/1000th of a full coin. Use this
# constant to scale the price of up to that of a full coin if desired.
context.TICK_SIZE = 1000.0
# Start this trading algorithm when market is bullish
context.i = 0
context.IS_MARKET_BEAR = False
def handle_data(context, data):
# Get price history for the last two months. Find peak, bottom, and last
# prices for the period
price_history = data.history(context.asset, fields='price', bar_count=60, frequency="1d")
peak = price_history.max()
bottom = price_history.min()
price = price_history.ix[-1]
# Trading logic:
    # If the current price falls more than 25% below the highest closing price
    # of the 2-month window (price < 0.75*peak), the market enters bear
    # territory and the algorithm keeps only 30% of the portfolio in the asset.
    # The bear regime ends once the price is at least 20% above the lowest
    # closing price of the window (price > 1.2*bottom); the algorithm then
    # invests 75% of the portfolio in the asset.
if price < 0.75*peak :
context.IS_MARKET_BEAR = True
elif price > 1.2*bottom:
context.IS_MARKET_BEAR = False
if context.IS_MARKET_BEAR:
order_target_percent(
context.asset,
0.3,
)
else:
order_target_percent(
context.asset,
0.75,
)
Portfolio_cumulative_return = (context.portfolio.portfolio_value/context.portfolio.starting_cash-1)*100
# Save values for later inspection
record(price=price,
peak=peak,
bottom=bottom,
cash=context.portfolio.cash,
leverage=context.account.leverage,
Portfolio_cumulative_return=Portfolio_cumulative_return,
)
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
import pandas as pd
import sys
import os
from os.path import basename
# Plot the portfolio and asset data.
ax1 = plt.subplot(221)
(context.TICK_SIZE * results[[
'price',
'peak',
'bottom',
]]).plot(ax=ax1)
ax1.set_ylabel('{asset} (USD)'.format(asset=context.ASSET_NAME))
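    # Keep only the bars that carry transactions and split them into buys and
    # sells by the sign of the traded amount, so they can be marked on the chart.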
trans = results.ix[[t != [] for t in results.transactions]]
buys = trans.ix[
[t[0]['amount'] > 0 for t in trans.transactions]
]
sells = trans.ix[
[t[0]['amount'] < 0 for t in trans.transactions]
]
ax1.plot(
buys.index,
context.TICK_SIZE * results.price[buys.index],
'^',
markersize=10,
color='g',
)
ax1.plot(
sells.index,
context.TICK_SIZE * results.price[sells.index],
'v',
markersize=10,
color='r',
)
ax2 = plt.subplot(222, sharex=ax1)
ax2.set_ylabel('Percent Return (%)')
results[[
'algorithm_period_return',
'benchmark_period_return',
]].plot(ax=ax2)
ax3 = plt.subplot(223, sharex=ax1)
results[['leverage']].plot(ax=ax3)
ax3.set_ylabel('Leverage ')
ax4 = plt.subplot(224, sharex=ax1)
results[['cash']].plot(ax=ax4)
ax4.set_ylabel('Cash (USD)')
plt.legend(loc=3)
# Show the plot.
plt.gcf().set_size_inches(16, 8)
plt.show()
# Save results in CSV file
filename = os.path.splitext(basename(sys.argv[3]))[0]
results.to_csv(filename + '.csv')
```
#### File: algorithms/shared/dynamic_rebalancing-1500964615447.py
```python
from catalyst.api import order_target_percent, record, symbol, cancel_order, get_open_orders
def initialize(context):
context.ASSET_NAME = 'USDT_ETH'
context.asset = symbol(context.ASSET_NAME)
# For all trading pairs in the poloniex bundle, the default denomination
# currently supported by Catalyst is 1/1000th of a full coin. Use this
# constant to scale the price of up to that of a full coin if desired.
context.TICK_SIZE = 1.0
def handle_data(context, data):
# Cancel any outstanding orders
orders = get_open_orders(context.asset) or []
for order in orders:
cancel_order(order)
# Define base price and make initial trades to achieve target investment ratio of 0.5
order_target_percent(
context.asset,
0.5,
)
# Retrieve current asset price from pricing data
price = data[context.asset].price
#Compute portfolio cumulative return
Portfolio_cumulative_return = (context.portfolio.portfolio_value/context.portfolio.starting_cash-1)*100
# Save values for later inspection
record(price=price,
cash=context.portfolio.cash,
leverage=context.account.leverage,
Portfolio_cumulative_return=Portfolio_cumulative_return
)
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
# Plot the portfolio and asset data.
ax1 = plt.subplot(221)
results[['Portfolio_cumulative_return']].plot(ax=ax1)
ax1.set_ylabel('Percent Return (%)')
ax2 = plt.subplot(222, sharex=ax1)
ax2.set_ylabel('{asset} (USD)'.format(asset=context.ASSET_NAME))
(context.TICK_SIZE * results[[
'price',
]]).plot(ax=ax2)
trans = results.ix[[t != [] for t in results.transactions]]
buys = trans.ix[
[t[0]['amount'] > 0 for t in trans.transactions]
]
sells = trans.ix[
[t[0]['amount'] < 0 for t in trans.transactions]
]
ax2.plot(
buys.index,
context.TICK_SIZE * results.price[buys.index],
'^',
markersize=10,
color='g',
)
ax2.plot(
sells.index,
context.TICK_SIZE * results.price[sells.index],
'v',
markersize=10,
color='r',
)
ax3 = plt.subplot(223, sharex=ax1)
results[['leverage']].plot(ax=ax3)
ax3.set_ylabel('Leverage ')
ax4 = plt.subplot(224, sharex=ax1)
results[['cash']].plot(ax=ax4)
ax4.set_ylabel('Cash (USD)')
plt.legend(loc=3)
# Show the plot.
plt.gcf().set_size_inches(16, 8)
plt.show()
```
#### File: algorithms/shared/mr_btc-1500963590682.py
```python
from catalyst.api import (
order_target_percent,
record,
symbol,
get_open_orders,
set_max_leverage,
schedule_function,
date_rules,
attach_pipeline,
pipeline_output,
)
from catalyst.pipeline import Pipeline
from catalyst.pipeline.data import CryptoPricing
from catalyst.pipeline.factors.crypto import SimpleMovingAverage
from catalyst.pipeline.factors.crypto import AnnualizedVolatility
import math
def initialize(context):
context.ASSET_NAME = 'USDT_BTC'
context.WINDOW= 30
# For all trading pairs in the poloniex bundle, the default denomination
# currently supported by Catalyst is 1/1000th of a full coin. Use this
# constant to scale the price of up to that of a full coin if desired.
context.TICK_SIZE = 1000.0
context.i = 0
context.asset = symbol(context.ASSET_NAME)
attach_pipeline(make_pipeline(context), 'mr_pipeline')
schedule_function(
rebalance,
date_rules.every_day(),
)
def before_trading_start(context, data):
context.pipeline_data = pipeline_output('mr_pipeline')
def make_pipeline(context):
return Pipeline(
columns={
'price': CryptoPricing.open.latest,
'sma': SimpleMovingAverage(
inputs=[CryptoPricing.close],
window_length=context.WINDOW,
),
'std': AnnualizedVolatility(
inputs=[CryptoPricing.close],
window_length=context.WINDOW,
annualization_factor=1,
),
}
)
def rebalance(context, data):
context.i += 1
# Skip first LONG_WINDOW bars to fill windows
if context.i < context.WINDOW:
return
# Get pipeline data for asset of interest
pipeline_data = context.pipeline_data
pipeline_data = pipeline_data[pipeline_data.index == context.asset].iloc[0]
# Compute the necessary statistics
sma = pipeline_data.sma
    # access by key: the 'std' column would otherwise be shadowed by the Series.std() method
    std = pipeline_data['std']
price = pipeline_data.price
# Compute buy and sell thresholds
    # Buy threshold is the simple moving average minus one standard error
    # (std / sqrt(WINDOW)); sell threshold is the average plus one standard error.
buy_threshold = sma-std/math.sqrt(context.WINDOW)
sell_threshold = sma+std/math.sqrt(context.WINDOW)
# Check that the order has not already been placed
open_orders = get_open_orders()
if context.asset not in open_orders:
# check that the asset of interest can currently be traded
if data.can_trade(context.asset):
# Trading logic: if price is less than the buy threshold, mean
# reversion should drive price up. Algorithm invests 100% in the
# asset. In the opposite case, mean reversion should drive price
# down. Algorithm invests 50% in cash and 50% in the asset. If
# price is between buy and sell thresholds, algorithm invests 25%
# in cash and 75% in the asset.
if price < buy_threshold:
order_target_percent(
context.asset,
1.0,
)
elif price > sell_threshold:
order_target_percent(
context.asset,
0.5,
)
else:
order_target_percent(
context.asset,
0.75,
)
record(
price=price,
leverage=context.account.leverage,
sma=sma,
std=std,
buy_threshold=buy_threshold,
sell_threshold=sell_threshold,
)
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
# Plot the portfolio and asset data.
ax1 = plt.subplot(411)
results[['portfolio_value']].plot(ax=ax1)
ax1.set_ylabel('Portfolio value (USD)')
ax2 = plt.subplot(412, sharex=ax1)
ax2.set_ylabel('{asset} (USD)'.format(asset=context.ASSET_NAME))
(context.TICK_SIZE*results[['price', 'sma', 'buy_threshold','sell_threshold']]).plot(ax=ax2)
trans = results.ix[[t != [] for t in results.transactions]]
amounts = [t[0]['amount'] for t in trans.transactions]
buys = trans.ix[
[t[0]['amount'] > 0 for t in trans.transactions]
]
sells = trans.ix[
[t[0]['amount'] < 0 for t in trans.transactions]
]
ax2.plot(
buys.index,
context.TICK_SIZE * results.price[buys.index],
'^',
markersize=10,
color='g',
)
ax2.plot(
sells.index,
context.TICK_SIZE * results.price[sells.index],
'v',
markersize=10,
color='r',
)
ax3 = plt.subplot(413, sharex=ax1)
results[['leverage']].plot(ax=ax3)
ax3.set_ylabel('Leverage (USD)')
results[[
'algorithm',
'benchmark',
]] = results[[
'algorithm_period_return',
'benchmark_period_return',
]]
ax4 = plt.subplot(414, sharex=ax1)
results[[
'algorithm',
'benchmark',
]].plot(ax=ax4)
ax4.set_ylabel('Percent Change')
plt.legend(loc=3)
# Show the plot.
plt.gcf().set_size_inches(18, 8)
plt.show()
``` |
{
"source": "jiricodes/42-sudoku",
"score": 3
} |
#### File: tests/scripts/parser.py
```python
import sys
import os
def parse_input(input):
pass
def parse_output(output):
pass
def read_lines(filename):
file = open(filename, 'r')
lines = file.readlines()
file.close()
return lines
def main():
    input_file = sys.argv[1]
    output_file = sys.argv[2]
    # mode ("in" or "out") is assumed to be passed as an optional third argument
    mode = sys.argv[3] if len(sys.argv) > 3 else "in"
    try:
        raw_input = read_lines(input_file)
    except IOError as ex:
        print("Could not read " + input_file + ": " + str(ex))
        exit()
    if mode == "in":
        parse_input(raw_input)
    elif mode == "out":
        parse_output(raw_input)
    else:
        print("Wrong mode")
        exit()
if __name__ == "__main__":
main()
``` |
{
"source": "jiridanek/qpid-dispatch",
"score": 2
} |
#### File: qpid-dispatch/tests/system_tests_failover_list.py
```python
from system_test import TestCase, Qdrouterd, main_module, TIMEOUT, TestTimeout
from system_test import unittest
from proton.handlers import MessagingHandler
from proton.reactor import Container
class RouterTest(TestCase):
@classmethod
def setUpClass(cls):
"""Start a router"""
super(RouterTest, cls).setUpClass()
def router(name):
config = [
('router', {'mode': 'standalone', 'id': name}),
('listener', {'port': cls.tester.get_port()}),
# failoverList has been deprecated. We are using it here to test backward compatibility.
('listener', {'port': cls.tester.get_port(), 'failoverList': 'other-host:25000'}),
('listener', {'port': cls.tester.get_port(), 'failoverUrls': 'second-host:25000, amqps://third-host:5671'})
]
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
cls.routers = []
router('A')
cls.routers[0].wait_ready()
def test_01_no_failover_list(self):
test = FailoverTest(self.routers[0].addresses[0], 0)
test.run()
self.assertIsNone(test.error)
def test_02_single_failover_host(self):
test = FailoverTest(self.routers[0].addresses[1], 1, [{'network-host': 'other-host', 'port': '25000'}])
test.run()
self.assertIsNone(test.error)
def test_03_double_failover_host(self):
test = FailoverTest(self.routers[0].addresses[2], 2,
[{'network-host': 'second-host', 'port': '25000'}, {'scheme': 'amqps', 'network-host': 'third-host', 'port': '5671'}])
test.run()
self.assertIsNone(test.error)
class FailoverTest(MessagingHandler):
def __init__(self, host, count, elements=[]):
super(FailoverTest, self).__init__()
self.host = host
self.count = count
self.elements = elements
self.conn = None
self.error = None
def timeout(self):
self.error = "Timeout Expired"
self.conn.close()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.conn = event.container.connect(self.host)
def on_connection_opened(self, event):
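        # The router advertises its configured failover hosts to clients in the
        # 'failover-server-list' connection property; compare it against the
        # list expected from the listener's failoverList/failoverUrls setting.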
properties = event.connection.remote_properties
fol = None
try:
fol = properties['failover-server-list']
except:
fol = None
if self.count == 0:
if fol is not None and fol != []:
self.error = "Expected no failover-list, got: %r" % fol
elif fol.__class__ != list:
self.error = "Expected list, got: %r" % fol.__class__
elif self.count != len(fol):
self.error = "Expected list of size %d, got size %d" % (self.count, len(fol))
for i in range(self.count):
got = fol[i]
want = self.elements[i]
if got != want:
self.error = "Expected %r, got %r" % (want, got)
self.timer.cancel()
self.conn.close()
def run(self):
Container(self).run()
if __name__ == '__main__':
unittest.main(main_module())
```
#### File: qpid-dispatch/tests/system_tests_user_id_proxy.py
```python
import os
from system_test import TestCase, Qdrouterd, DIR, main_module
from system_test import unittest
import proton
from proton import SSLDomain, Delivery
from proton.utils import BlockingConnection
from qpid_dispatch_internal.compat import BINARY
class QdSSLUseridTest(TestCase):
@staticmethod
def ssl_file(name):
return os.path.join(DIR, 'ssl_certs', name)
@classmethod
def setUpClass(cls):
super(QdSSLUseridTest, cls).setUpClass()
ssl_profile1_json = os.path.join(DIR, 'displayname_files', 'profile_names1.json')
ssl_profile2_json = os.path.join(DIR, 'displayname_files', 'profile_names2.json')
policy_config_path = os.path.join(DIR, 'policy-4')
config = Qdrouterd.Config([
('router', {'id': 'QDR', 'workerThreads': 1}),
('policy', {'maxConnections': 20, 'policyDir': policy_config_path, 'enableVhostPolicy': 'true'}),
# sha1
('sslProfile', {'name': 'server-ssl1',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'uidFormat': '1',
'password': '<PASSWORD>'}),
# sha256
('sslProfile', {'name': 'server-ssl2',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'uidFormat': '2',
'password': '<PASSWORD>'}),
# sha512
('sslProfile', {'name': 'server-ssl3',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'uidFormat': '5',
'password': '<PASSWORD>'}),
# sha256 combination
('sslProfile', {'name': 'server-ssl4',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'uidFormat': '2noucs',
'password': '<PASSWORD>'}),
# sha1 combination
('sslProfile', {'name': 'server-ssl5',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'uidFormat': '1cs',
'password': '<PASSWORD>'}),
# sha512 combination
('sslProfile', {'name': 'server-ssl6',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'uidFormat': 'cs5',
'password': '<PASSWORD>'}),
# no fingerprint field
('sslProfile', {'name': 'server-ssl7',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'uidFormat': 'nsuco',
'password': '<PASSWORD>'}),
# no fingerprint field variation
('sslProfile', {'name': 'server-ssl8',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'uidFormat': 'scounl',
'password': '<PASSWORD>'}),
# no uidFormat
('sslProfile', {'name': 'server-ssl9',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'password': '<PASSWORD>'}),
# one component of uidFormat is invalid (x), the unrecognized component will be ignored,
# this will be treated like 'uidFormat': '1'
('sslProfile', {'name': 'server-ssl10',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'uidFormat': '1x',
'uidNameMappingFile': ssl_profile2_json,
'password': '<PASSWORD>'}),
# All components in the uidFormat are unrecognized, pn_get_transport_user will be returned
('sslProfile', {'name': 'server-ssl11',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'uidFormat': 'abxd',
'password': '<PASSWORD>'}),
('sslProfile', {'name': 'server-ssl12',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'uidFormat': '1',
'uidNameMappingFile': ssl_profile1_json,
'password': '<PASSWORD>'}),
# should translate a display name
('sslProfile', {'name': 'server-ssl13',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'uidFormat': '2',
# displayNameFile has been deprecated. We are using it here to test backward compatibility.
'displayNameFile': ssl_profile2_json,
'password': '<PASSWORD>'}),
('sslProfile', {'name': 'server-ssl14',
'caCertFile': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'uidFormat': '1',
'uidNameMappingFile': ssl_profile1_json,
'password': '<PASSWORD>'}),
('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl1', 'authenticatePeer': 'yes',
'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl2', 'authenticatePeer': 'yes',
'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl3', 'authenticatePeer': 'yes',
'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl4', 'authenticatePeer': 'yes',
'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl5', 'authenticatePeer': 'yes',
'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl6', 'authenticatePeer': 'yes',
'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl7', 'authenticatePeer': 'yes',
'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl8', 'authenticatePeer': 'yes',
'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl9', 'authenticatePeer': 'yes',
'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl10', 'authenticatePeer': 'yes',
'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl11', 'authenticatePeer': 'yes',
'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
# peer is not being authenticated here. the user must "anonymous" which is what pn_transport_get_user
# returns
('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl12', 'authenticatePeer': 'no',
'requireSsl': 'yes', 'saslMechanisms': 'ANONYMOUS'}),
('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl13', 'authenticatePeer': 'yes',
'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl14', 'authenticatePeer': 'yes',
'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
('listener', {'port': cls.tester.get_port(), 'authenticatePeer': 'no'})
])
cls.router = cls.tester.qdrouterd('ssl-test-router', config, wait=True)
def address(self, index):
return self.router.addresses[index]
def create_ssl_domain(self, ssl_options_dict, mode=SSLDomain.MODE_CLIENT):
"""Return proton.SSLDomain from command line options or None if no SSL options specified.
        @param ssl_options_dict: dict of parsed SSL options (certificate, key, trustfile, password)
"""
certificate, key, trustfile, password = ssl_options_dict.get('ssl-certificate'), \
ssl_options_dict.get('ssl-key'), \
ssl_options_dict.get('ssl-trustfile'), \
ssl_options_dict.get('ssl-password')
if not (certificate or trustfile):
return None
domain = SSLDomain(mode)
if trustfile:
domain.set_trusted_ca_db(str(trustfile))
domain.set_peer_authentication(SSLDomain.VERIFY_PEER, str(trustfile))
if certificate:
domain.set_credentials(str(certificate), str(key), str(password))
return domain
class QdSSLUseridProxy(QdSSLUseridTest):
def test_message_user_id_proxy_bad_name_disallowed(self):
ssl_opts = dict()
ssl_opts['ssl-trustfile'] = self.ssl_file('ca-certificate.pem')
ssl_opts['ssl-certificate'] = self.ssl_file('client-certificate.pem')
ssl_opts['ssl-key'] = self.ssl_file('client-private-key.pem')
ssl_opts['ssl-password'] = '<PASSWORD>'
# create the SSL domain object
domain = self.create_ssl_domain(ssl_opts)
# Send a message with bad user_id. This message should be rejected.
# Connection has user_id 'user13'.
addr = self.address(13).replace("amqp", "amqps")
blocking_connection = BlockingConnection(addr, ssl_domain=domain)
blocking_sender = blocking_connection.create_sender("$management")
request = proton.Message()
request.user_id = BINARY("bad-user-id")
result = Delivery.ACCEPTED
try:
delivery = blocking_sender.send(request, timeout=10)
result = delivery.remote_state
except proton.utils.SendException as e:
result = e.state
self.assertTrue(result == Delivery.REJECTED,
"Router accepted a message with user_id that did not match connection user_id")
def test_message_user_id_proxy_zzz_credit_handled(self):
# Test for DISPATCH-519. Make sure the REJECTED messages result
# in the client receiving credit.
credit_limit = 250 # router issues 250 credits
ssl_opts = dict()
ssl_opts['ssl-trustfile'] = self.ssl_file('ca-certificate.pem')
ssl_opts['ssl-certificate'] = self.ssl_file('client-certificate.pem')
ssl_opts['ssl-key'] = self.ssl_file('client-private-key.pem')
ssl_opts['ssl-password'] = '<PASSWORD>'
# create the SSL domain object
domain = self.create_ssl_domain(ssl_opts)
# Send a message with bad user_id. This message should be rejected.
# Connection has user_id 'user13'.
addr = self.address(13).replace("amqp", "amqps")
blocking_connection = BlockingConnection(addr, ssl_domain=domain)
blocking_sender = blocking_connection.create_sender("$management")
request = proton.Message()
request.user_id = BINARY("bad-user-id")
for i in range(0, credit_limit + 1):
result = Delivery.ACCEPTED
try:
delivery = blocking_sender.send(request, timeout=10)
result = delivery.remote_state
except proton.utils.SendException as e:
result = e.state
except proton.utils.Timeout as e:
self.fail("Timed out waiting for send credit")
self.assertTrue(result == Delivery.REJECTED,
"Router accepted a message with user_id that did not match connection user_id")
if __name__ == '__main__':
unittest.main(main_module())
```
#### File: qpid-dispatch/tests/TCP_echo_client.py
```python
import argparse
import selectors
import signal
import socket
import sys
from threading import Thread
import time
import traceback
from system_test import Logger
from system_test import TIMEOUT
class GracefulExitSignaler:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, signum, frame):
self.kill_now = True
def split_chunk_for_display(raw_bytes):
"""
Given some raw bytes, return a display string
Only show the beginning and end of largish (2xMAGIC_SIZE) arrays.
:param raw_bytes:
:return: display string
"""
MAGIC_SIZE = 50 # Content repeats after chunks this big - used by echo client, too
if len(raw_bytes) > 2 * MAGIC_SIZE:
result = repr(raw_bytes[:MAGIC_SIZE]) + " ... " + repr(raw_bytes[-MAGIC_SIZE:])
else:
result = repr(raw_bytes)
return result
class TcpEchoClient:
def __init__(self, prefix, host, port, size, count, timeout, logger):
"""
        :param prefix: log-line prefix identifying this client instance
        :param host: connect to this host
        :param port: connect to this port
        :param size: size of individual payload chunks in bytes
        :param count: number of payload chunks
        :param timeout: seconds before the client gives up; zero disables the timeout
        :param logger: Logger() object
:return:
"""
# Start up
self.sock = None
self.prefix = prefix
self.host = host
self.port = int(port)
self.size = size
self.count = count
self.timeout = timeout
self.logger = logger
self.keep_running = True
self.is_running = False
self.exit_status = None
self.error = None
self._thread = Thread(target=self.run)
self._thread.daemon = True
self._thread.start()
def run(self):
self.logger.log("%s Client is starting up" % self.prefix)
try:
start_time = time.time()
self.is_running = True
self.logger.log('%s Connecting to host:%s, port:%d, size:%d, count:%d' %
(self.prefix, self.host, self.port, self.size, self.count))
total_sent = 0
total_rcvd = 0
if self.count > 0 and self.size > 0:
# outbound payload only if count and size both greater than zero
payload_out = []
out_list_idx = 0 # current _out array being sent
out_byte_idx = 0 # next-to-send in current array
out_ready_to_send = True
# Generate unique content for each message so you can tell where the message
# or fragment belongs in the whole stream. Chunks look like:
# b'[localhost:33333:6:0]ggggggggggggggggggggggggggggg'
# host: localhost
# port: 33333
# index: 6
# offset into message: 0
CONTENT_CHUNK_SIZE = 50 # Content repeats after chunks this big - used by echo server, too
for idx in range(self.count):
body_msg = ""
padchar = "abcdefghijklmnopqrstuvwxyz@#$%"[idx % 30]
while len(body_msg) < self.size:
chunk = "[%s:%d:%d:%d]" % (self.host, self.port, idx, len(body_msg))
padlen = CONTENT_CHUNK_SIZE - len(chunk)
chunk += padchar * padlen
body_msg += chunk
if len(body_msg) > self.size:
body_msg = body_msg[:self.size]
payload_out.append(bytearray(body_msg.encode()))
# incoming payloads
payload_in = []
in_list_idx = 0 # current _in array being received
for i in range(self.count):
payload_in.append(bytearray())
else:
# when count or size .LE. zero then just connect-disconnect
self.keep_running = False
# set up connection
host_address = (self.host, self.port)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(host_address)
self.sock.setblocking(False)
# set up selector
sel = selectors.DefaultSelector()
sel.register(self.sock,
selectors.EVENT_READ | selectors.EVENT_WRITE)
# event loop
while self.keep_running:
if self.timeout > 0.0:
elapsed = time.time() - start_time
if elapsed > self.timeout:
self.exit_status = "%s Exiting due to timeout. Total sent= %d, total rcvd= %d" % \
(self.prefix, total_sent, total_rcvd)
break
for key, mask in sel.select(timeout=0.1):
sock = key.fileobj
if mask & selectors.EVENT_READ:
recv_data = sock.recv(1024)
if recv_data:
                            total_rcvd += len(recv_data)
payload_in[in_list_idx].extend(recv_data)
if len(payload_in[in_list_idx]) == self.size:
self.logger.log("%s Rcvd message %d" % (self.prefix, in_list_idx))
in_list_idx += 1
if in_list_idx == self.count:
# Received all bytes of all chunks - done.
self.keep_running = False
# Verify the received data
if not payload_in == payload_out:
for idxc in range(self.count):
if not payload_in[idxc] == payload_out[idxc]:
for idxs in range(self.size):
ob = payload_out[idxc][idxs]
ib = payload_in[idxc][idxs]
if ob != ib:
self.error = "%s ERROR Rcvd message verify fail. row:%d, col:%d, " \
"expected:%s, actual:%s" \
% (self.prefix, idxc, idxs, repr(ob), repr(ib))
break
else:
out_ready_to_send = True
sel.modify(sock, selectors.EVENT_READ | selectors.EVENT_WRITE)
elif len(payload_in[in_list_idx]) > self.size:
self.error = "ERROR Received message too big. Expected:%d, actual:%d" % \
(self.size, len(payload_in[in_list_idx]))
break
else:
pass # still accumulating a message
else:
# socket closed
self.keep_running = False
if not in_list_idx == self.count:
self.error = "ERROR server closed. Echoed %d of %d messages." % (in_list_idx, self.count)
if self.keep_running and mask & selectors.EVENT_WRITE:
if out_ready_to_send:
n_sent = self.sock.send(payload_out[out_list_idx][out_byte_idx:])
total_sent += n_sent
out_byte_idx += n_sent
if out_byte_idx == self.size:
self.logger.log("%s Sent message %d" % (self.prefix, out_list_idx))
out_byte_idx = 0
out_list_idx += 1
sel.modify(self.sock, selectors.EVENT_READ) # turn off write events
out_ready_to_send = False # turn on when rcvr receives
else:
pass # logger.log("DEBUG: ignoring EVENT_WRITE")
# shut down
sel.unregister(self.sock)
self.sock.close()
except Exception:
self.error = "ERROR: exception : '%s'" % traceback.format_exc()
self.sock.close()
self.is_running = False
def wait(self, timeout=TIMEOUT):
self.logger.log("%s Client is shutting down" % self.prefix)
self.keep_running = False
self._thread.join(timeout)
def main(argv):
retval = 0
# parse args
p = argparse.ArgumentParser()
p.add_argument('--host', '-b',
help='Required target host')
p.add_argument('--port', '-p', type=int,
help='Required target port number')
p.add_argument('--size', '-s', type=int, default=100, const=1, nargs='?',
help='Size of payload in bytes must be >= 0. Size of zero connects and disconnects with no data traffic.')
p.add_argument('--count', '-c', type=int, default=1, const=1, nargs='?',
help='Number of payloads to process must be >= 0. Count of zero connects and disconnects with no data traffic.')
p.add_argument('--name',
help='Optional logger prefix')
p.add_argument('--timeout', '-t', type=float, default=0.0, const=1, nargs="?",
help='Timeout in seconds. Default value "0" disables timeouts')
p.add_argument('--log', '-l',
action='store_true',
help='Write activity log to console')
del argv[0]
args = p.parse_args(argv)
# host
if args.host is None:
raise Exception("User must specify a host")
host = args.host
# port
if args.port is None:
raise Exception("User must specify a port number")
port = args.port
# size
if args.size < 0:
raise Exception("Size must be greater than or equal to zero")
size = args.size
# count
if args.count < 0:
raise Exception("Count must be greater than or equal to zero")
count = args.count
# name / prefix
prefix = args.name if args.name is not None else "ECHO_CLIENT (%d_%d_%d)" % \
(port, size, count)
# timeout
if args.timeout < 0.0:
raise Exception("Timeout must be greater than or equal to zero")
signaller = GracefulExitSignaler()
logger = None
try:
# logging
logger = Logger(title="%s host:%s port %d size:%d count:%d" % (prefix, host, port, size, count),
print_to_console=args.log,
save_for_dump=False)
client = TcpEchoClient(prefix, host, port, size, count, args.timeout, logger)
keep_running = True
while keep_running:
time.sleep(0.1)
if client.error is not None:
logger.log("%s Client stopped with error: %s" % (prefix, client.error))
keep_running = False
retval = 1
if client.exit_status is not None:
logger.log("%s Client stopped with status: %s" % (prefix, client.exit_status))
keep_running = False
if signaller.kill_now:
logger.log("%s Process killed with signal" % prefix)
keep_running = False
if keep_running and not client.is_running:
logger.log("%s Client stopped with no error or status" % prefix)
keep_running = False
except Exception:
client.error = "ERROR: exception : '%s'" % traceback.format_exc()
if logger is not None:
logger.log("%s Exception: %s" % (prefix, traceback.format_exc()))
retval = 1
if client.error is not None:
# write client errors to stderr
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
elines = client.error.split("\n")
for line in elines:
eprint("ERROR:", prefix, line)
return retval
if __name__ == "__main__":
sys.exit(main(sys.argv))
``` |
{
"source": "jiridanek/YamlConfiger",
"score": 2
} |
#### File: YamlConfiger/amqcfg/profiles.py
```python
import itertools
import logging
import os
import yaml
from jinja2 import Environment, FileSystemLoader
from .exceptions import ProfileError, TemplateError
from .files import select_profile_file, get_profiles_path
LOG = logging.getLogger(__name__)
def load_tuning_files(tuning_files=None):
"""Load tuning data from requested tuning files in order and
provides list of tuning data for further processing.
:param tuning_files: list of tuning files names
:type tuning_files: list[str] | None
:return: list of tuning data loaded from yaml tuning files
:rtype: list[dict]
"""
tuning_values_list = []
if tuning_files:
for tuning_file in tuning_files:
try:
                with open(tuning_file, 'r') as tuning_stream:
                    tuning_values_list.append(
                        yaml.load(stream=tuning_stream, Loader=yaml.SafeLoader)
                    )
except IOError as exc:
raise ProfileError(
'Unable to open tuning file "{}" {}'.format(
tuning_file, exc
)
)
except yaml.YAMLError as exc:
raise ProfileError(
                    'Unable to parse YAML tuning file "{}" {}'.format(
                        tuning_file, exc
)
)
            LOG.debug('Tuning file %s loaded', tuning_file)
else:
LOG.debug('No tuning files requested.')
return tuning_values_list
def load_tuning(profile_defaults=None, tuning_files_list=None,
tuning_data_list=None):
"""Load and apply all tuning, from profile defaults, from tuning
files, and then directly provided tuning data. If provided.
All data is applied in order.
:param profile_defaults: profile defaults data to be tuned
:type profile_defaults: dict | None
:param tuning_files_list: list of tuning files names
:type tuning_files_list: list[str] | None
:param tuning_data_list: list of tuning data directly provided
:type tuning_data_list: list[dict] | None
:return: compound overlaid tuning data in order of appearance
:rtype: dict
"""
result = {}
files_tuning_values = load_tuning_files(tuning_files_list)
if profile_defaults:
result.update(profile_defaults)
if tuning_data_list is None:
tuning_data_list = []
for tuning_data in itertools.chain(files_tuning_values, tuning_data_list):
result.update(tuning_data)
return result
def get_tuned_profile(profile, tuning_files_list=None, tuning_data_list=None):
"""Get selected profile and use tuning data to fine tune
it's variable values.
:param profile: profile name (packaged) or path to profile
(user specified)
:type profile: str
:param tuning_files_list: list of files with tuning data to be used
:type tuning_files_list: list[str] | None
:param tuning_data_list: data used to tune the variable values.
:type tuning_data_list: list[dict] | None
:raises ProfileError: when tuned profile is not valid.
:return: compound tuned config data, and tuned profile yaml
:rtype: dict, str
"""
tuning_data = load_tuning(
profile_defaults=load_profile_defaults(profile),
tuning_files_list=tuning_files_list,
tuning_data_list=tuning_data_list,
)
tuning_profile = get_profile_template(profile)
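    # make the selected profile template's name available to the rendered
    # profile as the 'profile_path' variable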
tuning_data['profile_path'] = tuning_profile.name
tuned_profile = tuning_profile.render(tuning_data)
try:
config_data = yaml.load(stream=tuned_profile, Loader=yaml.SafeLoader)
except yaml.YAMLError as exc:
raise ProfileError(
'Unable to parse tuned profile "{}" {}'.format(
profile, exc
)
)
return config_data, tuned_profile
def load_profile_defaults(profile):
"""Load defaults variables from a profile if available
.. note: profile will be rendered as scratch without any values
to be able to be loaded as valid yaml.
:param profile: profile name (from package or from user)
:type profile: str
:return: defaults values mapping, if not available then empty dict
:rtype: dict
"""
# scratch render of profile template for _defaults extraction
scratch_profile = get_profile_template(profile)
scratch_profile = scratch_profile.render()
tmp_data = yaml.load(stream=scratch_profile, Loader=yaml.SafeLoader)
tuning_data = tmp_data.get('_defaults', {})
LOG.debug('Tuning data: %s', tuning_data)
return tuning_data
def get_profile_template(profile_name):
"""Get a jinja2 template via env generated for selected profile
(for fine-tuning of profile)
:param profile_name: name of template set
(alternatively path to user specified template set)
:type profile_name: str
:return: jinja2 profile template for fine tuning template
:rtype: Template
"""
selected_template_name, selected_template_path = \
select_profile_file(profile_name)
if not os.path.isdir(selected_template_path):
raise TemplateError(
'Unable to load requested profile location "%s"' % profile_name
)
env = Environment(
loader=FileSystemLoader([
selected_template_path, # selected template
get_profiles_path(),
]),
trim_blocks=True,
lstrip_blocks=True,
)
template = env.get_template(selected_template_name)
return template
```
#### File: test/amqcfg_batch/test_generate_all_profiles.py
```python
import mock
import pytest
import amqcfg
from amqcfg_batch.amqcfg_batch import generate_all_profiles, GenerateData
from amqcfg_batch.exceptions import AmqcfgBatchException
@mock.patch('amqcfg.amqcfg.generate', mock.Mock())
def test_no_profile_name(*_):
input_path = ''
output_path = ''
default = GenerateData()
common = GenerateData()
profile_file_data = {
'service': {'pass': True}
}
with pytest.raises(AmqcfgBatchException):
generate_all_profiles(
input_path,
output_path,
default,
common,
profile_file_data
)
@mock.patch('amqcfg.amqcfg.generate', mock.Mock())
def test_basic(*_):
input_path = ''
output_path = ''
default = GenerateData()
common = GenerateData()
profile_file_data = {
'service': {
'profile': 'test',
}
}
generate_all_profiles(
input_path,
output_path,
default,
common,
profile_file_data
)
# noinspection PyUnresolvedReferences
amqcfg.amqcfg.generate.assert_called_with(
profile='test',
template=None,
output_path=None,
tuning_files_list=None,
tuning_data_list=None,
)
@mock.patch('amqcfg.amqcfg.generate', mock.Mock())
def test_advanced(*_):
input_path = ''
output_path = 'test'
default = GenerateData()
default.profile_name = 'test2'
common = GenerateData()
profile_file_data = {
'_default': {
'profile': 'test2',
},
'service': {
'profile': 'test',
},
'service2': {
'tuning_files': ['a']
},
'service3': {
'template': 'My Template',
'tuning': {
'a': 1
}
}
}
generate_all_profiles(
input_path,
output_path,
default,
common,
profile_file_data
)
import os
calls = [
mock.call(profile='test', template=None, output_path=os.path.join('test','service'),
tuning_files_list=None, tuning_data_list=None),
mock.call(profile='test2', template=None, output_path=os.path.join('test','service2'),
tuning_files_list=['a'], tuning_data_list=None),
mock.call(profile='test2', template='My Template',
output_path=os.path.join('test','service3'), tuning_files_list=None,
tuning_data_list=[{'a': 1}]),
]
# noinspection PyUnresolvedReferences
amqcfg.amqcfg.generate.assert_has_calls(calls, any_order=True)
```
#### File: test/amqcfg_batch/test_generate.py
```python
import mock
import amqcfg_batch
from amqcfg_batch.amqcfg_batch import generate
from .fakes import (
fake_iter_gen_profiles_one,
fake_iter_gen_profiles_two,
fake_common_one,
fake_default_one,
fake_common_two,
fake_default_two,
)
@mock.patch('amqcfg_batch.amqcfg_batch.generate_all_profiles', mock.Mock())
@mock.patch('amqcfg_batch.amqcfg_batch.iter_gen_profiles',
fake_iter_gen_profiles_one)
def test_one(*_):
input_files = ['a/b.yaml']
generate(input_files)
# noinspection PyUnresolvedReferences
amqcfg_batch.amqcfg_batch.generate_all_profiles.assert_called_with(
'a',
None,
fake_default_one,
fake_common_one,
next(fake_iter_gen_profiles_one(None))
)
@mock.patch('amqcfg_batch.amqcfg_batch.generate_all_profiles', mock.Mock())
@mock.patch('amqcfg_batch.amqcfg_batch.iter_gen_profiles',
fake_iter_gen_profiles_two)
def test_two(*_):
input_files = ['a/b.yaml']
generate(input_files)
profile_data = list(fake_iter_gen_profiles_two(None))
calls = [
mock.call('a', None, fake_default_one,
fake_common_one, profile_data[0]),
mock.call('a', None, fake_default_two,
fake_common_two, profile_data[1]),
]
# noinspection PyUnresolvedReferences
amqcfg_batch.amqcfg_batch.generate_all_profiles.assert_has_calls(
calls
)
@mock.patch('amqcfg_batch.amqcfg_batch.generate_all_profiles', mock.Mock())
@mock.patch('amqcfg_batch.amqcfg_batch.iter_gen_profiles',
fake_iter_gen_profiles_two)
def test_two_files_two(*_):
input_files = ['a/b.yaml', 'c/d.yaml']
generate(input_files)
profile_data = list(fake_iter_gen_profiles_two(None))
calls = [
mock.call('a', None, fake_default_one,
fake_common_one, profile_data[0]),
mock.call('a', None, fake_default_two,
fake_common_two, profile_data[1]),
mock.call('c', None, fake_default_one,
fake_common_one, profile_data[0]),
mock.call('c', None, fake_default_two,
fake_common_two, profile_data[1]),
]
# noinspection PyUnresolvedReferences
amqcfg_batch.amqcfg_batch.generate_all_profiles.assert_has_calls(
calls
)
```
#### File: amqcfg/config_data/test_metadata.py
```python
import pytest
import amqcfg.config_data
dataset_metadata_members = (
'tool_name',
'datetime',
)
@pytest.mark.parametrize('member', dataset_metadata_members)
def test_add_template_metadata_check_member(member):
data = {}
amqcfg.config_data.add_template_metadata(data)
assert member in data['metadata']
dataset_metadata_datetime_members = (
'datetime',
'year',
'time',
'date',
'unix',
)
@pytest.mark.parametrize('member', dataset_metadata_datetime_members)
def test_add_template_metadata_datetime_check_member(member):
data = {}
amqcfg.config_data.add_template_metadata(data)
assert member in data['metadata']['datetime']
``` |
{
"source": "jirifilip/CBA",
"score": 3
} |
#### File: pyarc/algorithms/m1algorithm.py
```python
import collections
from .rule_algorithm import RuleBuilderAlgorithm
from .classifier import Classifier
import time
import random
class M1Algorithm(RuleBuilderAlgorithm):
""" M1 Algorithm implementation.
"""
def build(self):
# list for storing rules to be used in the classifier
classifier = []
# list for storing default classes associated
# with rules in the classifier
default_classes = []
# list for storing errors of said default classes
default_classes_errors = []
# list for storing rule errors from classifier
rule_errors = []
# list for storing total errors
# (rule_errors + default_classes_errors)
total_errors = []
# class distribution
# for calculating the default's rule confidence
# and support
class_distribution = collections.Counter(self.y)
classdist_keys = list(class_distribution.keys())
# sorting rules based on the precedence operator
self.rules.sort(reverse=True)
# converting TransactionDB to a set
# so that set intersection and difference can be used
dataset = set(self.dataset)
# obtaining the set's length. We do this only once to
# save processing time.
# this is a constant variable
dataset_len = len(dataset)
# When we want to update the dataset_len, we use
# this variable. Length is updated by subtracting
# absolute support of a rule from it
dataset_len_updated = dataset_len
for rule in self.rules:
# if all data cases have been covered
# break the loop to save time
if (dataset_len_updated <= 0):
break
# temp serves for storing datacases
# that have been covered by current rule
temp = set()
# temp len is for determining temp's length
# without using len(temp) to save time
temp_len = 0
# number of rule that satisfy both antecedent
# and consequent of the current rule
temp_satisfies_conseq_cnt = 0
for datacase in dataset:
# if datacase satisfies rule's antecedent
# we'll store it in temp and increment
# temp's len
if rule.antecedent <= datacase:
temp.add(datacase)
temp_len += 1
# we'll mark the rule if datacase
# satisfies its consequent. And increment
# the counter
if rule.consequent == datacase.class_val:
temp_satisfies_conseq_cnt += 1
rule.marked = True
# if rule satisfied at least one consequent
if rule.marked:
classifier.append(rule)
# we subtract already covered rules
# from dataset
dataset -= temp
# and update dataset's length
dataset_len_updated -= temp_len
# we'll obtain Counter of remaining class values
# in the dataset using map to save time
class_distribution = collections.Counter(map(lambda d: d.class_val.value, dataset))
# the most common value from the counter will be
# the default class
most_common_tuple = class_distribution.most_common(1)
# here we'll do some checking in case
# the counter is empty
most_common_cnt = 0
most_common_label = "None"
try:
most_common_tuple = most_common_tuple[0]
most_common_cnt = most_common_tuple[1]
most_common_label = most_common_tuple[0]
except IndexError:
pass
# the most common label will be inserted into
# the list
default_classes.append(most_common_label)
# number of errors the rule will make =>
#
# difference of:
# all transactions that satisfy its antecedent
# and
# all transactions that satisfy both antecedent and consequent
rule_errors.append(temp_len - temp_satisfies_conseq_cnt)
# default errors
#
# difference of:
# length of remaining dataset
# and
# count of most common class
dflt_class_err = dataset_len_updated - most_common_cnt
err_cnt = dflt_class_err
default_classes_errors.append(err_cnt)
total_errors.append(err_cnt + sum(rule_errors))
# finding the smallest number of errors
# but checking if at least one rule classified an instance
if len(total_errors) != 0:
min_errors = min(total_errors)
# finding the index of smallest number of errors
idx_to_cut = total_errors.index(min_errors)
final_classifier = classifier[:idx_to_cut+1]
default_class = default_classes[idx_to_cut]
# creating the final classifier
clf = Classifier()
clf.rules = final_classifier
clf.default_class = default_class
clf.default_class_attribute = classdist_keys[0][0]
else:
clf = Classifier()
clf.rules = []
possible_default_classes = list(class_distribution)
random_class_idx = random.randrange(0, len(possible_default_classes))
default_class_att, default_class_value = classdist_keys[random_class_idx]
clf.default_class = default_class_value
clf.default_class_attribute = default_class_att
self.calculate_default_class_properties(clf)
return clf
```
#### File: pyarc/algorithms/rule_algorithm.py
```python
from collections import Counter
from ..data_structures import ClassAssocationRule, Antecedent, Consequent
class RuleBuilderAlgorithm:
"""Common ancestor for M1 and M2 Algorithms
to provide common interface.
"""
def __init__(self, rules, dataset):
self.rules = rules
self.dataset = dataset
self.y = dataset.class_labels
def update_class_distr(self, classdist, rule):
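        # Counter difference: subtract the cases covered by the rule and drop
        # classes whose count falls to zero or below.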
return classdist - rule.class_cases_covered
def calculate_default_class_properties(self, clf):
"""This function is used for calculating
default class support and confidence
"""
default_class = clf.default_class
class_distribution = Counter([ value for _, value in self.y])
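        # with an empty antecedent, the default rule's support and confidence
        # both reduce to the relative frequency of the default class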
clf.default_class_support = class_distribution[default_class] / len(self.y)
clf.default_class_confidence = class_distribution[default_class] / len(self.y)
default_rule_ant = Antecedent({})
default_rule_conseq = Consequent(clf.default_class_attribute, clf.default_class)
clf.default_rule = ClassAssocationRule(
default_rule_ant,
default_rule_conseq,
clf.default_class_support,
clf.default_class_confidence
)
```
#### File: pyarc/data_structures/car.py
```python
import collections
class ClassAssocationRule():
"""ClassAssociationRule (CAR) is defined by its antecedent, consequent,
support, confidence and id.
It has a set of Items in its antecedent and one Item in its
Consequent.
__lt__ and __gt__ operators are overriden so that list of CARs can
be sorted.
Parameters
----------
antecedent: Antecedent
Items that a Transaction has to satisfy
consequent: Consequent
Target class of a Transaction that satisfies
antecedent
support: float
how many transactions satisfy the rule, relatively
confidence: float
relative degree of certainty that consequent holds
given antecedent
Attributes
----------
antecedent
    consequent
support
confidence
rid: int
rule id
support_count: int
absolute support count
marked: bool
class_cases_covered: collections.Counter
counter for determining which transactions are
covered by the antecedent. Important for M2Algorithm.
replace: set of ClassAssociationRule
set of rules that have higher precedence than
this rule and can replace it in M2Algorithm.
"""
id = 0
def __init__(self, antecedent, consequent, support, confidence):
self.antecedent = antecedent
self.consequent = consequent
self.support = support
self.confidence = confidence
self.rulelen = len(antecedent) + 1
self.rid = ClassAssocationRule.id
ClassAssocationRule.id += 1
self.support_count = 0
self.marked = False
self.class_cases_covered = collections.Counter()
self.replace = set()
def __gt__(self, other):
"""
precedence operator. Determines if this rule
has higher precedence. Rules are sorted according
to their confidence, support, length and id.
"""
if (self.confidence > other.confidence):
return True
elif (self.confidence == other.confidence and
self.support > other.support):
return True
elif (self.confidence == other.confidence and
self.support == other.support and
self.rulelen < other.rulelen):
return True
elif(self.confidence == other.confidence and
self.support == other.support and
self.rulelen == other.rulelen and
self.rid < other.rid):
return True
else:
return False
def __lt__(self, other):
"""
rule precedence operator
"""
return not self > other
def __len__(self):
"""
returns
-------
length of this rule
"""
return len(self.antecedent) + len(self.consequent)
def __repr__(self):
args = [self.antecedent.string(), "{" + self.consequent.string() + "}", self.support, self.confidence, self.rulelen, self.rid]
text = "CAR {} => {} sup: {:.2f} conf: {:.2f} len: {}, id: {}".format(*args)
return text
```
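A quick sketch of the precedence operator defined above, reusing the constructors that appear in the test files later in this dump; the support and confidence numbers are invented:
```python
from pyarc.data_structures import Item, Antecedent, Consequent, ClassAssocationRule
# Two toy rules with identical confidence and support
car_a = ClassAssocationRule(Antecedent([Item("A", 1)]),
                            Consequent("Y", 1), support=0.4, confidence=0.9)
car_b = ClassAssocationRule(Antecedent([Item("A", 1), Item("B", 0)]),
                            Consequent("Y", 0), support=0.4, confidence=0.9)
# Confidence and support tie, so the shorter rule wins by rule length
print(car_a > car_b)                                             # True
print(sorted([car_b, car_a], reverse=True)[0].rid == car_a.rid)  # True
```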
#### File: pyarc/utils/plotting.py
```python
from ..data_structures import TransactionDB
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import pandas as pd
import numpy
import re
movies = pd.read_csv("../data/movies.csv", sep=";")
movies_discr = movies.copy(True)
budget_bins = range(0, 350, 50)
budget_bins_names = [ "<{0};{1})".format(i, i + 50) for i in budget_bins[:-1] ]
celebrities_bins = range(0, 10, 2)
celebrities_bins_names = [ "<{0};{1})".format(i, i + 2) for i in celebrities_bins[:-1] ]
transactionDB = TransactionDB.from_DataFrame(movies_discr, unique_transactions=True)
movies_vals = movies.get_values()
x = range(0, 350, 50)
y = range(1, 9)
x_points = list(map(lambda n: n[0], movies_vals))
y_points = list(map(lambda n: n[1], movies_vals))
data_class = list(movies['class'])
appearance = {
'box-office-bomb': ('brown', "o"),
'main-stream-hit': ('blue', "o"),
'critical-success': ('green', "o")
}
rule_appearance = {
'box-office-bomb': 'tan',
'main-stream-hit': 'aqua',
'critical-success': 'lightgreen'
}
plt.style.use('seaborn-white')
def plot_qrule(qrule, plt):
interval_regex = "(?:<|\()(\d+(?:\.(?:\d)+)?);(\d+(?:\.(?:\d)+)?)(?:\)|>)"
lower_y = 0
area_y = celebrities_bins[-1]
lower_x = 0
area_x = budget_bins[-1]
antecedent = qrule.antecedent
if len(antecedent) != 0:
if antecedent[0][0] == "a-list-celebrities":
y = antecedent[0]
y_boundaries = re.search(interval_regex, repr(y[1]))
lower_y = float(y_boundaries.group(1))
upper_y = float(y_boundaries.group(2))
area_y = upper_y - lower_y
axis = plt.gca()
else:
x = antecedent[0]
x_boundaries = re.search(interval_regex, repr(x[1]))
lower_x = float(x_boundaries.group(1))
upper_x = float(x_boundaries.group(2))
area_x = upper_x - lower_x
if len(antecedent) > 1:
if antecedent[1][0] == "a-list-celebrities":
y = antecedent[0]
y_boundaries = re.search(interval_regex, repr(y[1]))
lower_y = float(y_boundaries.group(1))
upper_y = float(y_boundaries.group(2))
area_y = upper_y - lower_y
axis = plt.gca()
else:
x = antecedent[1]
x_boundaries = re.search(interval_regex, repr(x[1]))
lower_x = float(x_boundaries.group(1))
upper_x = float(x_boundaries.group(2))
area_x = upper_x - lower_x
axis = plt.gca()
class_name = qrule.consequent[1]
axis.add_patch(
patches.Rectangle((lower_x, lower_y), area_x, area_y, zorder=-2, facecolor=rule_appearance[class_name], alpha=qrule.confidence)
)
def plot_rule(rule, plt):
interval_regex = "<(\d+);(\d+)\)"
lower_y = 0
area_y = celebrities_bins[-1]
lower_x = 0
area_x = budget_bins[-1]
if len(rule.antecedent) != 0:
if rule.antecedent[0][0] == "a-list-celebrities":
y = rule.antecedent[0]
y_boundaries = re.search(interval_regex, y[1])
lower_y = float(y_boundaries.group(1))
upper_y = float(y_boundaries.group(2))
area_y = upper_y - lower_y
axis = plt.gca()
else:
x = rule.antecedent[0]
x_boundaries = re.search(interval_regex, x[1])
lower_x = float(x_boundaries.group(1))
upper_x = float(x_boundaries.group(2))
area_x = upper_x - lower_x
if len(rule.antecedent) > 1:
if rule.antecedent[1][0] == "a-list-celebrities":
y = rule.antecedent[1]
y_boundaries = re.search(interval_regex, y[1])
lower_y = float(y_boundaries.group(1))
upper_y = float(y_boundaries.group(2))
area_y = upper_y - lower_y
axis = plt.gca()
else:
x = rule.antecedent[1]
x_boundaries = re.search(interval_regex, x[1])
lower_x = float(x_boundaries.group(1))
upper_x = float(x_boundaries.group(2))
area_x = upper_x - lower_x
axis = plt.gca()
class_name = rule.consequent[1]
axis.add_patch(
patches.Rectangle((lower_x, lower_y), area_x, area_y, zorder=-2, facecolor=rule_appearance[class_name], alpha=rule.confidence)
)
def plot_quant_rules(qrules):
for r in qrules:
plot_qrule(r, plt)
# data cases
for i in range(len(x_points)):
plt.scatter(x_points[i], y_points[i], marker=appearance[data_class[i]][1], color=appearance[data_class[i]][0], s=60)
# rule boundary lines
for i, n in enumerate(x):
plt.axhline(y=y[i], color = "grey", linestyle="dashed")
plt.axvline(x=x[i], color = "grey", linestyle="dashed")
plt.xlabel('Estimated Budget (1000$)')
plt.ylabel('A-List Celebrities')
def plot_rules(rules):
for r in rules:
plot_rule(r, plt)
# data cases
for i in range(len(x_points)):
plt.scatter(x_points[i], y_points[i], marker=appearance[data_class[i]][1], color=appearance[data_class[i]][0], s=60)
# rule boundary lines
for i, n in enumerate(x):
plt.axhline(y=y[i], color = "grey", linestyle="dashed")
plt.axvline(x=x[i], color = "grey", linestyle="dashed")
plt.xlabel('Estimated Budget (1000$)')
plt.ylabel('A-List Celebrities')
```
#### File: qcba/test/test_interval.py
```python
import unittest
from pyarc.qcba.data_structures import Interval
class TestInterval(unittest.TestCase):
def test_overlaps_with(self):
i1 = Interval(3, 5, True, True)
i2 = Interval(4, 4.5, True, True)
i3 = Interval(4, 6, True, True)
i4 = Interval(3, 4, True, True)
i5 = Interval(6, 7, True, True)
assert i2.overlaps_with(i1)
assert i1.overlaps_with(i2)
assert i1.overlaps_with(i3)
assert i3.overlaps_with(i1)
assert i1.overlaps_with(i4)
assert i4.overlaps_with(i1)
assert not i1.overlaps_with(i5)
assert not i5.overlaps_with(i1)
```
#### File: pyarc/test/test_antecedent.py
```python
import unittest
from pyarc.data_structures import Item, Antecedent
class TestAntecedentClass(unittest.TestCase):
def test_getattr(self):
item1 = Item("a", 3)
item2 = Item("b", 3)
item3 = Item("c", 2)
ant1 = Antecedent([item1, item2, item3])
assert ant1.a == "3"
assert ant1.b == "3"
assert ant1.c == "2"
def test_getitem(self):
item1 = Item("a", 3)
item2 = Item("b", 3)
item3 = Item("c", 2)
ant1 = Antecedent([item1, item2, item3])
assert ant1[0] in [item1, item2, item3]
assert ant1[1] in [item1, item2, item3]
assert ant1[2] in [item1, item2, item3]
def test_init(self):
item1 = Item("a", 3)
item2 = Item("a", 3)
item3 = Item("c", 2)
ant1 = Antecedent([item1, item2, item3])
assert len(ant1.itemset) == 2
def test_len(self):
item1 = Item("a", 3)
item2 = Item("b", 3)
item3 = Item("c", 2)
item4 = Item("c", 4)
ant1 = Antecedent([item1, item2, item3])
ant2 = Antecedent([item1, item2, item3, item4])
assert len(ant1) == 3
assert len(ant2) == 3
def test_hash(self):
item1 = Item("a", 3)
item2 = Item("b", 3)
item3 = Item("c", 2)
ant1 = Antecedent([item1, item2, item3])
ant2 = Antecedent([item1, item2, item3])
assert hash(ant1) == hash(ant2)
assert ant1 == ant2
```
#### File: pyarc/test/test_rule_generation.py
```python
import unittest
from pyarc.data_structures import (
Item,
Antecedent,
Consequent,
ClassAssocationRule,
Transaction,
TransactionDB
)
from pyarc.algorithms import (
createCARs,
generateCARs,
top_rules,
)
from utils import HiddenPrints
class TestRuleGeneration(unittest.TestCase):
def test_generateCARs(self):
header1 = ["A", "B", "Y"]
rows1 = [
[1, 1, 0],
[1, 1, 0],
[1, 1, 1],
[0, 0, 0],
[0, 0, 1],
[0, 0, 1]
]
transactionDB1 = TransactionDB(rows1, header1)
rules = generateCARs(transactionDB1, support=50)
car1 = ClassAssocationRule([], Consequent("Y", 1), support=0.5, confidence=0.5)
car1.id = rules[0].id
car2 = ClassAssocationRule([], Consequent("Y", 0), support=0.5, confidence=0.5)
car2.id = rules[1].id
car1 == rules[0]
car2 == rules[1]
def test_createCARs(self):
generated_rules = [
('Y:=:1', (), 0.5, 0.5),
('Y:=:0', (), 0.5, 0.5),
('Y:=:1', ('A:=:1',), 0.5, 1 / 3)
]
cars = createCARs(generated_rules)
assert cars[0].consequent == Consequent("Y", 1)
assert cars[0].confidence == 0.5
assert cars[0].support == 0.5
assert cars[1].consequent == Consequent("Y", 0)
assert cars[1].confidence == 0.5
assert cars[1].support == 0.5
assert cars[2].consequent == Consequent("Y", 1)
assert cars[2].antecedent == Antecedent([Item("A", 1)])
assert cars[2].confidence == 1 / 3
assert cars[2].support == 0.5
def test_top_rules(self):
header1 = ["A", "B", "Y"]
rows1 = [
[1, 1, 0],
[1, 1, 0],
[1, 1, 1],
[0, 0, 0],
[0, 0, 1],
[0, 0, 1]
]
transactionDB1 = TransactionDB(rows1, header1)
rules = None
with HiddenPrints():
rules = top_rules(transactionDB1.string_representation, appearance=transactionDB1.appeardict)
expected_rules = [
('Y:=:1', ('A:=:1',), 1/6, 1/3),
('Y:=:0', ('A:=:1',), 1/3, 2/3),
('Y:=:1', ('B:=:1',), 1/6, 1/3),
('Y:=:0', ('B:=:1',), 1/3, 2/3),
('Y:=:1', ('B:=:1', 'A:=:1'), 1/6, 1/3),
('Y:=:0', ('B:=:1', 'A:=:1'), 1/3, 2/3),
('Y:=:1', ('A:=:0',), 1/3, 2/3),
('Y:=:0', ('A:=:0',), 1/6, 1/3),
('Y:=:1', ('B:=:0',), 1/3, 2/3),
('Y:=:0', ('B:=:0',), 1/6, 1/3),
('Y:=:1', ('B:=:0', 'A:=:0'), 1/3, 2/3),
('Y:=:0', ('B:=:0', 'A:=:0'), 1/6, 1/3)
]
for r in rules:
assert r in expected_rules
```
#### File: pyarc/test/test_transaction.py
```python
import unittest
from pyarc.data_structures import (
Transaction,
UniqueTransaction,
Item,
Antecedent
)
class TestTransaction(unittest.TestCase):
def test_init(self):
row1 = [1, 1, 0]
header1 = ["A", "B", "C"]
transaction1 = Transaction(row1, header1, ("Class", 0))
transaction2 = UniqueTransaction(row1, header1, ("Class", 0))
def test_getclass(self):
row1 = [1, 1, 0]
header1 = ["A", "B", "C"]
transaction1 = Transaction(row1, header1, ("Class", 0))
assert transaction1.getclass() == ("Class", 0)
def test_unique_hash(self):
row1 = [1, 1, 0]
header1 = ["A", "B", "C"]
transaction2 = UniqueTransaction(row1, header1, ("Class", 0))
assert hash(transaction2) == hash(transaction2.tid)
def test_getitem(self):
row1 = [1, 1, 0]
header1 = ["A", "B", "C"]
transaction1 = Transaction(row1, header1, ("Class", 0))
assert transaction1[0] == Item("A", 1)
assert transaction1[1] == Item("B", 1)
assert transaction1[2] == Item("C", 0)
def test_hash(self):
row1 = [1, 1, 0]
header1 = ["A", "B", "C"]
row2 = [1, 1, 0]
header2 = ["A", "B", "C"]
row3 = [1, 1, 1]
header3 = "cde"
transaction1 = Transaction(row1, header1, ("Class", 0))
transaction2 = Transaction(row2, header2, ("Class", 0))
transaction3 = Transaction(row3, header3, ("Class", 2))
assert transaction1 == transaction2
assert transaction1 != transaction3
assert transaction2 != transaction3
def test_string_items(self):
row1 = [1, 1, 0]
header1 = ["A", "B", "C"]
transaction1 = Transaction(row1, header1, ("Y", 0))
assert transaction1.string_items == ["A:=:1", "B:=:1", "C:=:0", "Y:=:0"]
``` |
{
"source": "jirijanata/pybricks-projects",
"score": 2
} |
#### File: ev3-home-bonus/wack3m/wack3m.py
```python
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import Motor, TouchSensor, InfraredSensor
from pybricks.media.ev3dev import ImageFile, SoundFile
from pybricks.parameters import Direction, Port, Stop, Color
from pybricks.tools import wait
from time import sleep, time
from random import randint, uniform
class Wack3m:
N_WHACK_TIMES = 10
def __init__(
self,
left_motor_port: str = Port.B, right_motor_port: str = Port.C,
middle_motor_port: str = Port.A,
touch_sensor_port: str = Port.S1, ir_sensor_port: str = Port.S4):
self.ev3_brick = EV3Brick()
self.left_motor = Motor(port=left_motor_port,
positive_direction=Direction.CLOCKWISE)
self.right_motor = Motor(port=right_motor_port,
positive_direction=Direction.CLOCKWISE)
self.middle_motor = Motor(port=middle_motor_port,
positive_direction=Direction.CLOCKWISE)
self.touch_sensor = TouchSensor(port=touch_sensor_port)
self.ir_sensor = InfraredSensor(port=ir_sensor_port)
def start_up(self):
self.ev3_brick.light.on(color=Color.RED)
self.ev3_brick.screen.print('WACK3M')
self.left_motor.run_time(
speed=-1000,
time=1000,
then=Stop.HOLD,
wait=True)
self.left_motor.reset_angle(angle=0)
self.middle_motor.run_time(
speed=-1000,
time=1000,
then=Stop.HOLD,
wait=True)
self.middle_motor.reset_angle(angle=0)
self.right_motor.run_time(
speed=-1000,
time=1000,
then=Stop.HOLD,
wait=True)
self.right_motor.reset_angle(angle=0)
def play(self):
while True:
self.ev3_brick.speaker.play_file(file=SoundFile.START)
self.ev3_brick.screen.load_image(ImageFile.TARGET)
self.ev3_brick.light.on(color=Color.ORANGE)
while not self.touch_sensor.pressed():
wait(10)
self.ev3_brick.speaker.play_file(file=SoundFile.GO)
self.ev3_brick.light.on(color=Color.GREEN)
total_response_time = 0
sleep(1)
for _ in range(self.N_WHACK_TIMES):
self.ev3_brick.light.on(color=Color.GREEN)
self.ev3_brick.screen.load_image(ImageFile.EV3_ICON)
sleep(uniform(0.1, 3))
which_motor = randint(1, 3)
if which_motor == 1:
self.left_motor.run_angle(
speed=1000,
rotation_angle=90,
then=Stop.COAST,
wait=True)
start_time = time()
self.ev3_brick.screen.load_image(ImageFile.MIDDLE_LEFT)
self.left_motor.run_time(
speed=-1000,
time=500,
then=Stop.HOLD,
wait=True)
proximity = self.ir_sensor.distance()
while abs(self.ir_sensor.distance() - proximity) <= 4:
wait(10)
elif which_motor == 2:
self.middle_motor.run_angle(
speed=1000,
rotation_angle=210,
then=Stop.COAST,
wait=True)
start_time = time()
self.ev3_brick.screen.load_image(ImageFile.NEUTRAL)
self.middle_motor.run_time(
speed=-1000,
time=500,
then=Stop.COAST,
wait=True)
proximity = self.ir_sensor.distance()
while abs(self.ir_sensor.distance() - proximity) <= 5:
wait(10)
else:
self.right_motor.run_angle(
speed=1000,
rotation_angle=90,
then=Stop.COAST,
wait=True)
start_time = time()
self.ev3_brick.screen.load_image(ImageFile.MIDDLE_RIGHT)
self.right_motor.run_time(
speed=-1000,
time=500,
then=Stop.HOLD,
wait=True)
proximity = self.ir_sensor.distance()
while abs(self.ir_sensor.distance() - proximity) <= 5:
wait(10)
response_time = time() - start_time
self.ev3_brick.screen.load_image(ImageFile.DIZZY)
self.ev3_brick.screen.print(response_time)
self.ev3_brick.light.on(color=Color.RED)
self.ev3_brick.speaker.play_file(file=SoundFile.BOING)
total_response_time += response_time
average_response_time = total_response_time / self.N_WHACK_TIMES
self.ev3_brick.screen.clear()
self.ev3_brick.screen.print(
'Avg. Time: {:.1f}s'.format(average_response_time))
if average_response_time <= 1:
self.ev3_brick.speaker.play_file(file=SoundFile.FANTASTIC)
else:
self.ev3_brick.speaker.play_file(SoundFile.GOOD_JOB)
self.ev3_brick.speaker.play_file(file=SoundFile.GAME_OVER)
self.ev3_brick.light.on(color=Color.RED)
sleep(4)
```
#### File: robot-inventor/steerbot/main.py
```python
from pybricks.hubs import InventorHub
from pybricks.pupdevices import Motor, ColorSensor
from pybricks.parameters import Port, Button, Stop
from pybricks.tools import wait
# Initialize the hub, motors, and sensor
hub = InventorHub()
steer_motor = Motor(Port.A)
drive_motor = Motor(Port.B)
sensor = ColorSensor(Port.C)
def WaitForButton(b):
# Wait for press
while b not in hub.buttons.pressed():
wait(10)
# and release
while b in hub.buttons.pressed():
wait(10)
# Use the color saturation value to track line
def GetLight():
return (sensor.hsv().s)
def Calibrate():
global aSteerLimit
global lMin, lMax, signEdge
# Find the Right and Left hard limits
aRightLimit = steer_motor.run_until_stalled(400, then=Stop.BRAKE, duty_limit=100)
aLeftLimit = steer_motor.run_until_stalled(-400, then=Stop.BRAKE, duty_limit=100)
# Calculate the steering limit as average of two extremes then reset
# angle to the negative limit since steering motor is now at neg. extreme
aSteerLimit = (aRightLimit-aLeftLimit)//2
steer_motor.reset_angle(-aSteerLimit)
# Scan from -30 to 30 to get min max of light sensor value
steer_motor.run_target(1000,-30, then=Stop.BRAKE)
lMin = 1024; lMax = 0
lLeft = GetLight()
steer_motor.run(100)
c = 0
while steer_motor.angle() < 30:
c += 1
l = GetLight()
if l > lMax: lMax = l
if l < lMin: lMin = l
wait(5)
steer_motor.stop()
lRight = GetLight()
# signEdge is positive 1 if left edge and -1 if right edge
signEdge = 1 if lLeft < lRight else -1
# Center the steering
steer_motor.run_target(1000,0,then=Stop.BRAKE,wait=False)
SPEED_MAX = 1000
SPEED_TURN = 500
SPEED_OFFLINE = 400
def TrackSpeedControl():
global lMin, lMax, signEdge
global aSteerLimit
lMid = (lMax+lMin+1)//2
m = 20.0/(lMax-lMid)
# Calculate a threshold to determine steering is not near the edge
lOffEdgeThresh = (lMax-lMid) * 0.7
# Set max speed, acceleration, and max power for drive motor
drive_motor.stop() # must be stopped to set limits
drive_motor.control.limits(1000,2000,100)
while hub.buttons.pressed() == []:
# Get a new light value and subtract mid to get signed error from edge
l = signEdge * (GetLight()-lMid)
# Create a new target for the steering motor to move toward the
# approximate position of the edge
a = steer_motor.angle()
t = a - m*l
# Clamp the target angle to within +- aSteerLimit
t = min(t, aSteerLimit)
t = max(t, -aSteerLimit)
# Now update target to move toward edge of line
steer_motor.track_target(t)
# Speed control
if abs(l) < lOffEdgeThresh:
# On edge of line
if abs(t) < 25:
# and going straight
drive_motor.run(SPEED_MAX)
else:
drive_motor.run(SPEED_TURN)
else:
drive_motor.run(SPEED_OFFLINE)
wait(3)
drive_motor.run(0)
steer_motor.track_target(0)
wait(200)
steer_motor.stop()
drive_motor.stop()
while not any(hub.buttons.pressed()):
wait(10)
while True:
WaitForButton(Button.RIGHT)
Calibrate()
TrackSpeedControl()
``` |
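The tracking loop above is a simple proportional controller: the steering target is `t = a - m*l`, clamped to `±aSteerLimit`. A small numeric trace with invented calibration values (not from a real run) looks like this:
```python
# Invented calibration results, only to trace one iteration of TrackSpeedControl()
lMin, lMax = 20, 80
aSteerLimit = 55
signEdge = 1                    # tracking the left edge of the line
lMid = (lMax + lMin + 1) // 2   # 50
m = 20.0 / (lMax - lMid)        # ~0.67 degrees of steering per unit of saturation error
a = 10                          # current steering angle
light = 35                      # current saturation reading
l = signEdge * (light - lMid)   # -15 -> sensor has drifted off the edge
t = a - m * l                   # 10 + 10 = 20 -> steer back toward the edge
t = max(min(t, aSteerLimit), -aSteerLimit)
print(round(t, 1))              # 20.0
```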
{
"source": "jirikadlec2/django-jquery-file-upload",
"score": 2
} |
#### File: resumable/tests/app.py
```python
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.views.generic.edit import FormView
from django.forms import Form
from django.core.urlresolvers import reverse
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from resumable.views import ResumableUploadView
from resumable.fields import ResumableFileField
class ResumableForm(Form):
file = ResumableFileField(
allowed_mimes=("audio/ogg",),
upload_url=lambda: reverse('upload'),
chunks_dir=getattr(settings, 'FILE_UPLOAD_TEMP_DIR')
)
class TestFormView(FormView):
form_class = ResumableForm
template_name = 'form.html'
@property
def success_url(self):
return reverse('form')
urlpatterns = staticfiles_urlpatterns()
urlpatterns = [
url('^$', TestFormView.as_view(), name='form'),
url('^upload/$', ResumableUploadView.as_view(), name='upload')
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
``` |
{
"source": "jirikadlec2/garmin-client",
"score": 3
} |
#### File: jirikadlec2/garmin-client/test_selenium.py
```python
from selenium import webdriver
import time
#auxiliary functions
def read_saved_track_names(track_file):
tracks = set()
with open(track_file) as f:
for line in f:
line2 = line.strip()
tracks.add(line2)
return tracks
def save_garmin_tracks(activity_links, track_file, mode):
with open(track_file, mode) as myfile:
for link in activity_links:
link = link.strip()
myfile.write(link+'\n')
def extract_activity_links(browser, new_links, activity_links):
activities_el = browser.find_element_by_id('gridForm:gridList:tb')
for anchor in activities_el.find_elements_by_tag_name('a'):
activity_link = anchor.get_attribute("href")
if not activity_link is None:
if '/activity/' in activity_link:
activity_links.add(activity_link)
new_links.add(activity_link)
def move_to_next_page(browser):
footer_el = browser.find_element_by_class_name('resultsFooter')
btn_found = False
for btn in footer_el.find_elements_by_class_name('rich-datascr-button'):
if btn.text == '»':
btn_found = True
btn.click()
break
return btn_found
def select_start_date(browser, n_years):
#move one year back..
for i in range(1, n_years):
calendar1 = browser.find_element_by_id('exploreSearchForm:startDateCalendarPopupButton')
calendar1.click()
time.sleep(1)
calendar_button = browser.find_element_by_class_name('rich-calendar-tool-btn')
calendar_button.click()
time.sleep(1)
#choose date..
date_button = browser.find_element_by_id('exploreSearchForm:startDateCalendarDayCell7')
date_button.click()
time.sleep(2)
def zoom_out_map(browser, n_zooms):
for i in range(1, n_zooms):
mapZoomOut = browser.find_element_by_class_name("map-zoom-out")
mapZoomOut.click()
time.sleep(5)
################################################
# saves the GARMIN activity links for selected
# CITY and the number of the past years
################################################
def save_garmin_activity_links(city, n_years, track_file):
activity_links = read_saved_track_names(track_file)
new_links = set()
browser = webdriver.Firefox()
url = "https://sso.garmin.com/sso/login?service=https%3A%2F%2Fconnect.garmin.com%2FminExplore&webhost=olaxpw-connect00&source=https%3A%2F%2Fconnect.garmin.com%2Fen-US%2Fsignin&redirectAfterAccountLoginUrl=https%3A%2F%2Fconnect.garmin.com%2Fpost-auth%2Flogin&redirectAfterAccountCreationUrl=https%3A%2F%2Fconnect.garmin.com%2Fpost-auth%2Flogin&gauthHost=https%3A%2F%2Fsso.garmin.com%2Fsso&locale=en_US&id=gauth-widget&cssUrl=https%3A%2F%2Fstatic.garmincdn.com%2Fcom.garmin.connect%2Fui%2Fcss%2Fgauth-custom-v1.1-min.css&clientId=GarminConnect&rememberMeShown=true&rememberMeChecked=false&createAccountShown=true&openCreateAccount=false&usernameShown=false&displayNameShown=false&consumeServiceTicket=false&initialFocus=true&embedWidget=false&generateExtraServiceTicket=false"
browser.get(url)
time.sleep(10)
username = browser.find_element_by_id("username")
password = browser.find_element_by_id("password")
username.send_keys("<EMAIL>")
password.send_keys("<PASSWORD>")
login_attempt = browser.find_element_by_xpath("//*[@type='submit']")
login_attempt.submit()
#now show filters..
time.sleep(10)
show_filters = browser.find_element_by_id("showFilters")
show_filters.click()
#select the activity type option
el = browser.find_element_by_id('exploreSearchForm:activityType')
for option in el.find_elements_by_tag_name('option'):
if option.text == 'Cross Country Skiing':
option.click()
break
#select the time period option
time.sleep(2)
time_el = browser.find_element_by_id('exploreSearchForm:timePeriodSelect')
for option in time_el.find_elements_by_tag_name('option'):
if option.text == 'Custom Dates':
option.click()
break
#select the start date (10 years back..)
select_start_date(browser, n_years)
#select the end date (start of current month..)
time.sleep(2)
calendar2 = browser.find_element_by_id('exploreSearchForm:endDateCalendarPopupButton')
calendar2.click()
date_button = browser.find_element_by_id('exploreSearchForm:endDateCalendarDayCell7')
date_button.click()
#now search a new location ..
time.sleep(5)
location = browser.find_element_by_id("exploreSearchForm:location")
location.send_keys(city)
searchButton = browser.find_element_by_id("searchButton")
searchButton.submit()
#find the grid list
next_active = True
while next_active:
time.sleep(10)
len1 = len(new_links)
extract_activity_links(browser, new_links, activity_links)
len2 = len(new_links)
next_active = len2 > len1
time.sleep(2)
move_to_next_page(browser)
save_garmin_tracks(activity_links, track_file, "w")
browser.close()
print(city + ' : ' + str(len(new_links)))
f = "garmin_tracks2.txt"
trk = read_saved_track_names(f)
save_garmin_tracks(trk, f, "w")
trk = []
#save_garmin_activity_links('Brno', 10, f)
#save_garmin_activity_links('<NAME>', 10, f)
#save_garmin_activity_links('Chomutov', 10, f)
#save_garmin_activity_links('Kvilda', 10, f)
#save_garmin_activity_links('Klingenthal', 10, f)
#save_garmin_activity_links('Jablunkov', 10, f)
#save_garmin_activity_links('Svratka', 10, f)
#save_garmin_activity_links('Jilemnice', 10, f)
#save_garmin_activity_links('Trutnov', 10, f)
#save_garmin_activity_links('Mladkov', 10, f)
#save_garmin_activity_links('Mikulovice', 10, f)
#save_garmin_activity_links('Olomouc', 10, f)
#save_garmin_activity_links('Protivanov', 10, f)
#save_garmin_activity_links('Karolinka', 10, f)
#save_garmin_activity_links('Jihlava', 10, f)
#save_garmin_activity_links('Kocelovice', 10, f)
#save_garmin_activity_links('Altenberg', 10, f)
#save_garmin_activity_links('Oberwiesenthal', 10, f)
#save_garmin_activity_links('Zittau', 10, f)
#save_garmin_activity_links('Heroltovice', 10, f)
#save_garmin_activity_links('Rokytno', 10, f)
cities1 = [
'Flossenburg', 'Olbernhau', '<NAME>',
'Kvan', 'Rozmital', '<NAME>', 'Primda', 'Honezovice',
'Tremosna', 'Cunkov', 'Jistebnice', 'Hartvikov', 'Frymburk',
'<NAME>', 'Pisek', 'Pribram', '<NAME>',
'<NAME>', '<NAME>', 'Ricany', 'Chotebor',
'Hlinsko', 'Napajedla', 'Zlin', 'Rajnochovice', 'Papajci', '<NAME>',
'Zdobnice', 'Sedlonov', 'Krnov', 'Vitkov', '<NAME>', 'Kouty nad Desnou',
'<NAME>', '<NAME>', '<NAME>', 'Bruntal',
'<NAME>']
cities2 = ['Sternberk', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', 'Hodonin', 'Hartmanice',
'Brcalnik', 'Keply', 'Vimperk', 'Klet', 'Teskov', '<NAME>',
'<NAME>', 'Teskov', 'Letohrad','Johanngeorgenstadt','Pernink','Medenec',
'Bublava','<NAME>', 'Johstadt', 'Vejprty', 'Bolebor']
cities3 = ['Holzhau',
'Moldava', 'Horazdovice','Sedlcany','Neveklov','Rymarov','Hanusovice',
'Sumperk']
cities4 = ['<NAME>', '<NAME>', '<NAME>', 'Varnsdorf',
'Modlibohov','Hodkovice nad Mohelkou', 'Jablonec nad Nisou','Rakovnik']
cities5 = ['Kladno', 'Luhacovice','Vyskov','Vizovice','Roznov pod Radhostem',
'Celadna','Hrcava', 'Rokytnice v Orlickych Horach','Hostinne',
'Vrchlabi','Hejnice']
cities6 = ['Nove Mesto pod Smrkem','Vernerice',
'Zdar nad Sazavou','Nova Bystrice','Kamenice nad Lipou','Telc']
cities7 = ['Bad Brambach','Becov nad Teplou','Rokycany','Stozec','Borova Lada',
'Lam','<NAME>','Karlstift','Svetla nad Sazavou','Cechtice',
'Policka','Jimramov','Cenkovice','Kraliky','Miedzylesie','Zacler',
'<NAME>','<NAME>','Pec pod Snezkou','Horice',
'<NAME>','Strakonice','Kralovice','Strani','Lazy pod Makytou',
'Seiffen','Znojmo','Drahany','Kurim','<NAME>','Capartice',
'Rusava','Javornik','Vapenna','Lipova Lazne','Usti nad Orlici',
'Hronov','Police nad Metuji','Mezimesti','Jetrichovice','Dobris',
'Pelhrimov','Sec','Kyjov','Kaplice','Volary','Bayerisch Eisenstein',
'<NAME>','Aigen im Muhlkreis','Litschau','Waldmunchen',
'Selb','Auersberg','Sindelova','Nejdek','Marianska','Abertamy']
for city in cities7:
save_garmin_activity_links(city, 10, f)
``` |
{
"source": "jirikadlec2/hydrodata",
"score": 3
} |
#### File: hydrodata/hydrodata-py/voda_gov_cz.py
```python
import argparse
import os
import time
from datetime import datetime
from requests import get
from requests_html import HTMLSession
from urllib.parse import urljoin
def fetch_vodagov_charts(dst_dir, agency, base_url, subpages, datatype_prefix):
"""
Fetch graphs and html tables from voda.gov.cz
fetch_vodagov_charts(dst_dir='/home/jiri/meteodata',
agency='pod',
base_url='http://www.pvl.cz/portal/SaP/pc/?',
subpages=['oid=1', 'oid=2'],
datatype_prefix='streamflow')
:param dst_dir: destination directory where to save the data (subdirs are created automatically)
:param base_url: the base url [for example http://www.pvl.cz/portal/SaP/pc/? for streamflow,
http://www.pvl.cz/portal/srazky/pc/? for precipitation]
:param subpages: the list of sub-pages (for example ['oid=1', 'oid=2', 'oid=3'])
:param datatype_prefix: the data type. use 'streamflow' or 'precip'
:param agency: the short name of the operating agency. use pla, poh, pod, pvl or pmo
:return: number of charts and html pages downloaded
"""
#if datatype_prefix == 'streamflow':
#pvl_base = 'http://sap.poh.cz/portal/SaP/pc/?'
#else:
#pvl_base = 'http://sap.poh.cz/portal/Srazky/PC/?'
session = HTMLSession()
n_charts = 0
for subpage in subpages:
url = base_url + subpage
print('-----------------------------')
print(url)
print('-----------------------------')
r = session.get(url)
for lnk in r.html.absolute_links:
if 'Mereni.aspx?id=' or 'mereni.aspx?id=' in lnk:
try:
r_st = session.get(lnk)
images = r_st.html.find('img')
for img in images:
if 'src' not in img.attrs:
continue
src = img.attrs['src']
if ('graf' in src or 'Graf' in src) and ('miniatury' not in src) and ("&" not in src) and (".ashx" not in src):
if 'maska' in src:
continue
img_src_absolute = urljoin(lnk, src)
img_response = get(img_src_absolute)
if img_response.status_code == 200:
img_dir = os.path.join(dst_dir, datatype_prefix, agency, os.path.splitext(os.path.basename(img_src_absolute))[0])
if not os.path.exists(img_dir):
os.makedirs(img_dir)
utc_timestamp_text = datetime.utcnow().strftime('_%Y-%m-%dT%H0000z.png')
img_filename = os.path.basename(img_src_absolute).replace('.png', utc_timestamp_text)
img_path = os.path.join(img_dir, img_filename)
print(img_path)
with open(img_path, 'wb') as f:
f.write(img_response.content)
# also save the HTML
html_path = img_path.replace('.png', '.html')
html_response = get(lnk)
if html_response.status_code == 200:
print(html_path)
with open(html_path, 'wb') as f:
f.write(html_response.content)
n_charts += 1
except ValueError:
print('ERROR fetching ' + lnk)
return n_charts
def fetch_pmo_charts(dst_dir, agency, base_url, subpages, datatype_prefix):
"""
Fetch graphs and html tables from pmo (Povodi Moravy) water board
fetch_pmo_charts(dst_dir='/home/jiri/meteodata',
base_url='http://www.pmo.cz/portal/srazky/en/',
subpages=['prehled_tab_1_chp.htm', 'prehled_tab_2_chp.htm', 'prehled_tab_3_chp.htm'],
datatype_prefix='precip',
agency='pmo')
:param dst_dir: destination directory where to save the data (subdirs are created automatically)
:param base_url: the base url [for example http://www.pvl.cz/portal/SaP/pc/? for streamflow,
http://www.pvl.cz/portal/srazky/pc/? for precipitation]
:param subpages: the list of sub-pages (for example ['oid=1', 'oid=2', 'oid=3'])
:param datatype_prefix: the data type. use 'streamflow' or 'precip'
:param agency: the short name of the operating agency. use pla, poh, pod, pvl or pmo
:return: number of charts and html pages downloaded
"""
agency = "pmo"
session = HTMLSession()
n_charts = 0
for subpage in subpages:
url = base_url + subpage
print('-----------------------------')
print(url)
print('-----------------------------')
r = session.get(url)
anchors = r.html.find('a')
a_hrefs = [a for a in r.html.find('a') if "DoMereni" in a.attrs["href"]]
for a in a_hrefs:
id = a.attrs["href"].split("'")[1]
url_html = '{:s}/en/mereni_{:s}.htm'.format(base_url, id)
print(url_html)
if datatype_prefix == 'precip':
url_img = '{:s}/grafy/sr{:s}_en.gif'.format(base_url, id)
else:
url_img = '{:s}/grafy/{:s}.gif'.format(base_url, id)
print(url_img)
img_response = get(url_img)
if img_response.status_code == 200:
img_dir = os.path.join(dst_dir, datatype_prefix, agency, os.path.splitext(os.path.basename(url_img))[0])
if not os.path.exists(img_dir):
os.makedirs(img_dir)
utc_timestamp_text = datetime.utcnow().strftime('_%Y-%m-%dT%H0000z.gif')
img_filename = os.path.basename(url_img).replace('.gif', utc_timestamp_text)
img_path = os.path.join(img_dir, img_filename)
print(img_path)
with open(img_path, 'wb') as f:
f.write(img_response.content)
n_charts += 1
# also save the HTML
html_path = img_path.replace('.gif', '.htm')
html_response = get(url_html)
if html_response.status_code == 200:
print(html_path)
with open(html_path, 'wb') as f:
f.write(html_response.content)
return n_charts
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Downloads precipitation or streamflow data from voda.gov.cz")
parser.add_argument('-a', '--agency', help='code of the data provider agency (pla, poh, pod, pvl)', required=True)
parser.add_argument('-dt', '--datatype', help='data type name (streamflow, precip)', required=True)
parser.add_argument('-o', '--output', help='output directory name', required=True)
args = parser.parse_args()
config_streamflow = {
'poh':{'base_url':'https://sap.poh.cz/portal/SaP/en/pc/?oid=','subpages':['1', '2', '3']},
'pla':{'base_url':'http://www.pla.cz/portal/SaP/en/PC/?oid=','subpages':['1','2']},
'pod':{'base_url':'http://www.pod.cz/portal/SaP/en/pc/?oid=','subpages':['1','2']},
'pvl':{'base_url':'http://www.pvl.cz/portal/SaP/en/pc/?oid=','subpages':['1','2','3']},
'pmo':{'base_url':'http://www.pmo.cz/portal/sap','subpages':['/en/prehled_tab_1_chp.htm',
'/en/prehled_tab_2_chp.htm',
'/en/prehled_tab_3_chp.htm']}
}
config_precip = {
'poh':{'base_url':'https://sap.poh.cz/portal/Srazky/en/pc/?oid=','subpages':['1', '2', '3']},
'pla':{'base_url':'http://www.pla.cz/portal/Srazky/en/PC/?oid=','subpages':['1','2']},
'pod':{'base_url':'http://www.pod.cz/portal/Srazky/en/pc/?oid=','subpages':['1','2']},
'pvl':{'base_url':'http://www.pvl.cz/portal/Srazky/en/pc/?oid=','subpages':['1','2','3']},
'pmo':{'base_url':'http://www.pmo.cz/portal/srazky','subpages':['/en/prehled_tab_1_chp.htm',
'/en/prehled_tab_2_chp.htm',
'/en/prehled_tab_3_chp.htm']}
}
dst_dir = '/home/jiri/meteodata'
agencies = ["poh", "pla", "pod", "pmo", "pvl"]
if args.agency == "all":
agencies = ["poh", "pla", "pod", "pmo", "pvl"]
elif args.agency in agencies:
agencies = [args.agency]
else:
raise KeyError("bad agency name {:s}. the agency must be poh, pla, pod, pmo, pvl or all".format(args.agency))
for agency in agencies:
if args.datatype == "streamflow":
datasource = config_streamflow[agency]
else:
datasource = config_precip[agency]
if agency == 'pmo':
n_results = fetch_pmo_charts(dst_dir=args.output,
agency=agency,
datatype_prefix=args.datatype,
base_url=datasource['base_url'],
subpages=datasource['subpages'],
)
else:
for subpage in datasource['subpages']:
n_results = fetch_vodagov_charts(dst_dir=args.output,
agency=agency,
datatype_prefix=args.datatype,
base_url=datasource['base_url'],
subpages=[subpage],
)
MAX_RETRIES = 5
retry = 0
while n_results == 0 and retry <= MAX_RETRIES:
time.sleep(20)
retry += 1
print('RETRY DOWNLOAD {:d} for {:s}'.format(retry, subpage))
n_results = fetch_vodagov_charts(dst_dir=args.output,
agency=agency,
datatype_prefix=args.datatype,
base_url=datasource['base_url'],
subpages=[subpage],
)
print('downloaded results from {:s}: {:d}'.format(agency, n_results))
``` |
{
"source": "jirikadlec2/rushvalley",
"score": 3
} |
#### File: jirikadlec2/rushvalley/fetch_dxd.py
```python
__author__ = 'Jiri'
import xlrd
from lxml import etree
from os import listdir
from os.path import isfile, join
def get_dxd_passwords(password_file):
book = xlrd.open_workbook(password_file)
sheets = book.sheets()
sheet0 = sheets[0]
nr = sheet0.nrows
nc = sheet0.ncols
password_list = []
for i in range(1, nr):
logger = sheet0.cell_value(i, 0)
password = sheet0.cell_value(i, 1)
password_list.append({"logger": logger, "password": password})
return password_list
def read_mrid(dxd_file):
print 'reading dxd_file: %s' % dxd_file
doc = etree.parse(dxd_file)
root = doc.getroot()
for element in root.iter():
if 'Data' in element.tag:
rid = int(element.get('rid'))
return rid
return 0
def create_download_script(password_file, dxd_folder, out_file):
email = '<EMAIL>'
userpass = '<PASSWORD>'
url = 'http://api.ech2odata.com/dfmp/dxd.cgi'
sh = open(out_file, 'w')
dxd_info = get_dxd_passwords(password_file)
for dxd in dxd_info:
logger = dxd['logger']
password = dxd['password']
dxd_file = '%s/%s.dxd'% (dxd_folder, logger)
print dxd_file
mrid = read_mrid(dxd_file)
cmd = "curl --trace tracelog.txt -A BYU -d 'email=%s' -d 'userpass=%s' -d 'deviceid=%s' -d 'devicepass=%s' \
-d 'report=1' -d 'mrid=%s' '%s' > '%s.dxd'" % (email, userpass, logger, password, mrid, url, logger)
print cmd
sh.write(cmd)
sh.write('\n')
sh.close()
if __name__ == '__main__':
password_file = 'C:\\jiri\\Dropbox\\BYU\\hydroinformatics\\project\\passwords.xlsx'
passwords = get_dxd_passwords(password_file)
print passwords
script = create_download_script(password_file, 'C:/jiri/Dropbox/BYU/hydroinformatics/project/dxd',
'C:/jiri/Dropbox/BYU/hydroinformatics/project/decagon.sh')
```
#### File: rushvalley/python/converter.py
```python
__author__ = 'Jiri'
import bitstring
import math
####################################################################
# Base Class for Converting Decagon Data from raw data to SI Units #
# Expand this class for other sensors or data loggers #
####################################################################
class Converter(object):
#create a new converter based on class name
def create(sensor):
if sensor == "MPS-6":
return MPS6()
if sensor == "GS3":
return GS3()
if sensor == "SRS-Nr" or sensor == "SRS":
return SRSNr()
if sensor == "SRS-Ni":
return SRSNi()
if sensor == "PYR":
return PYR()
if sensor == "ECRN50Precip" or sensor == "ECRN50":
return ECRN50Precip()
if sensor == "VP3":
return VP3()
if sensor == "Anemo":
return Anemo()
assert 0, "The sensor type is not supported: " + sensor
create = staticmethod(create)
#convert raw bits from the port to the numeric raw value
def port(raw_bits, start_bit, end_bit):
#bits in the DECAGON specs are counted from right to left
start = 31 - end_bit
end = 32 - start_bit
raw_bits = bitstring.BitArray(uint=raw_bits, length=32)
subset = bitstring.BitArray(bin=raw_bits[start:end].bin)
return subset.uint
port = staticmethod(port)
#################################################################
# MPS-6 sensor (water potential, temperature) #
#################################################################
class MPS6(Converter):
def convert(self, response, raw_value):
#MPS-6 water potential
if response == 1:
nodata = -999999
rw = self.port(raw_value, start_bit=0, end_bit=15)
if rw == 65535:
return nodata
else:
return (10 ** (0.0001 * rw)) / -10.20408
#MPS-6 temperature
elif response == 2:
nodata = -9999
rt = self.port(raw_value, start_bit=16, end_bit=25)
if rt == 1023:
return nodata
elif rt <= 900:
return float(rt - 400) / 10.0
else:
return ((900 + 5 *(rt - 900)) - 400) / 10
##################################################################
# GS-3 sensor (WWC, temperature, EC) #
##################################################################
class GS3(Converter):
def convert(self, response, raw_value):
#volumnometric water content
if response == 1:
re = self.port(raw_value, start_bit=0, end_bit=11)
ea = float(re) / 50.0
wwc = 5.89e-6 * ea**3 - 7.62e-4 * ea**2 + 3.67e-2 * ea - 7.53e-2
return wwc
#temperature
elif response == 2:
rt = self.port(raw_value, start_bit=22, end_bit=31)
if rt <= 900:
return float(rt - 400) / 10.0
else:
return ((900 + 5 *(rt - 900)) - 400) / 10
#bulk electrical conductivity
elif response == 3:
rec = self.port(raw_value, start_bit=12, end_bit=21)
ec = float(10.0 ** float(rec / 215.0)) / 1000.0
return ec
####################################################################
# SRS-Nr NDVI Field Stop (sensor #114) #
####################################################################
class SRSNr(Converter):
def get_red(self, raw_value):
r630 = self.port(raw_value, start_bit=1, end_bit=11)
if r630 > 0:
return (10 ** float(r630 /480.0)) / 10000.0
else:
return 0
def get_nir(self, raw_value):
r800 = self.port(raw_value, start_bit=12, end_bit=22)
if r800 > 0:
return (10 ** float(r800 / 480.0)) / 10000.0
else:
return 0
def convert(self, response, raw_value):
#red spectral radiance (630 nm)
if response == 1:
return self.get_red(raw_value)
#NIR spectral radiance (800 nm)
elif response == 2:
return self.get_nir(raw_value)
#NDVI
elif response == 3:
nodata = -9999
ra = self.port(raw_value, start_bit=25, end_bit=31)
orientation = self.port(raw_value, start_bit=23, end_bit=24)
#if the sensor returns alpha=1, use the predefined default alpha
#otherwise, get alpha from the measured incident radiation
#print "raw_alpha: %s" % ra
if ra >= 50 and ra <= 126:
alpha = 100.0 / float(ra)
else:
alpha = 1.86
#print "alpha: %s" % alpha
red = self.get_red(raw_value)
nir = self.get_nir(raw_value)
if red > 0 and nir > 0:
return float(alpha * nir - red)/float(alpha * nir + red)
else:
return nodata
####################################################################
# SRS-Ni (sensor #115) #
####################################################################
class SRSNi(Converter):
#orientation: 0 up-facing, 1 down-facing,
# 2 up-facing, 3 bad reading
def get_orientation(self, raw_value):
return self.port(raw_value, start_bit=23, end_bit=24)
def get_red(self, raw_value):
orientation = self.get_orientation(raw_value)
nodata = -9999
if orientation == 3:
return nodata
r630 = self.port(raw_value, start_bit=1, end_bit=11)
print "r630: %s" % r630
if r630 == 0:
return 0
return (10 ** float(r630 /480.0)) / 10000.0
def get_nir(self, raw_value):
orientation = self.get_orientation(raw_value)
nodata = -9999
if orientation == 3:
return nodata
r800 = self.port(raw_value, start_bit=12, end_bit=22)
print "r800: %s" % r800
if r800 == 0:
return 0
return (10 ** float(r800 / 480.0)) / 10000.0
def convert(self, response, raw_value):
nodata = -9999
if raw_value == 0:
return nodata
if response == 1:
return self.get_red(raw_value)
elif response == 2:
return self.get_nir(raw_value)
elif response == 3:
ra = self.port(raw_value, start_bit=25, end_bit=31)
#check valid raw alpha is in range [50, 126]
if ra >= 50 and ra <= 126:
alpha = float(ra) / 100.0
else:
alpha = 100.0 / float(ra)
red = self.get_red(raw_value)
nir = self.get_nir(raw_value)
orientation = self.get_orientation(raw_value)
if orientation == 2:
alpha = red / nir
print "alpha: %s" % alpha
if red > 0 and nir > 0:
return float(alpha * nir - red)/float(alpha * nir + red)
else:
return nodata
######################################################################
# ECRN-50 Precipitation #
######################################################################
class ECRN50Precip(Converter):
def convert(self, response, raw_value):
return raw_value
######################################################################
# PYR Solar Radiation #
######################################################################
class PYR(Converter):
def convert(self, response, raw_value):
return raw_value * (1500.0/4096.0) * 5.0
######################################################################
# VP-3 Humidity/Air Temperature #
######################################################################
class VP3(Converter):
def get_temperature(self, raw_value):
rt = self.port(raw_value, start_bit=16, end_bit=25)
if rt <= 900:
return float(rt - 400) / 10.0
else:
return ((900 + 5 *(rt - 900)) - 400) / 10
def convert(self, response, raw_value):
#validity check, if invalid return NoData
if raw_value == 0:
return -9999.0
#vapor pressure - relative humidity
if response == 1:
re = self.port(raw_value, 0, 15)
ew = re / 100.0
t = self.get_temperature(raw_value)
saturated_vp = 0.611 * math.e ** ((17.502 * t) / (240.97+t))
return float(ew) / float(saturated_vp)
#temperature
elif response == 2:
return self.get_temperature(raw_value)
########################################################################
# Sonic Anemo Wind - NEED CHECK!!! #
########################################################################
class Anemo(Converter):
def convert(self, response, raw_value):
detect_flag = self.port(raw_value, 0, 0)
if detect_flag == 0:
return -9999.0
if response == 1:
d = self.port(raw_value, 1, 9)
return d
elif response == 2:
rs = self.port(raw_value, 20, 29)
#print rs
return 1.006 * rs / 10.0
elif response == 3:
rg = self.port(raw_value, 10, 19)
#print rg
return 1.006 * rg / 10.0
```
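A usage sketch of the converter module above, assuming the Python 2 environment it targets; the packed `raw_value` is invented, but the bit layout (bits 0-15 for raw water potential, bits 16-25 for raw temperature, counted from the right) follows the MPS-6 class:
```python
from converter import Converter
# Hypothetical raw fields packed into one 32-bit reading
raw_temp = 650                  # -> (650 - 400) / 10.0 = 25.0 degrees C
raw_wp = 30000                  # -> 10**(0.0001*30000) / -10.20408 = about -98.0
raw_value = (raw_temp << 16) | raw_wp
# Converter.port() slices the fields back out of the packed integer
print Converter.port(raw_value, start_bit=0, end_bit=15)    # 30000
print Converter.port(raw_value, start_bit=16, end_bit=25)   # 650
# The factory picks the sensor class; response 1 = water potential, 2 = temperature
mps6 = Converter.create("MPS-6")
print mps6.convert(2, raw_value)                             # 25.0
print round(mps6.convert(1, raw_value), 2)                   # -98.0
```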
#### File: rushvalley/python/data_transfer.py
```python
__author__ = 'Jiri'
# we use xlrd for reading the Excel lookup table file
import sys
import xlrd
import time
import json
import urllib2
import requests
import decagon
import argparse
import datetime
from dateutil.parser import parse
from converter import Converter
class Updater(object):
def __init__(self):
self.HYDROSERVER_USER = 'HIS_admin'
self.HYDROSERVER_PASSWORD = 'password'
self.HYDROSERVER_URL = 'http://worldwater.byu.edu/app/index.php/rushvalley/services/api/'
self.dxd_folder = 'dxd'
self.xlsfile = "01-LookupTable.xlsx"
self.old_timestamp = "none"
self.verbose = False
self.no_upload = False
# checks if the file is a file or not
def is_file(self, filename):
try:
with open(filename):
pass
return True
except IOError as e:
print "Unable to open file %s" % filename
return None
##################################################
# given a site code, gets the site id. #
# calls the GetSitesJSON function of the API #
# returns NONE if the site is not found. #
##################################################
def get_site_id(self, site_code):
url = self.HYDROSERVER_URL + 'GetSitesJSON'
r = requests.get(url)
sites = r.json()
for site in sites:
if site['SiteCode'] == site_code:
return site['SiteID']
return None
##################################################
# given a variable code, gets the variable id. #
# calls the GetVariablesJSON function of the API #
# returns NONE if the variable is not found. #
##################################################
def get_variable_id(self, variable_code):
url = self.HYDROSERVER_URL + 'GetVariablesJSON'
r = requests.get(url)
variables = r.json()
for variable in variables:
if variable['VariableCode'] == variable_code:
return variable['VariableID']
return None
####################################################################
# reads the lookup table to find the association between the #
# sensor, response <---> variable_id, method_id #
# uses the second sheet of the 01-Lookup lookup table Excel file #
####################################################################
def get_sensor_metadata(self, sensor):
book = xlrd.open_workbook(self.xlsfile)
sheets = book.sheets()
if self.verbose:
print "filename" + self.xlsfile
sheet1 = sheets[1]
nr = sheet1.nrows
lookup = []
for i in range(1, nr):
sensor_code = sheet1.cell_value(i, 0)
if sensor_code != sensor:
continue
variable_code = sheet1.cell_value(i, 2)
#find the corresponding variable ID
variable_id = self.get_variable_id(variable_code)
if variable_id is None:
if self.verbose:
print 'VariableID not found on server for VariableCode: ' + variable_code
continue
method_id = int(sheet1.cell_value(i, 3))
response = sheet1.cell_value(i, 4)
lookup.append({"sensor": sensor_code,
"variable": variable_code,
"variable_id": variable_id,
"method": method_id,
"response": response})
return lookup
#########################################################################
# This function reads the values to upload from a local user-provided #
# xls file and returns them. #
#########################################################################
def read_local_values(self, port):
book = xlrd.open_workbook(self.manual_upload_file)
sheets = book.sheets()
sheet0 = sheets[0]
nr = sheet0.nrows
for i in range(1, nr):
logger = sheet0.cell_value(i, 0)
#########################################################################
# Upload the data related to the sensor. #
# This function uses the HydroServer JSON API for uploading the data. #
# If being used for manual upload, it reads the data from a local #
# user-provided xls file. Otherwise, it reads the data from the dxd #
# file, converts the values, and calls the values function of the API #
# using HTTP POST request. #
# The site_id, variable_id, method_id , and source_id must be valid #
# ID's that already exist in the database. #
#########################################################################
def sensor_upload(self, site_id, site_code, variable_id, method_id, source_id, upload_file, port, sensor, resp, logger):
new_data = {
"user": self.HYDROSERVER_USER,
"password": <PASSWORD>,
"SiteID": site_id,
"VariableID": variable_id,
"MethodID": method_id,
"SourceID": source_id,
"values":[]
}
#reading the new data from the dxd file
if (self.manual_upload_file != None):
print (str(variable_id), str(site_code), str(u.manual_upload_file.name), str(port), str(self.old_timestamp), str(logger), str(self.xlsfile))
new_data['values'] = decagon.read_xls(variable_id, site_code, u.manual_upload_file.name, port, self.old_timestamp, logger, self.xlsfile)
else:
raw_data = decagon.read_dxd(upload_file, port)
#converting the data from raw data to actual values
nr = len(raw_data["dates"])
c = Converter.create(sensor)
for row in range(0, nr):
raw_time = raw_data["dates"][row]
raw_val = raw_data["vals"][row]
local_time = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(raw_time + 946684800))
local_time_obj = parse(local_time)
val = c.convert(resp, raw_val)
#only upload the values more recent than the old latest update
if self.old_timestamp != "none":
if local_time_obj > self.old_timestamp:
new_data["values"].append((local_time, val))
else:
print "Error: No timestamp given for latest update. Rerun with timestamp"
sys.exit()
#if there's no data, return
if len(new_data["values"]) <= 0:
if self.verbose:
print "No data to upload: " + str(new_data)
return
#the data is sent in the JSON format as the body of the request
payload = json.dumps(new_data)
print "payload " + str(payload)
url = self.HYDROSERVER_URL + 'values'
req = urllib2.Request(url)
req.add_header('Content-Type', 'application/json')
if self.no_upload:
print "No Upload option set, data will not be uploaded"
else:
#upload the data to the web and check for any error status codes
try:
response = urllib2.urlopen(req, payload)
status = json.load(response)
print status
except urllib2.HTTPError, e:
print e.code
print e.msg
print e.headers
print e.fp.read()
#this script reads the lookup-table and for each row, gets the logger-port-response-site-variable-method information
#this should include the SiteCode, SiteID, VariableID, MethodID
###################################################################
# Uploads the data for all sites from the sensor. #
###################################################################
def upload_data(self, sensor_name):
#get the sensor metadata:
#sensor, response, variable code, and method id
print sensor_name
sensor_metadata = self.get_sensor_metadata(sensor_name)
#open the lookup table
book = xlrd.open_workbook(self.xlsfile)
sheets = book.sheets()
sheet0 = sheets[0]
nr = sheet0.nrows
upload_file = "None"
for i in range(1, nr):
logger = sheet0.cell_value(i, 0)
site_code = sheet0.cell_value(i, 1)
port = int(sheet0.cell_value(i, 4))
sensor = sheet0.cell_value(i, 5)
#find the corresponding site ID
site_id = self.get_site_id(site_code)
if site_id is None:
if self.verbose:
print 'SiteID not found on server for SiteCode: ' + site_code
continue
#if automatically uploading, use dxd files
if self.manual_upload_file == None:
#find the right DXD file for the logger of this sensor
upload_file = '%s%s.dxd' % (self.dxd_folder, logger)
if not self.is_file(upload_file):
continue
else:
upload_file = str(self.manual_upload_file)
if str(logger) not in upload_file:
continue
if self.verbose:
print "sensor metadata" + str( sensor_metadata)
#start the uploading
if sensor == sensor_name:
for md in sensor_metadata:
self.sensor_upload(site_id=site_id,
site_code=site_code,
variable_id=md["variable_id"],
method_id=md["method"],
source_id=1,
resp=md["response"],
upload_file=upload_file,
port=port,
sensor=sensor,
logger=logger)
def get_timestamp(updater, namespace):
#this method either sets the timestamp based on one passed in by the user or
#loops through a set of 10 sites and variables, gets the latest dates from each from the database
#and compares them to find the most recent to avoid uploading old values again. An argument can be passed
#in to specify not to use dxd files, but to upload from xls files instead.
#uses optional arg as another date if present
if namespace.latest_upload_time != None:
old_time_str = namespace.latest_upload_time
try:
temp_timestamp = parse(old_time_str)
updater.old_timestamp = temp_timestamp
return
except Exception:
print "Timestamp given is invalid"
if namespace.no_date != None:
return
from suds.client import Client
client = Client("http://worldwater.byu.edu/app/index.php/rushvalley/services/cuahsi_1_1.asmx?WSDL")
#maps site IDs to variable codes
sites_dict = {
'Ru2BNM5': 'GS3_Moisture_Temp',
'Ru2BNMA': 'SRS_Nr_NDVI_sixthirty',
'Ru4BNC5': 'GS3_Moisture_VWC',
'Ru1BNC5': 'GS3_Moisture_EC',
'Ru1BMNA': 'SRS_Nr_NDVI_eighthundred',
'Ru1BNCA': 'SRS_Nr_NDVI',
'Ru3BMMA': 'SRS_Nr_NDVI_eighthundred',
'Ru5BMMA': 'SRS_Nr_NDVI',
'Ru2BMPA': 'SRS_Nr_NDVI_sixthirty',
'Ru5BMM5': 'GS3_Moisture_EC'
}
for site in sites_dict:
variable = "rushvalley:" + sites_dict[site]
site = "rushvalley:" + site
obj = client.service.GetValuesObject(site, variable )
try:
inner_time = obj.timeSeries[0].values[0].value[-1]._dateTime
if updater.verbose:
print inner_time
if updater.old_timestamp == "none":
updater.old_timestamp = inner_time
elif inner_time > updater.old_timestamp:
updater.old_timestamp = inner_time
except Exception:
print "Failed to get timestamp from database for site " + site + " and variable " + variable
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = "Downloads data from Decagon server in .dxd files.\n" +
"Optionally accepts a timestamp argument, which it uses to ignore old values already uploaded. " +
"Additionally, the optional xls argument causes script to upload from a local xls file instead "
"of a downloaded dxd file (for offline logger manual upload).")
parser.add_argument("-lt", "--latest_upload_time", help="String of latest upload time. Ex. '2015-06-15 00:00:00'")
parser.add_argument("-xls", "--xls_file", help="Name of xls file to use instead of .dxd files, for manual upload.",
type=argparse.FileType('r'))
parser.add_argument("-v", "--verbose", action='store_true', help="Print out messages while running")
parser.add_argument("-nu", "--no_upload", action='store_true', help="Don't upload data, used for testing")
parser.add_argument("-nd", "--no_date", action='store_true', help="Don't use a date flag as an upload constraint")
namespace = parser.parse_args()
#If xls file passed in, dxd files not used
if namespace.xls_file == None:
#STEP 1: Get the data from DECAGON data loggers
decagon.download_all('passwords.csv','dxd')
#STEP 2: Upload the data to HydroServer
u = Updater()
u.verbose = namespace.verbose
u.no_upload = namespace.no_upload
get_timestamp(u, namespace)
u.manual_upload_file = namespace.xls_file;
u.dxd_folder = 'dxd/'
u.upload_data('SRS')
u.upload_data('PYR')
u.upload_data('MPS-6')
u.upload_data('GS3')
if u.manual_upload_file != None:
u.upload_data('5TE')
u.upload_data('5TM')
```
#### File: jirikadlec2/rushvalley/run_upload.py
```python
__author__ = 'Jiri'
import xlrd
import time
import json
import urllib2
import dxd
from converter import Converter
class Updater(object):
def __init__(self):
self.hydroserver_user = 'HIS_admin'
self.hydroserver_password = 'password'
self.dxd_folder = 'dxd'
self.HYDROSERVER_URL = 'http://worldwater.byu.edu/app/index.php/rushvalley/services/api/values/'
# checks is the file is a file or not
def is_file(self, filename):
try:
with open(filename):
pass
return True
except IOError as e:
print "Unable to open file %s" % filename
return None
#reads the association between the sensor, response, variable code, and method id
def sensor_lookup(self, sensor):
book = xlrd.open_workbook(xlsfile)
sheets = book.sheets()
sheet1 = sheets[1]
nr = sheet1.nrows
lookup = []
for i in range(1, nr):
sensor_code = sheet1.cell_value(i, 0)
if sensor_code != sensor:
continue
variable_code = sheet1.cell_value(i, 2)
method_id = int(sheet1.cell_value(i, 3))
response = sheet1.cell_value(i, 4)
lookup.append({"sensor": sensor_code,
"variable": variable_code,
"method": method_id,
"response": response})
return lookup
#now upload the data related to the sensor
def sensor_upload(self, site, var, meth, dxd_file, port, sensor, resp):
raw_data = dxd.read_dxd(dxd_file, port)
new_data = {
"user": self.hydroserver_user,
"password": self.hydroserver_password,
"sitecode": site,
"variablecode": var, #need to read from lookup table
"methodid": meth,
"sourceid": 1,
"values":[]
}
nr = len(raw_data["dates"])
c = Converter.create(sensor)
for row in range(0, nr):
raw_time = raw_data["dates"][row]
raw_val = raw_data["vals"][row]
local_time = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(raw_time + 946684800))
val = c.convert(resp, raw_val)
new_data["values"].append((local_time, val))
postdata = json.dumps(new_data)
print postdata
url = 'http://worldwater.byu.edu/interactive/rushvalley/services/index.php/upload/values'
req = urllib2.Request(url)
req.add_header('Content-Type', 'application/json')
#uploading to the web
try:
response = urllib2.urlopen(req, postdata)
status = json.load(response)
print status
except urllib2.HTTPError, e:
print e.code
print e.msg
print e.headers
print e.fp.read()
#this script reads the lookup-table and for each row, gets the logger-port-response-site-variable-method information
#this should include the SiteCode, SiteID, VariableID, MethodID
def read_lookup(self, xlsfile, out_dir, sensor_name):
#get the sensor metadata
sensor_metadata = self.sensor_lookup(xlsfile, sensor_name)
book = xlrd.open_workbook(xlsfile)
sheets = book.sheets()
sheet0 = sheets[0]
nr = sheet0.nrows
for i in range(1, nr):
logger = sheet0.cell_value(i, 0)
site_code = sheet0.cell_value(i, 1)
port = int(sheet0.cell_value(i, 4))
sensor = sheet0.cell_value(i, 5)
dxd_file = '%s%s.dxd' % (self.dxd_folder, logger)
if not self.is_file(dxd_file):
continue
if sensor == sensor_name:
for md in sensor_metadata:
self.sensor_upload(site=site_code,
var=md["variable"],
meth=md["method"],
resp=md["response"],
dxd_file=dxd_file,
port=port,
sensor=sensor)
if __name__ == '__main__':
xlsfile = "C:\\jiri\\Dropbox\\BYU\\hydroinformatics\\project\\01-LookupTable.xlsx"
u = Updater()
u.dxd_folder = 'C:\\jiri\\Dropbox\\BYU\\hydroinformatics\\project\\dxd\\'
out_dir = 'C:\\jiri\\Dropbox\\BYU\\hydroinformatics\\project\\sql'
u.read_lookup(xlsfile, out_dir, 'GS3')
u.read_lookup(xlsfile, out_dir, 'SRS')
u.read_lookup(xlsfile, out_dir, 'PYR')
``` |
{
"source": "jirikadlec2/tethys-list-apps",
"score": 3
} |
#### File: tethysapp/api/app.py
```python
from tethys_sdk.base import TethysAppBase, url_map_maker
class Api(TethysAppBase):
"""
Tethys app class for api.
"""
name = 'api'
index = 'api:home'
icon = 'api/images/api_logo1.png'
package = 'api'
root_url = 'api'
color = '#9b59b6'
def url_maps(self):
"""
Add controllers
"""
UrlMap = url_map_maker(self.root_url)
url_maps = (UrlMap(name='home',
url='api',
controller='api.controllers.home'),
UrlMap(name='list_apps_help',
url='list_apps_help',
controller='api.controllers.list_apps_help'),
UrlMap(name='list_apps',
url='list_apps',
controller='api.controllers.list_apps'),
)
return url_maps
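# The controllers referenced above ('api.controllers.home', etc.) live in
# api/controllers.py. A minimal hedged sketch of one of them (Tethys
# controllers are ordinary Django views; the template path is assumed):
#
# from django.shortcuts import render
#
# def home(request):
#     return render(request, 'api/home.html', {})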
``` |
{
"source": "JiriKalvoda/slama.dev",
"score": 3
} |
#### File: assets/programovani-je-hra/10.2.1.py
```python
import sys
from PyQt5.QtWidgets import * # import EVERYTHING from PyQt5.QtWidgets
class MyWindow(QWidget):
def __init__(self):
super().__init__() # magic
# create the label (caption) and the buttons
self.label = QLabel("Délka: 0")
self.lineEdit = QLineEdit()
self.lineEdit.textChanged.connect(self.zmena_textu)
self.button = QPushButton("Stiskni mě, lol.")
self.button.clicked.connect(self.stisknute_tlacitko)
self.button2 = QPushButton("Nemačkej mě, né lol.")
self.button2.clicked.connect(self.stisknute_tlacitko2)
# create a vertical layout that stacks things under one another
layout = QVBoxLayout()
layout.addWidget(self.label)
layout.addWidget(self.lineEdit)
layout.addWidget(self.button)
layout.addWidget(self.button2)
# apply the layout
self.setLayout(layout)
# show the widget
self.show()
def zmena_textu(self):
self.label.setText("Délka: " + str(len(self.lineEdit.text())))
def stisknute_tlacitko(self):
self.lineEdit.setText(self.lineEdit.text() + "lol")
def stisknute_tlacitko2(self):
x = self.lineEdit.text()
x = x[:-1]
self.lineEdit.setText(x)
# magic
app = QApplication(sys.argv)
ex = MyWindow()
sys.exit(app.exec_())
```
#### File: assets/programovani-je-hra/4.1.1-1.py
```python
x = 0
y = 0
def setup():
size(400, 400)
def draw():
global x, y
background(255)
rect(x, y, 30, 30)
def keyPressed():
global x, y
if key == 'w':
y -= 10
if key == 'a':
x -= 10
if key == 's':
y += 10
if key == 'd':
x += 10
```
#### File: assets/programovani-je-hra/6.1.1.py
```python
class Clovek:
def __init__(self, jmeno, vek):
self.jmeno = jmeno
self.vek = vek
def vyrost(self):
self.vek += 1
def vyrost_o(self, o):
self.vek += o
```
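A short usage sketch of the `Clovek` class above; the name and numbers are illustrative only:
```python
karel = Clovek("Karel", 30)
karel.vyrost()       # vek -> 31
karel.vyrost_o(5)    # vek -> 36
print(karel.jmeno, karel.vek)
```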
#### File: assets/programovani-je-hra/6.1.2.py
```python
class Clovek:
def __init__(self, jmeno, vek, vaha):
self.jmeno = jmeno
self.vek = vek
self.vaha = vaha
def vyrost(self):
self.vek += 1
def ztloustni(self):
self.vaha += 1
def zhubni(self):
self.vaha -= 1
```
#### File: assets/programovani-je-hra/6.1.3-1.py
```python
ptaci = []
class Ptak:
def __init__(self, x, y, dx, dy):
self.x = x
self.y = y
self.dx = dx
self.dy = dy
def posun(self):
self.x += self.dx
self.y += self.dy
if self.x < 0:
self.dx *= -1
if self.y < 0:
self.dy *= -1
if self.y > height:
self.dy *= -1
if self.x > width:
self.dx *= -1
def vykresli(self):
ellipse(self.x, self.y, 10, 10)
def setup():
size(400, 400)
def draw():
background(255)
i = 0
while i < len(ptaci):
ptak = ptaci[i]
ptak.posun()
ptak.vykresli()
i += 1
def mousePressed():
global ptaci
ptak = Ptak(mouseX, mouseY, 3, 3)
ptaci.append(ptak)
```
#### File: assets/programovani-je-hra/7.1.2.py
```python
import random
class Flappy:
def __init__(self, x, y, r, dx, dy, gravitace, velikost_skoku):
self.x = x
self.y = y
self.dx = dx
self.dy = dy
self.r = r
self.gravitace = gravitace
self.velikost_skoku = velikost_skoku
def vykresli(self):
ellipse(self.x, self.y, self.r, self.r)
def pohni_se(self):
self.x += self.dx
self.y += self.dy
self.dy -= self.gravitace
if self.y < 0:
self.y = height
if self.y > height:
self.y = 0
def skoc(self):
self.dy = self.velikost_skoku
class Prekazka:
def __init__(self, x, w, h, m):
self.x = x
self.w = w
self.h = h
self.m = m
def vykresli(self):
# top part
rect(self.x, 0, self.w, self.h)
# bottom part
rect(self.x, self.h + self.m, self.w, height - self.h - self.m)
def kolize_ctverec(self, flappy, x, y, w, h):
"""Vrátí True, pokud flappy koliduje se čtvercem určeným bodem (x, y) a jeho šířkou/výškou."""
x = abs(flappy.x - (x + w / 2))
y = abs(flappy.y - (y + h / 2))
r = flappy.r / 2
dx, dy = x - w / 2, y - h / 2
return (
not (x > w / 2 + r or y > h / 2 + r)
and (x < w / 2 or y < h / 2)
or sqrt(dx ** 2 + dy ** 2) < r
)
def kolize(self, flappy):
# the rectangles we test against
rectangles = [
[self.x, 0, self.w, self.h],
[self.x, self.h + self.m, self.w, height - self.h - self.m]
]
# tests whether flappy collides with one of the rectangles
# * is magic that "unpacks" the list into the function call
# self(*pole) is the same as self(pole[0], pole[1], ...)
return self.kolize_ctverec(flappy, *rectangles[0]) or self.kolize_ctverec(flappy, *rectangles[1])
def keyPressed():
global flappy
if key == ' ':
flappy.skoc()
def mousePressed():
global flappy
if mouseButton == LEFT:
flappy.skoc()
# TODO: on the mouse wheel
flappy = Flappy(100, 100, 15, 2 , 0, -0.2, -4)
prekazky = []
def setup():
global prekazky
size(400, 400)
pocet_prekazek = 30
vzdalenost_prekazek = 200
sirka_prekazek = 30
mezera_prekazek = 80 # TODO: shrinking
for i in range(pocet_prekazek):
x = (i + 1) * vzdalenost_prekazek
h = random.randint(50, 250)
prekazky.append(Prekazka(x, sirka_prekazek, h, mezera_prekazek - i/2.0))
def draw():
global flappy, prekazky
translate(-flappy.x + width / 2, 0)
background(255)
flappy.vykresli()
flappy.pohni_se()
for i in range(len(prekazky)):
if prekazky[i].kolize(flappy):
fill(0)
else:
fill(255)
prekazky[i].vykresli()
# better graphics
# - colors
# - background
# - rotating the flappy bird as it falls => a better flappy bird!
# upgrades
# - randomly flipping gravity
# - speeding up
```
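The `kolize_ctverec` method above is the usual circle-rectangle overlap test. Below is a self-contained sketch of the same idea outside Processing; `math.sqrt` stands in for Processing's built-in `sqrt`, and the function name and test values are made up for illustration:
```python
import math

def circle_rect_collision(cx, cy, r, rx, ry, rw, rh):
    # distance from the circle centre to the rectangle centre, per axis
    dist_x = abs(cx - (rx + rw / 2.0))
    dist_y = abs(cy - (ry + rh / 2.0))
    # too far away on either axis -> no collision possible
    if dist_x > rw / 2.0 + r or dist_y > rh / 2.0 + r:
        return False
    # close enough on one axis -> the circle overlaps an edge
    if dist_x <= rw / 2.0 or dist_y <= rh / 2.0:
        return True
    # otherwise only the rectangle corner can still be inside the circle
    corner = math.sqrt((dist_x - rw / 2.0) ** 2 + (dist_y - rh / 2.0) ** 2)
    return corner < r

print(circle_rect_collision(100, 100, 7.5, 95, 0, 30, 80))  # False: circle is below the piece
print(circle_rect_collision(100, 70, 7.5, 95, 0, 30, 80))   # True: circle centre is inside it
```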
#### File: assets/programovani-je-hra/9.1.1.1.py
```python
def fibonacci_iterative(n):
a = 0
b = 1
if n == 0:
return 0
if n == 1:
return 1
for i in range(0, n - 1):
c = a + b
a = b
b = c
return b
for i in range(0, 20):
print(fibonacci_iterative(i))
```
#### File: slama.dev/_plugins/climbing.py
```python
import os
import shutil
from PIL import Image
from random import choice
from string import ascii_lowercase, digits
from typing import *
from subprocess import Popen, PIPE
import yaml
def get_random_string(length: int):
"""Generate a random string."""
result = ""
for _ in range(length):
result += choice(ascii_lowercase)
return result
os.chdir(os.path.dirname(os.path.realpath(__file__)))
CLIMBING_FOLDER = "../climbing/"
CLIMBING_VIDEOS_FOLDER = os.path.join(CLIMBING_FOLDER, "videos")
CLIMBING_INFO = os.path.join(CLIMBING_FOLDER, "videos.yaml")
config = {}
if os.path.exists(CLIMBING_INFO):
with open(CLIMBING_INFO, "r") as f:
config = yaml.safe_load(f.read())
zones = [1, 2, 3, 4, 5, 6, 7, 8, 9, "all"]
colors = list(reversed(["red", "salmon", "blue", "yellow"]))
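# The videos.yaml entries drive everything below. Judging by the keys this
# script reads, a freshly added entry presumably looks something like this
# (values are illustrative only):
#
# IMG_1234.mp4:
#   new: true
#   color: blue
#   date: 2021-05-01
#   zone: 3
#   trim: "00:05,00:35"   # optional: start,end passed to ffmpeg -ss/-to
#   encode: true          # optional: re-encode with libx264
#   rotate: left          # optional: 'left', anything else rotates the other way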
# rename new files
for name in list(config):
old_path = os.path.join(CLIMBING_VIDEOS_FOLDER, name)
if "new" in config[name]:
print(f"parsing new climb '{name}'.", flush=True)
# assign a new (random) name
random_string = get_random_string(8)
new_name = (
"smichoff-"
+ ("" if "color" not in config[name] else (config[name]["color"] + "-"))
+ (
""
if "date" not in config[name]
else config[name]["date"].strftime("%Y-%m-%d") + "-"
)
+ random_string
+ ".mp4"
)
# not new anymore
del config[name]["new"]
config[new_name] = config[name]
del config[name]
name = new_name
tmp_path = os.path.join(CLIMBING_VIDEOS_FOLDER, "tmp_" + name)
# trim the video
if "trim" in config[name]:
start, end = config[name]["trim"].split(",")
command = [
"ffmpeg",
"-y",
"-i",
old_path,
"-ss",
start,
"-to",
end,
tmp_path,
]
_ = Popen(command, stdout=PIPE, stderr=PIPE).communicate()
os.remove(old_path)
os.rename(tmp_path, old_path)
del config[name]["trim"]
# encode/rotate the video
if "encode" in config[name] or "rotate" in config[name]:
encode_config = (
[]
if "encode" not in config[name]
else ["-vcodec", "libx264", "-crf", "28"]
)
rotate_config = (
[]
if "rotate" not in config[name]
else [
"-vf",
f'transpose={"2" if config[name]["rotate"] == "left" else "1"}',
]
)
command = (
["ffmpeg", "-y", "-i", old_path]
+ encode_config
+ rotate_config
+ [tmp_path]
)
_ = Popen(command, stdout=PIPE, stderr=PIPE).communicate()
os.remove(old_path)
os.rename(tmp_path, old_path)
if "encode" in config[name]:
del config[name]["encode"]
if "rotate" in config[name]:
del config[name]["rotate"]
new_path = os.path.join(CLIMBING_VIDEOS_FOLDER, name)
os.rename(old_path, new_path)
# generate a poster, if it doesn't exist
poster_jpeg = os.path.join(
CLIMBING_VIDEOS_FOLDER, os.path.splitext(name)[0] + ".jpeg"
)
poster_webp = os.path.join(
CLIMBING_VIDEOS_FOLDER, os.path.splitext(name)[0] + ".webp"
)
if not os.path.exists(poster_webp):
print(f"generating a poster for '{name}'.", flush=True)
_ = Popen(
[
"ffmpeg",
"-i",
new_path,
"-vf",
"select=eq(n\,0)",
"-vframes",
"1",
"-y",
poster_jpeg,
],
stdout=PIPE,
stderr=PIPE,
).communicate()
im = Image.open(poster_jpeg)
width, height = im.size
new_width = 720
new_height = int(height * (new_width / width))
_ = Popen(
[
"cwebp",
"-q",
"5",
"-resize",
str(new_width),
str(new_height),
poster_jpeg,
"-o",
poster_webp,
],
stdout=PIPE,
stderr=PIPE,
).communicate()
_ = Popen(["rm", poster_jpeg], stdout=PIPE, stderr=PIPE).communicate()
# sort -- gets sorted by date, due to the name of the climbing files
config_list = [(file, config[file]) for file in config]
# clear old zones
zones_folder = os.path.join(CLIMBING_FOLDER, "zones")
if os.path.exists(zones_folder):
shutil.rmtree(zones_folder)
os.mkdir(zones_folder)
for zone in zones:
zone_file_name = os.path.join(CLIMBING_FOLDER, "zones", str(zone) + ".md")
zone_file_content = f"""---
title: Climbing
layout: default
css: climbing
no-heading: True
---
"""
added = False
total = 0
for color in colors:
videos_in_color = []
for name in config:
if (
"color" in config[name]
and config[name]["color"] == color
and (config[name]["zone"] == zone or zone == "all")
):
videos_in_color.append(name)
videos_in_color = list(reversed(sorted(videos_in_color)))
if len(videos_in_color) != 0:
zone_file_content += "\n\n{: .center}\n### " + color.capitalize()
for i, name in enumerate(videos_in_color):
style_class = "climbing-"
# either an odd number of videos, or even and not the last -- no center
if len(videos_in_color) % 2 == 0 or (
len(videos_in_color) % 2 == 1 and i != len(videos_in_color) - 1
):
style_class += "left" if i % 2 == 0 else "right"
else:
style_class += "center"
zone_file_content += f"""
<figure class='climbing-video climbing-{color} {style_class}'>
<video alt="Me climbing a {color} boulder at Smíchoff, {config[name]["date"].strftime("%d/%m/%Y")}." poster="/climbing/videos/{os.path.splitext(name)[0] + '.webp'}" controls preload="none"><source src='/climbing/videos/{name}' type='video/mp4'></video>
<figcaption class='figcaption-margin'>{config[name]["date"].strftime("%d / %m / %Y")}</figcaption>
</figure>"""
added = True
total += 1
# THIS IS SUPER IMPORTANT!
# I don't know how to make it so that floats don't intersect the footer,
# but putting anything below fixes it
if len(videos_in_color) != 0:
zone_file_content += f"<p class='right'>Total climbs: {total}</p>"
# if there are no videos of the climb, add a text about it
if not added:
zone_file_content += (
"{: .center}\nI haven't recorded any climbs in this zone yet, sorry!"
)
with open(zone_file_name, "w") as f:
f.write(zone_file_content)
print("zones generated.", flush=True)
with open(CLIMBING_INFO, "w") as f:
f.write(yaml.dump(config))
# warn about leftover videos and posters that are not on the list, for good measure
files = os.listdir(CLIMBING_VIDEOS_FOLDER)
for file in files:
if file.lower().endswith(".mp4") and file not in config:
print(f"WARNING: leftover file {file}.", flush=True)
if file.lower().endswith(".jpeg") and file[:-5] not in config:
print(f"WARNING: leftover poster {file}.", flush=True)
for file in config:
if file not in files:
print(f"WARNING: file {file} not found.", flush=True)
``` |
{
"source": "jirikraus/cuml",
"score": 3
} |
#### File: cuML/knn/knn_wrapper.py
```python
import faiss
import numpy as np
import pandas as pd
import cudf
class KNNparams:
def __init__(self, n_gpus):
self.n_gpus = n_gpus
class KNN:
"""
Create a DataFrame, fill it with data, and compute KNN:
.. code-block:: python
import cudf
from cuml import KNN
import numpy as np
np_float = np.array([
[1,2,3], # Point 1
[1,2,4], # Point 2
[2,2,4] # Point 3
]).astype('float32')
gdf_float = cudf.DataFrame()
gdf_float['dim_0'] = np.ascontiguousarray(np_float[:,0])
gdf_float['dim_1'] = np.ascontiguousarray(np_float[:,1])
gdf_float['dim_2'] = np.ascontiguousarray(np_float[:,2])
print('n_samples = 3, n_dims = 3')
print(gdf_float)
knn_float = KNN(n_gpus=1)
knn_float.fit(gdf_float)
Distance,Index = knn_float.query(gdf_float,k=3) #get 3 nearest neighbors
print(Index)
print(Distance)
Output:
.. code-block:: python
n_samples = 3, n_dims = 3
dim_0 dim_1 dim_2
0 1.0 2.0 3.0
1 1.0 2.0 4.0
2 2.0 2.0 4.0
# Index:
index_neighbor_0 index_neighbor_1 index_neighbor_2
0 0 1 2
1 1 0 2
2 2 1 0
# Distance:
distance_neighbor_0 distance_neighbor_1 distance_neighbor_2
0 0.0 1.0 2.0
1 0.0 1.0 1.0
2 0.0 1.0 2.0
For an additional example see `the KNN notebook <https://github.com/rapidsai/cuml/blob/master/python/notebooks/knn_demo.ipynb>`_. For additional docs, see `scikitlearn's KDtree <http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KDTree.html#sklearn.neighbors.KDTree>`_.
"""
def __init__(self, n_gpus=-1):
# -1 means using all gpus
self.params = KNNparams(n_gpus)
def fit(self, X):
if (isinstance(X, cudf.DataFrame)):
X = self.to_nparray(X)
assert len(X.shape) == 2, 'data should be two dimensional'
n_dims = X.shape[1]
cpu_index = faiss.IndexFlatL2(n_dims)
# build a flat (CPU) index
if self.params.n_gpus == 1:
res = faiss.StandardGpuResources()
# use a single GPU
# make it a flat GPU index
gpu_index = faiss.index_cpu_to_gpu(res, 0, cpu_index)
else:
gpu_index = faiss.index_cpu_to_all_gpus(cpu_index,
ngpu=self.params.n_gpus)
gpu_index.add(X)
self.gpu_index = gpu_index
def query(self, X, k):
X = self.to_nparray(X)
D, I = self.gpu_index.search(X, k)
D = self.to_cudf(D, col='distance')
I = self.to_cudf(I, col='index')
return D, I
def to_nparray(self, x):
if isinstance(x, cudf.DataFrame):
x = x.to_pandas()
return np.ascontiguousarray(x)
def to_cudf(self, df, col=''):
# convert pandas dataframe to cudf dataframe
if isinstance(df,np.ndarray):
df = pd.DataFrame({'%s_neighbor_%d'%(col, i): df[:, i] for i in range(df.shape[1])})
pdf = cudf.DataFrame.from_pandas(df)
return pdf
```
#### File: cuML/test/test_tsvd.py
```python
import pytest
from cuml import TruncatedSVD as cuTSVD
from sklearn.decomposition import TruncatedSVD as skTSVD
from test_utils import array_equal
import cudf
import numpy as np
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('input_type', ['dataframe', 'ndarray'])
def test_tsvd_fit(datatype, input_type):
X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]],
dtype=datatype)
sktsvd = skTSVD(n_components=1)
sktsvd.fit(X)
cutsvd = cuTSVD(n_components=1)
if input_type == 'dataframe':
gdf = cudf.DataFrame()
gdf['0'] = np.asarray([-1, -2, -3, 1, 2, 3], dtype=datatype)
gdf['1'] = np.asarray([-1, -1, -2, 1, 1, 2], dtype=datatype)
cutsvd.fit(gdf)
else:
cutsvd.fit(X)
for attr in ['singular_values_', 'components_',
'explained_variance_ratio_']:
with_sign = False if attr in ['components_'] else True
assert array_equal(getattr(cutsvd, attr), getattr(sktsvd, attr),
0.4, with_sign=with_sign)
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('input_type', ['dataframe', 'ndarray'])
def test_tsvd_fit_transform(datatype, input_type):
X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]],
dtype=datatype)
skpca = skTSVD(n_components=1)
Xsktsvd = skpca.fit_transform(X)
cutsvd = cuTSVD(n_components=1)
if input_type == 'dataframe':
gdf = cudf.DataFrame()
gdf['0'] = np.asarray([-1, -2, -3, 1, 2, 3], dtype=datatype)
gdf['1'] = np.asarray([-1, -1, -2, 1, 1, 2], dtype=datatype)
Xcutsvd = cutsvd.fit_transform(gdf)
else:
Xcutsvd = cutsvd.fit_transform(X)
assert array_equal(Xcutsvd, Xsktsvd, 1e-3, with_sign=True)
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('input_type', ['dataframe', 'ndarray'])
def test_tsvd_inverse_transform(datatype, input_type):
gdf = cudf.DataFrame()
gdf['0'] = np.asarray([-1, -2, -3, 1, 2, 3], dtype=datatype)
gdf['1'] = np.asarray([-1, -1, -2, 1, 1, 2], dtype=datatype)
cutsvd = cuTSVD(n_components=1)
if input_type == 'dataframe':
Xcutsvd = cutsvd.fit_transform(gdf)
else:
X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]],
dtype=datatype)
Xcutsvd = cutsvd.fit_transform(X)
input_gdf = cutsvd.inverse_transform(Xcutsvd)
assert array_equal(input_gdf, gdf, 0.4, with_sign=True)
``` |
{
"source": "JiriKr/django-migrate-sql",
"score": 2
} |
#### File: JiriKr/django-migrate-sql/setup.py
```python
import re
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
class Tox(TestCommand):
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import tox
import shlex
args = self.tox_args
if args:
args = shlex.split(self.tox_args)
errno = tox.cmdline(args=args)
sys.exit(errno)
def get_version(package):
"""
Get migrate_sql version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
with open('README.rst') as readme_file:
readme = readme_file.read()
VERSION = get_version('migrate_sql')
setup(
name='django-migrate-sql-deux',
version=VERSION,
description='Migration support for raw SQL in Django',
long_description=readme,
author='Festicket',
author_email='<EMAIL>',
packages=find_packages(),
package_dir={'migrate_sql': 'migrate_sql'},
license='BSD',
zip_safe=False,
url='https://github.com/festicket/django-migrate-sql',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
],
tests_require=['tox'],
cmdclass={'test': Tox},
install_requires=[],
)
```
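The regular expression in `get_version` simply pulls the quoted version string out of the package's `__init__.py`; a quick illustration with made-up module content:
```python
import re

init_py = "__version__ = '0.3.0'\n"
version = re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
print(version)  # 0.3.0
```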
#### File: tests/test_app/test_utils.py
```python
from __future__ import unicode_literals
from django.test import TestCase
from migrate_sql.autodetector import is_sql_equal
class SQLComparisonTestCase(TestCase):
"""
Tests comparison algorithm for two SQL item contents.
"""
def test_flat(self):
self.assertTrue(is_sql_equal('SELECT 1', 'SELECT 1'))
self.assertFalse(is_sql_equal('SELECT 1', 'SELECT 2'))
def test_nested(self):
self.assertTrue(is_sql_equal(['SELECT 1', 'SELECT 2'], ['SELECT 1', 'SELECT 2']))
self.assertFalse(is_sql_equal(['SELECT 1', 'SELECT 2'], ['SELECT 1', 'SELECT 3']))
def test_nested_with_params(self):
self.assertTrue(is_sql_equal([('SELECT %s', [1]), ('SELECT %s', [2])],
[('SELECT %s', [1]), ('SELECT %s', [2])]))
self.assertFalse(is_sql_equal([('SELECT %s', [1]), ('SELECT %s', [2])],
[('SELECT %s', [1]), ('SELECT %s', [3])]))
def test_mixed_with_params(self):
self.assertFalse(is_sql_equal([('SELECT %s', [1]), ('SELECT %s', [2])],
['SELECT 1', ('SELECT %s', [2])]))
self.assertFalse(is_sql_equal(['SELECT 1', ('SELECT %s', [2])],
['SELECT 1', ('SELECT %s', [3])]))
def test_mixed_nesting(self):
self.assertTrue(is_sql_equal('SELECT 1', ['SELECT 1']))
self.assertFalse(is_sql_equal('SELECT 1', [('SELECT %s', [1])]))
``` |
{
"source": "jirikrepl/pyradio",
"score": 2
} |
#### File: pyradio/pyradio/browser.py
```python
import curses
try:
from dns import resolver
except ImportError:
pass
from copy import deepcopy
import random
import json
import collections
from operator import itemgetter
try:
import requests
except ImportError:
pass
import threading
import logging
from .player import info_dict_to_list
from .cjkwrap import cjklen, PY3
from .countries import countries
from .simple_curses_widgets import SimpleCursesLineEdit, SimpleCursesHorizontalPushButtons, SimpleCursesWidgetColumns, SimpleCursesCheckBox
import locale
locale.setlocale(locale.LC_ALL, '') # set your locale
logger = logging.getLogger(__name__)
def country_from_server(a_server):
if a_server:
country = a_server.split('.')[0]
up = country[:-1].upper()
if up in countries.keys():
return countries[up]
else:
return country
else:
return None
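# Example: for a server such as 'de1.api.radio-browser.info' the first label
# is 'de1', its trailing character is dropped to get 'DE', and the countries
# dict is expected to map that ISO code to a readable name (e.g. 'Germany').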
def capitalize_comma_separated_string(a_string):
sp = a_string.split(',')
for i, n in enumerate(sp):
sp[i] = n.strip().capitalize()
return ', '.join(sp)
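# Example: 'english,german ' -> 'English, German'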
class PyRadioStationsBrowser(object):
''' A base class to get results from online radio directory services.
Actual implementations should be subclasses of this one.
'''
BASE_URL = ''
TITLE = ''
_parent = _outer_parent = None
_raw_stations = []
_last_search = None
_internal_header_height = 0
_url_timeout = 3
_search_timeout = 3
_vote_callback = None
_sort = _sort_win = None
# Normally the outer body (holding box, header, internal header) is
# 2 chars wider than the internal body (holding the stations)
# This property value is half the difference (normally 2 / 2 = 1)
# Used to chgat the columns' separators in internal body
# Check if the cursor is divided as required and adjust
_outer_internal_body_diff = 2
_outer_internal_body_half_diff = 1
def __init__(self,
config,
config_encoding,
session=None,
search=None,
pyradio_info=None,
search_return_function=None,
message_function=None):
''' Initialize the station's browser.
It should return a valid search result (for example,
the www.radio-browser.info implementation returns 100 stations
sorted by number of votes).
Parameters
----------
search
Search parameters to be used instead of the default.
'''
pass
@property
def parent(self):
return self._parent
@parent.setter
def parent(self, val):
self._parent = val
if self._sort:
self._sort._parent = val
@property
def outer_parent(self):
return self._outer_parent
@outer_parent.setter
def outer_parent(self, val):
self._outer_parent = val
if self._sort_win:
self._sort_win._parent = val
@property
def outer_internal_body_half_diff(self):
return self._outer_internal_body_half_diff
@outer_internal_body_half_diff.setter
def outer_internal_body_half_diff(self, value):
raise ValueError('property is read only')
@property
def internal_header_height(self):
return self._internal_header_height
@internal_header_height.setter
def internal_header_height(self, value):
raise ValueError('property is read only')
@property
def title(self):
return self.TITLE
@title.setter
def title(self, value):
self.TITLE = value
@property
def vote_callback(self):
return self._vote_callback
@vote_callback.setter
def vote_callback(self, val):
self._vote_callback = val
def stations(self, playlist_format=1):
return []
def url(self, id_in_list):
''' Return a station's real/playable url
It only has to be implemented when have_to_retrieve_url is True
Parameters
----------
id_in_list
id in list of stations (0..len-1)
Returns
-------
Real/playable url or '' if failed (string)
'''
return ''
def set_played(self, id_in_list, played):
''' Record whether a station has been played.
Parameters
----------
id_in_list
id in list of stations (0..len-1)
played
True or False
'''
pass
def search(self, go_back_in_history=True):
return []
def set_encoding(self, id_in_list, new_encoding):
return
def format_station_line(self, id_in_list, pad, width):
return ''
def click(self, a_station):
pass
def vote(self, a_station):
pass
class RadioBrowserInfo(PyRadioStationsBrowser):
BASE_URL = 'api.radio-browser.info'
TITLE = 'Radio Browser '
_headers = {'User-Agent': 'PyRadio/dev',
'Content-Type': 'application/json'}
_raw_stations = []
# the output format to use based on window width
# Default value: -1
# Possible values: 0..5
# Look at format_station_line() for info
_output_format = -1
_info_len = []
_info_name_len = 0
_raw_stations = []
_internal_header_height = 1
_search_history = []
_search_history_index = -1
_columns_width = {
'votes': 7,
'clickcount': 7,
'bitrate': 7,
'country': 18,
'language': 15,
'state': 18,
'tags': 20,
'codec': 5
}
_server_selection_window = None
_dns_info = None
search_by = _old_search_by = None
keyboard_handler = None
def __init__(self,
config,
config_encoding,
session=None,
search=None,
pyradio_info=None,
search_return_function=None,
message_function=None):
'''
When first_search is True, it means that we are opening
the browser. If empty result is returned by the first
browser search, we show an empty stations' list.
if it is False and an empty result is returned by the first
browser search, which means we are already in the browser's
search screen, we just display the 'no result message'.
All of this is done at radio.py
'''
self.first_search = True
self._cnf = config
if session:
self._session = session
else:
self._session = requests.Session()
self._pyradio_info = pyradio_info.strip()
if self._pyradio_info:
self._headers['User-Agent'] = self._pyradio_info.replace(' ', '/')
self._config_encoding = config_encoding
self._message_function = message_function
self._search_return_function = search_return_function
def initialize(self):
self._dns_info = RadioBrowserInfoDns()
self._server = self._dns_info.give_me_a_server_url()
if logger.isEnabledFor(logging.INFO):
logger.info('random server is ' + self._server)
if self._server:
self._get_title()
self._search_history.append({
'type': 'topvote',
'term': '100',
'post_data': None,
})
self._search_history.append({
'type': 'bytagexact',
'term': 'big band',
'post_data': {'order': 'votes', 'reverse': 'true'},
})
self._search_history.append({
'type': 'search',
'term': '',
'post_data': {'name': 'jaz'},
})
self._search_history_index = 0
return True
return False
@property
def server(self):
return self._server
@property
def add_to_title(self):
return self._server.split('.')[0]
def _get_title(self):
self.TITLE = 'Radio Browser ({})'.format(country_from_server(self._server))
def stations(self, playlist_format=1):
''' Return stations' list (in PyRadio playlist format)
Parameters
----------
playlist_format
0: station name, url
1: station name, url, encoding (default)
2: station name, url, encoding, browser flag
'''
ret = []
for n in self._raw_stations:
if playlist_format == 0:
ret.append([n['name'], n['url']])
elif playlist_format == 1:
enc = '' if n['encoding'] == self._config_encoding else n['encoding']
ret.append([n['name'], n['url'], enc])
else:
enc = '' if n['encoding'] == self._config_encoding else n['encoding']
ret.append([n['name'], n['url'], enc, ''])
return ret
def url(self, id_in_list):
''' Get a station's url using resolved_url
Parameters
----------
id_in_list
id in list of stations (0..len-1)
Returns
-------
url or '' if failed
'''
if self._raw_stations:
if id_in_list < len(self._raw_stations):
if self._raw_stations[id_in_list]['url_resolved']:
return self._raw_stations[id_in_list]['url_resolved']
else:
return self._raw_stations[id_in_list]['url']
return ''
def click(self, a_station):
def do_click(a_station_uuid):
url = 'http://' + self._server + '/json/url/' + a_station_uuid
try:
r = self._session.get(url=url, headers=self._headers, timeout=(self._search_timeout, 2 * self._search_timeout))
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Station click result: "{}"'.format(r.text))
except:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Station click failed...')
threading.Thread(target=do_click, args=(self._raw_stations[a_station]['stationuuid'], )).start()
def vote(self, a_station):
url = 'http://' + self._server + '/json/vote/' + self._raw_stations[a_station]['stationuuid']
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Voting for: {}'.format(self._raw_stations[a_station]))
logger.debug('Voting url: ' + url)
try:
r = self._session.get(url=url, headers=self._headers, timeout=(self._search_timeout, 2 * self._search_timeout))
message = json.loads(r.text)
self.vote_result = self._raw_stations[a_station]['name'], message['message'][0].upper() + message['message'][1:]
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Voting result: "{}"'.format(message))
except:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Station voting failed...')
self.vote_result = self._raw_stations[a_station]['name'], 'Voting for station failed'
if self._vote_callback:
self._vote_callback()
def get_info_string(self, a_station, max_width=60):
guide = [
('Name', 'name'),
('URL', 'url'),
('Resolved URL', 'url_resolved'),
('Website', 'homepage'),
('Tags', 'tags'),
('Votes', 'votes'),
('Clicks', 'clickcount'),
('Country', 'country'),
('State', 'state'),
('Language', 'language'),
('Bitrate', 'bitrate'),
('Codec', 'codec')
]
if self._raw_stations[a_station]['url'] == self._raw_stations[a_station]['url_resolved']:
guide.pop(2)
info = collections.OrderedDict()
for n in guide:
info[n[0]] = str(self._raw_stations[a_station][n[1]])
if n[1] == 'bitrate':
info[n[0]] += ' kb/s'
a_list = []
fix_highlight = []
a_list = info_dict_to_list(info, fix_highlight, max_width)
ret = '|' + '\n|'.join(a_list)
# logger.error('DE \n\n{}\n\n'.format(ret))
sp = ret.split('\n')
wrong_wrap = -1
for i, n in enumerate(sp):
# logger.exception('DE {0}: "{1}"'.format(i, n))
if wrong_wrap == i:
sp[i] = n.replace('|', '')
sp[i-1] += sp[i].replace('_', '')
sp[i] = '*' + sp[i]
wrong_wrap = -1
else:
if ': ' not in n:
sp[i] = n[1:]
if n[-1] == ':':
''' wrong wrapping! '''
wrong_wrap = i + 1
sp[i] += '|'
if sp[i][-1] != ' ':
sp[i] += ' '
if sp[i][0] != '|':
sp[i] = '|' + sp[i]
for i, n in enumerate(sp):
if n[0] == '*':
sp.pop(i)
ret = '\n'.join(sp).replace(': |', ':| ').replace(': ', ':| ')
# logger.error('DE \n\n{}\n\n'.format(ret))
return ret, ''
def search(self, go_back_in_history=True):
''' Search for stations with parameters.
Result is limited to 100 stations by default (use the
'limit' parameter to change it).
Parameters
----------
data
A dictionary containing the fields described at
http://www.radio-browser.info/webservice/#Advanced_station_search
Returns
-------
self._raw_stations
A dictionary with a subset of returned station data.
Its format is:
name : station name
id : station id
url : station url
resolved_url : station resolved_url
tags : station tags
bitrate : station bitrate
hls : HLS status
votes : station votes
clickcount : station clicks
country : station country
state : station state
language : station language
codec : station codec
encoding : station encoding ('' means utf-8)
'''
if self._message_function:
self._message_function()
self.search_by = self._old_search_by = None
self._get_search_elements(
self._search_history[self._search_history_index]
)
self._old_search_by = self.search_by
self._sort = None
url = self._format_url(self._search_history[self._search_history_index])
post_data = {}
if self._search_history[self._search_history_index]['post_data']:
post_data = deepcopy(self._search_history[self._search_history_index]['post_data'])
self._output_format = -1
if self._search_type > 0:
if 'limit' not in post_data.keys():
post_data['limit'] = 100
if 'hidebroken' not in post_data.keys():
post_data['hidebroken'] = True
if logger.isEnabledFor(logging.DEBUG):
logger.debug(' == history = {}'.format(self._search_history[self._search_history_index]))
logger.debug(' == url = "{}"'.format(url))
logger.debug(' == headers = "{}"'.format(self._headers))
logger.debug(' == post_data = "{}"'.format(post_data))
''' keep server results here '''
new_raw_stations = []
try:
r = self._session.get(url=url, headers=self._headers, params=post_data, timeout=(self._search_timeout, 2 * self._search_timeout))
r.raise_for_status()
new_raw_stations = self._extract_data(json.loads(r.text))
# logger.error('DE \n\n{}'.format(new_raw_stations))
ret = True, len(new_raw_stations), go_back_in_history
except requests.exceptions.RequestException as e:
if logger.isEnabledFor(logging.ERROR):
logger.error(e)
self._raw_stations = []
ret = False, 0, go_back_in_history
''' use server result '''
if len(new_raw_stations) > 0:
self._raw_stations = new_raw_stations[:]
if self._search_return_function:
self._search_return_function(ret)
def _get_search_elements(self, a_search):
'''
get "by search" and "reverse"
values from a search dict.
To be used with the sort function
'''
logger.error('DE search in function is "{}"'.format(a_search))
a_term = a_search['term']
p_data = a_search['post_data']
self.search_by = None
self.reverse = False
if a_search['post_data']:
if 'order' in a_search['post_data'].keys():
self.search_by = a_search['post_data']['order']
if 'reverse' in a_search['post_data']:
self.reverse = True if a_search['post_data']['reverse'] == 'true' else False
logger.error('DE search by was "{}"'.format(self.search_by))
if self.search_by is None:
a_type = a_search['type']
if a_type == 'byname':
self.search_by = 'name'
elif a_type == 'topvote':
self.search_by = 'votes'
logger.error('DE search by is votes')
elif a_type == 'clickcount':
self.search_by = 'clickcount'
elif a_type == 'bitrate':
self.search_by = 'bitrate'
elif a_type == 'codec':
self.search_by = 'codec'
elif a_type == 'country':
self.search_by = 'country'
elif a_type == 'state':
self.search_by = 'state'
elif a_type == 'language':
self.search_by = 'language'
elif a_type == 'tags':
self.search_by = 'tags'
if self.search_by is None:
if p_data:
if 'name' in p_data.keys():
self.search_by = 'name'
logger.error('DE search by is name (default)')
if self.search_by is None:
self.search_by = 'name'
logger.error('DE search by is name (default)')
logger.error('DE search by is "{}"'.format(self.search_by))
def get_next(self, search_term, start=0, stop=None):
if search_term:
for n in range(start, len(self._raw_stations)):
if self._search_in_station(search_term, n):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('forward search term "{0}" found at {1}'.format(search_term, n))
return n
""" if not found start from list top """
for n in range(0, start):
if self._search_in_station(search_term, n):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('forward search term "{0}" found at {1}'.format(search_term, n))
return n
""" if not found return None """
if logger.isEnabledFor(logging.DEBUG):
logger.debug('forward search term "{}" not found'.format(search_term))
return None
else:
return None
def get_previous(self, search_term, start=0, stop=None):
if search_term:
for n in range(start, -1, -1):
if self._search_in_station(search_term, n):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('backward search term "{0}" found at {1}'.format(search_term, n))
return n
""" if not found start from list end """
for n in range(len(self._raw_stations) - 1, start, -1):
if self._search_in_station(search_term, n):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('backward search term "{0}" found at {1}'.format(search_term, n))
return n
""" if not found return None """
if logger.isEnabledFor(logging.DEBUG):
logger.debug('backward search term "{}" not found'.format(search_term))
return None
else:
return None
def _search_in_station(self, a_search_term, a_station):
guide = (
'name',
'country',
'codec',
'tags',
'bitrate',
'language'
)
for n in guide:
source = self._raw_stations[a_station][n]
if isinstance(source, int):
''' this is one of the numerical data '''
source = str(source)
if a_search_term.lower() in source.lower():
return True
return False
def _format_url(self, a_search):
if a_search['type'] in ('topvote',
'topclick',
'lastclick',
'lastchange',
'changed',
'improvable',
'broken',
):
url = 'http://{0}{1}'.format(
self._server,
'/json/stations/{}'.format(a_search['type'])
)
if a_search['term'] not in ('', '0'):
url += '/{}'.format(a_search['term'])
self._search_type = 0
elif a_search['type'] in ('byuuid',
'byname',
'bynameexact',
'bycodec',
'bycodecexact',
'bycountry',
'bycountryexact',
'bycountrycodeexact',
'bystate',
'bystateexact',
'bylanguage',
'bylanguageexact',
'bytag',
'bytagexact',
):
url = 'http://{0}{1}/{2}'.format(
self._server,
'/json/stations/{}'.format(a_search['type']),
a_search['term']
)
self._search_type = 1
elif a_search['type'] == 'search':
url = 'http://{0}{1}'.format(
self._server,
'/json/stations/search'
)
self._search_type = 2
return url
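# Illustrative URLs built above (the server name is whatever initialize() picked):
# 'topvote' with term '100' -> http://<server>/json/stations/topvote/100
# 'bytagexact' with term 'big band' -> http://<server>/json/stations/bytagexact/big band
# 'search' -> http://<server>/json/stations/search (terms go into post_data)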
def format_empty_line(self, width):
if self._output_format == 0:
return -1, ' '
info = (
(),
('bitrate', ),
('votes', 'bitrate'),
('votes', 'clickcount', 'bitrate'),
('votes', 'clickcount', 'bitrate', 'country'),
('votes', 'clickcount', 'bitrate', 'country', 'language'),
('votes', 'clickcount', 'bitrate', 'country', 'state', 'language'),
('votes', 'clickcount', 'bitrate', 'codec', 'country', 'state', 'language', 'tags')
)
out = ['', '']
i_out = []
for i, n in enumerate(info[self._output_format]):
i_out.append(u'│' + ' ' * self._columns_width[n])
out[1] = ''.join(i_out)
name_width = width-len(out[1])
out[0] = ' ' * name_width
if PY3:
return -1, '{0}{1}'.format(*out)
else:
return -1 , '{0}{1}'.format(
out[0],
out[1].encode('utf-8', 'replace')
)
def format_station_line(self, id_in_list, pad, width):
''' Create a formated line for a station
Parameters
----------
id_in_list
id in list of stations (0..len-1)
pad
length of NUMBER
width
final length of created string
Returns
-------
A string of the following format:
NUMBER. STATION NAME [INFO]
where:
NUMBER
Right padded counter (id_in_list + 1)
STATION NAME
Left padded station name
INFO
Station info. Depending on window width, it can be:
[Votes: XX, Clicks: XX, Bitrate: XXXkb, Country: XXXX],
[Votes: XX, Clicks: XX, Bitrate: XXXkb],
[XXXX v, XXXX, cl, XXXkb],
[Bitrate: XXXkb], or
empty string
'''
info = (u'',
u' {0}{1}kb',
u' {0}{1}│{2}kb',
u' {0}{1}│{2}│{3}kb',
u' {0}{1}│{2}│{3}kb│{4}',
u' {0}{1}│{2}│{3}kb│{4}│{5}',
u' {0}{1}│{2}│{3}kb│{4}│{5}│{6}',
u' {0}{1}│{2}│{3}kb│{4}│{5}│{6}│{7}│{8}',
)
self._get_output_format(width)
# logger.error('DE self._output_format = {}'.format(self._output_format))
out = ['{0}. '.format(str(id_in_list + 1).rjust(pad)), '', '']
# format info field
pl = u'├' if self._raw_stations[id_in_list]['played'] else u'│'
if self._output_format == 7:
# widest format: votes, clicks, bitrate, codec, country, state, language, tags
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2],
self._raw_stations[id_in_list]['codec'].rjust(self._columns_width['codec'])[:self._columns_width['codec']],
self._raw_stations[id_in_list]['country'].ljust(self._columns_width['country'])[:self._columns_width['country']],
self._raw_stations[id_in_list]['state'].ljust(self._columns_width['state'])[:self._columns_width['state']],
self._raw_stations[id_in_list]['language'].ljust(self._columns_width['language'])[:self._columns_width['language']],
self._raw_stations[id_in_list]['tags'].ljust(self._columns_width['tags'])[:self._columns_width['tags']]
)
if self._output_format == 6:
# full with state
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2],
self._raw_stations[id_in_list]['country'].ljust(self._columns_width['country'])[:self._columns_width['country']],
self._raw_stations[id_in_list]['state'].ljust(self._columns_width['state'])[:self._columns_width['state']],
self._raw_stations[id_in_list]['language'].ljust(self._columns_width['language'])[:self._columns_width['language']]
)
if self._output_format == 5:
# full, without state
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2],
self._raw_stations[id_in_list]['country'].ljust(self._columns_width['country'])[:self._columns_width['country']],
self._raw_stations[id_in_list]['language'].ljust(self._columns_width['language'])[:self._columns_width['language']]
)
if self._output_format == 4:
# full or condensed info
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2],
self._raw_stations[id_in_list]['country'].ljust(self._columns_width['country'])[:self._columns_width['country']]
)
elif self._output_format == 2:
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2]
)
elif self._output_format == 3:
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2]
)
elif self._output_format == 1:
# Bitrate only
out[2] = info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2]
)
name_width = width-len(out[0])-len(out[2])
out[1] = self._fix_cjk_string_width(self._raw_stations[id_in_list]['name'].ljust(name_width)[:name_width], name_width)
if PY3:
# if pl == '╞':
# out[2] += '╡'
return (self._raw_stations[id_in_list]['played'],
'{0}{1}{2}'.format(*out))
else:
# on python 2, strings are already in utf-8
return (self._raw_stations[id_in_list]['played'],
'{0}{1}{2}'.format(
out[0].encode('utf-8', 'replace'),
out[1].encode('utf-8', 'replace'),
out[2].encode('utf-8', 'replace')))
def set_encoding(self, id_in_list, new_encoding):
if id_in_list < len(self._raw_stations):
self._raw_stations[id_in_list]['encoding'] = new_encoding
if logger.isEnabledFor(logging.DEBUG):
logger.debug('New encoding set to "{0}" for station "{1}"'.format(new_encoding, self._raw_stations[id_in_list]['name']))
def _fix_cjk_string_width(self, a_string, width):
while cjklen(a_string) > width:
a_string = a_string[:-1]
return a_string
def _extract_data(self, a_search_result):
ret = []
self._max_len = [0, 0]
if a_search_result:
for n in a_search_result:
ret.append({'name': n['name'].replace(',', ' ')})
ret[-1]['stationuuid'] = n['stationuuid']
ret[-1]['url'] = n['url']
ret[-1]['url_resolved'] = n['url_resolved']
ret[-1]['url'] = n['url']
ret[-1]['played'] = False
ret[-1]['hls'] = n['hls']
ret[-1]['stationuuid'] = n['stationuuid']
ret[-1]['countrycode'] = n['countrycode']
ret[-1]['country'] = n['country']
ret[-1]['codec'] = n['codec']
ret[-1]['state'] = n['state']
ret[-1]['tags'] = n['tags'].replace(',', ', ')
ret[-1]['homepage'] = n['homepage']
if isinstance(n['clickcount'], int):
# old API
ret[-1]['votes'] = n['votes']
ret[-1]['clickcount'] = n['clickcount']
ret[-1]['bitrate'] = n['bitrate']
else:
# new API
ret[-1]['votes'] = int(n['votes'])
ret[-1]['clickcount'] = int(n['clickcount'])
ret[-1]['bitrate'] = int(n['bitrate'])
ret[-1]['language'] = capitalize_comma_separated_string(n['language'])
ret[-1]['encoding'] = ''
self._get_max_len(ret[-1]['votes'],
ret[-1]['clickcount'])
return ret
def _get_max_len(self, votes, clicks):
''' Calculate the maximum length of the numeric data (votes, clicks)
Parameters
----------
votes
Number of station's votes (string)
clicks
Number of station's clicks (string)
Returns
-------
self._max_len
A list [max votes length,
max clickcount length]
'''
numeric_data = (votes, clicks)
# logger.error('DE numeric_data = {}'.format(numeric_data))
min_data = (6, 7)
for i, x in enumerate(numeric_data):
n = str(x)
if len(n) > self._max_len[i]:
self._max_len[i] = len(n) if len(n) > min_data[i] else min_data[i]
def _get_output_format(self, width):
''' Return output format based on window width
Parameters
----------
width
Window width
Returns
-------
self._output_format
A number 0..5
'''
# now_width = get_terminal_size().columns - 2
if width <= 50:
self._output_format = 0
elif width < 57:
self._output_format = 1
elif width < 65:
self._output_format = 2
elif width < 80:
self._output_format = 3
elif width < 95:
self._output_format = 4
elif width < 120:
self._output_format = 5
elif width < 145:
self._output_format = 6
else:
self._output_format = 7
def _populate_columns_separators(self, a_tuple, width):
ret = []
for i, n in enumerate(a_tuple):
if i == 0:
# logger.error('DE {0} - {1} = {2} - {3}'.format(width, self._columns_width[n], width-self._columns_width[n]-2, n))
ret.append(width - self._columns_width[n] - 2)
else:
# logger.error('{0} -1 - {1} = {2} - {3}'.format(ret[-1], self._columns_width[n], ret[-1] - 1 - self._columns_width[n], n))
ret.append(ret[-1] - 1 - self._columns_width[n])
ret.reverse()
# logger.error('DE \n\nret = {}\n\n'.format(ret))
return ret
def get_columns_separators(self,
width,
use_old_output_format=False,
adjust=0,
adjust_for_body=False,
adjust_for_header=False,
):
''' Calculates columns separators for a given width
based on self._output_format.
Parameters
----------
width
Window width to use for the calculation.
use_old_output_format
If True, do not calculate self._output_format
(use what's already calculated).
adjust
Delete adjust from the output
Example:
if the output was [55, 67]
and adjust was 2
the output would become [53, 65]
adjust_for_header
Delete self._outer_internal_body_diff from output
This is to be used for displaying the internal header
adjust_for_body
Delete self._outer_internal_body_half_diff from output
This is to be used for changing columns' separators
color, when displaying body lines (stations' lines).
IMPORTANT
---------
The adjust* parameters are mutually exclusive, which means
that ONLY ONE of them can be used at any given call to the
function. If you fail to comply, the result will be wrong.
Returns
-------
A list containing the column separator positions (e.g. [55, 65]).
'''
columns_separotors = []
if not use_old_output_format:
self._get_output_format(width)
if self._output_format == 0:
columns_separotors = []
elif self._output_format == 1:
columns_separotors = [width - self._columns_width['bitrate']]
elif self._output_format == 2:
columns_separotors = self._populate_columns_separators(('bitrate', 'votes'), width)
elif self._output_format == 3:
columns_separotors = self._populate_columns_separators(('bitrate', 'clickcount', 'votes'), width)
elif self._output_format == 4:
columns_separotors = self._populate_columns_separators(('country', 'bitrate', 'clickcount', 'votes'), width)
elif self._output_format == 5:
columns_separotors = self._populate_columns_separators(('language', 'country', 'bitrate', 'clickcount', 'votes'), width)
elif self._output_format == 6:
columns_separotors = self._populate_columns_separators(('language', 'state', 'country', 'bitrate', 'clickcount', 'votes'), width)
else:
columns_separotors = self._populate_columns_separators(('tags', 'language', 'state', 'country', 'codec', 'bitrate', 'clickcount', 'votes'), width)
if adjust_for_header and self._output_format == 1:
columns_separotors[0] -= self._outer_internal_body_diff
if adjust_for_body:
if self._output_format == 1:
columns_separotors[0] -= self._outer_internal_body_half_diff
else:
for n in range(0, len(columns_separotors)):
columns_separotors[n] += self._outer_internal_body_half_diff
if adjust > 0:
for n in range(0, len(columns_separotors)):
columns_separotors[n] -= adjust
return columns_separotors
def get_internal_header(self, pad, width):
guide = {
'name': 'Name',
'votes': ' Votes',
'clickcount': ' Clicks',
'bitrate': 'Bitrate',
'codec': 'Codec',
'country': 'Country',
'state': 'State',
'language': 'Language',
'tags': 'Tags',
}
# logger.error('DE search = {}'.format(self._search_history[self._search_history_index]))
reset_search_elements = False
if self.search_by is None:
reset_search_elements = True
self._get_search_elements(self._search_history[self._search_history_index])
# logger.error('DE search by = {}'.format(self.search_by))
columns = ((),
('Bitrate', ),
(' Votes', 'Bitrate'),
(' Votes', ' Clicks', 'Bitrate'),
(' Votes', ' Clicks', 'Bitrate', 'Country'),
(' Votes', ' Clicks', 'Bitrate', 'Country', 'Language'),
(' Votes', ' Clicks', 'Bitrate', 'Country', 'State', 'Language'),
(' Votes', ' Clicks', 'Bitrate', 'Codec', 'Country', 'State', 'Language', 'Tags')
)
columns_separotors = self.get_columns_separators(width, use_old_output_format=True)
if self._output_format == 1:
columns_separotors[0] -= 2
title = '#'.rjust(pad), ' Name '
if reset_search_elements:
self._old_search_by = self.search_by
# logger.error('DE search by = {}'.format(self.search_by))
# logger.error('DE Looking for: "{}"'.format(guide[self.search_by]))
# logger.error('DE Names = {}'.format(columns[self._output_format]))
if guide[self.search_by] == 'Name':
highlight = -2
else:
try:
highlight = columns[self._output_format].index(guide[self.search_by])
except:
highlight = -1
return highlight, ((title, columns_separotors, columns[self._output_format]), )
def select_servers(self):
if self._server_selection_window is None:
self._server_selection_window = RadioBrowserInfoServersSelect(
self.parent, self._dns_info.server_urls, self._server)
else:
self._server_selection_window.set_parent(self.parent)
self.keyboard_handler = self._server_selection_window
self._server_selection_window.show()
def sort(self):
'''
Create and show the Sort window
'''
if self._sort is None:
self._get_search_elements(
self._search_history[self._search_history_index]
)
self._sort = RadioBrowserInfoSort(
parent=self.parent,
search_by=self.search_by
)
self.keyboard_handler = self._sort
self._sort.show()
def keypress(self, char):
''' RadioBrowserInfo keypress
Returns:
-1: Cancel
0: Done, result is in ....
1: Continue
'''
ret = self.keyboard_handler.keypress(char)
if ret == 0:
if self.keyboard_handler == self._sort:
self.search_by = self._sort.search_by
if self.search_by == self._old_search_by:
self.reverse = not self.reverse
else:
self.reverse = False
if self.search_by != self._old_search_by:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('search by = "{}"'.format(self.search_by))
''' set reverse to True for numerical values
when changing sort type
'''
if self.search_by in (
'votes',
'clickcount',
'bitrate'
):
self.reverse = True
if logger.isEnabledFor(logging.DEBUG):
logger.debug('setting reverse to {}'.format(self.reverse))
self._raw_stations = sorted(self._raw_stations, key=itemgetter(self.search_by), reverse=self.reverse)
self._old_search_by = self.search_by
elif self.keyboard_handler == self._server_selection_window:
if ret == 0:
self._server = self._server_selection_window.server
if logger.isEnabledFor(logging.INFO):
logger.info('user selected server is ' + self._server)
self._get_title()
return ret
def do_search(self, parent=None, init=False):
if init:
self._sort_win = RadioBrowserInfoSearchWindow(
parent=parent,
init=init
)
self.keyboard_handler = self._sort_win
self._sort_win.show()
class RadioBrowserInfoSearchWindow(object):
# search_by_items = (
# 'No search term',
# 'Name',
# 'Tag',
# 'Country',
# 'State',
# 'Codec',
# 'Language',
# )
search_by_items = (
'Votes',
'Clicks',
'Recent click',
'Recently changed'
)
sort_by_items = (
'No sorting',
'Random',
'Name',
'Tag',
'Country',
'State',
'Language',
'Votes',
'Clicks',
'Bitrate',
'Codec',
)
def __init__(self,
parent,
init=False
):
self._parent = parent
self._init = init
self._too_small = False
self._focus = 0
self._win = None
self.maxY = self.maxX = 0
self.TITLE = ' Radio Browser Search '
''' we have two columns;
this is the width of each of them
'''
self._half_width = 0
self._widgets = [ None, None, None, None, None, None, None, None]
@property
def focus(self):
return self._focus
@focus.setter
def focus(self, val):
if val in range(0, len(self._widgets)):
self._focus = val
else:
if val < 0:
self._focus = len(self._widgets) - 1
else:
self._focus = 0
self.show()
def show(self):
pY, pX = self._parent.getmaxyx()
logger.error('DE pY = {}, pX = {}'.format(pY, pX))
self.Y, self.X = self._parent.getbegyx()
if self.maxY != pY or self.maxX != pX:
logger.error('DE --== SEARCH ==--')
pY, pX = self._parent.getmaxyx()
logger.error('DE pY = {}, pX = {}'.format(pY, pX))
self.maxY = pY
self.maxX = pX
self._win = self._parent
# self._win = curses.newwin(
# self.maxY, self.maxX,
# Y, X
# )
self._half_width = int((self.maxX -2 ) / 2) -3
self._win.bkgdset(' ', curses.color_pair(5))
# self._win.erase()
self._win.box()
self._win.addstr(0, int((self.maxX - len(self.TITLE)) / 2),
self.TITLE,
curses.color_pair(4))
self._win.refresh()
# self._erase_win(self.maxY, self.maxX, self.Y, self.X)
''' start displaying things '''
self._win.addstr(1, 2, 'Search for', curses.color_pair(5))
self._win.addstr(4, 2, 'Search by', curses.color_pair(5))
for i, n in enumerate(self._widgets):
if n is None:
if i == 0:
#self._widgets[2] = SimpleCursesCheckBox(
# 1, 2, 'Display by',
# curses.color_pair(9),
# curses.color_pair(4),
# curses.color_pair(5))
self._widgets[0] = SimpleCursesLineEdit(
parent=self._win,
width=-2,
begin_y=3,
begin_x=2,
boxed=False,
has_history=False,
caption='',
box_color=curses.color_pair(9),
caption_color=curses.color_pair(4),
edit_color=curses.color_pair(9),
cursor_color=curses.color_pair(8),
unfocused_color=curses.color_pair(5),
string_changed_handler='')
self._widgets[0].bracket = False
self._line_editor = self._widgets[0]
elif i == 1:
''' search by '''
self._widgets[i] = SimpleCursesWidgetColumns(
Y=5, X=3, window=self._win,
selection=0,
active=0,
items=self.search_by_items,
color=curses.color_pair(5),
color_active=curses.color_pair(4),
color_cursor_selection=curses.color_pair(6),
color_cursor_active=curses.color_pair(9),
margin=1,
max_width=self._half_width
)
elif i == 2:
''' search exact '''
self._widgets[2] = SimpleCursesCheckBox(
self._widgets[1].Y + self._widgets[1].height + 2, 2,
'Exact match',
curses.color_pair(9), curses.color_pair(4), curses.color_pair(5))
elif i == 3:
''' sort by '''
self._widgets[i] = SimpleCursesWidgetColumns(
Y=5, X=self.maxX - 1 - self._half_width,
max_width=self._half_width,
window=self._win,
selection=0,
active=0,
items=self.sort_by_items,
color=curses.color_pair(5),
color_active=curses.color_pair(4),
color_cursor_selection=curses.color_pair(6),
color_cursor_active=curses.color_pair(9),
margin=1
)
elif i == 4:
'''' sort ascending / descending '''
self._widgets[4] = SimpleCursesCheckBox(
self._widgets[3].Y + self._widgets[3].height + 1, self._widgets[3].X - 2 + self._widgets[3].margin,
'Sort descending',
curses.color_pair(9), curses.color_pair(4), curses.color_pair(5))
elif i == 5:
'''' limit results '''
self._widgets[5] = None
elif i == 6:
self._widgets[i] = None
''' add horizontal push buttons '''
self._h_buttons = SimpleCursesHorizontalPushButtons(
Y=5 + len(self.search_by_items) + 2,
captions=('OK', 'Cancel'),
color_focused=curses.color_pair(9),
color=curses.color_pair(4),
bracket_color=curses.color_pair(5),
parent=self._win)
#self._h_buttons.calculate_buttons_position()
self._widgets[6], self._widgets[7] = self._h_buttons.buttons
self._widgets[6]._focused = self._widgets[7].focused = False
else:
if i in (1, 3):
''' update lists' window '''
if i == 3:
self._widgets[3].X = self.maxX - 1 - self._half_width
self._widgets[i].window = self._win
self._widgets[i].max_width = self._half_width
self._win.addstr(
4,
self._widgets[3].X - 2 + self._widgets[3].margin,
'Sort by',
curses.color_pair(5)
)
self._win.refresh()
self._update_focus()
if not self._too_small:
self._line_editor.show(self._win, opening=False)
self._h_buttons.calculate_buttons_position()
for n in range(1, len(self._widgets)):
if self._widgets[n]:
if n in (2, 4):
if n == 2:
self._widgets[2].Y = self._widgets[1].Y + self._widgets[1].height + 2
else:
self._widgets[4].Y = self._widgets[3].Y + self._widgets[3].height + 1
self._widgets[4].X = self._widgets[3].X - 2 + self._widgets[3].margin
self._widgets[n].move()
# self._widgets[n].resize()
self._widgets[n].show()
self._win.refresh()
# self._refresh()
def _update_focus(self):
# use _focused here to avoid triggering
# widgets' refresh
for i, x in enumerate(self._widgets):
if x:
if self._focus == i:
x._focused = True
else:
x._focused = False
def keypress(self, char):
''' RadioBrowserInfoSearchWindow keypress
Returns
-------
-1 - Cancel
0 - do search
1 - Continue
2 - Display help
'''
if self._too_small:
return 1
if char == ord('?'):
return 2
if char in (
curses.KEY_EXIT, ord('q'), 27,
ord('h'), curses.KEY_LEFT
):
return -1
elif char in (
ord('l'), ord(' '), ord('\n'), ord('\r'),
curses.KEY_RIGHT, curses.KEY_ENTER
):
return 0
class RadioBrowserInfoData(object):
    ''' Read search parameters for the radio.browser.info service.
        Parameters are:
            tags, countries (and states), codecs, languages
'''
_data = {}
_connection_error = False
_lock = threading.Lock()
_stop_thread = False
_timeout = 3
    data_thread = None
    _pyradio_info = None
def __init__(self, url, timeout=3):
self._url = url
self._timeout = timeout
def start(self, force_update=False):
''' Start data acquisition thread '''
self.data_thread = threading.Thread(
target=self._get_all_data_thread,
args=(
self._lock, force_update, lambda: self._stop_thread,
self._update_data
)
)
self.data_thread.start()
def stop(self):
''' Stop (cancel) data acquisition thread '''
self._stop_thread = True
@property
def lock(self):
''' Return thread lock (read only)'''
return self._lock
@lock.setter
def lock(self, val):
raise ValueError('property is read only')
@property
def terminated(self):
''' Return True if thread is not alive (read only)
which means that data has been retrieved'''
if self.data_thread.is_alive():
return False
return True
@terminated.setter
def terminated(self, val):
raise ValueError('property is read only')
@property
def connection_error(self):
self._lock.acquire()
ret = self._connection_error
self._lock.release()
return ret
@connection_error.setter
def connection_error(self, val):
raise ValueError('property is read only')
@property
def tags(self):
self._lock.acquire()
ret = self._data['tags']
self._lock.release()
return ret
@tags.setter
def tags(self, val):
raise ValueError('property is read only')
@property
def codecs(self):
self._lock.acquire()
if 'codecs' in self._data:
ret = self._data['codecs']
else:
ret = {}
self._lock.release()
return ret
@codecs.setter
def codecs(self, val):
raise ValueError('property is read only')
@property
def countries(self):
self._lock.acquire()
ret = self._data['countries']
self._lock.release()
return ret
@countries.setter
def countries(self, val):
raise ValueError('property is read only')
@property
def languages(self):
self._lock.acquire()
ret = self._data['languages']
self._lock.release()
return ret
@languages.setter
def languages(self, val):
raise ValueError('property is read only')
def reset_all_data(self):
self._data = {}
self.start()
def _update_data(self, data, connection_error):
self._connection_error = connection_error
self._data = data
def _get_all_data_thread(self, lock, force_update, stop, callback): # noqa
def get_data(data):
ret = {}
json_data = []
connection_error, json_data = get_data_dict(data)
if connection_error:
return True, {}
if json_data:
for a_tag in json_data:
ret[a_tag['name']] = a_tag['stationcount']
return False, ret
def get_countries(stop):
ret = {}
connection_error, json_countrycodes = get_data_dict('countrycodes')
if connection_error:
return True, {}
from countries import countries
st = 'stationcount'
for n in json_countrycodes:
if n['name'] in countries.keys():
ret[countries[n['name']]] = {}
ret[countries[n['name']]]['code'] = n['name']
ret[countries[n['name']]]['stationcount'] = n[st]
ret[countries[n['name']]]['states'] = {}
connection_error, json_states = get_data_dict('states')
if connection_error:
return True, {}
for n in json_states:
if n['country'] in ret.keys():
ret[n['country']]['states'][n['name']] = n['stationcount']
return False, ret
def get_data_dict(data):
url = 'http://' + self._url + '/json/' + data
jdata = {'hidebroken': 'true'}
headers = {'user-agent': 'PyRadio/dev',
'encoding': 'application/json'}
if self._pyradio_info:
headers['user-agent'] = self._pyradio_info.replace(' ', '/')
try:
r = requests.get(url, headers=headers, json=jdata, timeout=self._timeout)
r.raise_for_status()
return False, json.loads(r.text)
# if r.status_code == 200:
# return False, json.loads(r.text)
# else:
# return True, []
except requests.exceptions.RequestException as e:
                if logger.isEnabledFor(logging.ERROR):
logger.error(e)
return True, []
my_data = {}
data_items = ['tags', 'countries', 'codecs', 'languages']
for an_item in data_items:
if an_item == 'countries':
ret, my_data['countries'] = get_countries(stop)
else:
ret, my_data[an_item] = get_data(an_item)
if stop():
if logger.isEnabledFor(logging.DEBUG):
logger.info('Asked to stop after working on "{}"...'.format(an_item))
self._terminated = True
return
lock.acquire()
callback(my_data, ret)
lock.release()
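# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the lifecycle implied by start() / terminated /
# connection_error / the data properties above; the server URL and the
# polling interval are assumptions made for illustration only.
def _example_radio_browser_data_usage():
    import time
    data = RadioBrowserInfoData('de1.api.radio-browser.info', timeout=3)
    data.start()                      # spawn the acquisition thread
    while not data.terminated:        # poll until the thread has finished
        time.sleep(0.5)
    if data.connection_error:         # properties acquire/release the lock
        return None
    return data.tags, data.countries, data.codecs, data.languages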
class RadioBrowserInfoDns(object):
    ''' Performs a query of the DNS SRV record of
        _api._tcp.radio-browser.info, which
        gives the list of server names directly
        without reverse DNS lookups '''
_urls = None
def __init__(self):
pass
@property
def server_urls(self):
''' Returns server urls in a tuple '''
if self._urls is None:
self._get_urls()
return tuple(self._urls) if self._urls is not None else None
@server_urls.setter
def server_urls(self, val):
return
def _get_urls(self):
self._urls = []
result = None
try:
result = resolver.query('_api._tcp.radio-browser.info', 'SRV')
        except:
            self._urls = None
            return
for n in result:
self._urls.append(str(n).split(' ')[-1][:-1])
def give_me_a_server_url(self):
''' Returns a random server '''
if self._urls is None:
self._get_urls()
if self._urls:
num = random.randint(0, len(self._urls) - 1)
return self._urls[num]
else:
return None
def servers(self):
''' server urls as generator '''
if self._urls is None:
self._get_urls()
for a_url in self._urls:
yield a_url
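# --- Hedged usage sketch (not part of the original module) ---
# Shows the two ways the SRV lookup above can be consumed: picking one
# random server or iterating over all of them through the generator.
def _example_radio_browser_dns_usage():
    dns = RadioBrowserInfoDns()
    one_server = dns.give_me_a_server_url()      # None if the DNS query failed
    all_servers = list(dns.servers()) if dns.server_urls else []
    return one_server, all_servers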
class RadioBrowserInfoSort(object):
TITLE = ' Sort by '
items = collections.OrderedDict({
'Name': 'name',
'Votes': 'votes',
'Clicks': 'clickcount',
'Bitrate': 'bitrate',
'Codec': 'codec',
'Country': 'country',
'State': 'state',
'Language': 'language',
'Tag': 'tags'
})
_too_small = False
def __init__(self, parent, search_by=None):
self._parent = parent
self.active = self.selection = 0
if search_by:
if search_by in self.items.values():
self.active = self.selection = self._value_to_index(search_by)
self.maxY = len(self.items) + 2
self.maxX = max(len(x) for x in self.items.keys()) + 4
if len(self.TITLE) + 4 > self.maxX:
self.maxX = len(self.TITLE) + 4
self._win = None
if search_by:
self.set_active_by_value(search_by)
def _value_to_index(self, val):
for i, n in enumerate(self.items.values()):
if val == n:
return i
return -1
def set_parent(self, parent):
self._parent = parent
self.show()
def set_active_by_value(self, a_string, set_selection=True):
for i, n in enumerate(self.items.values()):
if a_string == n:
if set_selection:
self.active = self.selection = i
else:
self.active = i
return
if set_selection:
self.active = self.selection = 0
else:
self.active = 0
def show(self):
self._too_small = False
pY, pX = self._parent.getmaxyx()
Y, X = self._parent.getbegyx()
if self.maxY > pY or self.maxX > pX -2:
self._too_small = True
msg = 'Window too small to display content!'
if pX < len(msg) + 2:
msg = 'Window too small!'
self._win = curses.newwin(
3, len(msg) + 2,
Y + int((pY - 3) / 2),
int((pX - len(msg)) / 2))
self._win.bkgdset(' ', curses.color_pair(3))
self._win.box()
try:
self._win.addstr( 1, 1, msg,
curses.color_pair(5))
except:
pass
self._win.refresh()
return
self._win = curses.newwin(
self.maxY, self.maxX,
Y + int((pY - self.maxY) / 2),
int((pX - self.maxX) / 2)
)
self._win.bkgdset(' ', curses.color_pair(3))
# self._win.erase()
self._win.box()
self._win.addstr(0, 1,
self.TITLE,
curses.color_pair(4))
self._refresh()
def _refresh(self):
for i, n in enumerate(self.items.keys()):
col = 5
if i == self.active == self.selection:
col = 9
elif i == self.selection:
col = 6
elif i == self.active:
col = 4
self._win.addstr(i + 1, 1,
' {}'.format(n.ljust(self.maxX - 3)),
curses.color_pair(col))
self._win.refresh()
def keypress(self, char):
''' RadioBrowserInfoSort keypress
Returns:
-1: Cancel
0: Done, result is in ....
1: Continue
'''
if self._too_small:
return 1
if char in (
curses.KEY_EXIT, ord('q'), 27,
ord('h'), curses.KEY_LEFT
):
return -1
elif char in (
ord('l'), ord(' '), ord('\n'), ord('\r'),
curses.KEY_RIGHT, curses.KEY_ENTER
):
for i, n in enumerate(self.items.keys()):
if i == self.selection:
self.search_by = self.items[n]
self.active = i
break
return 0
elif char in (ord('g'), curses.KEY_HOME):
self.selection = 0
self._refresh()
elif char in (ord('G'), curses.KEY_END):
self.selection = len(self.items) - 1
self._refresh()
elif char in (curses.KEY_PPAGE, ):
if self.selection == 0:
self.selection = len(self.items) - 1
else:
self.selection -= 5
if self.selection < 0:
self.selection = 0
self._refresh()
elif char in (curses.KEY_NPAGE, ):
if self.selection == len(self.items) - 1:
self.selection = 0
else:
self.selection += 5
if self.selection >= len(self.items):
self.selection = len(self.items) - 1
self._refresh()
elif char in (ord('k'), curses.KEY_UP):
self.selection -= 1
if self.selection < 0:
self.selection = len(self.items) - 1
self._refresh()
elif char in (ord('j'), curses.KEY_DOWN):
self.selection += 1
if self.selection == len(self.items):
self.selection = 0
self._refresh()
return 1
class RadioBrowserInfoServersSelect(object):
TITLE = ' Server Selection '
def __init__(self, parent, servers, current_server):
self._parent = parent
self.items = list(servers)
self.server = current_server
self.servers = RadioBrowserInfoServers(
parent, servers, current_server
)
self.maxY = self.servers.maxY + 2
self.maxX = self.servers.maxX + 2
def show(self):
self._too_small = False
pY, pX = self._parent.getmaxyx()
Y, X = self._parent.getbegyx()
if self.maxY > pY or self.maxX > pX -2:
self._too_small = True
msg = 'Window too small to display content!'
if pX < len(msg) + 2:
msg = 'Window too small!'
self._win = curses.newwin(
3, len(msg) + 2,
Y + int((pY - 3) / 2),
int((pX - len(msg)) / 2))
self._win.bkgdset(' ', curses.color_pair(3))
self._win.box()
try:
self._win.addstr( 1, 1, msg,
curses.color_pair(5))
except:
pass
self._win.refresh()
return
self._win = curses.newwin(
self.maxY, self.maxX,
Y + int((pY - self.maxY) / 2),
int((pX - self.maxX) / 2)
)
self._win.bkgdset(' ', curses.color_pair(3))
# self._win.erase()
self._win.box()
self._win.addstr(
0, int((self.maxX - len(self.TITLE)) / 2),
self.TITLE,
curses.color_pair(4)
)
self._win.refresh()
self.servers._parent = self._win
self.servers.show()
def set_parent(self, parent):
self._parent = parent
self.servers._parent = parent
def keypress(self, char):
''' RadioBrowserInfoServersSelect keypress
Returns:
-1: Cancel
0: Done, result is in ....
1: Continue
'''
ret = self.servers.keypress(char)
if ret == 2:
ret = 1
if ret == 0:
self.server = self.servers.server
return ret
class RadioBrowserInfoServers(object):
''' Display Radio Browser server
        This is supposed to be plugged into
another widget
'''
_too_small = False
def __init__(self, parent, servers, current_server):
self._parent = parent
self.items = list(servers)
self.server = current_server
s_max = 0
for i, n in enumerate(self.items):
if self.server == n:
self.selection = self.active = i
self.items[i] = ' ' + country_from_server(n) + ' ({}) '.format(n)
if len(self.items[i]) > s_max:
s_max = len(self.items[i])
self.items.sort()
for i, n in enumerate(self.items):
if len(self.items[i]) < s_max:
self.items[i] = self.items[i].replace('(', ' ' * (s_max - len(self.items[i])) + '(')
self.maxY = len(self.items)
self.maxX = len(self.items[0])
''' get selection and active server id '''
for i, n in enumerate(self.items):
if self.server in n:
self.active = self.selection = i
break
def show(self):
self._too_small = False
pY, pX = self._parent.getmaxyx()
Y, X = self._parent.getbegyx()
if self.maxY > pY or self.maxX > pX -2:
''' display nothing
let the parent do whatever
'''
self._too_small = True
else:
self._win = curses.newwin(
self.maxY, self.maxX,
Y + 1, X + 1
)
for i, n in enumerate(self.items):
col = 5
if i == self.active == self.selection:
col = 9
elif i == self.selection:
col = 6
elif i == self.active:
col = 4
try:
self._win.addstr(i, 0 , n, curses.color_pair(col))
except:
pass
self._win.refresh()
def keypress(self, char):
''' RadioBrowserInfoServers keypress
Returns:
-1: Cancel
0: Done, result is in ....
1: Continue
2: Show help
'''
if self._too_small:
return 1
if char in (
curses.KEY_EXIT, ord('q'), 27,
ord('h'), curses.KEY_LEFT
):
return -1
elif char in (
ord('l'), ord(' '), ord('\n'), ord('\r'),
curses.KEY_RIGHT, curses.KEY_ENTER
):
for i, n in enumerate(self.items):
if i == self.selection:
self.server = n.split('(')[1].replace(') ', '')
self.active = i
break
return 0
elif char in (ord('?'), ):
return 2
elif char in (ord('g'), curses.KEY_HOME):
self.selection = 0
self.show()
elif char in (ord('G'), curses.KEY_END):
self.selection = len(self.items) - 1
self.show()
elif char in (curses.KEY_PPAGE, ):
if self.selection == 0:
self.selection = len(self.items) - 1
else:
self.selection -= 5
if self.selection < 0:
self.selection = 0
self.show()
elif char in (curses.KEY_NPAGE, ):
if self.selection == len(self.items) - 1:
self.selection = 0
else:
self.selection += 5
if self.selection >= len(self.items):
self.selection = len(self.items) - 1
self.show()
elif char in (ord('k'), curses.KEY_UP):
self.selection -= 1
if self.selection < 0:
self.selection = len(self.items) - 1
self.show()
elif char in (ord('j'), curses.KEY_DOWN):
self.selection += 1
if self.selection == len(self.items):
self.selection = 0
self.show()
return 1
def probeBrowsers(a_browser_url):
    parts = a_browser_url.split('/')
    base_url = parts[2] if len(parts) > 2 else ''
    if not base_url:
        base_url = a_browser_url
implementedBrowsers = PyRadioStationsBrowser.__subclasses__()
if logger.isEnabledFor(logging.INFO):
logger.info('Implemented browsers: {}'.format(implementedBrowsers))
for a_browser in implementedBrowsers:
if a_browser.BASE_URL == base_url:
if logger.isEnabledFor(logging.INFO):
logger.info('Supported browser: {}'.format(a_browser))
return a_browser
if logger.isEnabledFor(logging.INFO):
logger.info('No supported browser found for: ' + a_browser_url)
return None
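# --- Hedged usage sketch (not part of the original module) ---
# probeBrowsers() only selects the PyRadioStationsBrowser subclass whose
# BASE_URL matches the host part of the given URL; instantiating the
# returned class depends on its constructor, which is not shown here.
def _example_probe_browsers(a_url='https://api.radio-browser.info'):
    browser_class = probeBrowsers(a_url)
    if browser_class is None:
        return None                   # no supported browser for this URL
    return browser_class              # caller instantiates it as appropriate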
```
#### File: pyradio/pyradio/del_vlc_log.py
```python
import os
from sys import platform
def RemoveWinVlcLogFiles(*args):
''' Removes all VLC log files within pyradio config
directory on Windows.
Files currently in use will not be deleted.
'''
if platform.startswith('win'):
adir = args[0]
# print('config = "{}"'.format(adir))
files = [file for file in os.listdir(adir) if 'vlc_log.' in file]
if files:
for afile in files:
#i print(afile)
try:
# print('removing "{}"'.format(afile))
os.remove(os.path.join(adir, afile))
except:
pass
if __name__ == "__main__":
# example:
import threading
    threading.Thread(target=RemoveWinVlcLogFiles, args=('C:\\Users\\Spiros\\AppData\\Roaming\\pyradio',)).start()
``` |
{
"source": "jirikuchta/garmin-ical-export",
"score": 2
} |
#### File: garmin-ical-export/garminicalexport/__init__.py
```python
import vobject
from typing import Optional
from . import activities
from . import data_types
from .garmin_api import GarminAPI
def to_ical(login_data: data_types.LoginData, limit: int,
activity_type: Optional[data_types.ActivityType],
measurement_system: Optional[data_types.MeasurementSystem]) -> str:
with GarminAPI(login_data) as api:
activities_data = api.activites(limit, activity_type)
cal = vobject.iCalendar()
for activity_data in activities_data:
activity = activities.get_activity(
activity_data, api, measurement_system=measurement_system)
event = vobject.newFromBehavior("vevent")
event.add("uid").value = activity.ical_uid
event.add("summary").value = activity.ical_summary
event.add("dtstart").value = activity.ical_dtstart
event.add("dtend").value = activity.ical_dtend
event.add("description").value = activity.detail_link
cal.add(event)
return cal.serialize()
``` |
{
"source": "jirikuncar/Flask-Collect",
"score": 3
} |
#### File: flask_collect/storage/base.py
```python
from __future__ import print_function
from os import path as op, walk
class BaseStorage():
""" Base class for storages. """
def __init__(self, collect, verbose=False):
self.verbose = verbose
self.collect = collect
def __iter__(self):
""" Seek static files and result full and relative paths.
:return generator: Walk files
"""
for bp in [self.collect.app] + list(self.collect.blueprints.values()):
if bp.has_static_folder and op.isdir(bp.static_folder):
for root, _, files in walk(bp.static_folder):
for f in files:
fpath = op.join(root, f)
opath = op.relpath(fpath, bp.static_folder.rstrip('/'))
if bp.static_url_path and self.collect.static_url and \
bp.static_url_path.startswith(
op.join(self.collect.static_url, '')): # noqa
opath = op.join(
op.relpath(
bp.static_url_path,
self.collect.static_url), opath)
yield bp, fpath, opath
def log(self, msg):
""" Log message. """
if self.verbose:
print(msg)
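# --- Hedged usage sketch (not part of the original module) ---
# A minimal concrete storage showing what iterating BaseStorage yields:
# (blueprint, absolute source path, path relative to its static folder).
# ``self.collect.static_root`` is assumed to be the configured destination.
class _ExampleCopyStorage(BaseStorage):
    def run(self):
        import shutil
        from os import makedirs
        for bp, fpath, opath in self:
            dest = op.join(self.collect.static_root, opath)
            dest_dir = op.dirname(dest)
            if not op.isdir(dest_dir):
                makedirs(dest_dir)
            shutil.copy2(fpath, dest)
            self.log('Copied {} -> {}'.format(fpath, dest))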
```
#### File: flask_collect/storage/test.py
```python
from .base import BaseStorage
class Storage(BaseStorage):
def run(self):
return [f for f in self]
``` |
{
"source": "jirikuncar/renku-gateway",
"score": 2
} |
#### File: app/helpers/gitlab_user_utils.py
```python
import logging
import json
import requests
import re
from .. import app
logger = logging.getLogger(__name__)
# A dictionary to cache GitLab usernames given the "sub" claim from the keycloak access token
# as a key. This dictionary can be trashed without any functional implications, it will just
# result in a few extra queries to GitLab.
GITLAB_USERNAMES = {}
def get_or_create_gitlab_user(access_token):
"""Get the username of a a user given the validated JWT keycloak access_token. Create
a new user in case it doesn't already exist in GitLab."""
username = GITLAB_USERNAMES.get(access_token['sub'], None)
if username:
return username
sudo_header = {
'Private-Token': app.config['GITLAB_PASS']
}
query_params = {
'extern_uid': access_token['sub'],
'provider': 'oauth2_generic'
}
user_response = requests.get(
app.config['GITLAB_URL'] + '/api/v4/users',
headers=sudo_header,
params=query_params
)
# More than one user found -> should not happen
if len(user_response.json()) > 1:
logging.error('More than one user with ' +
'extern_uid={} for provider oauth2_generic.'.format(access_token['sub']))
return None
# No user found, lets create it.
# We emulate the behaviour of gitlab in creating the username from the email
# address, while appending integers in case a username is already taken.
elif len(user_response.json()) == 0:
username_counter = 0
while True:
username_appendix = '' if username_counter == 0 else str(username_counter)
username_base = re.match(r'[a-zA-Z0-9\.\_\-]*', access_token['preferred_username']).group(0)
body = {
'username': username_base + username_appendix,
'email': access_token['email'],
'name': '{first} {last}'.format(first=access_token['given_name'], last=access_token['family_name']),
'extern_uid': access_token['sub'],
'provider': 'oauth2_generic',
'skip_confirmation': True,
'reset_password': <PASSWORD>
}
new_user_response = requests.post(
app.config['GITLAB_URL'] + '/api/v4/users',
headers=sudo_header,
data=body
)
if (new_user_response.status_code != 409 or
new_user_response.json()['message'] != 'Username has already been taken'):
break
username_counter += 1
if new_user_response.status_code != 201:
logging.error('Problem creating user from body {}'.format(json.dumps(body)))
logging.error(new_user_response.json()['message'])
username = new_user_response.json()['username']
# Exactly one user found, return the username
else:
username = user_response.json()[0]['username']
GITLAB_USERNAMES[access_token['sub']] = username
return username
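# --- Hedged usage sketch (not part of the original module) ---
# The access_token argument is the already validated and decoded Keycloak
# token; the claims listed below are the ones this helper actually reads.
# The values are made up, and a real call also needs GITLAB_URL/GITLAB_PASS
# in the Flask app config, so the call itself is left commented out.
_EXAMPLE_ACCESS_TOKEN = {
    'sub': 'f81d4fae-7dec-11d0-a765-00a0c91e6bf6',
    'preferred_username': 'jane.doe',
    'email': 'jane.doe@example.com',
    'given_name': 'Jane',
    'family_name': 'Doe',
}
# username = get_or_create_gitlab_user(_EXAMPLE_ACCESS_TOKEN)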
``` |
{
"source": "jirikuncar/renku-python",
"score": 2
} |
#### File: jirikuncar/renku-python/conftest.py
```python
import json
import os
import shutil
import sys
import tempfile
import time
import types
import pytest
import responses
from click.testing import CliRunner
@pytest.fixture(scope='module')
def renku_path(tmpdir_factory):
"""Temporary instance path."""
path = str(tmpdir_factory.mktemp('renku'))
yield path
shutil.rmtree(path)
@pytest.fixture()
def instance_path(renku_path, monkeypatch):
"""Temporary instance path."""
orig_pwd = os.getcwd()
with monkeypatch.context() as m:
m.chdir(renku_path)
yield renku_path
@pytest.fixture()
def runner(monkeypatch):
"""Create a runner on isolated filesystem."""
from renku.cli._config import RENKU_HOME
monkeypatch.setenv('RENKU_CONFIG', RENKU_HOME)
return CliRunner()
@pytest.fixture()
def run(runner, capsys):
"""Return a callable runner."""
import contextlib
from renku import cli
@contextlib.contextmanager
def chdir(path):
"""Change the current working directory."""
cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(cwd)
class redirect_stdin(contextlib.ContextDecorator):
"""Implement missing redirect stdin based on ``contextlib.py``."""
_stream = 'stdin'
def __init__(self, new_target):
"""Keep the original stream."""
self._new_target = new_target
# We use a list of old targets to make this CM re-entrant
self._old_targets = []
def __enter__(self):
"""Change the stream value."""
self._old_targets.append(getattr(sys, self._stream))
setattr(sys, self._stream, self._new_target)
return self._new_target
def __exit__(self, exctype, excinst, exctb):
"""Restore the stream value."""
setattr(sys, self._stream, self._old_targets.pop())
managers = {
'stdout': lambda path: contextlib.redirect_stdout(path.open('wb')),
'stderr': lambda path: contextlib.redirect_stderr(path.open('wb')),
'stdin':
lambda path: redirect_stdin(
path.open('rb') if not hasattr(path, 'read') else path
),
}
def generate(args=('update', ), cwd=None, **streams):
"""Generate an output."""
with capsys.disabled(), contextlib.ExitStack() as stack:
for name, stream in streams.items():
stack.enter_context(managers[name](stream))
if cwd is not None:
stack.enter_context(chdir(str(cwd)))
try:
cli.cli.main(
args=args,
prog_name=runner.get_default_prog_name(cli.cli),
)
except SystemExit as e:
return 0 if e.code is None else e.code
except Exception:
raise
return generate
@pytest.fixture()
def isolated_runner(monkeypatch):
"""Create a runner on isolated filesystem."""
from renku.cli._config import RENKU_HOME
monkeypatch.setenv('RENKU_CONFIG', RENKU_HOME)
runner_ = CliRunner()
with runner_.isolated_filesystem():
yield runner_
@pytest.fixture()
def data_file(tmpdir):
"""Create a sample data file."""
p = tmpdir.mkdir('data').join('file')
p.write('1234')
return p
@pytest.fixture(scope='module')
def repository():
"""Yield a Renku repository."""
from renku import cli
from renku.api import LocalClient
runner = CliRunner()
with runner.isolated_filesystem() as project_path:
result = runner.invoke(cli.cli, ['init', '.'], catch_exceptions=False)
assert result.exit_code == 0
yield project_path
@pytest.fixture
def project(repository):
"""Create a test project."""
from git import Repo
repo = Repo(repository)
commit = repo.head.commit
os.chdir(repository)
yield repository
os.chdir(repository)
repo.head.reset(commit, index=True, working_tree=True)
# remove any extra non-tracked files (.pyc, etc)
repo.git.clean('-xdff')
@pytest.fixture()
def client(repository):
"""Return a Renku repository."""
from git import Repo
from renku.api import LocalClient
repo = Repo(repository)
commit = repo.head.commit
os.chdir(repository)
yield LocalClient(path=repository)
os.chdir(repository)
repo.head.reset(commit, index=True, working_tree=True)
# remove any extra non-tracked files (.pyc, etc)
repo.git.clean('-xdff')
@pytest.fixture()
def dataset(client):
"""Create a dataset."""
with client.with_dataset(name='dataset') as dataset:
dataset.authors = {
'name': 'me',
'email': '<EMAIL>',
}
return dataset
@pytest.fixture()
def dataset_responses():
"""Authentication responses."""
with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
def request_callback(request):
return (200, {'Content-Type': 'application/text'}, '1234')
rsps.add_callback(
responses.GET,
'http://example.com/file',
callback=request_callback
)
rsps.add_callback(
responses.GET,
'https://example.com/file',
callback=request_callback
)
yield rsps
@pytest.fixture(scope='module')
def directory_tree(tmpdir_factory):
"""Create a test directory tree."""
# initialize
p = tmpdir_factory.mktemp('directory_tree')
p.join('file').write('1234')
p.join('dir2').mkdir()
p.join('dir2/file2').write('5678')
return p
@pytest.fixture(scope='module')
def data_repository(directory_tree):
"""Create a test repo."""
from git import Repo, Actor
# initialize
repo = Repo.init(directory_tree.strpath)
# add a file
repo.index.add([directory_tree.join('file').strpath])
repo.index.commit('test commit', author=Actor('me', '<EMAIL>'))
# commit changes to the same file with a different user
directory_tree.join('file').write('5678')
repo.index.add([directory_tree.join('file').strpath])
repo.index.commit('test commit', author=Actor('me2', '<EMAIL>'))
# commit a second file
repo.index.add([directory_tree.join('dir2/file2').strpath])
repo.index.commit('test commit', author=Actor('me', '<EMAIL>'))
# return the repo
return repo
@pytest.fixture(autouse=True)
def add_client(doctest_namespace):
"""Add Renku client to doctest namespace."""
from renku.api import LocalClient
doctest_namespace['client'] = LocalClient(path=tempfile.mkdtemp())
```
#### File: renku/api/datasets.py
```python
import os
import shutil
import stat
import warnings
from contextlib import contextmanager
from urllib import error, parse
import attr
import requests
import yaml
from renku._compat import Path
from renku.models.datasets import Author, Dataset, DatasetFile, NoneType
@attr.s
class DatasetsApiMixin(object):
"""Client for handling datasets."""
datadir = attr.ib(default='data', converter=str)
"""Define a name of the folder for storing datasets."""
@contextmanager
def with_dataset(self, name=None):
"""Yield an editable metadata object for a dataset."""
with self.lock:
from renku.models._jsonld import asjsonld
from renku.models.datasets import Dataset
path = None
dataset = None
dataset_path = self.path / self.datadir / name
if name:
path = dataset_path / self.METADATA
if path.exists():
with path.open('r') as f:
source = yaml.load(f) or {}
dataset = Dataset.from_jsonld(source)
if dataset is None:
source = {}
dataset = Dataset(name=name)
try:
dataset_path.mkdir(parents=True, exist_ok=True)
except FileExistsError:
raise FileExistsError('This dataset already exists.')
yield dataset
source.update(
**asjsonld(
dataset,
filter=lambda attr, _: attr.name != 'datadir',
)
)
# TODO
# if path is None:
# path = dataset_path / self.METADATA
# if path.exists():
# raise ValueError('Dataset already exists')
with path.open('w') as f:
yaml.dump(source, f, default_flow_style=False)
def add_data_to_dataset(self, dataset, url, git=False, **kwargs):
"""Import the data into the data directory."""
dataset_path = self.path / self.datadir / dataset.name
git = git or check_for_git_repo(url)
target = kwargs.pop('target', None)
if git:
if isinstance(target, (str, NoneType)):
dataset.files.update(
self._add_from_git(
dataset, dataset_path, url, target, **kwargs
)
)
else:
for t in target:
dataset.files.update(
self._add_from_git(
dataset, dataset_path, url, t, **kwargs
)
)
else:
dataset.files.update(
self._add_from_url(dataset, dataset_path, url, **kwargs)
)
def _add_from_url(self, dataset, path, url, nocopy=False, **kwargs):
"""Process an add from url and return the location on disk."""
u = parse.urlparse(url)
if u.scheme not in Dataset.SUPPORTED_SCHEMES:
raise NotImplementedError(
'{} URLs are not supported'.format(u.scheme)
)
        # Respect the directory structure inside the source path.
relative_to = kwargs.pop('relative_to', None)
if relative_to:
dst_path = Path(url).resolve().absolute().relative_to(
Path(relative_to).resolve().absolute()
)
else:
dst_path = os.path.basename(url)
dst = path.joinpath(dst_path).absolute()
if u.scheme in ('', 'file'):
src = Path(u.path).absolute()
# if we have a directory, recurse
if src.is_dir():
files = {}
dst.mkdir(parents=True, exist_ok=True)
for f in src.iterdir():
files.update(
self._add_from_url(
dataset,
dst,
f.absolute().as_posix(),
nocopy=nocopy
)
)
return files
# Make sure the parent directory exists.
dst.parent.mkdir(parents=True, exist_ok=True)
if nocopy:
try:
os.link(str(src), str(dst))
except Exception as e:
raise Exception(
'Could not create hard link '
'- retry without nocopy.'
) from e
else:
shutil.copy(str(src), str(dst))
# Do not expose local paths.
src = None
else:
try:
response = requests.get(url)
dst.write_bytes(response.content)
except error.HTTPError as e: # pragma nocover
raise e
# make the added file read-only
mode = dst.stat().st_mode & 0o777
dst.chmod(mode & ~(stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
self.track_paths_in_storage(str(dst.relative_to(self.path)))
dataset_path = self.path / self.datadir / dataset.name
result = dst.relative_to(dataset_path).as_posix()
return {
result:
DatasetFile(
path=result,
url=url,
authors=dataset.authors,
dataset=dataset.name,
)
}
def _add_from_git(self, dataset, path, url, target, **kwargs):
"""Process adding resources from another git repository.
The submodules are placed in ``.renku/vendors`` and linked
to the *path* specified by the user.
"""
from git import Repo
# create the submodule
u = parse.urlparse(url)
submodule_path = self.renku_path / 'vendors' / (u.netloc or 'local')
        # Respect the directory structure inside the source path.
relative_to = kwargs.get('relative_to', None)
if u.scheme in ('', 'file'):
warnings.warn('Importing local git repository, use HTTPS')
# determine where is the base repo path
r = Repo(url, search_parent_directories=True)
src_repo_path = Path(r.git_dir).parent
submodule_name = src_repo_path.name
submodule_path = submodule_path / str(src_repo_path).lstrip('/')
# if repo path is a parent, rebase the paths and update url
if src_repo_path != Path(u.path):
top_target = Path(
u.path
).resolve().absolute().relative_to(src_repo_path)
if target:
target = top_target / target
else:
target = top_target
url = src_repo_path.as_posix()
elif u.scheme in {'http', 'https', 'git+https', 'git+ssh'}:
submodule_name = os.path.splitext(os.path.basename(u.path))[0]
submodule_path = submodule_path.joinpath(
os.path.dirname(u.path).lstrip('/'), submodule_name
)
else:
raise NotImplementedError(
'Scheme {} not supported'.format(u.scheme)
)
# FIXME: do a proper check that the repos are not the same
if submodule_name not in (s.name for s in self.git.submodules):
# new submodule to add
if u.scheme == 'git+ssh':
url = 'git@{netloc}:{path}'.format(
netloc=u.netloc, path=u.path[1:]
)
self.git.create_submodule(
name=submodule_name, path=submodule_path.as_posix(), url=url
)
src = submodule_path / (target or '')
if target and relative_to:
relative_to = Path(relative_to)
if relative_to.is_absolute():
assert u.scheme in {
'', 'file'
}, ('Only relative paths can be used with URLs.')
target = (Path(url).resolve().absolute() / target).relative_to(
relative_to.resolve()
)
else:
# src already includes target so we do not have to append it
target = src.relative_to(submodule_path / relative_to)
# link the target into the data directory
dst = self.path / path / (target or '')
# if we have a directory, recurse
if src.is_dir():
files = {}
dst.mkdir(parents=True, exist_ok=True)
# FIXME get all files from submodule index
for f in src.iterdir():
try:
files.update(
self._add_from_git(
dataset,
path,
url,
target=f.relative_to(submodule_path),
**kwargs
)
)
except ValueError:
pass # skip files outside the relative path
return files
if not dst.parent.exists():
dst.parent.mkdir(parents=True)
os.symlink(os.path.relpath(str(src), str(dst.parent)), str(dst))
# grab all the authors from the commit history
git_repo = Repo(str(submodule_path.absolute()))
authors = []
for commit in git_repo.iter_commits(paths=target):
author = Author.from_commit(commit)
if author not in authors:
authors.append(author)
dataset_path = self.path / self.datadir / dataset.name
result = dst.relative_to(dataset_path).as_posix()
if u.scheme in ('', 'file'):
url = None
else:
url = '{}/{}'.format(url, target)
return {
result:
DatasetFile(
path=result,
url=url,
authors=authors,
dataset=dataset.name, # TODO detect original dataset
)
}
def check_for_git_repo(url):
"""Check if a url points to a git repository."""
u = parse.urlparse(url)
is_git = False
if os.path.splitext(u.path)[1] == '.git':
is_git = True
elif u.scheme in ('', 'file'):
from git import InvalidGitRepositoryError, Repo
try:
Repo(u.path, search_parent_directories=True)
is_git = True
except InvalidGitRepositoryError:
is_git = False
return is_git
```
#### File: renku/cli/_docker.py
```python
import re
import subprocess
from configparser import NoSectionError
import attr
from renku import errors
#: Define possible repository URLs.
_REPOSITORY_URLS = (
re.compile(
r'^(?P<protocol>https?|git|ssh|rsync)\://'
r'(?:(?P<username>[^:]+)(:(?P<password>[^@]+))?@)?'
r'(?P<hostname>[a-z0-9_.-]*)'
r'[:/]*'
r'(?P<port>[\d]+){0,1}'
r'(?P<pathname>\/(?P<owner>.+)/(?P<name>.+).git)'
),
re.compile(
r'(git\+)?'
r'((?P<protocol>\w+)://)'
# '((?P<user>\w+)@)?'
r'((?P<username>[^:]+)(:(?P<password>[^@]+))?@)?'
r'((?P<hostname>[\w\.\-]+))'
r'(:(?P<port>\d+))?'
r'(?P<pathname>(\/(?P<owner>\w+)/)?'
r'(\/?(?P<name>[\w\-]+)(\.git)?)?)'
),
re.compile(
r'^(?:(?P<username>.+)@)*'
r'(?P<hostname>[a-z0-9_.-]*)[:/]*'
r'(?P<port>[\d]+){0,1}'
r'[:](?P<pathname>\/?(?P<owner>.+)/(?P<name>.+).git)'
),
re.compile(
r'((?P<username>\w+)@)?'
r'((?P<hostname>[\w\.\-]+))'
r'[\:\/]{1,2}'
r'(?P<pathname>((?P<owner>\w+)/)?'
r'((?P<name>[\w\-]+)(\.git)?)?)'
),
re.compile(
# Simple registry URL like: docker.io
r'((?P<hostname>[\w\.\-]+))'
),
)
@attr.s()
class GitURL(object):
"""Parser for common Git URLs."""
# Initial value
href = attr.ib()
# Parsed protocols
pathname = attr.ib(default=None)
protocols = attr.ib(default=attr.Factory(list), init=False)
protocol = attr.ib(default='ssh')
hostname = attr.ib(default=None)
username = attr.ib(default=None)
password = attr.ib(default=None)
port = attr.ib(default=None)
owner = attr.ib(default=None)
name = attr.ib(default=None)
def __attrs_post_init__(self):
"""Derive basic informations."""
if self.protocol:
self.protocols = self.protocol.split('+')
@classmethod
def parse(cls, href):
"""Derive basic informations."""
for regex in _REPOSITORY_URLS:
if re.search(regex, href):
matches = re.search(regex, href)
return cls(href=href, **matches.groupdict())
else:
raise errors.ConfigurationError(
'"{href} is not a valid Git remote.'.format(href=href)
)
@property
def image(self):
"""Return image name."""
img = self.hostname
if self.owner:
img += '/' + self.owner
if self.name:
img += '/' + self.name
return img
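# --- Hedged usage sketch (not part of the original module) ---
# Parsing a made-up remote with the regular expressions above; the
# expected attribute values are shown in the comment.
def _example_git_url_parse():
    url = GitURL.parse('https://gitlab.example.com/owner/project.git')
    # hostname='gitlab.example.com', owner='owner', name='project',
    # image='gitlab.example.com/owner/project'
    return url.hostname, url.owner, url.name, url.image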
def detect_registry_url(client, auto_login=True):
"""Return a URL of the Docker registry."""
repo = client.git
config = repo.config_reader()
# Find registry URL in .git/config
remote_url = None
try:
registry_url = config.get_value('renku', 'registry', None)
except NoSectionError:
registry_url = None
remote_branch = repo.head.reference.tracking_branch()
if remote_branch is not None:
remote_name = remote_branch.remote_name
config_section = 'renku "{remote_name}"'.format(
remote_name=remote_name
)
try:
registry_url = config.get_value(
config_section, 'registry', registry_url
)
except NoSectionError:
pass
remote_url = repo.remotes[remote_name].url
if registry_url:
# Look in [renku] and [renku "{remote_name}"] for registry_url key.
url = GitURL.parse(registry_url)
elif remote_url:
# Use URL based on remote configuration.
url = GitURL.parse(remote_url)
# Replace gitlab. with registry. unless running on gitlab.com.
hostname_parts = url.hostname.split('.')
if len(hostname_parts) > 2 and hostname_parts[0] == 'gitlab':
hostname_parts = hostname_parts[1:]
hostname = '.'.join(['registry'] + hostname_parts)
url = attr.evolve(url, hostname=hostname)
else:
raise errors.ConfigurationError(
'Configure renku.repository_url or Git remote.'
)
if auto_login and url.username and url.password:
try:
subprocess.run([
'docker',
'login',
url.hostname,
'-u',
url.username,
'--password-stdin',
],
check=True,
input=url.password.encode('utf-8'))
except subprocess.CalledProcessError:
raise errors.AuthenticationError(
'Check configuration of password or token in the registry URL'
)
return url
```
#### File: renku/cli/env.py
```python
import click
from ._config import with_config
from ._options import default_endpoint
@click.command()
@click.argument('endpoint', required=False, callback=default_endpoint)
@with_config
def env(config, endpoint):
"""Print RENKU environment variables.
Run this command to configure your Renku client:
$ eval "$(renku env)"
"""
access_token = config['endpoints'][endpoint]['token']['access_token']
click.echo('export {0}={1}'.format('RENKU_ENDPOINT', endpoint))
click.echo('export {0}={1}'.format('RENKU_ACCESS_TOKEN', access_token))
click.echo('# Run this command to configure your Renku client:')
click.echo('# eval "$(renku env)"')
```
#### File: renku/cli/_format.py
```python
import functools
import click
def ascii(graph):
"""Format graph as an ASCII art."""
from ._ascii import DAG
from ._echo import echo_via_pager
echo_via_pager(str(DAG(graph)))
def _jsonld(graph, format, *args, **kwargs):
"""Return formatted graph in JSON-LD ``format`` function."""
import json
from pyld import jsonld
from renku.models._jsonld import asjsonld
output = getattr(jsonld, format)([
asjsonld(action) for action in graph.activities.values()
])
return json.dumps(output, indent=2)
def dot(graph, simple=True, landscape=False):
"""Format graph as a dot file."""
import sys
from rdflib import ConjunctiveGraph
from rdflib.plugin import register, Parser
from rdflib.tools.rdf2dot import rdf2dot
register('json-ld', Parser, 'rdflib_jsonld.parser', 'JsonLDParser')
g = ConjunctiveGraph().parse(
data=_jsonld(graph, 'expand'),
format='json-ld',
)
g.bind('prov', 'http://www.w3.org/ns/prov#')
g.bind('wfdesc', 'http://purl.org/wf4ever/wfdesc#')
g.bind('wf', 'http://www.w3.org/2005/01/wf/flow#')
g.bind('wfprov', 'http://purl.org/wf4ever/wfprov#')
if simple:
_rdf2dot_simple(g, sys.stdout, landscape)
else:
rdf2dot(g, sys.stdout)
# define the various dot options
dot_full = functools.partial(dot, simple=False, landscape=False)
dot_landscape = functools.partial(dot, simple=True, landscape=True)
dot_full_landscape = functools.partial(dot, simple=False, landscape=True)
def _rdf2dot_simple(g, stream, landscape=True):
"""Create a simple graph of processes and artifacts."""
from itertools import chain
stream.write('digraph { \n node [ fontname="DejaVu Sans" ] ; \n ')
if landscape:
stream.write('rankdir="LR" \n')
import re
path_re = re.compile(
r'file:///(?P<type>[a-zA-Z]+)/'
r'(?P<commit>\w+)'
r'(?P<path>.+)?'
)
inputs = g.query(
"""
SELECT ?input ?role ?activity ?comment
WHERE {
?activity (prov:qualifiedUsage/prov:entity) ?input .
?activity prov:qualifiedUsage ?qual .
?qual prov:hadRole ?role .
?qual prov:entity ?input .
?qual rdf:type ?type .
?activity rdf:type wfprov:ProcessRun .
?activity rdfs:comment ?comment .
FILTER NOT EXISTS {?activity rdf:type wfprov:WorkflowRun}
}
"""
)
outputs = g.query(
"""
SELECT ?activity ?role ?output ?comment
WHERE {
?output (prov:qualifiedGeneration/prov:activity) ?activity .
?output prov:qualifiedGeneration ?qual .
?qual prov:hadRole ?role .
?qual prov:activity ?activity .
?qual rdf:type ?type .
?activity rdf:type wfprov:ProcessRun ;
rdfs:comment ?comment .
FILTER NOT EXISTS {?activity rdf:type wfprov:WorkflowRun}
}
"""
)
activity_nodes = {}
artifact_nodes = {}
for source, role, target, comment, in chain(inputs, outputs):
# extract the pieces of the process URI
src_path = path_re.match(source).groupdict()
tgt_path = path_re.match(target).groupdict()
# write the edge
stream.write(
'\t"{src_commit}:{src_path}" -> '
'"{tgt_commit}:{tgt_path}" '
'[label={role}] \n'.format(
src_commit=src_path['commit'][:5],
src_path=src_path.get('path') or '',
tgt_commit=tgt_path['commit'][:5],
tgt_path=tgt_path.get('path') or '',
role=role
)
)
if src_path.get('type') == 'commit':
activity_nodes.setdefault(source, {'comment': comment})
artifact_nodes.setdefault(target, {})
if tgt_path.get('type') == 'commit':
activity_nodes.setdefault(target, {'comment': comment})
artifact_nodes.setdefault(source, {})
# customize the nodes
for node, content in activity_nodes.items():
node_path = path_re.match(node).groupdict()
stream.write(
'\t"{commit}:{path}" '
'[shape=box label="#{commit}:{path}:{comment}"] \n'.format(
comment=content['comment'],
commit=node_path['commit'][:5],
path=node_path.get('path') or ''
)
)
for node, content in artifact_nodes.items():
node_path = path_re.match(node).groupdict()
stream.write(
'\t"{commit}:{path}" '
'[label="#{commit}:{path}"] \n'.format(
commit=node_path['commit'][:5],
path=node_path.get('path') or ''
)
)
stream.write('}\n')
def jsonld(graph):
"""Format graph as JSON-LD file."""
click.echo(_jsonld(graph, 'expand'))
def jsonld_graph(graph):
"""Format graph as JSON-LD graph file."""
click.echo(_jsonld(graph, 'flatten'))
def nt(graph):
"""Format graph as n-tuples."""
from rdflib import ConjunctiveGraph
from rdflib.plugin import register, Parser
register('json-ld', Parser, 'rdflib_jsonld.parser', 'JsonLDParser')
click.echo(
ConjunctiveGraph().parse(
data=_jsonld(graph, 'expand'),
format='json-ld',
).serialize(format='nt')
)
def rdf(graph):
"""Output the graph as RDF."""
from rdflib import ConjunctiveGraph
from rdflib.plugin import register, Parser
register('json-ld', Parser, 'rdflib_jsonld.parser', 'JsonLDParser')
click.echo(
ConjunctiveGraph().parse(
data=_jsonld(graph, 'expand'),
format='json-ld',
).serialize(format='application/rdf+xml')
)
FORMATS = {
'ascii': ascii,
'dot': dot,
'dot-full': dot_full,
'dot-landscape': dot_landscape,
'dot-full-landscape': dot_full_landscape,
'json-ld': jsonld,
'json-ld-graph': jsonld_graph,
'nt': nt,
'rdf': rdf,
}
"""Valid formatting options."""
```
#### File: renku/cli/_git.py
```python
import os
import sys
from contextlib import contextmanager
from email.utils import formatdate
import click
import git
from git import Actor
from renku import errors
from renku.version import __version__
GIT_KEY = 'renku.git'
COMMITTER = Actor(
'renku {0}'.format(__version__),
    '<EMAIL>'.format(__version__),
)
def set_git_home(value):
"""Set Git path."""
ctx = click.get_current_context()
ctx.meta[GIT_KEY] = value
def get_git_home(path='.'):
"""Get Git path from current context."""
ctx = click.get_current_context(silent=True)
if ctx and GIT_KEY in ctx.meta:
return ctx.meta[GIT_KEY]
from git import Repo
return Repo(path, search_parent_directories=True).working_dir
def _modified_paths(repo):
"""Return paths of modified files."""
return [item.b_path for item in repo.index.diff(None) if item.b_path]
def _dirty_paths(repo):
"""Get paths of dirty files in the repository."""
repo_path = repo.working_dir
return {
os.path.join(repo_path, p)
for p in repo.untracked_files + _modified_paths(repo)
}
def _mapped_std_streams(lookup_paths, streams=('stdin', 'stdout', 'stderr')):
"""Get a mapping of standard streams to given paths."""
# FIXME add device number too
standard_inos = {}
for stream in streams:
try:
stream_stat = os.fstat(getattr(sys, stream).fileno())
key = stream_stat.st_dev, stream_stat.st_ino
standard_inos[key] = stream
except Exception: # FIXME UnsupportedOperation
pass
# FIXME if not getattr(sys, stream).istty()
def stream_inos(paths):
"""Yield tuples with stats and path."""
for path in paths:
try:
stat = os.stat(path)
key = (stat.st_dev, stat.st_ino)
if key in standard_inos:
yield standard_inos[key], path
except FileNotFoundError: # pragma: no cover
pass
return dict(stream_inos(lookup_paths)) if standard_inos else {}
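# --- Hedged usage sketch (not part of the original module) ---
# When the current process was started with a redirection such as
# ``renku run ... > out.txt``, the call below should map 'stdout' to
# 'out.txt', because the (st_dev, st_ino) pair of the stream matches the
# stat of the candidate path. The file names are made up.
def _example_mapped_std_streams():
    candidates = ['out.txt', 'err.txt']
    return _mapped_std_streams(candidates)   # e.g. {'stdout': 'out.txt'}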
def _clean_streams(repo, mapped_streams):
"""Clean mapped standard streams."""
for stream_name in ('stdout', 'stderr'):
stream = mapped_streams.get(stream_name)
if not stream:
continue
path = os.path.relpath(stream, start=repo.working_dir)
if (path, 0) not in repo.index.entries:
os.remove(stream)
else:
blob = repo.index.entries[(path, 0)].to_blob(repo)
with open(path, 'wb') as fp:
fp.write(blob.data_stream.read())
@contextmanager
def with_git(
clean=True, up_to_date=False, commit=True, ignore_std_streams=False
):
"""Perform Git checks and operations."""
from git import Repo
repo_path = get_git_home()
current_dir = os.getcwd()
if clean:
try:
os.chdir(repo_path)
repo = Repo(repo_path)
dirty_paths = _dirty_paths(repo)
mapped_streams = _mapped_std_streams(dirty_paths)
if ignore_std_streams:
if dirty_paths - set(mapped_streams.values()):
_clean_streams(repo, mapped_streams)
raise errors.DirtyRepository(repo)
elif repo.is_dirty(untracked_files=True):
_clean_streams(repo, mapped_streams)
raise errors.DirtyRepository(repo)
except git.exc.InvalidGitRepositoryError:
raise errors.UninitializedProject(repo_path)
finally:
os.chdir(current_dir)
if up_to_date:
# TODO
# Fetch origin/master
# is_ancestor('origin/master', 'HEAD')
pass
author_date = formatdate(localtime=True)
yield
if commit:
try:
os.chdir(repo_path)
repo = Repo(get_git_home())
repo.git.add('--all')
argv = [os.path.basename(sys.argv[0])] + sys.argv[1:]
# Ignore pre-commit hooks since we have already done everything.
repo.index.commit(
' '.join(argv),
author_date=author_date,
committer=COMMITTER,
skip_hooks=True,
)
finally:
os.chdir(current_dir)
def _safe_issue_checkout(repo, issue=None):
"""Safely checkout branch for the issue."""
branch_name = str(issue) if issue else 'master'
if branch_name not in repo.heads:
branch = repo.create_head(branch_name)
else:
branch = repo.heads[branch_name]
branch.checkout()
```
#### File: renku/cli/runner.py
```python
import os
import sys
import tempfile
from subprocess import call
import click
import yaml
from ._client import pass_local_client
_GITLAB_CI = '.gitlab-ci.yml'
_DOCKERFILE = 'Dockerfile'
_REQUIREMENTS = 'requirements.txt'
CI_TEMPLATES = [_GITLAB_CI, _DOCKERFILE, _REQUIREMENTS]
@click.group()
def runner():
"""Simplify running of CI scripts."""
@runner.command()
@pass_local_client
def template(client):
"""Render templated configuration files."""
import pkg_resources
# create the templated files
for tpl_file in CI_TEMPLATES:
tpl_path = client.path / tpl_file
with tpl_path.open('wb') as dest:
with pkg_resources.resource_stream(__name__, tpl_file) as tpl:
dest.write(tpl.read())
@runner.command()
@click.option(
'--run/--no-run',
is_flag=True,
envvar='RENKU_RUNNER_RERUN',
help='Run or only load the CWL and the job description.'
)
@click.option(
'--job', envvar='RENKU_RUNNER_JOB', help='Job description in YAML.'
)
@pass_local_client
def rerun(client, run, job):
"""Re-run existing workflow or tool using CWL runner."""
from renku.models.provenance import ProcessRun, from_git_commit
activity = from_git_commit(commit=client.git.head.commit, client=client)
if not isinstance(activity, ProcessRun):
click.secho('No tool was found.', fg='red', file=sys.stderr)
return
try:
args = ['cwl-runner', activity.path]
if job:
job_file = tempfile.NamedTemporaryFile(
suffix='.yml', dir=os.getcwd(), delete=False
)
args.append(job_file.name)
with job_file as fp:
yaml.dump(yaml.load(job), stream=fp, encoding='utf-8')
if run:
return call(args, cwd=os.getcwd())
finally:
if job:
os.unlink(job_file.name)
```
#### File: renku/cli/update.py
```python
r"""Update outdated files created by the "run" command.
Recreating outdated files
~~~~~~~~~~~~~~~~~~~~~~~~~
The information about dependencies for each file in the repository is generated
from information stored in the underlying Git repository.
A minimal dependency graph is generated for each outdated file stored in the
repository. It means that only the necessary steps will be executed and the
workflow used to orchestrate these steps is stored in the repository.
Assume that the following history for the file ``H`` exists.
.. code-block:: text
C---D---E
/ \
A---B---F---G---H
The first example shows the situation when ``D`` is modified and files ``E`` and
``H`` become outdated.
.. code-block:: text
C--*D*--(E)
/ \
A---B---F---G---(H)
** - modified
() - needs update
In this situation, you can do effectively two things:
* Recreate a single file by running
.. code-block:: console
$ renku update E
* Update all files by simply running
.. code-block:: console
$ renku update
.. note:: If there were uncommitted changes then the command fails.
Check :program:`git status` to see details.
Pre-update checks
~~~~~~~~~~~~~~~~~
In the next example, files ``A`` or ``B`` are modified, hence the majority
of dependent files must be recreated.
.. code-block:: text
(C)--(D)--(E)
/ \
*A*--*B*--(F)--(G)--(H)
To avoid excessive recreation of a large portion of files which could have
been affected by a simple change of an input file, consider specifying a single
file (e.g. ``renku update G``). See also :ref:`cli-status`.
.. _cli-update-with-siblings:
Update siblings
~~~~~~~~~~~~~~~
If a tool produces multiple output files, these outputs always need to be
updated together.
.. code-block:: text
(B)
/
*A*--[step 1]--(C)
\
(D)
An attempt to update a single file would fail with the following error.
.. code-block:: console
$ renku update C
Error: There are missing output siblings:
B
D
Include the files above in the command or use --with-siblings option.
The following commands will produce the same result.
.. code-block:: console
$ renku update --with-siblings C
$ renku update B C D
"""
import sys
import uuid
import click
from renku.models.cwl._ascwl import ascwl
from ._client import pass_local_client
from ._git import with_git
from ._graph import Graph, _safe_path
from ._options import option_siblings
@click.command()
@click.option('--revision', default='HEAD')
@click.option(
'--no-output',
is_flag=True,
default=False,
help='Display commands without output files.'
)
@option_siblings
@click.argument(
'paths', type=click.Path(exists=True, dir_okay=False), nargs=-1
)
@pass_local_client
@click.pass_context
@with_git()
def update(ctx, client, revision, no_output, siblings, paths):
"""Update existing files by rerunning their outdated workflow."""
graph = Graph(client)
outputs = graph.build(revision=revision, can_be_cwl=no_output, paths=paths)
outputs = {node for node in outputs if graph.need_update(node)}
if not outputs:
click.secho(
'All files were generated from the latest inputs.', fg='green'
)
sys.exit(0)
# Check or extend siblings of outputs.
outputs = siblings(graph, outputs)
output_paths = {node.path for node in outputs if _safe_path(node.path)}
# Get all clean nodes
input_paths = {node.path for node in graph.nodes} - output_paths
# Store the generated workflow used for updating paths.
import yaml
output_file = client.workflow_path / '{0}.cwl'.format(uuid.uuid4().hex)
workflow = graph.ascwl(
input_paths=input_paths,
output_paths=output_paths,
outputs=outputs,
)
with output_file.open('w') as f:
f.write(
yaml.dump(
ascwl(
workflow,
filter=lambda _, x: x is not None,
basedir=client.workflow_path,
),
default_flow_style=False
)
)
from ._cwl import execute
execute(client, output_file, output_paths=output_paths)
```
#### File: models/provenance/agents.py
```python
import re
from renku.models import _jsonld as jsonld
@jsonld.s(
type=[
'prov:Person',
'foaf:Person',
],
context={
'foaf': 'http://xmlns.com/foaf/0.1/',
'prov': 'http://purl.org/dc/terms/',
},
frozen=True,
slots=True,
)
class Person:
"""Represent a person."""
name = jsonld.ib(context='rdfs:label')
email = jsonld.ib(context={
'@type': '@id',
'@id': 'foaf:mbox',
})
_id = jsonld.ib(context='@id', init=False, kw_only=True)
@_id.default
def default_id(self):
"""Configure calculated ID."""
return self.email
@email.validator
def check_email(self, attribute, value):
"""Check that the email is valid."""
if not (isinstance(value, str) and re.match(r"[^@]+@[^@]+", value)):
raise ValueError('Email address "{0}" is invalid.'.format(value))
@classmethod
def from_commit(cls, commit):
"""Create an instance from a Git commit."""
return cls(
name=commit.author.name,
email='mailto:{0}'.format(commit.author.email),
)
@jsonld.s(
type=[
'prov:SoftwareAgent',
'wfprov:WorkflowEngine',
],
context={
'prov': 'http://purl.org/dc/terms/',
'wfprov': 'http://purl.org/wf4ever/wfprov#',
},
frozen=True,
slots=True,
)
class SoftwareAgent:
"""Represent a person."""
label = jsonld.ib(context='rdfs:label', kw_only=True)
was_started_by = jsonld.ib(
context='prov:wasStartedBy',
default=None,
kw_only=True,
)
_id = jsonld.ib(context='@id', kw_only=True)
@classmethod
def from_commit(cls, commit):
"""Create an instance from a Git commit."""
author = Person.from_commit(commit)
if commit.author != commit.committer:
return cls(
label=commit.committer.name,
id='mailto:{0}'.format(commit.committer.email),
was_started_by=author,
)
return author
```
#### File: renku/models/_tabulate.py
```python
from datetime import datetime
from operator import attrgetter
from tabulate import tabulate as tblte
def format_cell(cell, datetime_fmt=None):
"""Format a cell."""
if datetime_fmt and isinstance(cell, datetime):
return cell.strftime(datetime_fmt)
return cell
def tabulate(collection, headers, datetime_fmt='%Y-%m-%d %H:%M:%S', **kwargs):
"""Pretty-print a collection."""
table = [(
format_cell(cell, datetime_fmt=datetime_fmt)
for cell in attrgetter(*headers)(c)
) for c in collection]
return tblte(table, headers=[h.upper() for h in headers], **kwargs)
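# --- Hedged usage sketch (not part of the original module) ---
# Any objects whose attribute names match ``headers`` can be rendered; the
# namedtuple and the values below are made up for illustration.
def _example_tabulate():
    from collections import namedtuple
    Row = namedtuple('Row', ('name', 'created'))
    rows = [
        Row('dataset-a', datetime(2018, 1, 1, 12, 0, 0)),
        Row('dataset-b', datetime(2018, 2, 2, 8, 30, 0)),
    ]
    return tabulate(rows, headers=('name', 'created'))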
```
#### File: renku-python/tests/test_dataset.py
```python
import os
import shutil
import stat
from contextlib import contextmanager
import git
import pytest
import yaml
from renku.models.datasets import Author, Dataset, DatasetFile
def raises(error):
"""Wrapper around pytest.raises to support None."""
if error:
return pytest.raises(error)
else:
@contextmanager
def not_raises():
try:
yield
except Exception as e:
raise e
return not_raises()
@pytest.mark.parametrize(
'scheme, path, error', [('', 'temp', None), ('file://', 'temp', None),
('', 'tempp', git.NoSuchPathError),
('http://', 'example.com/file', None),
('https://', 'example.com/file', None),
('bla://', 'file', NotImplementedError)]
)
def test_data_add(
scheme, path, error, client, data_file, directory_tree, dataset_responses
):
"""Test data import."""
with raises(error):
if path == 'temp':
path = str(data_file)
elif path == 'tempdir':
path = str(directory_tree)
with client.with_dataset('dataset') as d:
d.authors = [{
'name': 'me',
'email': '<EMAIL>',
}]
client.add_data_to_dataset(d, '{}{}'.format(scheme, path))
with open('data/dataset/file') as f:
assert f.read() == '1234'
assert d.files.get('file')
# check that the imported file is read-only
assert not os.access(
'data/dataset/file', stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
)
# assert os.stat('data/dataset/file/metadata.yml')
# check the linking
if scheme in ('', 'file://'):
shutil.rmtree('./data/dataset')
with client.with_dataset('dataset') as d:
d.authors = [{
'name': 'me',
'email': '<EMAIL>',
}]
client.add_data_to_dataset(
d, '{}{}'.format(scheme, path), nocopy=True
)
assert os.path.exists('data/dataset/file')
def test_data_add_recursive(directory_tree, client):
"""Test recursive data imports."""
with client.with_dataset('dataset') as d:
d.authors = [{
'name': 'me',
'email': '<EMAIL>',
}]
client.add_data_to_dataset(d, directory_tree.join('dir2').strpath)
assert 'dir2/file2' in d.files
def dataset_serialization(client, dataset, data_file):
"""Test deserializing a dataset object."""
with open(dataset.path / 'metadata.yml', 'r') as f:
source = yaml.load(f)
d = Dataset.from_jsonld(source)
assert d.path == dataset.path
d_dict = d.to_dict()
assert all([key in d_dict for key in ('name', 'identifier', 'files')])
assert not len(d_dict['files'].values())
client.add_data_to_dataset(d, str(data_file))
d_dict = d.to_dict()
assert len(d_dict['files'].values())
def test_git_repo_import(client, dataset, tmpdir, data_repository):
"""Test an import from a git repository."""
# add data from local repo
client.add_data_to_dataset(
dataset,
os.path.join(os.path.dirname(data_repository.git_dir), 'dir2')
)
assert os.stat('data/dataset/dir2/file2')
assert 'dir2/file2' in dataset.files
assert os.stat('.renku/vendors/local')
# check that the authors are properly parsed from commits
client.add_data_to_dataset(
dataset, os.path.dirname(data_repository.git_dir), target='file'
)
assert len(dataset.files['file'].authors) == 2
assert all(x.name in ('me', 'me2') for x in dataset.files['file'].authors)
@pytest.mark.parametrize(
'authors', [
[Author(name='me', email='<EMAIL>')],
[{
'name': 'me',
'email': '<EMAIL>',
}],
]
)
def test_author_parse(authors, data_file):
"""Test that different options for specifying authors work."""
f = DatasetFile('file', authors=authors)
assert Author(name='me', email='<EMAIL>') in f.authors
# email check
with pytest.raises(ValueError):
Author(name='me', email='<EMAIL>')
# authors must be a set or list of dicts or Author
with pytest.raises(ValueError):
f = DatasetFile('file', authors=['name'])
``` |
{
"source": "JiriKursky/httas",
"score": 2
} |
#### File: custom_components/httas/sensor.py
```python
import logging
import async_timeout
import asyncio
import aiohttp
import time
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.const import (CONF_PASSWORD, CONF_USERNAME, CONF_FRIENDLY_NAME, CONF_IP_ADDRESS, DEVICE_CLASS_POWER,
CONF_SENSORS, CONF_SENSOR_TYPE, CONF_ICON, CONF_SCAN_INTERVAL)
from inspect import currentframe, getframeinfo
from homeassistant.const import TEMP_CELSIUS, CONF_SCAN_INTERVAL
DOMAIN = 'httas'
ENTITY_ID_FORMAT = 'sensor.{}'
_LOGGER = logging.getLogger(__name__)
ASYNC_TIMEOUT = 5 # Timeout for async coroutine
CONF_NOTIFICATION = 'notification'
CMND_STATUS = 'status%208'
CMND_POWER = 'POWER'
CMND_POWER_ON = 'Power%20On'
CMND_POWER_OFF = 'Power%20Off'
S_CMND = "CMND"
S_VALUE = "VALUE"
S_UNIT = "UNIT"
S_ICON = 'ICON'
ST_TEMPERATURE = 'temperature'
ST_CURRENT = 'current'
ST_POWER = 'power'
ST_VOLTAGE = 'voltage'
MAX_LOST = 5 # How many polls can be lost in communication
# definition of type of sensors
SENSORS = {
ST_TEMPERATURE :{
S_CMND: CMND_STATUS,
S_VALUE: ["StatusSNS", "DS18B20","Temperature"] ,
CONF_SCAN_INTERVAL: 30,
S_UNIT: TEMP_CELSIUS,
S_ICON: ''
},
ST_VOLTAGE: {
S_CMND: CMND_STATUS,
S_VALUE: ["StatusSNS","ENERGY", "Voltage"] ,
CONF_SCAN_INTERVAL: 30,
S_UNIT: 'V',
S_ICON: ''
},
ST_CURRENT: {
S_CMND: CMND_STATUS,
S_VALUE: ["StatusSNS", "ENERGY", "Current"] ,
CONF_SCAN_INTERVAL: 5,
S_UNIT: 'A',
S_ICON: 'mdi:current-ac'
},
ST_POWER: {
S_CMND: CMND_STATUS,
S_VALUE: ["StatusSNS", "ENERGY", "Power"] ,
CONF_SCAN_INTERVAL: 5,
S_UNIT: 'W',
S_ICON: 'mdi:power-plug'
}
}
# Validation of the user's configuration
SENSOR_SCHEMA = vol.Schema({
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
vol.Optional(CONF_NOTIFICATION, default = True): cv.boolean,
vol.Required(CONF_SENSOR_TYPE): vol.In(SENSORS.keys())
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_USERNAME, default = ''): cv.string,
vol.Optional(CONF_PASSWORD, default = ''): cv.string,
vol.Optional(CONF_ICON): cv.string,
vol.Optional(CONF_SENSORS, default={}):
cv.schema_with_slug_keys(SENSOR_SCHEMA),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the httas sensors."""
# Assign configuration variables.
# The configuration check takes care they are present.
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
sensors = config.get(CONF_SENSORS)
entities = []
for object_id, pars in sensors.items():
base_url = "http://{}/cm?".format(pars[CONF_IP_ADDRESS])
        # username and password are only used when the device is password-protected
if len(username) > 0:
base_url += '&user='+username
if len(password) > 0:
base_url += '&password='+password
base_url += '&cmnd='
entity = SonoffSensor(hass, object_id, pars.get(CONF_FRIENDLY_NAME), pars.get(CONF_SENSOR_TYPE), base_url, pars)
entities.append(entity)
add_entities(entities)
class SonoffSensor(Entity):
"""Representation of a Sonoff device sensor."""
def __init__(self, hass, object_id, name, sensor_type, base_url, pars):
"""Initialize the sensor."""
self._name = name
self.entity_id = ENTITY_ID_FORMAT.format(object_id+'_'+sensor_type)
self._state = None
self._is_available = False
self._sensor_type = sensor_type
self._base_url = base_url
self._scan_interval = SENSORS[sensor_type][CONF_SCAN_INTERVAL]
self._unit_of_measurement = SENSORS[sensor_type][S_UNIT]
self._cmnd = SENSORS[sensor_type][S_CMND]
self._state = None
icon = pars.get(CONF_ICON)
if icon is None:
icon = SENSORS[sensor_type][S_ICON]
self._icon = icon
self._next_expiration = None
self._ip_address = pars[CONF_IP_ADDRESS]
self._notification = pars.get(CONF_NOTIFICATION)
self._lost = 0
self._lost_informed = False
self._info_state_ok = True # info that everything is ok
def _debug(self, s):
cf = currentframe()
line = cf.f_back.f_lineno
if s is None:
s = ''
_LOGGER.debug("line: {} ip_address: {} msg: {}".format(line, self._ip_address, s))
@property
def should_poll(self):
"""If entity should be polled."""
# Has its own timer for refreshing
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon of the sensor."""
return self._icon
def _to_get(self, cmnd):
return self._base_url + cmnd
async def _do_update(self):
self._debug("update: {}".format(self._cmnd))
websession = async_get_clientsession(self.hass)
value = 0
value_error = False
try:
with async_timeout.timeout(ASYNC_TIMEOUT):
response = await websession.post(self._to_get(self._cmnd))
if response is not None:
try:
value = await response.json()
except:
value = 0
value_error = True
except:
self._debug('except')
self._debug("value: {}".format(value))
if value_error:
self._state = None
scan_interval = 5
self._is_available = False
if self._lost > MAX_LOST:
scan_interval = 59
self._lost = 0
if not self._lost_informed:
if self._notification:
self.hass.components.persistent_notification.create(
"{} ({}) has permanent error.<br/>Please fix device. Scan interval is {} seconds now".format(self._name, self.entity_id, scan_interval),
title=DOMAIN)
self._info_state_ok = False
self._lost_informed = True
else:
self._lost += 1
self.async_schedule_update_ha_state()
self._debug("no success scan interval reduced to {} seconds".format(scan_interval))
self.hass.helpers.event.async_call_later(scan_interval, self._do_update())
return False
self._lost = 0
if not self._info_state_ok:
if self._notification:
self.hass.components.persistent_notification.create(
"{} ({}) is ok. Scan interval is {} seconds now".format(self._name, self.enity_id, self._scan_interval),
title=DOMAIN)
self._info_state_ok = True
value = self._json_key_value(SENSORS[self._sensor_type][S_VALUE], value)
self._state = value
self._is_available = True
self.async_schedule_update_ha_state()
self._debug("Next call in {} seconds".format(self._scan_interval))
self.hass.helpers.event.async_call_later(self._scan_interval, self._do_update())
return True
async def async_added_to_hass(self):
"""Run when entity about to be added."""
await super().async_added_to_hass()
await self._do_update()
self._debug("entity added")
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def available(self):
"""Return True if entity is available."""
return self._is_available
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
def _json_key_value(self, def_array, value):
""" Mapping of returned values. Defined in httas_const.py """
        # This could perhaps be done with a voluptuous Schema, but it is not clear how
try:
if value is None:
return None
for key in def_array:
if key in value.keys():
value = value[key]
else :
return None
return value
except:
return None
``` |
{
"source": "jirin1a/pyrankvote",
"score": 3
} |
#### File: pyrankvote/tests/test_external_irv.py
```python
import unittest
import os
import csv
from operator import itemgetter
import pyrankvote
from pyrankvote import Candidate, Ballot
from pyrankvote.test_helpers import assert_list_almost_equal
TEST_FOLDER = "test_data/external_irv/"
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA_PATH = os.path.join(THIS_DIR, os.pardir, TEST_FOLDER)
def parse_ballots_csv_file(file_name):
file_path = os.path.join(TEST_DATA_PATH, file_name)
with open(file_path) as f:
csv_file_without_header = list(csv.reader(f))[1:]
parsed_csv_file = [(ballot_id, rank, candidate_name) for ballot_id, rank, candidate_name in csv_file_without_header]
#sorted_csv_file = sorted(parsed_csv_file, key=itemgetter(0,1))
sorted_csv_file = parsed_csv_file
candidates = {}
ballots = []
last_ballot_id = 0
ranked_candidates = []
for ballot_id, rank, candidate_name in sorted_csv_file:
if ballot_id != last_ballot_id and last_ballot_id != 0:
ballot = Ballot(ranked_candidates)
ballots.append(ballot)
ranked_candidates = []
last_ballot_id = ballot_id
if candidate_name == "$UNDERVOTE":
continue
if candidate_name == "$OVERVOTE":
continue
if candidate_name in candidates:
candidate = candidates[candidate_name]
else:
candidate = Candidate(name=candidate_name)
candidates[candidate_name] = candidate
ranked_candidates.append(candidate)
ballot = Ballot(ranked_candidates)
ballots.append(ballot)
return list(candidates.values()), ballots
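# Expected CSV layout (sketch inferred from the parser above; the names are
# made-up placeholders):
#
#     ballot_id,rank,candidate_name
#     1,1,Alice
#     1,2,Bob
#     2,1,$UNDERVOTE
#
# Rows sharing a ballot_id are collected into one Ballot, while $UNDERVOTE and
# $OVERVOTE entries are skipped.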
class TestExternalIRV(unittest.TestCase):
def test_us_vt_btv_2009_03_mayor(self):
"""
Burlington 2009 Mayoral Election
Source: https://ranked.vote/us/vt/btv/2009/03/mayor/
Data source: https://s3.amazonaws.com/ranked.vote-reports/us/vt/btv/2009/03/mayor/us_vt_btv_2009_03_mayor.normalized.csv.gz
"""
file_name = "us_vt_btv_2009_03_mayor.normalized.csv"
candidates, ballots = parse_ballots_csv_file(file_name)
election_result = pyrankvote.instant_runoff_voting(candidates, ballots, pick_random_if_blank=False)
last_round = election_result.rounds[-1]
blank_votes = last_round.number_of_blank_votes
correct_blank_votes = 607
self.assertEqual(correct_blank_votes, blank_votes)
number_of_votes = [candidate_result.number_of_votes for candidate_result in last_round.candidate_results]
correct_number_of_votes = [4313, 4060, 0, 0, 0, 0]
assert_list_almost_equal(self, correct_number_of_votes, number_of_votes)
def test_us_me_2018_06_cd02_primary(self):
"""
Test Maine 2018 Congress District 2 Democrat Primary Election
Source: https://ranked.vote/us/me/2018/06/cd02-primary/
Data source: https://s3.amazonaws.com/ranked.vote-reports/us/me/2018/06/cd02-primary/us_me_2018_06_cd02-primary.normalized.csv.gz
"""
file_name = "us_me_2018_06_cd02-primary.normalized.csv"
candidates, ballots = parse_ballots_csv_file(file_name)
election_result = pyrankvote.instant_runoff_voting(candidates, ballots, pick_random_if_blank=False)
last_round = election_result.rounds[-1]
blank_votes = last_round.number_of_blank_votes
correct_blank_votes = 7381
self.assertEqual(correct_blank_votes, blank_votes)
number_of_votes = [candidate_result.number_of_votes for candidate_result in last_round.candidate_results]
correct_number_of_votes = [23611, 19853, 0, 0]
assert_list_almost_equal(self, correct_number_of_votes, number_of_votes)
def test_us_me_2018_11_cd02(self):
"""
Maine 2018 Congress District 2 General Election
Source: https://ranked.vote/us/me/2018/11/cd02/
Data source: https://s3.amazonaws.com/ranked.vote-reports/us/me/2018/11/cd02/us_me_2018_11_cd02.normalized.csv.gz
"""
file_name = "us_me_2018_11_cd02.normalized.csv"
candidates, ballots = parse_ballots_csv_file(file_name)
election_result = pyrankvote.instant_runoff_voting(candidates, ballots, pick_random_if_blank=False)
last_round = election_result.rounds[-1]
blank_votes = last_round.number_of_blank_votes
correct_blank_votes = 14706
self.assertEqual(correct_blank_votes, blank_votes)
number_of_votes = [candidate_result.number_of_votes for candidate_result in last_round.candidate_results]
correct_number_of_votes = [142440, 138931, 0, 0]
assert_list_almost_equal(self, correct_number_of_votes, number_of_votes)
```
#### File: pyrankvote/tests/test_multiple_seat_ranking_methods.py
```python
import unittest
from pyrankvote.test_helpers import assert_list_almost_equal
import pyrankvote
from pyrankvote import Candidate, Ballot
from pyrankvote.helpers import CandidateStatus
class TestPreferentialBlockVoting(unittest.TestCase):
def test_simple_pbv(self):
stay = Candidate("Stay")
soft = Candidate("Soft Brexit")
hard = Candidate("Hard Brexit")
candidates = [stay, soft, hard]
ballots = [
Ballot(ranked_candidates=[soft, stay]),
Ballot(ranked_candidates=[stay, soft]),
Ballot(ranked_candidates=[stay, soft]),
Ballot(ranked_candidates=[hard, soft]),
Ballot(ranked_candidates=[hard, stay, soft]),
]
election_result = pyrankvote.preferential_block_voting(
candidates, ballots, number_of_seats=1
)
winners = election_result.get_winners()
self.assertEqual(1, len(winners), "Function should return a list with one item")
winner = winners[0]
self.assertEqual(stay, winner, "Winner should be Soft")
def test_simple_pbv2(self):
per = Candidate("Per")
paal = Candidate("Pål")
askeladden = Candidate("Askeladden")
candidates = [per, paal, askeladden]
ballots = [
Ballot(ranked_candidates=[askeladden, per]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[paal, per]),
Ballot(ranked_candidates=[paal, per, askeladden]),
]
election_result = pyrankvote.preferential_block_voting(
candidates, ballots, number_of_seats=1
)
winners = election_result.get_winners()
self.assertEqual(1, len(winners), "Function should return a list with one item")
self.assertListEqual([per], winners, "Winners should be Per")
    def test_simple_pbv_two_seats(self):
per = Candidate("Per")
paal = Candidate("Pål")
askeladden = Candidate("Askeladden")
candidates = [per, paal, askeladden]
ballots = [
Ballot(ranked_candidates=[askeladden, per]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[paal, per]),
Ballot(ranked_candidates=[paal, per, askeladden]),
]
election_result = pyrankvote.preferential_block_voting(
candidates, ballots, number_of_seats=2
)
winners = election_result.get_winners()
self.assertEqual(2, len(winners), "Function should return a list with two items")
self.assertIn(per, winners, "Per should be one of the winners")
self.assertIn(paal, winners, "Pål should be one of the winners")
def test_pbv_with_second_selection_if_equal(self):
stay = Candidate("Stay")
soft = Candidate("Soft Brexit")
hard = Candidate("Hard Brexit")
candidates = [stay, soft, hard]
ballots = [
Ballot(ranked_candidates=[stay, soft, hard]),
Ballot(ranked_candidates=[hard, soft, stay]),
Ballot(ranked_candidates=[soft, stay, hard]),
]
election_result = pyrankvote.preferential_block_voting(
candidates, ballots, number_of_seats=1
)
winners = election_result.get_winners()
self.assertEqual(1, len(winners), "Function should return a list with one item")
winner = winners[0]
self.assertEqual(soft, winner, "Winner should be soft")
def test_simple_pbv_with_second_selection_if_equal(self):
stay = Candidate("Stay")
soft = Candidate("Soft Brexit")
hard = Candidate("Hard Brexit")
candidates = [stay, soft, hard]
ballots = [
Ballot(ranked_candidates=[stay, soft, hard]),
Ballot(ranked_candidates=[hard, soft, stay]),
Ballot(ranked_candidates=[soft, stay, hard]),
]
election_result = pyrankvote.preferential_block_voting(
candidates, ballots, number_of_seats=2
)
winners = election_result.get_winners()
self.assertEqual(2, len(winners), "Function should return a list with two items")
self.assertIn(soft, winners, "Soft should be one of the winners")
self.assertIn(stay, winners, "Stay should be one of the winners")
def test_example(self):
popular_moderate = Candidate("William, popular moderate")
moderate2 = Candidate("John, moderate")
moderate3 = Candidate("Charles, moderate")
far_left = Candidate("Thomas, far-left")
candidates = [popular_moderate, moderate2, moderate3, far_left]
ballots = [
Ballot(ranked_candidates=[popular_moderate, moderate2, moderate3, far_left]),
Ballot(ranked_candidates=[popular_moderate, moderate2, moderate3, far_left]),
Ballot(ranked_candidates=[popular_moderate, moderate3, moderate2, far_left]),
Ballot(ranked_candidates=[popular_moderate, moderate3, moderate2, far_left]),
Ballot(ranked_candidates=[moderate2, popular_moderate, moderate3, far_left]),
Ballot(ranked_candidates=[moderate2, popular_moderate, moderate3, far_left]),
Ballot(ranked_candidates=[far_left, popular_moderate, moderate2, moderate3]),
Ballot(ranked_candidates=[far_left, popular_moderate, moderate2, moderate3]),
Ballot(ranked_candidates=[far_left, moderate2, popular_moderate, moderate3]),
Ballot(ranked_candidates=[far_left, moderate2, popular_moderate, moderate3]),
]
election_result = pyrankvote.preferential_block_voting(candidates, ballots, number_of_seats=2)
round_nr = 0
candidates_results_in_round = election_result.rounds[round_nr].candidate_results
ranking_in_round = [candidate_result.candidate for candidate_result in candidates_results_in_round]
correct_ranking_in_round = [popular_moderate, moderate2, far_left, moderate3]
self.assertEqual(4, len(ranking_in_round), "All four candidates should be in the result list")
self.assertListEqual(correct_ranking_in_round, ranking_in_round)
votes_in_round = [candidate_result.number_of_votes for candidate_result in candidates_results_in_round]
correct_votes_in_round = [8, 6, 4, 2]
assert_list_almost_equal(self, correct_votes_in_round, votes_in_round)
status_in_round = [candidate_result.status for candidate_result in candidates_results_in_round]
correct_status_in_round = [CandidateStatus.Elected, CandidateStatus.Elected, CandidateStatus.Rejected, CandidateStatus.Rejected]
self.assertListEqual(correct_status_in_round, status_in_round)
winners = election_result.get_winners()
self.assertEqual(2, len(winners), "Function should return a list with two items")
self.assertIn(popular_moderate, winners, "William should be a winner")
self.assertIn(moderate2, winners, "<NAME> be a winner")
class TestSingleTransferableVote(unittest.TestCase):
def test_simple_stv(self):
stay = Candidate("Stay")
soft = Candidate("Soft Brexit")
hard = Candidate("Hard Brexit")
candidates = [stay, soft, hard]
ballots = [
Ballot(ranked_candidates=[soft, stay]),
Ballot(ranked_candidates=[stay, soft]),
Ballot(ranked_candidates=[stay, soft]),
Ballot(ranked_candidates=[hard, soft]),
Ballot(ranked_candidates=[hard, stay, soft]),
]
election_result = pyrankvote.single_transferable_vote(
candidates, ballots, number_of_seats=1
)
winners = election_result.get_winners()
self.assertEqual(1, len(winners), "Function should return a list with one item")
winner = winners[0]
self.assertEqual(stay, winner, "Winner should be Soft")
def test_simple_stv2(self):
per = Candidate("Per")
paal = Candidate("Pål")
askeladden = Candidate("Askeladden")
candidates = [per, paal, askeladden]
ballots = [
Ballot(ranked_candidates=[askeladden, per]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[paal, per]),
Ballot(ranked_candidates=[paal, per, askeladden]),
]
election_result = pyrankvote.single_transferable_vote(
candidates, ballots, number_of_seats=1
)
winners = election_result.get_winners()
self.assertEqual(1, len(winners), "Function should return a list with one item")
self.assertListEqual([per], winners, "Winners should be Per")
def test_case1_simple(self):
per = Candidate("Per")
paal = Candidate("Pål")
askeladden = Candidate("Askeladden")
candidates = [per, paal, askeladden]
ballots = [
Ballot(ranked_candidates=[askeladden, per]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[paal, per]),
Ballot(ranked_candidates=[paal, per, askeladden]),
]
election_result = pyrankvote.single_transferable_vote(
candidates, ballots, number_of_seats=2
)
winners = election_result.get_winners()
self.assertEqual(2, len(winners), "Function should return a list with two items")
self.assertListEqual([per, paal], winners, "Winners should be Per and Pål")
def test_case2(self):
per = Candidate("Per")
paal = Candidate("Pål")
maria = Candidate("Maria")
ingrid = Candidate("Ingrid")
candidates = [per, paal, maria, ingrid]
        # Quota = 3.33 with 10 votes and 2 seats
ballots = [
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[maria, ingrid]),
Ballot(ranked_candidates=[ingrid, maria]),
Ballot(ranked_candidates=[ingrid, maria]),
]
# 1. round: Per: 7, Ingrid: 2, Maria: 1, Pål: 0
# --> Per is elected and 3.67 votes are transferred to Pål
# Final round: Per: 3.33, Pål: 3.67, Ingrid: 2, Maria: 1
# --> Paal is elected. Since all seats filled, Ingrid and Maria are rejected.
election_result = pyrankvote.single_transferable_vote(
candidates, ballots, number_of_seats=2
)
winners = election_result.get_winners()
self.assertEqual(2, len(winners), "Function should return a list with two items")
self.assertListEqual([per, paal], winners, "Winners should be Per and Pål")
round = 0
votes_round = [candidate_vc.number_of_votes for candidate_vc in election_result.rounds[round].candidate_results]
assert_list_almost_equal(self, votes_round, [7, 2, 1, 0], 0.02)
round = 1
votes_round = [candidate_vc.number_of_votes for candidate_vc in election_result.rounds[round].candidate_results]
assert_list_almost_equal(self, votes_round, [3.33, 3.67, 2, 1], 0.02)
def test_case3(self):
per = Candidate("Per")
paal = Candidate("Pål")
maria = Candidate("Maria")
ingrid = Candidate("Ingrid")
candidates = [per, paal, maria, ingrid]
        # Quota = 4.67 with 11 votes and 2 seats
ballots = [
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[per, paal]),
Ballot(ranked_candidates=[maria, ingrid]),
Ballot(ranked_candidates=[ingrid, maria]),
Ballot(ranked_candidates=[ingrid, maria]),
]
# 1. round: Per: 7, Ingrid: 2, Maria: 1, Pål: 0
        # --> Per is elected and 3.33 votes are transferred to Pål
        # 2. round: Pål: 3.33, Ingrid: 2, Maria: 1
        # --> Maria is excluded and her one vote is transferred to Ingrid
# 3. round: Pål: 3.33, Ingrid: 3
# --> Pål is elected
election_result = pyrankvote.single_transferable_vote(
candidates, ballots, number_of_seats=2
)
winners = election_result.get_winners()
self.assertEqual(2, len(winners), "Function should return a list with two items")
self.assertListEqual([per, paal], winners, "Winners should be Per and Pål")
def test_example(self):
popular_moderate = Candidate("William, popular moderate")
moderate2 = Candidate("John, moderate")
moderate3 = Candidate("Charles, moderate")
far_left = Candidate("Thomas, far-left")
candidates = [popular_moderate, moderate2, moderate3, far_left]
ballots = [
Ballot(ranked_candidates=[popular_moderate, moderate2, moderate3, far_left]),
Ballot(ranked_candidates=[popular_moderate, moderate2, moderate3, far_left]),
Ballot(ranked_candidates=[popular_moderate, moderate3, moderate2, far_left]),
Ballot(ranked_candidates=[popular_moderate, moderate3, moderate2, far_left]),
Ballot(ranked_candidates=[moderate2, popular_moderate, moderate3, far_left]),
Ballot(ranked_candidates=[moderate2, popular_moderate, moderate3, far_left]),
Ballot(ranked_candidates=[far_left, popular_moderate, moderate2, moderate3]),
Ballot(ranked_candidates=[far_left, popular_moderate, moderate2, moderate3]),
Ballot(ranked_candidates=[far_left, moderate2, popular_moderate, moderate3]),
Ballot(ranked_candidates=[far_left, moderate2, popular_moderate, moderate3]),
]
election_result = pyrankvote.single_transferable_vote(candidates, ballots, number_of_seats=2)
round_nr = 0
candidates_results_in_round = election_result.rounds[round_nr].candidate_results
ranking_in_round = [candidate_result.candidate for candidate_result in candidates_results_in_round]
votes_in_round = [candidate_result.number_of_votes for candidate_result in candidates_results_in_round]
        self.assertEqual(4, len(ranking_in_round), "All four candidates should be in the result list.")
self.assertListEqual([popular_moderate, far_left, moderate2, moderate3], ranking_in_round)
assert_list_almost_equal(self, [4, 4, 2, 0], votes_in_round)
self.assertEqual(1, len(election_result.rounds), "Should be only one round")
winners = election_result.get_winners()
self.assertEqual(2, len(winners), "Should be two winners")
self.assertIn(popular_moderate, winners, "William should be a winner")
self.assertIn(far_left, winners, "John should be a winner")
``` |
{
"source": "jiri-novak-cz/lab",
"score": 4
} |
#### File: jiri-novak-cz/lab/factorial.py
```python
FACT_MAX = 10
def log(msg: str, space_cnt: int = 0):
"""
Print log message.
Arguments:
msg - text to print
space_cnt - number of spaces to insert before message
"""
print(f"{' ' * space_cnt}{msg}")
def factorial(n: int) -> int:
"""
Compute factorial recursively.
Arguments:
n - input integer
Returns:
Factorial
"""
n_spaces = 4 * (FACT_MAX - n)
log(f" --> factorial({n}) ...", n_spaces)
result: int = 1 # Used for n == 0 and n == 1
if n > 1:
log(f" result = {n} * factorial({n} - 1)", n_spaces)
result = n * factorial(n - 1) # Recursive call
log(f" <-- factorial(): result={result}", n_spaces)
return result
print(factorial(7))
``` |
{
"source": "jiri-novak-cz/reddit_answers",
"score": 5
} |
#### File: jiri-novak-cz/reddit_answers/date_calculation.py
```python
DAYS_IN_MONTH = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def days_in_month(year: int, month: int) -> int:
add = 0
if month == 2 and year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
add = 1
return DAYS_IN_MONTH[month - 1] + add
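# A few leap-year sanity checks for days_in_month, added only as illustration
# (these follow directly from the Gregorian calendar rules encoded above):
#   days_in_month(2020, 2) == 29   # divisible by 4 but not by 100
#   days_in_month(1900, 2) == 28   # divisible by 100 but not by 400
#   days_in_month(2000, 2) == 29   # divisible by 400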
def compute_date(today: tuple[int, int, int], days: int) -> tuple[int, int, int]:
year, month, day = today
left = days + day - 28 # Get rid of left == 0 situations at the end of the function
day = 28
in_month = days_in_month(year, month)
while left + day > in_month:
left -= in_month
month = month % 12 + 1 # The next month
if month == 1:
year += 1
in_month = days_in_month(year, month) # Number of days in the next month
return (year, month, day + left)
def test_compute_date():
assert compute_date((2021, 8, 14), 3) == (2021, 8, 17) # Not crossing current month
assert compute_date((2021, 8, 14), 21) == (2021, 9, 4) # Next month
assert compute_date((2019, 12, 30), 33) == (2020, 2, 1) # Leap year #1
assert compute_date((2019, 12, 30), 61) == (2020, 2, 29) # Leap year #1
assert compute_date((2019, 12, 30), 427) == (2021, 3, 1) # More than a year incl. a leap year
print("All tests passed OK.")
def main() -> None:
today = input("Enter today's date (ex. 2021-08-14): ")
number_of_days = int(input("Enter number of days to skip ahead: "))
year, month, day = compute_date(map(int, today.split('-')), number_of_days)
print(f"The date after {number_of_days} days(s): {year}-{month:02}-{day:02}")
if __name__ == "__main__":
try:
test_compute_date()
main()
except KeyboardInterrupt:
print("Interrupted by user.")
```
#### File: jiri-novak-cz/reddit_answers/passwd_gen.py
```python
import random
from typing import Union
def generate_password(size: int, pool: list[Union[int, str]]) -> str:
result = ""
i = 0
while i < size:
char = random.choice(pool)
if i == 0 or char != result[i - 1]:
result += str(char)
i += 1
return result
def main() -> None:
pool_digits = list(range(10))
pool_chars = [chr(ord('a') + x) for x in range(26)]
passwd = generate_password(13, pool_digits)
print(f"Password: '{passwd}'")
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("Interrupted by user.")
```
#### File: jiri-novak-cz/reddit_answers/scrape_img.py
```python
from selenium import webdriver
from msedge.selenium_tools import Edge, EdgeOptions
from typing import Generator
URL_SEARCH = "https://www.istockphoto.com/cs/search/2/image?family=creative&phrase="
CLASS_MASK = "MosiacAsset-module__thumb_"
PATH_BROWSER = "resources/msedgedriver.exe"
def init_browser(driver_path: str, headless: bool = True) -> webdriver:
options = EdgeOptions()
options.use_chromium = True
if headless:
options.add_argument('headless')
options.add_argument('disable-gpu')
return Edge(executable_path=driver_path, options=options)
def search_site(phrase: str, search_url: str, driver: webdriver) -> Generator[str, None, None]:
query = phrase.replace(' ', '%20')
url = f"{search_url}{query}"
driver.get(url)
xpath = f"//img[starts-with(@class, '{CLASS_MASK}')]"
try:
for elem in driver.find_elements_by_xpath(xpath):
yield elem.get_attribute("src")
except Exception as e:
print(e)
def main() -> None:
driver = init_browser(PATH_BROWSER)
urls = search_site("lion", URL_SEARCH, driver)
for item in urls:
print(f"Found image URL: {item}")
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("Interrupted by user.")
``` |
{
"source": "jiri-novak-cz/udemy-python",
"score": 4
} |
#### File: jiri-novak-cz/udemy-python/euler.py
```python
def factorial(n):
if n < 0:
raise Exception("Number has to be >= 0")
result = 1
if n > 1:
result = n * factorial(n - 1)
return result
def compute_e(iter_count):
result = 0
for i in range(iter_count):
result += 1 / factorial(i)
return result
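# Note (added for clarity): compute_e sums the first `iter_count` terms of the
# Taylor series e = 1/0! + 1/1! + 1/2! + ..., so for example
# compute_e(4) == 1 + 1 + 1/2 + 1/6 ≈ 2.6667.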
def main():
for n in range(1, 33):
eulers_num = compute_e(iter_count = n)
print("Euler's number: {} / (n = {})".format(eulers_num, n))
main()
```
#### File: jiri-novak-cz/udemy-python/iplocator.py
```python
import urllib.request
import json
URL = "https://geoip-db.com/json"
def locate_ip(ip_address=""):
url_str = URL
if len(ip_address) > 0:
url_str = "{}/{}".format(URL, ip_address)
with urllib.request.urlopen(url_str) as url:
result = json.loads(url.read().decode())
return result
data = locate_ip()
#print(data)
print("IP address: {}".format(data["IPv4"]))
print("Country: {} ({})".format(data["country_name"], data["country_code"]))
print("City: {}".format(data["city"]))
print("Postal code: {}".format(data["postal"]))
print("LAT x LONG: {}, {}".format(data["latitude"], data["longitude"]))
```
#### File: jiri-novak-cz/udemy-python/palindrome.py
```python
def is_palindrome(s: str) -> bool:
result = True
stop = len(s) // 2
for i in range(stop):
result = result and (s[i] == s[-(i + 1)])
return result
p = is_palindrome('racecar')
q = is_palindrome('jelenovipivonelej')
``` |
{
"source": "jiri-one/czech-sort",
"score": 4
} |
#### File: czech-sort/czech_sort/impl.py
```python
from __future__ import unicode_literals
import re
import functools
import unicodedata
try:
import builtins
except ImportError:
# Python 2
import __builtin__ as builtins
def sorted(strings):
"""Return a list of strings sorted using Czech collation
:param strings: iterable of strings (unicode in Python 2)
"""
return builtins.sorted(strings, key=key)
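# Usage sketch (illustrative): Czech collation treats 'ch' as a single letter
# that sorts after 'h', so e.g.
#     sorted(['chata', 'cibule', 'hrad'])  # -> ['cibule', 'hrad', 'chata']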
nfkd = functools.partial(unicodedata.normalize, 'NFKD')
HACEK = nfkd('č')[-1]
def key(string):
"""Return a Czech sort key for the given string
:param string: string (unicode in Python 2)
Comparing the sort keys of two strings will give the result according
to how the strings would compare in Czech collation order, i.e.
``key(s1) < key(s2)`` <=> ``s1`` comes before ``s2``
The structure of the sort key may change in the future.
The only operations guaranteed to work on it are comparisons and equality
checks (<, ==, etc.) against other keys.
"""
# The multi-level key is a nested tuple containing strings and ints.
# The tuple contains sub-keys that roughly correspond to levels in
# UTS #10 (http://unicode.org/reports/tr10/). Except for fallback strings
# at the end, each contains a tuple of typically one key per element/letter.
# - Alphabet:
# Separators (0, p, l, w)
# p: -no. of paragraph separators
# l: -no. of line separators
# w: -no. of word separators (spaces)
# Letters (1, l); l is the base letter, lowercased
# Special letters: 'č' shows up as 'cx'; 'ř' as 'rx', etc.
# the 'ch' digraph becomes 'hx'
# Numbers (2, n); n is int(numeric value * 100)
# Missing for non-letters
# - Diacritics (p, n, s)
# p: position (above, below, behind, in front, in/over/around, unknown)
# (as a sorted tuple of indices)
# s: shape (dot, grave, breve, ..., unknown)
# (as a sorted tuple of indices)
# Missing for non-letters; empty if diacritics included in base (e.g. ř)
# - Case: True for uppercased letters
# Missing for non-letters
# - Punctuation: see PUNCTUATION_MAP below
# - (fallback) NFKD-normalized string
# - (fallback) original string
subkeys = [], [], [], []
add_alphabet = subkeys[0].append
add_diacritic = subkeys[1].append
add_case = subkeys[2].append
add_punctuation = subkeys[3].append
skip = 0
normal = nfkd(string).rstrip()
diacritics = []
for i, char in enumerate(normal):
if skip > 0:
skip -= 1
continue
category = get_category(char)
cat0, cat1 = category
if cat0 == 'L':
# Letter (Lowercase, Modifier, Other, Titlecase, Uppercase)
char_lower = char.lower()
found = False
if char_lower in DECOMPOSING_EXTRAS:
# stuff like Ł doesn't decompose in Unicode; do it manually
char_lower, _extra_diacritics = DECOMPOSING_EXTRAS[char_lower]
diacritics.extend(_extra_diacritics)
for next in normal[i+1:]:
if next == HACEK and char_lower in ('c', 'r', 's', 'z'):
skip += 1
char_lower = char_lower + 'x'
elif char_lower == 'c' and next.lower() == 'h':
skip += 1
char_lower = 'hx'
break
elif next in DIACRITICS_MAP:
skip += 1
diacritics.extend(DIACRITICS_MAP[next])
elif unicodedata.category(char)[0] == 'M':
skip += 1
diacritics.append((POS_UNKNOWN, SH_UNKNOWN))
else:
break
add_alphabet((1, char_lower))
if diacritics:
add_diacritic(make_diacritics_key(diacritics))
else:
add_diacritic(())
add_case(cat1 in ('u', 't')) # upper & title case
add_punctuation((0, ))
diacritics = []
elif cat0 == 'Z':
# Separator (Line, Paragraph, Space)
counts = {'Zp': 0, 'Zl': 0, 'Zs': 0}
counts[category] = 1
for next in normal[i+1:]:
next_cat = get_category(next)
if next_cat[0] == 'Z':
counts[next_cat] += 1
skip += 1
else:
break
add_alphabet((0, -counts['Zp'], -counts['Zl'], -counts['Zs']))
add_diacritic(())
add_case(False)
add_punctuation((0, ))
elif char in DIACRITICS_BEFORE_MAP:
diacritics.extend(DIACRITICS_BEFORE_MAP[char])
elif char in DIACRITICS_MAP:
diacritics.extend(DIACRITICS_MAP[char])
elif char in PUNCTUATION_MAP:
add_punctuation(PUNCTUATION_MAP[char])
elif cat0 == 'P':
# Punctuation (Connector, Dash, Open/Close, Final/Initial Quote, Other)
add_punctuation((3, ))
elif cat0 == 'N':
# Number (Decimal digit, Letter, Other)
add_alphabet((2, int(unicodedata.numeric(char, 0)) * 100))
add_diacritic(())
add_case(False)
add_punctuation((0, ))
elif cat0 == 'S':
# Symbol (Currency, Modifier, Math)
add_punctuation((3, ))
elif cat0 == 'C':
# Other (Control, Format, Not Assigned, Private Use, Surrogate)
pass
elif cat0 == 'M':
# Mark (Spacing Combining, Enclosing, Nonspacing)
# TODO
diacritics.append((POS_FRONT, SH_UNKNOWN))
else:
raise ValueError('Unknown Unicode category')
if diacritics:
add_diacritic(make_diacritics_key(diacritics))
diacritics = []
return tuple(tuple(k) for k in subkeys) + (normal, string)
def make_diacritics_key(diacritics):
positions, shapes = zip(*diacritics)
positions = tuple(builtins.sorted(positions))
shapes = tuple(builtins.sorted(shapes))
return positions, shapes
def get_category(c):
return CATEGORY_CORRECTIONS.get(c, unicodedata.category(c))
# Treat \n as a line separator
CATEGORY_CORRECTIONS = {'\n': 'Zl'}
POS_ABOVE, POS_BELOW, POS_BEHIND, POS_FRONT, POS_IN, POS_UNKNOWN = range(6)
(SH_DOT, SH_ACUTE, SH_HORIZONTAL, SH_VERTICAL, SH_GRAVE, SH_CIRCUMFLEX,
SH_HACEK, SH_TILDE, SH_BREVE, SH_INV_BREVE, SH_HOOK, SH_RING,
SH_UNKNOWN) = range(13)
DIACRITICS_MAP = {
"'": [(POS_BEHIND, SH_VERTICAL)],
'\N{PRIME}': [(POS_BEHIND, SH_VERTICAL)],
'\N{APOSTROPHE}': [(POS_BEHIND, SH_VERTICAL)],
'\N{COMBINING DOT ABOVE}': [(POS_ABOVE, SH_DOT)],
'\N{COMBINING ACUTE ACCENT}': [(POS_ABOVE, SH_ACUTE)],
'\N{COMBINING MACRON}': [(POS_ABOVE, SH_HORIZONTAL)],
'\N{COMBINING GRAVE ACCENT}': [(POS_ABOVE, SH_GRAVE)],
'\N{COMBINING CIRCUMFLEX ACCENT}': [(POS_ABOVE, SH_CIRCUMFLEX)],
'\N{COMBINING CARON}': [(POS_ABOVE, SH_HACEK)],
'\N{COMBINING TILDE}': [(POS_ABOVE, SH_TILDE)],
'\N{COMBINING BREVE}': [(POS_ABOVE, SH_BREVE)],
'\N{COMBINING INVERTED BREVE}': [(POS_ABOVE, SH_INV_BREVE)],
'\N{COMBINING HOOK ABOVE}': [(POS_ABOVE, SH_HOOK)],
'\N{COMBINING RING ABOVE}': [(POS_ABOVE, SH_RING)],
'\N{COMBINING DOUBLE ACUTE ACCENT}': [(POS_ABOVE, SH_ACUTE)] * 2,
'\N{COMBINING DOUBLE GRAVE ACCENT}': [(POS_ABOVE, SH_GRAVE)] * 2,
'\N{COMBINING DIAERESIS}': [(POS_ABOVE, SH_DOT)] * 2,
'\N{COMBINING OGONEK}': [(POS_BELOW, SH_HOOK)],
# XXX: All the others
}
DIACRITICS_BEFORE_MAP = {
"'": [(POS_FRONT, SH_VERTICAL)],
'\N{PRIME}': [(POS_FRONT, SH_VERTICAL)],
'\N{APOSTROPHE}': [(POS_FRONT, SH_VERTICAL)],
'\N{DEGREE SIGN}': [(POS_FRONT, SH_RING)],
}
DECOMPOSING_EXTRAS = {
'ł': ('l', [(POS_IN, SH_GRAVE)]),
'ø': ('o', [(POS_IN, SH_GRAVE)]),
}
PUNCTUATION_MAP = {}
# Punctuation key is (0, ) for non-punctuation.
# For punctuation, it can be:
# Hyphen: (-1, )
PUNCTUATION_MAP['-'] = (-1, )
# Marks: (1, i): .,;?!: quotes –|/\()[]()‹›<>{}
# i: index in the list
for i, c in enumerate('.,;?!:„“”‘’””«»"\'`「」—–|\\()/[]()‹›{}<>'):
PUNCTUATION_MAP[c] = (1, i)
# Symbols: (2, i): @&€£§%‰$
for i, c in enumerate('@&€£§%‰$'):
PUNCTUATION_MAP[c] = (2, i)
# Graphics: (3, a, b, n):
# a: True for curves
# b: True for overlapping strokes
# n: number of strokes
PUNCTUATION_MAP['_'] = (3, False, False, 1)
PUNCTUATION_MAP['='] = (3, False, False, 2)
PUNCTUATION_MAP['^'] = (3, False, False, 2)
PUNCTUATION_MAP['+'] = (3, False, True, 2)
PUNCTUATION_MAP['×'] = (3, False, True, 2)
PUNCTUATION_MAP['*'] = (3, False, True, 3)
PUNCTUATION_MAP['#'] = (3, False, True, 4)
PUNCTUATION_MAP['~'] = (3, True, False, 1)
PUNCTUATION_MAP['≈'] = (3, True, False, 2)
# Unknown: (3,)
# XXX: Geometric shapes: (4, n); n is no. of points
``` |
{
"source": "jiri-one/freezeyt",
"score": 3
} |
#### File: fixtures/app_cleanup_config/app.py
```python
import falcon
freeze_config = {'cleanup': False}
class Resource(object):
"""Creates Resource object for Falcon App"""
def on_get(self, req, resp):
"""Handles GET requests on index (/)"""
resp.text = """
<html>
<head>
<title>Hello world from Falcon app</title>
</head>
<body>
<h3>Hello world! This is Falcon app page link to error 404.</h3>
<a href="/not_found.html">not_found.html</a>
</body>
</html>\n"""
def on_get_error(self, req, resp):
"""Handles GET requests on index (/not_found.html)"""
raise falcon.HTTPNotFound()
# create Falcon App for testing purposes
app = falcon.App(media_type=falcon.MEDIA_HTML)
resource = Resource()
app.add_route('/', resource)
app.add_route('/not_found.html', resource, suffix="error")
```
#### File: fixtures/app_various_errors/app.py
```python
from flask import Flask, url_for
app = Flask(__name__)
app.config['PROPAGATE_EXCEPTIONS'] = True
@app.route('/')
def index():
return f"""
<html>
<head>
<title>Hello world</title>
</head>
<body>
Hello world!
<a href="nowhere">Link to nowhere</a>
<a href="{url_for('value_error')}">Link</a>
<a href="{url_for('type_error')}">Link</a>
<a href="{url_for('type_error2')}">Link</a>
</body>
</html>
"""
@app.route('/value_error')
def value_error():
raise ValueError()
@app.route('/type_error')
def type_error():
raise TypeError()
@app.route('/type_error2')
def type_error2():
raise TypeError()
```
#### File: freezeyt/tests/test_mime_db_mimetype.py
```python
import pytest
import json
from testutil import context_for_test
from freezeyt import freeze
from freezeyt.freezer import convert_mime_db, mime_db_mimetype
CONVERSION_TESTCASES = {
"catch_jpeg": (
{
"application/3gpdash-qoe-report+xml": {
"source": "iana",
"charset": "UTF-8",
"compressible": True
},
"image/jpeg": {
"source": "iana",
"compressible": False,
"extensions": ["jpeg","jpg","jpe"]
}
},
{
"jpeg": ["image/jpeg"],
"jpg": ["image/jpeg"],
"jpe": ["image/jpeg"],
}
),
"catch_many_mimetypes": (
{
"audio/wav": {
"compressible": False,
"extensions": ["wav"]
},
"audio/wave": {
"compressible": False,
"extensions": ["wav"]
},
"audio/webm": {
"source": "apache",
"compressible": False,
"extensions": ["weba"]
},
"application/x-msdos-program": {
"extensions": ["exe"]
},
"application/x-msdownload": {
"source": "apache",
"extensions": ["exe","dll","com","bat","msi"]
},
"application/octet-stream": {
"source": "iana",
"compressible": False,
"extensions": ["bin","exe","dll","msi"]
},
},
{
"wav": ["audio/wav", "audio/wave"],
"weba": ["audio/webm"],
"exe": [
"application/x-msdos-program",
"application/x-msdownload",
"application/octet-stream"
],
"dll": ["application/x-msdownload", "application/octet-stream"],
"com": ["application/x-msdownload"],
"bat": ["application/x-msdownload"],
"msi": ["application/x-msdownload", "application/octet-stream"],
"bin": ["application/octet-stream"]
}
),
"no_catch": (
{
"application/vnd.cybank": {
"source": "iana"
},
"application/vnd.cyclonedx+json": {
"source": "iana",
"compressible": True
},
},
{}
),
"capitals_used": (
{
"auDio/Wav": {
"compressible": False,
"extensions": ["wAv"]
},
"aUdio/waVe": {
"compressible": False,
"extensions": ["waV"]
},
},
{
"wav": ["audio/wav", "audio/wave"]
},
)
}
@pytest.mark.parametrize('testname', CONVERSION_TESTCASES)
def test_mime_db_conversion(testname):
"""Test if the convert process of mime-db structure
to new one is performed correctly.
"""
mime_db, expected = CONVERSION_TESTCASES[testname]
result = convert_mime_db(mime_db)
assert result == expected
def test_modified_mime_db_file(tmp_path):
"""Integration test with modified mime-db, where is purposely
set wrong extensions for MIME image/png to check if our db was used.
"""
MIME_DB_TO_JSON = {
"image/png": {
"source": "iana",
"compressible": False,
"extensions": ["Jpeg","jPg"]
},
'text/html': {
"extensions": ["htmL"]
}
}
builddir = tmp_path / 'build'
db_path = tmp_path / "mime_db.json"
with open(db_path, mode="w") as mime_db:
json.dump(MIME_DB_TO_JSON, mime_db)
with context_for_test('app_wrong_mimetype') as module:
freeze_config = {
'output': str(builddir),
'mime_db_file': str(db_path)
}
freeze(module.app, freeze_config)
assert (builddir / 'index.html').exists()
# 'image.jpg' exists because we linked jpg extension with MIME 'image/png'
assert (builddir / 'image.jpg').exists()
GET_MIME_TYPE_TESTCASES = {
"simple": (
{"wav": ["audio/wav", "audio/wave"]},
"https://example.test/hello.wav",
["audio/wav", "audio/wave"]
),
"capital_file_suffix": (
{"wav": ["audio/wav", "audio/wave"]},
"https://example.test/hello.WAV",
["audio/wav", "audio/wave"]
),
"without_suffix": (
{"wav": ["audio/wav", "audio/wave"]},
"https://example.test/hello",
None
)
}
@pytest.mark.parametrize('testname', GET_MIME_TYPE_TESTCASES)
def test_get_MIME_type_from_suffix(testname):
"""Test the return values of mime_db_mimetype
"""
converted_mime_db, url, expected = GET_MIME_TYPE_TESTCASES[testname]
result = mime_db_mimetype(converted_mime_db, url)
assert result == expected
``` |
{
"source": "jiri-otoupal/py-cross-kit",
"score": 3
} |
#### File: py-cross-kit/tests/test_env_vars.py
```python
import unittest
from pycrosskit.envariables import SysEnv
class TestEnvVars(unittest.TestCase):
def test_set_var(self):
try:
SysEnv.set_var("test", "test")
except:
self.fail("")
def test_get_var(self):
SysEnv.set_var("test", "test")
self.assertEqual(SysEnv.get_var("test"), "test")
def test_get_rm_var(self):
SysEnv.set_var("test", "test")
self.assertEqual(SysEnv.get_var("test", delete=True), "test")
try:
SysEnv.get_var("test")
self.fail()
except Exception:
pass
if __name__ == '__main__':
unittest.main()
```
#### File: py-cross-kit/tests/test_shortcut.py
```python
import os
import unittest
from pycrosskit.shortcuts import Shortcut
class Test_Shortcuts(unittest.TestCase):
def test_create_desktop(self):
try:
sh = Shortcut("Test", "__init__.py", desktop=True)
self.assertEqual(True, os.path.exists(sh.desktop_path))
except:
self.assertEqual(True, False)
def test_delete_desktop(self):
try:
desktop, startmenu = Shortcut.delete("Test", desktop=True)
self.assertEqual(True, not os.path.exists(desktop))
except:
self.assertEqual(True, False)
def test_create_startmenu(self):
try:
sh = Shortcut("Test", "__init__.py", start_menu=True)
self.assertEqual(True, os.path.exists(sh.startmenu_path))
except:
self.assertEqual(True, False)
def test_delete_startmenu(self):
try:
desktop, startmenu = Shortcut.delete("Test", start_menu=True)
self.assertEqual(True, not os.path.exists(startmenu))
except:
self.assertEqual(True, False)
def test_create_both(self):
try:
sh = Shortcut("Test", "__init__.py", desktop=True, start_menu=True)
self.assertEqual(True, os.path.exists(sh.desktop_path))
self.assertEqual(True, os.path.exists(sh.startmenu_path))
except:
self.assertEqual(True, False)
def test_delete_both(self):
try:
desktop, startmenu = Shortcut.delete("Test", desktop=True, start_menu=True)
self.assertEqual(True, not os.path.exists(desktop))
self.assertEqual(True, not os.path.exists(startmenu))
except:
self.assertEqual(True, False)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jirisrba/gtil2-wifi",
"score": 3
} |
#### File: jirisrba/gtil2-wifi/gtil_influx.py
```python
import requests
from requests.auth import HTTPBasicAuth
from influxdb import InfluxDBClient
# GTIL invertor WiFi IP
gtil_ip = '192.168.2.205'
# influx db config
db = InfluxDBClient('192.168.2.4', 8089, use_udp=True, udp_port=8089, database='malina')
measurement = 'gtil2'
gtil_data = dict()
def read_gtil_data():
""" read data from GTIL2 Wifi module """
response = requests.get(
'http://' + gtil_ip + '/status.html',
auth=HTTPBasicAuth('admin', 'admin'))
for line in response.text.splitlines():
# var webdata_sn = "1905100904";
if line.startswith('var webdata'):
key, value = line[12:-1].split("=")
gtil_data[key.strip()] = value.strip().replace('"', '')
# print(gtil_data)
return gtil_data
def send_data_to_influx():
""" send data to influxdb """
json_body = {
"tags": {
"sn": gtil_data['sn'],
},
"points": [{
"measurement": measurement,
"fields": {
"current_power": int(gtil_data['now_p']),
"today_energy": float(gtil_data['today_e']),
"total_energy": float(gtil_data['total_e'])
}
}]
}
db.send_packet(json_body)
def main():
"""Main()"""
read_gtil_data()
send_data_to_influx()
if __name__ == "__main__":
main()
``` |
{
"source": "jirit/poledni-menu",
"score": 2
} |
#### File: poledni-menu/poledni_menu/email_menu.py
```python
import email.mime.multipart
import email.mime.text
import email.utils
import smtplib
import copy
import datetime
import markdown
import yaml
import click
from .isholiday import isholiday
from .digest import generate_digest
from .version import TAGLINE
htmltemplate = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Polední menu</title>
<style>
ul {{
list-style-type: disc;
}}
</style>
</head>
<body>
{body}
</body>
</html>
"""
def htmlize(text):
"""Return HTML version of markdown text"""
htmlized = markdown.markdown(
text,
output_format="xhtml5", safe_mode="escape",
)
htmlversion = htmltemplate.format(body=htmlized)
return htmlversion
def send_html_email(
textpart, htmlpart, subject, recipients,
server="localhost",
sender="Foodmaster <<EMAIL>>",
):
"""
Sends multipart alternative e-mail
"""
msgtpl = email.mime.multipart.MIMEMultipart('alternative')
msgtpl['Subject'] = subject
msgtpl['From'] = email.utils.formataddr(email.utils.parseaddr(sender))
msgtpl['Date'] = email.utils.formatdate(localtime=True)
msgtpl.attach(email.mime.text.MIMEText(textpart))
msgtpl.attach(email.mime.text.MIMEText(htmlpart, 'html'))
with smtplib.SMTP(server) as smtp:
for r in recipients:
msg = copy.deepcopy(msgtpl)
msg['Message-id'] = email.utils.make_msgid('poledni-menu')
msg['To'] = email.utils.formataddr(email.utils.parseaddr(r))
smtp.send_message(msg)
def send_email_digest(config):
dow = (
"pondělí", "úterý", "středu",
"čtvrtek", "pátek", "sobotu", "neděli",
)
mname = (
"ledna", "února", "března", "dubna", "května", "června", "července",
"srpna", "září", "října", "listopadu", "prosince",
)
td = datetime.date.today()
if isholiday(td):
return
subject = (
"\N{Fork and Knife With Plate} Polední nabídka pro"
" {dow} {day}. {mname}"
).format(
dow=dow[td.weekday()],
day=td.day,
mname=mname[td.month-1],
)
menu = config.get("menu", [])
textmenu = "\n".join([
*generate_digest(menu),
TAGLINE,
])
htmlmenu = htmlize(textmenu)
emailargs = config.get("email", {})
send_html_email(textmenu, htmlmenu, subject, **emailargs)
@click.command()
@click.argument("config", type=click.File())
def email_digest(config):
"""
E-mail daily menu digest for a list of places.
"""
send_email_digest(yaml.safe_load(config))
```
#### File: poledni_menu/extractors/nakotlarce.py
```python
import datetime
import locale
from ..utils import parsed_html_doc
def get_url():
return "https://nakotlarce.cz/poledni-menu/"
def get_name():
return "<NAME> Na Kotlářce"
def get_menu():
doc = parsed_html_doc(get_url())
locale.setlocale(locale.LC_TIME, 'cs_CZ.UTF8')
dayname = datetime.date.today().strftime("%A")
daylink = doc.xpath('//h4[text() = "{}"]/parent::a/@href'.format(dayname),)
if len(daylink) < 1:
raise ValueError("Jídelní lístek nenalezen")
daylink = daylink[0][1:]
meals = doc.findall('//*[@id="{}"]//tr'.format(daylink))
for meal in meals:
if len(meal) != 2:
continue
name, price = [x.text_content().strip() for x in meal.getchildren()]
if not price.endswith("Kč"):
continue
yield (name, price)
```
#### File: poledni_menu/extractors/potrefene_husy.py
```python
import datetime
import locale
from ..utils import parsed_html_doc
def get_name():
return "<NAME>"
def get_url(place_id='dejvice'):
return "https://www.potrefene-husy.cz/cz/{}-poledni-menu".format(place_id)
def get_menu():
doc = parsed_html_doc(get_url())
locale.setlocale(locale.LC_TIME, 'cs_CZ.UTF8')
dayname = datetime.date.today().strftime("%A")
rows = doc.xpath(
'//*[@id="content-in"]//h3[starts-with(text(), "{}")]'
'/ancestor::tr/following-sibling::tr'.format(dayname),
)
for meal in rows:
cols = meal.findall("td")
if len(cols) == 3:
num, name, price = cols
else:
return
if num.text_content().strip() == "":
return
if (name is not None) and (price is not None):
mealname = name.text_content().strip()
if mealname:
yield (mealname, price.text_content().strip())
```
#### File: poledni-menu/tests/test_agata.py
```python
import pytest
from poledni_menu.extractors import agata
@pytest.mark.vcr()
def test_get_name():
assert agata.get_name() == "<NAME>"
assert agata.get_name("1") == "<NAME>"
assert agata.get_name("blah") == "<NAME>"
@pytest.mark.vcr()
def test_get_menu():
menu = list(agata.get_menu())
print(menu)
assert len(menu) > 5
```
#### File: poledni-menu/tests/test_budvarka.py
```python
import pytest
from poledni_menu.extractors import budvarka
@pytest.mark.vcr()
def test_get_menu():
menu = list(budvarka.get_menu())
print(menu)
assert len(menu) > 5
```
#### File: poledni-menu/tests/test_generate.py
```python
import pytest
from poledni_menu.generate import generate_menu
@pytest.mark.vcr()
def test_generate_menu():
menu = list(generate_menu("agata"))
assert menu[0].startswith("[Masarykova kolej](")
assert menu[1].startswith("----------------")
assert menu[-1] == ""
assert len(menu) > 10
``` |
{
"source": "JiriValasek/Animate",
"score": 2
} |
#### File: Animate/Animate/Control.py
```python
import FreeCAD
import FreeCADGui
import numpy
import time
import os
import sys
import re
import subprocess
import struct
from PySide2.QtWidgets import QDialogButtonBox, QMessageBox, QTreeView, \
QHBoxLayout, QPushButton
from PySide2.QtCore import Slot, QTimer, QObject
from PySide2.QtCore import Qt
from PySide2.QtGui import QStandardItemModel, QStandardItem
from os import path
## Path to a folder with the necessary icons.
PATH_TO_ICONS = path.join(FreeCAD.getHomePath(), "Mod", "Animate", "Resources",
"Icons")
## Path to a folder with the necessary user interface files.
PATH_TO_UI = path.join(FreeCAD.getHomePath(), "Mod", "Animate", "Resources",
"UIs")
## Format string to format image number inside image name while recording
NAME_NUMBER_FORMAT = "%05d"
## Ancillary private safe-to-copy PNG chunk type code.
FPS_CHUNK_CODE = b'xfPs'
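# Note (added for clarity): in a PNG chunk type code the case of each letter is
# significant - a lowercase first letter marks the chunk as ancillary, a
# lowercase second letter as private, and a lowercase fourth letter as
# safe-to-copy; that is why the custom chunk above is spelled 'xfPs'.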
class ControlPanel(QObject):
"""
    Class providing functionality to a Control panel inside the TaskView.
    This class enables the user to play, pause, rewind, record, export and seek
    through an animation.
Attributes:
btn_abort: A QPushButton to abort exporting a sequence.
btn_confirm: A QPushButton to confirm sequence to export.
control_proxy: A proxy to an associated `Control` class.
form: A QDialog instance show in the TaskView.
image_number: An int number of a next recorded image.
last_clicked: A str showing which button was pressed last.
lyt_export: A QHBoxLayout with a `confirm` and `abort` buttons.
record_prefix: A str prefix for an image file name.
timer: A QTimer for timing animations.
trv_sequences: A QTreeView showing list of recorded sequences.
To create an instance of this class do:
form = FreeCADGui.PySideUic.loadUi(
path.join(PATH_TO_UI, "AnimationControl.ui"))
form.setWindowTitle(title)
panel = ControlPanel(fp, form)
"""
def __init__(self, control_proxy, form):
"""
Initialization method for ControlPanel.
        A class instance is created. A proxy for an associated `Control` is added and
        the control properties are set to read-only so that they cannot change while
        the control panel is open. A form and timer are assigned. The `Pause` button
        is disabled as no animation is playing.
Args:
control_proxy: A proxy to a `Control` so properties can be set read-only.
form: A Qt dialog loaded from a file.
"""
super(ControlPanel, self).__init__()
self.control_proxy = control_proxy
# Disable editing of Control properties
for prop in self.control_proxy.PropertiesList:
self.control_proxy.setEditorMode(prop, 1)
# Add QDialog to be displayed in freeCAD
self.form = form
# Connect callback functions
self.form.btn_play.clicked.connect(self.playClicked)
self.form.btn_pause.clicked.connect(self.pauseClicked)
self.form.btn_rewind.clicked.connect(self.rewindClicked)
self.form.btn_record.clicked.connect(self.recordClicked)
self.form.btn_export.clicked.connect(self.exportClicked)
self.form.sld_seek.valueChanged.connect(self.sliderChanged)
# Create timer for the animations
self.timer = QTimer(self)
# Disable pause button as animation is not running when the panel is
# opened
self.last_clicked = "pause"
self.setInvalidButtons()
def playClicked(self):
"""
Feedback method called when play button was clicked.
Invalid buttons are disabled. Active View's animation is disabled (Necessary).
Slider position is checked for invalid position (at the end) and if position
is plausible, all collisions are reset, current time is extrapolated from
the slider and an animation is played.
"""
# Disable everything except for the pause button
self.last_clicked = "play"
self.setInvalidButtons()
FreeCADGui.ActiveDocument.ActiveView.setAnimationEnabled(False)
# Check that we are not already at the end of an animation range
if self.form.sld_seek.value() == self.form.sld_seek.maximum():
# Show error if we are
QMessageBox.warning(None, 'Error while playing',
"The animation is at the end.")
self.pauseClicked()
else:
# Reset collisions
self.resetCollisions()
# Load current time from the time slider and start playing
t = self.form.sld_seek.value() \
* (self.control_proxy.StopTime
- self.control_proxy.StartTime) / 100 \
+ self.control_proxy.StartTime
self.play(t)
def pauseClicked(self):
"""
Feedback method called when pause button was clicked.
Invalid buttons are disabled in this method and that's it.
"""
# Enable everything except for the pause button
self.last_clicked = "pause"
self.setInvalidButtons()
def rewindClicked(self):
"""
Feedback method called when rewind button was clicked.
Invalid buttons are disabled. Active View's animation is disabled (Necessary).
Slider position is checked for invalid position (at the end) and if position
is plausible, all collisions are reset, current time is extrapolated from
the slider and an animation is played.
"""
# Disable everything except for the pause button
self.last_clicked = "rewind"
self.setInvalidButtons()
FreeCADGui.ActiveDocument.ActiveView.setAnimationEnabled(False)
# Check that we are not already at the start of an animation range
if self.form.sld_seek.value() == self.form.sld_seek.minimum():
# Show error if we are
QMessageBox.warning(None, 'Error while rewinding',
"The animation is at the beginning.")
self.pauseClicked()
else:
# Reset collisions
self.resetCollisions()
# Load current time from the time slider and start rewinding
t = self.form.sld_seek.value() \
* (self.control_proxy.StopTime
- self.control_proxy.StartTime) / 100 \
+ self.control_proxy.StartTime
self.rewind(t)
def recordClicked(self):
"""
Feedback method called when record button was clicked.
        Invalid buttons are disabled. A record prefix is generated. An image number is
set to 0. Active View's animation is disabled (Necessary). Slider position is
checked for invalid position (at the end) and if position is plausible, all
collisions are reset, current time is extrapolated from the slider and an
animation is played/recorded.
"""
# Disable everything except for the pause button
self.last_clicked = "record"
self.setInvalidButtons()
# Create an unique prefix for the image files which will be made
self.record_prefix = "seq" + time.strftime("%Y%m%d%H%M%S") + "-"
# Reset image number for new image sequence
self.image_number = 0
FreeCADGui.ActiveDocument.ActiveView.setAnimationEnabled(False)
# Check that we are not already at the end of an animation range
if self.form.sld_seek.value() == self.form.sld_seek.maximum():
# Show error if we are
QMessageBox.warning(None, 'Error while playing',
"The animation is at the end.")
self.pauseClicked()
# Check that Export Path is valid
elif not os.access(self.control_proxy.ExportPath, os.W_OK | os.R_OK):
# Show error if not
QMessageBox.warning(None, 'Invalid Export Path',
"You don't have access to read and write\n"
+ "in folder specified by Export Path.\n"
+ "Change it to be able to record images.")
self.pauseClicked()
else:
# Reset collisions
self.resetCollisions()
# Load current time from the time slider and start recording
t = self.form.sld_seek.value() \
* (self.control_proxy.StopTime
- self.control_proxy.StartTime) / 100 \
+ self.control_proxy.StartTime
self.record(t)
def exportClicked(self):
"""
Feedback method called when export button was clicked.
Invalid buttons are disabled. An `Export Path` is checked for files. The files
are checked for sequences. Sequences are shown with buttons to confirm or
cancel the selection.
"""
# Check that Export Path is valid
if not os.access(self.control_proxy.ExportPath, os.W_OK | os.R_OK):
# Show error if not
QMessageBox.warning(None, 'Invalid Export Path',
"You don't have access to read and write\n"
+ "in folder specified by Export Path.\n"
+ "Change it to be able to record images.")
self.pauseClicked()
return
# Disable everything
self.last_clicked = "export"
self.setInvalidButtons()
# Try to load file names from an export folder
try:
files = os.listdir(self.control_proxy.ExportPath)
except FileNotFoundError as e:
QMessageBox.warning(None, 'Export Path error', str(e))
return
# Find all recorded sequences between the files
sequences = self.findSequences(files)
if sequences != {}:
# Show them in an export menu
self.showSequences(sequences)
else:
# Show error if none found
QMessageBox.warning(None, 'Export error',
"No sequences to export.")
self.last_clicked = "pause"
self.setInvalidButtons()
def sliderChanged(self):
"""
Feedback method called when slider position is changed.
If slider is enabled (not used to show animation time) and slider position is
changed, time is extrapolated from slider position and animation in that time
is shown.
"""
        # Check if the slider is enabled, i.e. the change is a user input,
# not a visualization of animation progress
if self.form.sld_seek.isEnabled():
# Load current time from the time slider and show it.
t = self.form.sld_seek.value() \
* (self.control_proxy.StopTime
- self.control_proxy.StartTime) / 100 \
+ self.control_proxy.StartTime
self.distributeTime(t)
self.updateCollisions()
self.showChanges()
def setInvalidButtons(self):
"""
        Method to enable/disable buttons according to the `last clicked` button.
        If the `pause` button was pressed last, all other buttons are enabled and
        `pause` itself is disabled. If any other button was pressed, only the
        `pause` button is left enabled.
"""
# Disable invalid buttons with respect to the last clicked button
self.form.btn_play.setEnabled(self.last_clicked == "pause" and
self.last_clicked != "export")
self.form.btn_pause.setEnabled(self.last_clicked != "pause" and
self.last_clicked != "export")
self.form.btn_rewind.setEnabled(self.last_clicked == "pause" and
self.last_clicked != "export")
self.form.btn_record.setEnabled(self.last_clicked == "pause" and
self.last_clicked != "export")
self.form.btn_export.setEnabled(self.last_clicked == "pause" and
self.last_clicked != "export")
self.form.lbl_seek.setEnabled(self.last_clicked == "pause" and
self.last_clicked != "export")
self.form.sld_seek.setEnabled(self.last_clicked == "pause" and
self.last_clicked != "export")
def reject(self):
"""
Feedback method called when Control panel is closing.
        Animation is stopped. Control properties are set to be editable. The dialog is
closed.
"""
        # Stop the animation, if it's running, by clicking the pause button
self.pauseClicked()
# Allow editing of Control properties again
for prop in self.control_proxy.PropertiesList:
self.control_proxy.setEditorMode(prop, 0)
# Delete reference to this panel from the view provider as the panel
# will no longer exist
self.control_proxy.ViewObject.Proxy.panel = None
# Close the dialog
FreeCADGui.Control.closeDialog()
def getStandardButtons(self, *args):
"""
Method to set just one button (close) to close the dialog.
Args:
*args: A tuple of unused arguments from Qt.
"""
return QDialogButtonBox.Close
def isAllowedAlterSelection(self):
"""
Method to tell FreeCAD if dialog is allowed to alter a selection.
Returns:
False - this dialog does not change a selection.
"""
return False
def isAllowedAlterView(self):
"""
Method to tell FreeCAD if dialog is allowed to alter a view.
Returns:
True - this dialog does change a view.
"""
return True
def isAllowedAlterDocument(self):
"""
Method to tell FreeCAD if dialog is allowed to alter a document.
Returns:
True - this dialog does change a document.
"""
return True
@Slot(float, float)
def play(self, t):
"""
Method to show an animation frame at an animation time `t` during playing.
Current clock time is loaded. If the pause button was clicked, an animation is
stopped. Otherwise the animation time `t` is distributed to appropriate
children. If the animation time `t` exceeded `Stop Time`, the animation is
        stopped. Lastly, the next frame time is computed as well as a pause time (to
        stay in step with real time if the computation did not exceed `Step Time`).
        Finally the timer is set to show the next animation frame after the
        precomputed pause.
Args:
t: An animation time to generate an animation frame at.
"""
# Load current time
time_ = time.clock()
# Check pause button was not pressed
if self.last_clicked == "pause":
return
        # Distribute the animation time to trajectories so that they change
# positions of all animated objects
self.distributeTime(t)
self.updateCollisions()
self.showChanges()
# Display current progress on the seek slider
self.form.sld_seek.setValue(
numpy.round(100*(t - self.control_proxy.StartTime)
/ (self.control_proxy.StopTime
- self.control_proxy.StartTime)))
# Stop the animation if the animation time reached a range boundary
if t >= self.control_proxy.StopTime:
self.last_clicked = "pause"
self.setInvalidButtons()
return
# Compute an animation time for the next frame
next_t = min(t + self.control_proxy.StepTime,
self.control_proxy.StopTime)
        # Compute pause period so that animation time roughly corresponds to
# the real time
pause = round(1000*(self.control_proxy.StepTime + time_
- time.clock()))
pause = pause*(pause > 0)
        # Set up a timer to show the next frame if the animation wasn't paused
if self.last_clicked != "pause":
self.timer.singleShot(pause, lambda: self.play(next_t))
@Slot(float, float)
def rewind(self, t):
"""
Method to show an animation frame at an animation time `t` during rewind.
Current clock time is loaded. If the pause button was clicked, an animation is
stopped. Otherwise the animation time `t` is distributed to appropriate
        children. If the animation time `t` reached `Start Time`, the animation is
        stopped. Lastly, the next frame time is computed as well as a pause time (to
        stay in step with real time if the computation did not exceed `Step Time`).
        Finally the timer is set to show the next animation frame after the
        precomputed pause.
Args:
t: An animation time to generate an animation frame at.
"""
# Load current time
time_ = time.clock()
# Check pause button was not pressed
if self.last_clicked == "pause":
return
        # Distribute the animation time to trajectories so that they change
# positions of all animated objects
self.distributeTime(t)
self.updateCollisions()
self.showChanges()
# Display current progress on the seek slider
self.form.sld_seek.setValue(
numpy.round(100*(t - self.control_proxy.StartTime)
/ (self.control_proxy.StopTime
- self.control_proxy.StartTime)))
# Stop the animation if the animation time reached a range boundary
if t <= self.control_proxy.StartTime:
self.last_clicked = "pause"
self.setInvalidButtons()
return
# Compute an animation time for the next frame
next_t = max(t - self.control_proxy.StepTime,
self.control_proxy.StartTime)
        # Compute pause period so that animation time roughly corresponds to
# the real time
pause = round(1000*(self.control_proxy.StepTime + time_
- time.clock()))
pause = pause*(pause > 0)
        # Set up a timer to show the next frame if the animation wasn't paused
if self.last_clicked != "pause":
self.timer.singleShot(pause, lambda: self.rewind(next_t))
@Slot(float, float)
def record(self, t):
"""
Method to show and save an animation frame at an animation time `t`.
        If the pause button was clicked, an animation is stopped. Otherwise the
        animation time `t` is distributed to appropriate children and the current
        frame is saved as an image. If the animation time `t` reached `Stop Time`,
        the animation is stopped. Lastly, the next frame time is computed. Finally
        the timer is set to show the next animation frame as soon as possible.
Args:
t: An animation time to generate an animation frame at.
"""
# Check pause button was not pressed
if self.last_clicked == "pause":
return
        # Distribute the animation time to trajectories so that they change
# positions of all animated objects, save the image
self.distributeTime(t)
self.updateCollisions()
# Show changes and save view
self.showChanges()
self.saveImage()
# Display current progress on the seek slider
self.form.sld_seek.setValue(
numpy.round(100*(t - self.control_proxy.StartTime)
/ (self.control_proxy.StopTime
- self.control_proxy.StartTime)))
# Stop the animation if the animation time reached a range boundary
if t >= self.control_proxy.StopTime:
self.last_clicked = "pause"
self.setInvalidButtons()
return
# Compute an animation time for the next frame
next_t = min(t + self.control_proxy.StepTime,
self.control_proxy.StopTime)
        # Set up a timer to show the next frame if the animation wasn't paused
if self.last_clicked != "pause":
self.timer.singleShot(0, lambda: self.record(next_t))
def distributeTime(self, t):
"""
Method to distribute a time `t` to children Trajectories.
List of children is loaded. If a child is `Trajectory`, the time is set to it
and its children are added to the list.
Args:
t: A time to distribute to all child `Trajectories`.
"""
# Load list of objects inside Control group
objects = self.control_proxy.Group
# Go through them, their children and update time,
# if they are Trajectories
while len(objects) > 0:
obj = objects.pop(0)
if obj.Proxy.__class__.__name__ == "TrajectoryProxy" or \
obj.Proxy.__class__.__name__ == "RobRotationProxy" or \
obj.Proxy.__class__.__name__ == "RobTranslationProxy":
obj.Time = t
objects += obj.Group
elif obj.Proxy.__class__.__name__ == "RobWorldProxy":
objects += obj.Group
def updateCollisions(self):
"""
Method to update collisions from CollisionDetector children.
List of children is loaded. If a child is `CollisionDetector`, it's touched so
that it's recomputed.
"""
# Load list of objects inside Control group
objects = self.control_proxy.Group
# if they are CollisionDetectors, then check for collisions
while len(objects) > 0:
obj = objects.pop(0)
if obj.Proxy.__class__.__name__ == "CollisionDetectorProxy":
obj.touch()
def resetCollisions(self):
"""
Method to reset collisions from CollisionDetector children.
List of children is loaded. If a child is `CollisionDetector`, it's reset.
"""
# Load list of objects inside Control group
objects = self.control_proxy.Group
# if they are CollisionDetectors, then check for collisions
while len(objects) > 0:
obj = objects.pop(0)
if obj.Proxy.__class__.__name__ == "CollisionDetectorProxy":
obj.Proxy.reset()
def showChanges(self):
"""
Method to show changes made to objects, collisions.
This method is necessary to call after `distributeTime`, `updateCollisions` and
`resetCollisions`.
"""
FreeCAD.ActiveDocument.recompute()
FreeCADGui.updateGui()
def saveImage(self):
"""
Method to save current view as a PNG image.
An image name is pieced together from `record prefix` and `image number`.
        Then an image path is constructed. Animation is disabled (obligatory) and the
        current view is saved as an image. Afterwards, if saving the first image (image
number 0), a chunk with a framerate corresponding to a step size is added.
Finally the image number is incremented.
"""
# Prepare complete path to an image
name = self.record_prefix + (NAME_NUMBER_FORMAT % self.image_number) \
+ ".png"
image_path = path.join(self.control_proxy.ExportPath, name)
# Export image and increase image number
FreeCADGui.ActiveDocument.ActiveView.setAnimationEnabled(False)
FreeCADGui.ActiveDocument.ActiveView.saveImage(
image_path,
self.control_proxy.VideoWidth, self.control_proxy.VideoHeight)
# Write a framerate chunk into the first image
if self.image_number == 0:
if not self.writeFramerateChunk(1 / self.control_proxy.StepTime,
image_path):
QMessageBox.warning(
None, 'Saving framerate failed',
"Framerate was not saved, this recorded image\n"
+ "sequence will have to be exported using\n"
+ "current Step Time to compute framerate.\n"
+ "Check Report View for more info.")
self.image_number += 1
def findSequences(self, files):
"""
Method to find sequences between files.
Files are scanned for sequences, the valid sequences are recognized and number
of frames is counted.
Args:
files: A list of string file names.
Returns:
A dict with sequence names and numbers of frames.
"""
# Check there are any files
if len(files) == 0:
return {}
# Go through the files
sequences = {}
for f in files:
# Check they fit the name pattern
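            # e.g. a file like "seq20190101120000-000.png" matches: group(1) is
            # the sequence name "seq20190101120000", group(2) the frame number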
img_name = re.search(r"(seq\d+)-(\d+)(?=\.png)", f)
if img_name is not None:
# Add new sequences
if img_name.group(1) not in list(sequences.keys()):
# Add sequence if it's starting with 0
if int(img_name.group(2)) == 0:
sequences[img_name.group(1)] = 1
last_frame = int(img_name.group(2))
# Compute number of successive frames
elif int(img_name.group(2)) == (last_frame + 1):
sequences[img_name.group(1)] += 1
last_frame += 1
# Remove sequence if a frame is missing
else:
sequences.pop(img_name.group(1))
# Leave sequences longer than 1 frame
sequences = {key: val for key, val in sequences.items() if val > 1}
return sequences
def showSequences(self, sequences):
"""
Method to show sequences to export on a dialog panel.
Sequences and frame numbers are shown in a QTreeView, and buttons `'Confirm'`
and `'Abort'` are attached under it. All of this is put under the Export button
on the dialog panel.
Args:
sequences: A dict with sequence names and numbers of frames.
"""
# Add names to columns
NAME, N_FRAMES = range(2)
# Create a tree view and set it up
self.trv_sequences = QTreeView()
self.trv_sequences.setRootIsDecorated(False)
self.trv_sequences.setAlternatingRowColors(True)
self.trv_sequences.setToolTip("Select a sequence to export.")
self.trv_sequences.setSizeAdjustPolicy(
self.trv_sequences.AdjustToContents)
self.trv_sequences.setSizePolicy(
self.trv_sequences.sizePolicy().Ignored,
self.trv_sequences.sizePolicy().Minimum)
self.trv_sequences.header().setResizeMode(
self.trv_sequences.header().Fixed)
self.trv_sequences.header().setDefaultSectionSize(120)
self.trv_sequences.setSelectionMode(self.trv_sequences.SingleSelection)
# Prepare a table
model = QStandardItemModel(0, 2, self.trv_sequences)
# Prepare a header
hdr_name = QStandardItem("Sequence Name")
model.setHorizontalHeaderItem(NAME, hdr_name)
hdr_frames = QStandardItem("# of frames")
hdr_frames.setTextAlignment(Qt.AlignmentFlag.AlignRight)
model.setHorizontalHeaderItem(N_FRAMES, hdr_frames)
# Add data to the table
for name, frames in sequences.items():
itm_name = QStandardItem(name)
itm_name.setSelectable(True)
itm_name.setEditable(False)
itm_frames = QStandardItem(str(frames))
itm_frames.setSelectable(True)
itm_frames.setEditable(False)
itm_frames.setTextAlignment(Qt.AlignmentFlag.AlignRight)
model.appendRow((itm_name, itm_frames))
# Add the table to the tree view
self.trv_sequences.setModel(model)
# Add the tree view to the panel under the EXPORT button
self.form.lyt_main.insertWidget(5, self.trv_sequences)
# Make column with the numbers of frames smaller
self.trv_sequences.setColumnWidth(1, 80)
# Select the first item
self.trv_sequences.setCurrentIndex(model.index(0, 0))
# Add horizontal layout under the tree view
self.lyt_export = QHBoxLayout()
self.form.lyt_main.insertLayout(6, self.lyt_export)
# Add buttons for confirmation of a selected sequence and
# export abortion
self.btn_confirm = QPushButton("Confirm")
self.btn_confirm.setStyleSheet(
"""
QPushButton {
background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 #0B0, stop: 1.0 #0D0);
font-weight: bold;
}
QPushButton:hover {border-color: #0D0;}
QPushButton:focus {
background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 #0C0, stop: 1.0 #0F0);
border-color: #0E0; color: #FFF;
}
QPushButton:pressed {
background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 #0F0, stop: 1.0 #0C0);
}""")
self.btn_confirm.clicked.connect(self.exportConfirmed)
self.btn_abort = QPushButton("Abort")
self.btn_abort.setStyleSheet(
"""
QPushButton {
background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 #B00, stop: 1.0 #D00);
font-weight: bold;
}
QPushButton:hover {border-color: #D00;}
QPushButton:focus {
background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 #C00, stop: 1.0 #F00);
border-color: #E00; color: #FFF;
}
QPushButton:pressed {
background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 #F00, stop: 1.0 #C00);
}""")
self.btn_abort.clicked.connect(self.exportAborted)
self.lyt_export.addWidget(self.btn_confirm)
self.lyt_export.addWidget(self.btn_abort)
# Create a function to disable deselection
def mySelectionChanged(selected, deselected):
if selected.isEmpty() and not deselected.isEmpty():
self.trv_sequences.selectionModel().select(
deselected.first().indexes()[0],
self.trv_sequences.selectionModel().Select
| self.trv_sequences.selectionModel().Rows)
# Connect the function as a slot for signal emitted when selection is
# changed
self.trv_sequences.selectionModel().selectionChanged.connect(
mySelectionChanged)
def exportConfirmed(self):
"""
Feedback method called when confirm button was clicked.
Buttons are disabled, framerate is loaded from the first image chunks,
selected sequence name is used to create an `image name` template and
a `video name` which can be used in a FFMPEG command. Such a command
is executed to convert the video, if FFMPEG is installed.
Otherwise warnings are shown.
"""
# Disable export and confirm buttons
self.btn_confirm.setEnabled(False)
self.btn_abort.setEnabled(False)
# Prepare arguments for ffmpeg conversion
selected_seq = \
self.trv_sequences.selectionModel().selectedRows()[0].data()
# Load framerate
image_name = selected_seq + "-" + (NAME_NUMBER_FORMAT % 0) + ".png"
image_path = path.join(self.control_proxy.ExportPath, image_name)
# load fps from the first image
fps = self.readFramerateChunk(image_path)
if fps == -1.0:
fps = 1 / self.control_proxy.StepTime
QMessageBox.warning(
None, 'Loading framerate failed',
"Framerate was not loaded, this recorded image\n"
+ "sequence will be exported using current\n"
+ "Step Time: FPS = 1/(Step Time) = "
+ str(fps) + ".")
image_name = '"' + path.normpath(
path.join(self.control_proxy.ExportPath, selected_seq + "-"
+ NAME_NUMBER_FORMAT + ".png")) + '"'
video_name = '"' + path.normpath(
path.join(self.control_proxy.ExportPath,
selected_seq + ".mp4")) + '"'
# Prepare an ffmpeg command
export_command = 'ffmpeg -r ' + str(fps) + ' -i ' + image_name \
+ ' -c:v libx264 -pix_fmt yuv420p ' + video_name
# Try to run the command
        try:
            return_val = subprocess.call(export_command)
        except OSError as e:
            if e.errno == os.errno.ENOENT:
                QMessageBox.warning(None, 'FFMPEG not available',
                                    "FFMPEG is necessary to export video.\n"
                                    + "Please install it")
            else:
                QMessageBox.warning(None, 'Something failed', str(e))
            # There is no return value to evaluate, so close the export
            # subform and bail out early
            self.closeExportSubform()
            return
if return_val == 0:
QMessageBox.information(None, 'Export successful!',
"FFMPEG successfully converted image "
+ "sequence into a video.")
else:
            QMessageBox.warning(None, 'FFMPEG unsuccessful',
                                "FFMPEG failed to convert the sequence into "
                                + "a video.")
# Close the export subform
self.closeExportSubform()
def exportAborted(self):
"""
Feedback method called when abort button was clicked.
The part of the dialog panel used for video exporting is closed.
"""
# Close the export subform
self.closeExportSubform()
def closeExportSubform(self):
"""
Method used to close the part of the dialog panel used for video exporting.
        The QTreeView with sequence names and their numbers of frames is closed.
Then `'Confirm'` and `'Abort'` buttons are removed and the rest of buttons
is returned to the default state (the same as if pause button was pressed).
"""
# Close all parts of export subform and remove them from the panel
self.trv_sequences.close()
self.form.lyt_main.removeWidget(self.trv_sequences)
self.btn_abort.close()
self.lyt_export.removeWidget(self.btn_abort)
self.btn_confirm.close()
self.lyt_export.removeWidget(self.btn_confirm)
self.form.lyt_main.removeItem(self.lyt_export)
self.last_clicked = "pause"
self.setInvalidButtons()
def installPyPNGNotice(self):
"""
Method telling user that pyPNG library ought to be installed into FreeCAD.
The pyPNG library is not part of FreeCAD and so we need to add it using pip.
        This method tells the user to do so.
"""
QMessageBox.information(
None, "Install PyPNG", "PyPNG is missing from your FreeCAD\n"
+ "Please follow these instructions to install it:\n\n"
+ "Windows:\n"
+ " 1) Open a command line window with admin privileges\n"
+ ' Press "Win + X" and "A"\n\n'
+ " 2) Go to the bin folder in your FreeCAD installation\n"
+ ' Type "CD ' + FreeCAD.getHomePath() + 'bin"\n\n'
+ " 3) Install PyPNG\n"
+ ' Type "python.exe -m pip install pyPNG"\n\n\n'
+ "Ubuntu (installed using PPA):\n"
+ " 1) Open a terminal window\n\n"
+ " 2) Install PyPNG\n"
            + '        Type "sudo python3 -m pip install pyPNG"\n')
# Alternative way to install it directly from FreeCAD
# import pip
# if hasattr(pip, "main"):
# FreeCAD.Console.PrintLog("Installing pyPNG.\n")
# if pip.main(["install", "pyPNG"]) != 0:
# FreeCAD.Console.PrintError("pyPNG installation failed.\n")
# FreeCAD.Console.PrintLog("Installation successful.\n")
# else:
# import pip._internal
# if hasattr(pip._internal, "main"):
# if pip._internal.main(["install", "pyPNG"]) != 0:
# FreeCAD.Console.PrintError("pyPNG installation failed.\n")
# FreeCAD.Console.PrintLog("Installation successful.\n")
# else:
# FreeCAD.Console.PrintLog(
# "Unable to import and install pyPNG.\n")
def writeFramerateChunk(self, framerate, image_path):
"""
Method to write a framerate into a PNG image as one of its chunks.
This method tries to import pyPNG first. Then it tries to install it and import
again. If either import is successful, all chunks currently in the PNG image at
an `image_path` are extracted. The framerate chunk is added as the second
chunk, right behind IHDR. Finally the image is rewritten with new
list of chunks.
Args:
framerate: A float specifying the framerate to be written into the image.
image_path: A str containing a path to an image about to be augmented.
"""
# import or install pyPNG
try:
import png
except ModuleNotFoundError:
self.installPyPNGNotice()
return False
        except Exception as e:
            FreeCAD.Console.PrintError(
                "Unexpected error occurred while importing pyPNG - " + str(e))
            # Without pyPNG the chunk cannot be written
            return False
# Read chunks already present in a PNG image
reader = png.Reader(filename=image_path)
chunks = list(reader.chunks())
# Insert custom framerate chunk
chunks.insert(1, (FPS_CHUNK_CODE, struct.pack("f", framerate)))
# Write it into the image
with open(image_path, 'wb') as image_file:
png.write_chunks(image_file, chunks)
return True
def readFramerateChunk(self, image_path):
"""
Method to read a framerate inserted as one of a PNG image's chunks.
        This method tries to import pyPNG first. If the import fails, instructions
        to install pyPNG are shown. Otherwise, all chunks currently in the PNG image at
an `image_path` are extracted. The framerate chunk ought to be stored as
the second chunk, right behind IHDR. If the chunk's code type matches,
its value is returned.
Args:
image_path: A str containing a path to an image with the framerate chunk.
Returns:
A float signifying framerate, or -1.0 if something failed.
"""
# import or install pyPNG
try:
import png
except ModuleNotFoundError:
self.installPyPNGNotice()
return -1.0
# Read chunks already present in a PNG image
reader = png.Reader(filename=image_path)
chunks = list(reader.chunks())
if chunks[1][0] == FPS_CHUNK_CODE:
return struct.unpack("f", chunks[1][1])[0]
else:
FreeCAD.Console.PrintError("Unable to unpack a framerate.\n")
return -1.0
class ControlProxy:
"""
Proxy class for a `DocumentObjectGroupPython` Control instance.
A ControlProxy instance adds properties to a `DocumentObjectGroupPython`
Control instance and responds to their changes. It provides a control panel
to control animations.
To access such a dialog double-click Control in Tree View or right click and
select *Show control panel* option from a context menu.
Attributes:
updated: A bool - True if a property was changed by a class and not user.
temporary_export_path: A str path to an export folder.
To connect this `Proxy` object to a `DocumentObjectGroupPython` Control do:
a = FreeCAD.ActiveDocument.addObject("App::DocumentObjectGroupPython",
"Control")
ControlProxy(a)
"""
updated = False
def __init__(self, fp):
"""
Initialization method for ControlProxy.
A class instance is created and made a `Proxy` for a generic
`DocumentObjectGroupPython` Control object. During initialization number of
properties are specified and preset.
Args:
fp: A barebone `DocumentObjectGroupPython` Control object to be extended.
"""
self.setProperties(fp)
fp.Proxy = self
def onDocumentRestored(self, fp):
"""
Method called when document is restored to make sure everything is as it was.
Reinitialization method - it creates properties and sets them to default, if
they were not restored automatically. Properties of connected `ViewObject` are
also recreated and reset if necessary.
Args:
fp : A restored `DocumentObjectGroupPython` Control object.
"""
fp.ViewObject.Proxy.setProperties(fp.ViewObject)
self.setProperties(fp)
def onBeforeChange(self, fp, prop):
"""
Method called before `DocumentObjectGroupPython` Control is changed.
An old export path is stored for a case in which a new export path is not
a valid path.
Args:
fp : A `DocumentObjectGroupPython` Control object.
prop: A str name of a property about to change.
"""
# Save an export path before it's changed to restore it if new
# path is invalid
if prop == "ExportPath" and hasattr(fp, "ExportPath") and \
not self.updated:
self.temporary_export_path = fp.ExportPath
def onChanged(self, fp, prop):
"""
Method called after `DocumentObjectGroupPython` Control was changed.
Values of changed properties (start time, step time, stop time, export path)
are checked for validity and edited if they are not.
Args:
fp : A `DocumentObjectGroupPython` Control object.
prop: A str name of a changed property.
"""
# Don't do anything if a value was updated because another property
# had changed
if self.updated:
self.updated = False
return
# Control animation range so that step size is less than range size
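        # Constrained float properties take a (value, minimum, maximum, step)
        # tuple, which is how the ranges below are re-imposed on each assignment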
elif prop == "StartTime" and hasattr(fp, "StopTime") and \
hasattr(fp, "StepTime"):
self.updated = True
fp.StopTime = (fp.StopTime, fp.StartTime + fp.StepTime,
float("inf"), 0.5)
self.updated = True
fp.StepTime = (fp.StepTime, 0.01, fp.StopTime - fp.StartTime, 0.1)
elif prop == "StepTime" and hasattr(fp, "StartTime") and \
hasattr(fp, "StopTime"):
self.updated = True
fp.StopTime = (fp.StopTime, fp.StartTime + fp.StepTime,
float("inf"), 0.5)
self.updated = True
fp.StartTime = (fp.StartTime, -float("inf"),
fp.StopTime - fp.StepTime, 0.5)
elif prop == "StopTime" and hasattr(fp, "StartTime") and \
hasattr(fp, "StepTime"):
self.updated = True
fp.StartTime = (fp.StartTime, -float("inf"),
fp.StopTime - fp.StepTime, 0.5)
self.updated = True
fp.StepTime = (fp.StepTime, 0.01, fp.StopTime - fp.StartTime, 0.1)
# Return to previous export path if the new one is invalid
elif prop == "ExportPath":
# Test access right in the folder an show warning if they are not
# sufficient
if not os.access(fp.ExportPath, os.W_OK | os.R_OK):
QMessageBox.warning(None, 'Error while setting Export Path',
"You don't have access to read and write "
+ "in this folder.")
self.updated = True
fp.ExportPath = self.temporary_export_path
del self.temporary_export_path
def setProperties(self, fp):
"""
Method to set properties during initialization or document restoration.
The properties are set if they are not already present and an
`AnimateDocumentObserver` is recreated.
Args:
fp : A restored or barebone `DocumentObjectGroupPython` Control object.
"""
# Add (and preset) properties
if not hasattr(fp, "StartTime"):
fp.addProperty(
"App::PropertyFloatConstraint", "StartTime", "Timing",
"Animation start time. \nRange is "
"< - inf | Stop Time - Step Time >."
).StartTime = (0, -float("inf"), 9.5, 0.5)
elif hasattr(fp, "StepTime") and hasattr(fp, "StopTime"):
fp.StartTime = (fp.StartTime, -float("inf"),
fp.StopTime - fp.StepTime, 0.5)
if not hasattr(fp, "StepTime"):
fp.addProperty(
"App::PropertyFloatConstraint", "StepTime", "Timing",
"Animation step time. \nRange is "
"< 0.01 | Stop Time - Start Time >."
).StepTime = (0.5, 0.01, 10, 0.1)
elif hasattr(fp, "StartTime") and hasattr(fp, "StopTime"):
fp.StepTime = (fp.StepTime, 0.01, fp.StopTime - fp.StartTime, 0.1)
if not hasattr(fp, "StopTime"):
fp.addProperty(
"App::PropertyFloatConstraint", "StopTime", "Timing",
"Animation stop time. \nRange is "
+ "< Start Time + Step Time | inf >."
).StopTime = (10, 0.5, float("inf"), 0.5)
elif hasattr(fp, "StartTime") and hasattr(fp, "StepTime"):
fp.StopTime = (fp.StopTime, fp.StartTime + fp.StepTime,
float("inf"), 0.5)
if not hasattr(fp, "ExportPath"):
fp.addProperty(
"App::PropertyPath", "ExportPath", "Record & Export",
"Path to a folder, where recorded rendered images will be "
"saved to be converted into a video.")
if not hasattr(fp, "VideoWidth"):
fp.addProperty(
"App::PropertyIntegerConstraint", "VideoWidth",
"Record & Export", "Width of the exported video in pixels.\n"
+ "Range is < 32 | 7680 >.").VideoWidth = (1280, 32, 7680, 10)
else:
fp.VideoWidth = (fp.VideoWidth, 32, 7680, 10)
if not hasattr(fp, "VideoHeight"):
fp.addProperty(
"App::PropertyIntegerConstraint", "VideoHeight",
"Record & Export", "Height of the exported video in pixels.\n"
+ "Range is < 32 | 4320 >.").VideoHeight = (720, 32, 4320, 10)
else:
fp.VideoHeight = (fp.VideoHeight, 32, 4320, 10)
        # Add a document observer to control the structure
import AnimateDocumentObserver
AnimateDocumentObserver.addObserver()
class ViewProviderControlProxy:
"""
Proxy class for `Gui.ViewProviderDocumentObject` Control.ViewObject.
A ViewProviderControlProxy instance provides a Control's icon, double-click
response and context menu with "Show control panel".
Attributes:
fp: A Control object.
panel: A ControlPanel if one is active or None.
To connect this `Proxy` object to a `Gui.ViewProviderDocumentObject`
Control.ViewObject do:
a = FreeCAD.ActiveDocument.addObject("App::DocumentObjectGroupPython",
"Control")
ViewProviderControlProxy(a.ViewObject)
"""
panel = None
fp = None
def __init__(self, vp):
"""
Initialization method for ViewProviderControlProxy.
A class instance is created and made a `Proxy` for a generic
`Gui.ViewProviderDocumentObject` Control.ViewObject. During initialization
number of properties are specified and preset.
Args:
vp: A barebone `Gui.ViewProviderDocumentObject` Control.ViewObject.
"""
self.setProperties(vp)
vp.Proxy = self
def attach(self, vp):
"""
Method called by FreeCAD after initialization.
This method adds Control as the `fp` attribute.
Args:
vp: A Control.ViewObject after initialization.
"""
# Add feature python as it's necessary to claimChildren
self.fp = vp.Object
def claimChildren(self):
"""
Method called by FreeCAD to retrieve assigned children.
When a property of a Control is touched the Control and the FreeCAD
ActiveDocument are notified. The FreeCAD ActiveDocument then emits a signal
to inform all its observers e.g. the FreeCADGui ActiveDocument. The FreeCADGui
document then emits a new signal to inform e.g. the tree view. The tree view
then invokes `claimChildren()`.
"""
if hasattr(self, "fp"):
if self.fp:
return self.fp.Group
return []
def canDropObject(self, obj):
"""
Method called by FreeCAD to ask if an object `obj` can be dropped into a Group.
FreeCAD objects of a Server, Trajectory and CollisionDetector type are allowed
to drop inside a Control group.
Args:
obj: A FreeCAD object hovering above a Control item in the Tree View.
"""
# Allow only some objects to be dropped into the Control group
if hasattr(obj, "Proxy") and \
(obj.Proxy.__class__.__name__ == "ServerProxy" or
obj.Proxy.__class__.__name__ == "TrajectoryProxy" or
obj.Proxy.__class__.__name__ == "CollisionDetectorProxy" or
obj.Proxy.__class__.__name__ == "RobWorldProxy" or
obj.Proxy.__class__.__name__ == "RobRotationProxy" or
obj.Proxy.__class__.__name__ == "RobTranslationProxy"):
return True
return False
def getIcon(self):
"""
Method called by FreeCAD to supply an icon for the Tree View.
A full path to an icon is supplied for the FreeCADGui.
Returns:
A str path to an icon.
"""
return path.join(PATH_TO_ICONS, "Control.png")
def setProperties(self, vp):
"""
Method to hide unused properties.
Properties Display Mode, Visibility are set to be invisible as they are unused.
Args:
vp: A `Gui.ViewProviderDocumentObject` Control.ViewObject.
"""
# Hide unnecessary view properties
vp.setEditorMode("DisplayMode", 2)
vp.setEditorMode("Visibility", 2)
def doubleClicked(self, vp):
"""
Method called by FreeCAD when Control is double-clicked in the Tree View.
If no dialog is opened in the Task View, a new `ControlPanel` is opened.
If a `ControlPanel` is already opened, the Model tab on the Combo View
is swapped for the Tasks tab so that the panel becomes visible.
If another dialog is opened a warning is shown.
Args:
vp: A `Gui.ViewProviderDocumentObject` Control.ViewObject.
"""
# Switch to the Task View if a Control panel is already opened
if self.panel:
FreeCADGui.Control.showTaskView()
# Try to open new Control panel
else:
# Load the QDialog from a file and name it after this object
form = FreeCADGui.PySideUic.loadUi(
path.join(PATH_TO_UI, "AnimationControl.ui"))
form.setWindowTitle(vp.Object.Label)
# Create a control panel and try to show it
self.panel = ControlPanel(vp.Object, form)
try:
FreeCADGui.Control.showDialog(self.panel)
except RuntimeError as e:
self.panel = None
if str(e) == "Active task dialog found":
QMessageBox.warning(None,
'Error while opening control panel',
"A panel is already active on "
+ "the Tasks tab of the Combo View.")
FreeCADGui.Control.showTaskView()
return True
def setupContextMenu(self, vp, menu):
"""
Method called by the FreeCAD to customize a context menu for a Control.
The *Transform* and *Set colors...* items are removed from the context menu
shown upon right click on `DocumentObjectGroupPython` Control in the
Tree View. The option to *Show control panel* is added instead.
Args:
vp: A right-clicked `Gui.ViewProviderDocumentObject` Control.ViewObject.
menu: A Qt's QMenu to be edited.
"""
# Add an option to open the Control panel
menu.clear()
action = menu.addAction("Show control panel")
action.triggered.connect(lambda f=self.doubleClicked, arg=vp: f(arg))
def __getstate__(self):
"""
Necessary method to avoid errors when trying to save unserializable objects.
This method is used by JSON to serialize unserializable objects during
autosave. Without this an Error would rise when JSON would try to do
that itself.
We need this for unserializable `fp` attribute, but we don't
serialize it, because it's enough to reset it when object is restored.
Returns:
None, because we don't serialize anything.
"""
return None
def __setstate__(self, state):
"""
Necessary method to avoid errors when trying to restore unserializable objects.
This method is used during a document restoration. We need this for
unserializable `fp` attribute, but we do not restore it, because it's enough
to reset it.
"""
pass
class ControlCommand(object):
"""
ControlCommand class specifying Animate workbench's Control button/command.
This class provides resources for a toolbar button and a menu button.
    It controls their behavior (Active/Inactive) and responds to callbacks after
    either of them was clicked (Activated).
"""
def GetResources(self):
"""
Method used by FreeCAD to retrieve resources to use for this command.
Returns:
A dict with items `PixMap`, `MenuText` and `ToolTip` which contain
a path to a command icon, a text to be shown in a menu and
a tooltip message.
"""
return {'Pixmap': path.join(PATH_TO_ICONS, "ControlCmd.png"),
'MenuText': "Control",
'ToolTip': "Create Control instance."}
def Activated(self):
"""
Method used as a callback when the toolbar button or the menu item is clicked.
This method creates a Control instance in currently active document.
Afterwards it adds a ControlProxy as a `Proxy` to this instance as well as
ViewProviderControlProxy to its `ViewObject.Proxy`, if FreeCAD runs in the
Graphic mode.
"""
doc = FreeCAD.ActiveDocument
a = doc.addObject("App::DocumentObjectGroupPython", "Control")
ControlProxy(a)
if FreeCAD.GuiUp:
ViewProviderControlProxy(a.ViewObject)
doc.recompute()
return
def IsActive(self):
"""
Method to specify when the toolbar button and the menu item are enabled.
The toolbar button `Control` and menu item `Control` are set to be active only
when there is an active document in which a Control instance can be created.
Returns:
True if buttons shall be enabled and False otherwise.
"""
if FreeCAD.ActiveDocument is None:
return False
else:
return True
if FreeCAD.GuiUp:
# Add command to FreeCAD Gui when importing this module in InitGui
FreeCADGui.addCommand('ControlCommand', ControlCommand())
```
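A minimal standalone sketch of the PNG framerate round-trip that `writeFramerateChunk`/`readFramerateChunk` above describe. It assumes pyPNG is installed (`pip install pypng`); the chunk type `b"fpAn"` and the file name are placeholder assumptions, since the workbench's real `FPS_CHUNK_CODE` is defined earlier in the file.
```python
# Standalone sketch: store and read back a framerate as a custom PNG chunk.
import struct
import png

CHUNK_CODE = b"fpAn"  # placeholder chunk type, not the workbench's real one


def write_framerate(image_path, framerate):
    # Read all chunks, insert the custom one right behind IHDR, rewrite the file
    chunks = list(png.Reader(filename=image_path).chunks())
    chunks.insert(1, (CHUNK_CODE, struct.pack("f", framerate)))
    with open(image_path, "wb") as image_file:
        png.write_chunks(image_file, chunks)


def read_framerate(image_path):
    # The framerate chunk is expected right behind IHDR (index 1)
    code, payload = list(png.Reader(filename=image_path).chunks())[1]
    if code == CHUNK_CODE:
        return struct.unpack("f", payload)[0]
    return -1.0


if __name__ == "__main__":
    write_framerate("frame-000.png", 1 / 0.5)  # Step Time of 0.5 s -> 2 fps
    print(read_framerate("frame-000.png"))     # -> 2.0
```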
#### File: Animate/Doxygen/correct_doxypypy_output_file.py
```python
import sys
def modify_file(filename):
# Open input file read-only
with open(filename, 'r') as in_file:
# Copy input file to temporary file, modifying as we go
lines = in_file.readlines()
# Remove all even lines which are empty due to some doxypypy error
correct_lines = lines[::2]
# Reopen input file writable
with open(filename, "w") as out_file:
# Overwriting original file with temporary file contents
for line in correct_lines:
out_file.write(line)
if __name__ == "__main__":
if len(sys.argv) > 1:
modify_file(sys.argv[1])
else:
print("It's necessary to specify an doxypypy output file")
```
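The `lines[::2]` slice above keeps only every other line (indices 0, 2, 4, ...), dropping the spurious empty lines doxypypy interleaves. A quick illustration with made-up input:
```python
# Illustration of the every-other-line filtering performed by modify_file()
lines = ["## @brief foo\n", "\n", "def foo():\n", "\n", "    return 1\n", "\n"]
correct_lines = lines[::2]
print(correct_lines)  # ['## @brief foo\n', 'def foo():\n', '    return 1\n']
```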
#### File: Doxygen/Doxypypy outputs/CollisionObject.py
```python
import FreeCAD
from os import path
## Path to a folder with the necessary icons.
PATH_TO_ICONS = path.join(FreeCAD.getHomePath(), "Mod", "Animate", "Resources",
"Icons")
## @brief Proxy class for a `FeaturePython` Collision instance.
#
#
#To connect this `Proxy` object to a `DocumentObjectGroupPython`
#CollisionDetector do:
#
# a = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",
# "Collision")
# CollisionProxy(a, shape, cause1, cause2)
#
class CollisionProxy(object):
## @brief Initialization method for CollisionProxy.
#
#A class instance is created and made a `Proxy` for a generic `FeaturePython`
#Collision object. During initialization number of properties are specified and
    #preset. An object shape is supplied and the object is labeled so it's known
#which objects caused this `collision`.
#
#
# @param fp A barebone `FeaturePython` Server object to be extended.
# @param shape A `Solid` object defining the shape of an intersection.
# @param cause1 An FreeCAD object observed intersecting with the `cause2`.
# @param cause2 An FreeCAD object observed intersecting with the `cause1`.
#
def __init__(self, fp, shape=None, cause1=None, cause2=None):
if shape is not None:
fp.Shape = shape
if cause1 is not None and cause2 is not None:
fp.Label = cause1.Label + " x " + cause2.Label
self.setProperties(fp, cause1=cause1, cause2=cause2)
fp.Proxy = self
## @brief Method called when document is restored to make sure everything is as it was.
#
    # Reinitialization method - it creates properties and sets them to
#default values, if they were not restored automatically. Properties of
#connected `ViewObject` are also recreated and reset if necessary.
#
#
# @param fp A restored `FeaturePython` CollisionObject object.
#
def onDocumentRestored(self, fp):
self.setProperties(fp)
fp.ViewObject.Proxy.setProperties(
fp.ViewObject)
## @brief Method to set properties during initialization or document restoration.
#
    #The properties are set if they are not already present. Later they are set
    #read-only, because a user is not allowed to edit any instance of
#the CollisionObject.
#
#
# @param fp A restored or barebone `FeaturePython` CollisionObject object.
#
def setProperties(self, fp, cause1=None, cause2=None):
if not hasattr(fp, "CausedBy"):
fp.addProperty(
"App::PropertyLinkList", "CausedBy", "Collision",
"Objects that made this collision").CausedBy = [cause1, cause2]
if not hasattr(fp, "Volume"):
fp.addProperty(
"App::PropertyVolume", "Volume", "Collision",
"Overlapping volume of interfering objects."
).Volume = fp.Shape.Volume
fp.setEditorMode("Placement", 1)
fp.setEditorMode("CausedBy", 1)
fp.setEditorMode("Volume", 1)
fp.setEditorMode("Label", 1)
# Add ViewObject to __dict__ so that it can be accessed using
# __getattribute__
fp.__dict__["ViewObject"] = fp.ViewObject
## @brief Proxy class for a `Gui.ViewProviderDocumentObject` Collision.ViewObject.
#
#A ViewProviderCollisionProxy instance changes a `FeaturePython` Collision's icon.
#It prevents user from transforming a `Collision` object after double-clicking
#it in the Tree View. It also removes options to *Transform* and
#*Set colors...* from a context menu.
#
#To connect this `Proxy` object to a `Gui.ViewProviderDocumentObject`
#Collision.ViewObject do:
#
# a = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",
# "Collision")
# CollisionProxy(a, shape, cause1, cause2)
#
class ViewProviderCollisionProxy(object):
## @brief Initialization method for ViewProviderCollisionProxy.
#
#A class instance is created and made a `Proxy` for a generic
#`Gui.ViewProviderDocumentObject` Collision.ViewObject. This method changes
#`LineColor`, `PointColor`, ShapeColor`, `LineWidth` and `PointSize` properties
#of a Collision instance and hides unnecessary unused View properties.
#
#
# @param vp A barebone `Gui.ViewProviderDocumentObject` Collision.ViewObject.
# @param color A tuple of floats specifying Point, Line and Shape RGB color.
#
def __init__(self, vp, color=None):
if color is None:
color = (1.0, 0.0, 0.0)
vp.LineColor = vp.PointColor = vp.ShapeColor = color
vp.LineWidth = 10.0
vp.PointSize = 10.0
self.setProperties(vp)
vp.Proxy = self
## @brief Method called when CollisionDetector is double-clicked in the Tree View.
#
    #It just prevents the user from accessing the transformation panel and transforming
#a `Collision` object. It's enough to just implement it and return `True` for
#this purpose.
#
#
# @param vp A double-clicked Collision.ViewObject.
#
# @return
# True to specify that it was implemented and executed.
#
def doubleClicked(self, vp):
return True
## @brief Method editing a context menu for right click on a Collision.
#
#The *Transform* and *Set colors...* items are removed from the context menu
#shown upon right click on the Collision in the Tree View. This is done to
    #prevent the user from transforming the `Collision` object or changing its color.
#
#
# @param vp A right-clicked Collision.ViewObject.
# @param menu A Qt's QMenu to be edited.
#
def setupContextMenu(self, vp, menu):
menu.clear()
## @brief Method used to get a path to an icon which will appear in the tree view.
#
# @return
# A path to the icon.
#
def getIcon(self):
return path.join(PATH_TO_ICONS, "Collision.png")
## @brief Method to hide unused properties.
#
#All unused unnecessary `FeaturePython`s properties are hidden except for
#`Transparency` and `Visibility`.
#
#
# @param vp A `Gui.ViewProviderDocumentObject` Collision.ViewObject.
#
def setProperties(self, vp):
vp.setEditorMode("AngularDeflection", 2)
vp.setEditorMode("BoundingBox", 2)
vp.setEditorMode("Deviation", 2)
vp.setEditorMode("DisplayMode", 2)
vp.setEditorMode("DrawStyle", 2)
vp.setEditorMode("Lighting", 2)
vp.setEditorMode("LineColor", 2)
vp.setEditorMode("LineWidth", 2)
vp.setEditorMode("PointColor", 2)
vp.setEditorMode("PointSize", 2)
vp.setEditorMode("Selectable", 2)
vp.setEditorMode("SelectionStyle", 2)
vp.setEditorMode("ShapeColor", 2)
``` |
{
"source": "JiriVales/zdo2021-vales",
"score": 2
} |
#### File: zdo2021-vales/tests/test_zdo2021.py
```python
import pytest
import os
import skimage.io
from skimage.draw import polygon
import glob
import numpy as np
from pathlib import Path
import zdo2021.main
import sklearn.metrics
# cd ZDO2021
# python -m pytest
def test_run_random():
vdd = zdo2021.main.VarroaDetector()
    # Set the 'VARROA_DATA_PATH' environment variable in your operating system to the dataset path.
    # If it is not set, the test dataset tests/test_dataset is used.
dataset_path = os.getenv('VARROA_DATA_PATH_', default=Path(__file__).parent / 'test_dataset/')
# dataset_path = Path(r"H:\biology\orig\zdo_varroa_detection_coco_001")
# print(f'dataset_path = {dataset_path}')
files = glob.glob(f'{dataset_path}/images/*.jpg')
cislo_obrazku = np.random.randint(0, len(files))
filename = files[cislo_obrazku]
im = skimage.io.imread(filename)
imgs = np.expand_dims(im, axis=0)
# print(f"imgs.shape={imgs.shape}")
prediction = vdd.predict(imgs)
assert prediction.shape[0] == imgs.shape[0]
    # This will run everywhere except on GitHub
if not os.getenv('CI'):
import matplotlib.pyplot as plt
plt.imshow(prediction[0])
plt.show()
def test_run_all():
vdd = zdo2021.main.VarroaDetector()
    # Set the 'VARROA_DATA_PATH' environment variable in your operating system to the dataset path.
    # If it is not set, the test dataset tests/test_dataset is used.
dataset_path = os.getenv('VARROA_DATA_PATH_', default=Path(__file__).parent / 'test_dataset/')
# dataset_path = Path(r"H:\biology\orig\zdo_varroa_detection_coco_001")
# print(f'dataset_path = {dataset_path}')
files = glob.glob(f'{dataset_path}/images/*.jpg')
f1s = []
for filename in files:
im = skimage.io.imread(filename)
imgs = np.expand_dims(im, axis=0)
# print(f"imgs.shape={imgs.shape}")
prediction = vdd.predict(imgs)
import json
ann_pth = Path(dataset_path)/"annotations/instances_default.json"
assert ann_pth.exists()
# gt_ann = json.loads(str(ann_pth))
with open(ann_pth, 'r') as infile:
gt_ann = json.load(infile)
ground_true_mask = prepare_ground_true_mask(gt_ann, filename, dataset=True)
f1i = f1score(ground_true_mask, prediction, im, show=True)
# assert f1i > 0.55
f1s.append(f1i)
f1 = np.mean(f1s)
print(f"f1score={f1}")
# assert f1 > 0.55
def f1score(ground_true_mask:np.ndarray, prediction:np.ndarray, image=None, show=False):
"""
Measure f1 score for one image
:param ground_true_mask:
:param prediction:
:return:
"""
if (ground_true_mask.shape[-1] == prediction.shape[-2]) and (ground_true_mask.shape[-2] == prediction.shape[-1]):
        print(f"Warning: Ground truth shape [{ground_true_mask.shape}] does not fit prediction shape [{prediction.shape}]. Transposition applied.")
ground_true_mask=np.rot90(ground_true_mask, k=1)
    if ground_true_mask.shape[-1] != prediction.shape[-1]:
        raise ValueError(f"Ground truth shape [{ground_true_mask.shape}] does not fit prediction shape [{prediction.shape}]")
    if ground_true_mask.shape[-2] != prediction.shape[-2]:
        raise ValueError(f"Ground truth shape [{ground_true_mask.shape}] does not fit prediction shape [{prediction.shape}]")
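    # "macro" averages the per-class F1 scores (background vs. varroa pixels)
    # with equal weight, so the rare foreground class is not drowned out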
f1 = sklearn.metrics.f1_score(ground_true_mask.astype(bool).flatten(), prediction.astype(bool).flatten(), average="macro")
if (image is not None) and show:
from matplotlib import pyplot as plt
plt.imshow(image)
plt.contour(prediction[0,:,:], colors=['red'])
        plt.contour(ground_true_mask, colors=['green'])
plt.suptitle(f"f1score={f1}")
plt.show()
return f1
def prepare_ground_true_mask(gt_ann, filename, dataset=True):
name = None
for ann_im in gt_ann['images']:
if ann_im["file_name"] == Path(filename).name:
# mask = np.zeros([], dtype=bool)
M = np.zeros((ann_im["height"], ann_im["width"]), dtype=bool)
immage_id = ann_im["id"]
for ann in gt_ann['annotations']:
if ann["image_id"] == immage_id:
S = ann['segmentation']
for s in S:
N = len(s)
rr, cc = polygon(np.array(s[1:N:2]), np.array(s[0:N:2])) # (y, x)
M[rr, cc] = True
if dataset:
# M=M.transpose()
M=np.rot90(M, k=3)
return M
``` |
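A small self-contained sketch of the COCO-style polygon rasterization that `prepare_ground_true_mask` performs above; the polygon coordinates and mask size are made up for illustration. The segmentation list interleaves x and y values, while `skimage.draw.polygon` takes row (y) and column (x) arrays.
```python
# Sketch of rasterizing one COCO segmentation polygon into a boolean mask.
import numpy as np
from skimage.draw import polygon

segmentation = [10, 5, 40, 5, 40, 30, 10, 30]  # illustrative rectangle (x, y pairs)
height, width = 50, 60                         # illustrative image size

mask = np.zeros((height, width), dtype=bool)
ys = np.array(segmentation[1::2])  # every second value from index 1 -> y
xs = np.array(segmentation[0::2])  # every second value from index 0 -> x
rr, cc = polygon(ys, xs)           # pixel indices inside the polygon
mask[rr, cc] = True
print(mask.sum())                  # number of foreground pixels
```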
{
"source": "JiriVanek/nixpy",
"score": 2
} |
#### File: nixio/pycore/group.py
```python
from __future__ import (absolute_import, division, print_function)
from ..group import GroupMixin
from .entity_with_sources import EntityWithSources
from .data_array import DataArray
from .tag import Tag
from .multi_tag import MultiTag
from . import util
class Group(EntityWithSources, GroupMixin):
def __init__(self, nixparent, h5group):
super(Group, self).__init__(nixparent, h5group)
@classmethod
def _create_new(cls, nixparent, h5parent, name, type_):
newentity = super(Group, cls)._create_new(nixparent, h5parent,
name, type_)
return newentity
# DataArray
def _get_data_array_by_id(self, id_or_name):
data_arrays = self._h5group.open_group("data_arrays")
if not util.is_uuid(id_or_name):
id_or_name = self._parent.data_arrays[id_or_name].id
# Using get_by_name - linked entries use id as name in backend
return DataArray(self._parent, data_arrays.get_by_name(id_or_name))
def _get_data_array_by_pos(self, pos):
data_arrays = self._h5group.open_group("data_arrays")
return DataArray(self._parent, data_arrays.get_by_pos(pos))
def _delete_data_array_by_id(self, id_):
data_arrays = self._h5group.open_group("data_arrays")
data_arrays.delete(id_)
def _data_array_count(self):
return len(self._h5group.open_group("data_arrays"))
def _add_data_array_by_id(self, id_or_name):
if id_or_name not in self._parent.data_arrays:
raise RuntimeError("Group._add_data_array_by_id: "
"DataArray not found in Block!")
target = self._parent.data_arrays[id_or_name]
data_arrays = self._h5group.open_group("data_arrays")
data_arrays.create_link(target, target.id)
def _has_data_array_by_id(self, id_or_name):
data_arrays = self._h5group.open_group("data_arrays")
return data_arrays.has_by_id(id_or_name)
# MultiTag
def _get_multi_tag_by_id(self, id_or_name):
multi_tags = self._h5group.open_group("multi_tags")
if not util.is_uuid(id_or_name):
id_or_name = self._parent.multi_tags[id_or_name].id
# Using get_by_name - linked entries use id as name in backend
return MultiTag(self._parent, multi_tags.get_by_name(id_or_name))
def _get_multi_tag_by_pos(self, pos):
multi_tags = self._h5group.open_group("multi_tags")
return MultiTag(self._parent, multi_tags.get_by_pos(pos))
def _delete_multi_tag_by_id(self, id_):
multi_tags = self._h5group.open_group("multi_tags")
multi_tags.delete(id_)
def _multi_tag_count(self):
return len(self._h5group.open_group("multi_tags"))
def _add_multi_tag_by_id(self, id_or_name):
if id_or_name not in self._parent.multi_tags:
raise RuntimeError("Group._add_multi_tag_by_id: "
"MultiTag not found in Block!")
target = self._parent.multi_tags[id_or_name]
multi_tags = self._h5group.open_group("multi_tags")
multi_tags.create_link(target, target.id)
def _has_multi_tag_by_id(self, id_or_name):
multi_tags = self._h5group.open_group("multi_tags")
return multi_tags.has_by_id(id_or_name)
# Tag
def _get_tag_by_id(self, id_or_name):
tags = self._h5group.open_group("tags")
if not util.is_uuid(id_or_name):
id_or_name = self._parent.tags[id_or_name].id
# Using get_by_name - linked entries use id as name in backend
return Tag(self._parent, tags.get_by_name(id_or_name))
def _get_tag_by_pos(self, pos):
tags = self._h5group.open_group("tags")
return Tag(self._parent, tags.get_by_pos(pos))
def _delete_tag_by_id(self, id_):
tags = self._h5group.open_group("tags")
tags.delete(id_)
def _tag_count(self):
return len(self._h5group.open_group("tags"))
def _add_tag_by_id(self, id_or_name):
if id_or_name not in self._parent.tags:
raise RuntimeError("Group._add_tag_by_id: "
"Tag not found in Block!")
target = self._parent.tags[id_or_name]
tags = self._h5group.open_group("tags")
tags.create_link(target, target.id)
def _has_tag_by_id(self, id_or_name):
tags = self._h5group.open_group("tags")
return tags.has_by_id(id_or_name)
```
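For orientation, a hedged usage sketch for the link containers implemented above. It reuses only calls that appear in this package's own test file (`File.open`, `create_block`, `create_data_array`) plus `create_group` and the group's `data_arrays` container; the file name and the `backend` argument are illustrative, and container semantics may differ between nixpy versions.
```python
# Sketch: link an existing DataArray into a Group (names are illustrative).
import nixio as nix

f = nix.File.open("grouptest.h5", nix.FileMode.Overwrite, backend="h5py")
block = f.create_block("session", "recording")
da = block.create_data_array("signal", "raw.data", data=[0, 1, 2])
grp = block.create_group("trial-1", "trial")

# A Group does not own data; it only links entities that live in its Block
grp.data_arrays.append(da)
print(len(grp.data_arrays))      # -> 1
print(grp.data_arrays[0].name)   # -> "signal"
f.close()
```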
#### File: pycore/util/names.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
def sanitizer(name):
"""
Sanitizes a string supposed to be an entity name. That is,
invalid characters like slashes are substituted with underscores.
:param name: A string representing the name.
:returns: The sanitized name.
:rtype: str
"""
return name.replace("/", "_")
def check(name):
"""
Checks a string whether is needs to be sanitized.
:param name: The name.
:returns: True if the name is valid, false otherwise.
:rtype: bool
"""
if isinstance(name, bytes):
name = name.decode()
return "/" not in name
```
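A quick usage note for the two helpers above; the import path mirrors this file's location in the tree and is an assumption.
```python
# Entity names may not contain "/" because it is the HDF5 path separator
from nixio.pycore.util.names import check, sanitizer  # path per this tree layout

print(check("session/trial-1"))      # -> False
print(sanitizer("session/trial-1"))  # -> "session_trial-1"
```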
#### File: nixio/test/test_file.py
```python
from __future__ import (absolute_import, division, print_function)
import os
import unittest
import h5py
import nixio as nix
import nixio.pycore.file as filepy
from nixio.pycore.exceptions.exceptions import InvalidFile
skip_cpp = not hasattr(nix, "core")
class FileTestBase(unittest.TestCase):
backend = None
testfilename = "filetest.h5"
def setUp(self):
self.file = nix.File.open(self.testfilename, nix.FileMode.Overwrite,
backend=self.backend)
def tearDown(self):
self.file.close()
os.remove(self.testfilename)
def test_file_format(self):
assert(self.file.format == "nix")
assert(self.file.version == filepy.HDF_FF_VERSION)
def test_file_timestamps(self):
created_at = self.file.created_at
assert(created_at > 0)
updated_at = self.file.updated_at
assert(updated_at > 0)
self.file.force_created_at(1403530068)
assert(self.file.created_at == 1403530068)
def test_file_blocks(self):
assert(len(self.file.blocks) == 0)
block = self.file.create_block("test block", "recordingsession")
assert(len(self.file.blocks) == 1)
assert(block in self.file.blocks)
assert(block.id in self.file.blocks)
assert("notexist" not in self.file.blocks)
assert(block.id == self.file.blocks[0].id)
assert(block.id == self.file.blocks[-1].id)
del self.file.blocks[0]
assert(len(self.file.blocks) == 0)
def test_file_sections(self):
assert(len(self.file.sections) == 0)
section = self.file.create_section("test section", "recordingsession")
assert(len(self.file.sections) == 1)
assert(section in self.file.sections)
assert(section.id in self.file.sections)
assert("notexist" not in self.file.sections)
assert(section.id == self.file.sections[0].id)
assert(section.id == self.file.sections[-1].id)
del self.file.sections[0]
assert(len(self.file.sections) == 0)
def test_file_find_sections(self):
for i in range(2):
self.file.create_section("level1-p0-s" + str(i), "dummy")
for i in range(2):
self.file.sections[0].create_section("level2-p1-s" + str(i),
"dummy")
for i in range(2):
self.file.sections[1].create_section("level2-p2-s" + str(i),
"dummy")
for i in range(2):
self.file.sections[0].sections[0].create_section(
"level3-p1-s" + str(i), "dummy"
)
assert(len(self.file.find_sections()) == 8)
assert(len(self.file.find_sections(limit=1)) == 2)
assert(len(self.file.find_sections(filtr=lambda x: "level2-p1-s" in
x.name)) == 2)
assert(len(self.file.find_sections(filtr=lambda x: "level2-p1-s" in
x.name,
limit=1)) == 0)
def test_order_tracking(self):
blknames = []
for idx in range(10):
name = "block_" + str(idx)
self.file.create_block(name, "ordertest")
blknames.append(name)
danames = []
datablockname = blknames[0]
datablock = self.file.blocks[datablockname]
for idx in range(7):
name = "data_" + str(idx)
da = datablock.create_data_array(name, "thedata", data=[0])
da.definition = "da definition"
danames.append(name)
self.file.close()
self.file = nix.File.open(self.testfilename, nix.FileMode.ReadOnly,
backend=self.backend)
for idx in range(len(self.file.blocks)):
self.assertEqual(blknames[idx], self.file.blocks[idx].name)
datablock = self.file.blocks[datablockname]
for idx in range(len(datablock.data_arrays)):
self.assertEqual(danames[idx], datablock.data_arrays[idx].name)
def test_context_open(self):
fname = "contextopen.nix"
with nix.File.open(fname, nix.FileMode.Overwrite,
backend=self.backend) as nf:
nf.create_block("blocky", "test-block")
with nix.File.open(fname, nix.FileMode.ReadOnly,
backend=self.backend) as nf:
self.assertEqual(nf.blocks[0].name, "blocky")
@unittest.skipIf(skip_cpp, "HDF5 backend not available.")
class TestFileCPP(FileTestBase):
backend = "hdf5"
class TestFilePy(FileTestBase):
backend = "h5py"
class TestFileVerPy(unittest.TestCase):
backend = "h5py"
testfilename = "versiontest.h5"
filever = filepy.HDF_FF_VERSION
fformat = filepy.FILE_FORMAT
def try_open(self, mode):
f = nix.File.open(self.testfilename, mode, backend=self.backend)
f.close()
def set_header(self, fformat=None, version=None):
if fformat is None:
fformat = self.fformat
if version is None:
version = self.filever
self.h5root.attrs["format"] = fformat
self.h5root.attrs["version"] = version
self.h5root.attrs["created_at"] = 0
self.h5root.attrs["updated_at"] = 0
if "data" not in self.h5root:
self.h5root.create_group("data")
self.h5root.create_group("metadata")
def setUp(self):
self.h5file = h5py.File(self.testfilename, mode="w")
self.h5root = self.h5file["/"]
def tearDown(self):
self.h5file.close()
os.remove(self.testfilename)
def test_read_write(self):
self.set_header()
self.try_open(nix.FileMode.ReadWrite)
def test_read_only(self):
vx, vy, vz = self.filever
roversion = (vx, vy, vz+2)
self.set_header(version=roversion)
self.try_open(nix.FileMode.ReadOnly)
with self.assertRaises(RuntimeError):
self.try_open(nix.FileMode.ReadWrite)
def test_no_open(self):
vx, vy, vz = self.filever
noversion = (vx, vy+3, vz+2)
self.set_header(version=noversion)
with self.assertRaises(RuntimeError):
self.try_open(nix.FileMode.ReadWrite)
with self.assertRaises(RuntimeError):
self.try_open(nix.FileMode.ReadOnly)
noversion = (vx, vy+1, vz)
self.set_header(version=noversion)
with self.assertRaises(RuntimeError):
self.try_open(nix.FileMode.ReadWrite)
with self.assertRaises(RuntimeError):
self.try_open(nix.FileMode.ReadOnly)
noversion = (vx+1, vy, vz)
self.set_header(version=noversion)
with self.assertRaises(RuntimeError):
self.try_open(nix.FileMode.ReadWrite)
with self.assertRaises(RuntimeError):
self.try_open(nix.FileMode.ReadOnly)
def test_bad_tuple(self):
self.set_header(version=(-1, -1, -1))
with self.assertRaises(RuntimeError):
self.try_open(nix.FileMode.ReadOnly)
self.set_header(version=(1, 2))
with self.assertRaises(RuntimeError):
self.try_open(nix.FileMode.ReadOnly)
def test_bad_format(self):
self.set_header(fformat="NOT_A_NIX_FILE")
with self.assertRaises(InvalidFile):
self.try_open(nix.FileMode.ReadOnly)
```
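If the package layout matches the imports above, this test module can also be driven programmatically; a minimal sketch (the dotted module path is assumed from the `#### File:` header):
```python
# Run only this test module; TestFileCPP is skipped automatically when the
# C++ backend ("nix.core") is not available.
import unittest

suite = unittest.defaultTestLoader.loadTestsFromName("nixio.test.test_file")
unittest.TextTestRunner(verbosity=2).run(suite)
```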
{
"source": "jirivrany/kagle-statoil",
"score": 3
} |
#### File: jirivrany/kagle-statoil/cnn_keras_datagen.py
```python
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
np.random.seed(1207) # The seed I used - pick your own or comment out for a random seed. A constant seed allows for better comparisons though
# Import Keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
import model_simple as model_source
MODEL_FILE = '.mdl_kerasgen_simple_w.hdf5'
SUBMISSION = 'simple_32batch.csv'
# ## Load Training Data
df_train = pd.read_json('./input/train.json') # this is a dataframe
# Need to reshape and feature scale the images:
def get_scaled_imgs(df):
imgs = []
for i, row in df.iterrows():
#make 75x75 image
band_1 = np.array(row['band_1']).reshape(75, 75)
band_2 = np.array(row['band_2']).reshape(75, 75)
band_3 = band_1 + band_2 # plus since log(x*y) = log(x) + log(y)
# Rescale
a = (band_1 - band_1.mean()) / (band_1.max() - band_1.min())
b = (band_2 - band_2.mean()) / (band_2.max() - band_2.min())
c = (band_3 - band_3.mean()) / (band_3.max() - band_3.min())
imgs.append(np.dstack((a, b, c)))
return np.array(imgs)
Xdata = get_scaled_imgs(df_train)
# Get the response variable "is_iceberg"
Ydata = np.array(df_train['is_iceberg'])
# Some of the incident angle from the satellite are unknown and marked as "na". Replace these na with 0 and find the indices where the incident angle is >0 (this way you can use a truncated set or the full set of training data).
df_train.inc_angle = df_train.inc_angle.replace('na',0)
idx_tr = np.where(df_train.inc_angle>0)
# You can now use the option of training with only known incident angles or the whole set. I found slightly better results training with only the known incident angles so:
Ydata = Ydata[idx_tr[0]]
Xdata = Xdata[idx_tr[0],...]
model = model_source.get_model()
model.summary()
X_train, X_val, Y_train, Y_val = train_test_split(Xdata, Ydata, test_size = 0.15, random_state=1207)
#batch_size = 32
mcp_save = ModelCheckpoint(MODEL_FILE, save_best_only=True, monitor='val_loss', mode='min')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=6, verbose=1, epsilon=1e-4, mode='min')
#
#
#
##model.fit(Xtr_more, Ytr_more, batch_size=batch_size, epochs=50, verbose=1, callbacks=[earlyStopping, mcp_save, reduce_lr_loss], validation_split=0.25)
#model.fit(Xtr_more, Ytr_more, batch_size=batch_size, epochs=60, verbose=1, callbacks=[mcp_save, reduce_lr_loss])
#
# ## Results
# Set a learning rate annealer
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=7,
verbose=1,
factor=0.5,
min_lr=0.00001)
epochs = 30
batch_size = 32
datagen = ImageDataGenerator(
rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
zoom_range = 0.1, # Randomly zoom image
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=True) # randomly flip images
datagen.fit(X_train)
# Fit the model
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
epochs = epochs,
validation_data = (X_val,Y_val),
verbose = 1,
steps_per_epoch=X_train.shape[0] * 10 // batch_size,
callbacks=[learning_rate_reduction, mcp_save, reduce_lr_loss])
model.load_weights(filepath = MODEL_FILE)
score = model.evaluate(Xdata, Ydata, verbose=1)
print('Train score:', score[0])
print('Train accuracy:', score[1])
# Now, to make a submission, load the test data and train the model and output a csv file.
df_test = pd.read_json('./input/test.json')
df_test.inc_angle = df_test.inc_angle.replace('na',0)
Xtest = (get_scaled_imgs(df_test))
pred_test = model.predict(Xtest)
submission = pd.DataFrame({'id': df_test["id"], 'is_iceberg': pred_test.reshape((pred_test.shape[0]))})
print(submission.head(10))
submission.to_csv(SUBMISSION, index=False)
```
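A small, self-contained sketch of what `get_scaled_imgs` produces, using a synthetic one-row dataframe instead of the Kaggle JSON (the output shape follows directly from the reshaping and `np.dstack` above):
```python
# Hypothetical smoke test for get_scaled_imgs(); the random bands stand in for real data.
import numpy as np
import pandas as pd

fake = pd.DataFrame({
    "band_1": [list(np.random.randn(75 * 75))],
    "band_2": [list(np.random.randn(75 * 75))],
})
imgs = get_scaled_imgs(fake)
print(imgs.shape)   # (1, 75, 75, 3): band_1, band_2 and their sum, each rescaled
```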
{
"source": "jirivrany/MLND-capstone",
"score": 3
} |
#### File: MLND-capstone/src/preprocess.py
```python
import glob
import numpy as np
import tensorflow as tf
import os
from scipy.signal import wiener
from sklearn import preprocessing
def transform_row(row):
row = row.replace("[", "")
row = row.replace("],", "")
return row
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def create_label(filename):
"""
create label from the file name
"""
keys = {"Sphere": 0, "Vertical": 1, "Horizontal": 2}
names = filename.split(os.sep)
names = names[4].split()
return keys[names[0].strip()]
def sanitize_name(filename):
filename = filename.replace(".txt", "")
filename = filename.replace(" ", "_")
filename = filename.split(os.sep)[-1]
return filename
def create_files(files, output_dir, testset=False):
for i, fname in enumerate(files):
with open(fname) as fdata:
data = fdata.readlines()
            data = [transform_row(row).split(",") for row in data]
data = np.array(data)
data = data.astype(np.float64)
# filter the noise
data = wiener(data)
# normalize data
data = preprocessing.normalize(data, norm='l2')
# flip the data
data_s1 = np.fliplr(data)
data_s2 = np.flipud(data)
filename = os.path.join(
output_dir, sanitize_name(fname) + '.tfrecords')
filename_s1 = os.path.join(
output_dir, sanitize_name(fname) + 'split_left.tfrecords')
filename_s2 = os.path.join(
output_dir, sanitize_name(fname) + 'split_up.tfrecords')
if not i % 500:
print(i, 'writing', filename)
if testset:
pairs = ((filename, data),)
else:
pairs = ((filename, data), (filename_s1, data_s1), (filename_s2, data_s2))
for fn, dn in pairs:
writer = tf.python_io.TFRecordWriter(fn)
features = tf.train.Features(feature={
'height': _int64_feature(100),
'width': _int64_feature(100),
'depth': _int64_feature(1),
'label': _int64_feature(create_label(fname)),
'image': _bytes_feature(dn.tostring())
})
example = tf.train.Example(features=features)
writer.write(example.SerializeToString())
writer.close()
def create_training():
files = glob.glob('../data/gravimetrie/random_zeronoise/*.txt')
output_dir = '../data/gravimetrie/random_tf_normalized'
create_files(files, output_dir)
def create_validation():
files = glob.glob('../data/gravimetrie/validation_set/*.txt')
output_dir = '../data/gravimetrie/validation_set_tf'
create_files(files, output_dir, True)
if __name__ == "__main__":
create_validation()
```
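For completeness, this is roughly how one of the written records could be read back with the same TF 1.x API family used by the writer; `read_records` is an illustrative helper, not part of the repository:
```python
import numpy as np
import tensorflow as tf

def read_records(path):
    """Yield (image, label) pairs from one .tfrecords file written above."""
    for serialized in tf.python_io.tf_record_iterator(path):
        example = tf.train.Example()
        example.ParseFromString(serialized)
        feat = example.features.feature
        height = feat["height"].int64_list.value[0]
        width = feat["width"].int64_list.value[0]
        label = feat["label"].int64_list.value[0]
        image = np.frombuffer(feat["image"].bytes_list.value[0], dtype=np.float64)
        yield image.reshape(height, width), label
```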
{
"source": "JiriWeiss/ais",
"score": 3
} |
#### File: ais/src/jsonAnalyze.py
```python
import asyncio
from encodings import utf_8
from bs4 import BeautifulSoup
import pyppeteer
import os
import json
import pandas as pd
from pathlib import Path
import re
import urllib.request
from sympy import *
width, height = 1440, 900
URL = "https://www.unob.cz/Stranky/default.aspx"
async def get_urls(browser, url):
"""Return a page after waiting for the given selector"""
page = await browser.newPage()
await page.goto(url)
urls = await page.evaluate("""() => {
const links = document.querySelectorAll(".ecm-uo-motive-nav a")
const urls = Array.from(links).map(link => link.href)
return urls
}
""")
return urls
async def get_RightList(browser, url):
page = await browser.newPage()
await page.goto(url)
urls = await page.evaluate("""() => {
const links = document.querySelectorAll(".box-right-list a")
const urls = Array.from(links).map(link => link.href)
return urls
}
""")
return urls
async def get_Lide(browser, url):
page = await browser.newPage()
await page.goto(url)
urls = await page.evaluate("""() => {
const links = document.querySelectorAll(".ms-WPBody a")
const urls = Array.from(links).map(link => link.href)
return urls
}
""")
return urls
async def get_data(browser, url):
page = await browser.newPage()
await page.goto(url)
data = await page.evaluate("""() => {
const content = document.querySelectorAll("td")
const ps = Array.from(content).map(element => element.textContent)
return ps
}
""")
return data
async def main():
result = {"FacURL" : [], "KatURL" : [], "RightListURL" : [], "LideURL" : [], "NamesURL" : [], "info" : []}
fakulty = {"fvt" : [], "fvl" : [], "fvz" : []}
fvl = {"katedry" : [], "lide" : []}
fvt = {"katedry" : []}
fvz = {"katedry" : []}
browser = await pyppeteer.launch()
#page = await get_page(browser, URL.format(0), "div.ecm-uo-motive-nav")
urls = await get_urls(browser, URL)
"""
fakulty["fvl"].append(urls[0])
fakulty["fvt"].append(urls[1])
fakulty["fvz"].append(urls[2])
for url in fakulty["fvl"]:
katurl = await get_urls(browser, url)
#fvl["katedry"].append(katurl)
for url in katurl:
RightList = await get_RightList(browser, url)
fvl["katedry"].append(RightList[1])
for url in fakulty["fvt"]:
katurl = await get_urls(browser, url)
#fvt["katedry"].append(katurl)
for url in katurl:
RightList = await get_RightList(browser, url)
fvt["katedry"].append(RightList[1])
for url in fakulty["fvz"]:
katurl = await get_urls(browser, url)
#fvz["katedry"].append(katurl)
for url in katurl:
RightList = await get_RightList(browser, url)
fvz["katedry"].append(RightList[1])
with open("outputs/fvl.json", "w", encoding = "utf8") as f:
json.dump(fvl, f, indent=2)
with open("outputs/fvt.json", "w", encoding = "utf8") as f:
json.dump(fvt, f, indent=2)
with open("outputs/fvz.json", "w", encoding = "utf8") as f:
json.dump(fvz, f, indent=2)
"""
for url in urls:
result["FacURL"].append(url)
for url in urls:
katurl = await get_urls(browser, url)
result["KatURL"].append(katurl)
for url in katurl:
RightList = await get_RightList(browser, url)
result["RightListURL"].append(RightList)
with open("outputs/RightList.json", "r") as f:
content = json.load(f)
for index in content:
lideURL = index[1]
result["LideURL"].append(lideURL)
with open("outputs/lide.json", "r") as f:
names = json.load(f)
for url in names:
NameURL = await get_Lide(browser, url)
result["NamesURL"].append(NameURL)
with open("outputs/Names.json", "r") as f:
jmena = json.load(f)
for url in jmena:
info = await get_data(browser, url)
result["info"].append(info)
print(info)
with open("outputs/katedry.json", "w", encoding = "utf8") as f:
json.dump(result["KatURL"], f, indent=2)
with open("outputs/RightList.json", "w", encoding = "utf8") as f:
json.dump(result["RightListURL"], f, indent=2)
with open("outputs/lide.json", "w", encoding = "utf8") as f:
json.dump(result["LideURL"], f, indent=2)
with open("outputs/Names.json", "w", encoding = "utf8") as f:
json.dump(result["NamesURL"], f, indent=2)
with open("outputs/info.json", "w", encoding = "utf8") as f:
json.dump(result["info"], f, indent=2)
await browser.close()
asyncio.get_event_loop().run_until_complete(main())
```
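The helper coroutines above can also be reused outside `main()`; a minimal sketch, assuming pyppeteer can launch a browser on this machine and the CSS selectors still match the target site:
```python
# Hypothetical standalone use of get_urls(); URL is the constant defined at the top of the script.
import asyncio
import pyppeteer

async def demo():
    browser = await pyppeteer.launch()
    try:
        urls = await get_urls(browser, URL)
        print(len(urls), "links found")
    finally:
        await browser.close()

asyncio.get_event_loop().run_until_complete(demo())
```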
{
"source": "jirkacechak/chatbot",
"score": 3
} |
#### File: jirkacechak/chatbot/utils.py
```python
from __future__ import print_function
import sys
import os
from datetime import timedelta
from data import errorMessages
import constants as c
def oneLinePrint(forPrint):
"""Prints one line text.
Args:
forPrint: text for printing
"""
print(forPrint, end="")
sys.stdout.flush()
def clearConsoleLine():
"""Clears one line in console."""
oneLinePrint("\r\t\t\t\t\t\t\t\t\t\t\t\r")
def printDivider():
"""Prints divider to console."""
print("\n--------------------------------------------------------------------------------\n")
def printErrorAndExit(errNum):
"""Prints error message and exits program.
Args:
errNum: error code
"""
oneLinePrint("Error: " + errorMessages[errNum])
sys.exit()
def printHelpAndExit():
"""Prints generated help text using constants in file constants.py and exits program."""
defaultText = " (default)"
indent = " "
print("")
print("Usage:")
print("{}py chatbot.py [-h|--help] [--mode=[{}|{}|{}]] [--model=<number>] [--dataLimit=<number>] [--testDataLimit=<number>] [--testing=[{}|{}]] [--usw=[{}|{}]] [--gui=[{}|{}]]".format(
indent, c.MODE_TRAIN, c.MODE_CHAT, c.MODE_TEST, c.TEXT_ENABLE, c.TEXT_DISABLE, c.TEXT_ENABLE, c.TEXT_DISABLE, c.TEXT_ENABLE, c.TEXT_DISABLE))
print("")
print("Options:")
modelNumberText = "Used model number ("
for i in range(1, c.NUMBER_OF_MODELS + 1):
modelNumberText += "{}{}{}".format(i, defaultText if c.DEFAULT_MODEL ==
i else "", "/" if i < c.NUMBER_OF_MODELS else ")")
rows = [["-h, --help", "Show help."], ["--mode=[{}|{}|{}]".format(c.MODE_TRAIN, c.MODE_CHAT, c.MODE_TEST), "Training{}/chatting{}/testing{} mode.".format(defaultText if c.DEFAULT_MODE == c.MODE_TRAIN else "", defaultText if c.DEFAULT_MODE == c.MODE_CHAT else "", defaultText if c.DEFAULT_MODE == c.MODE_TEST else "")], ["--model=<number>", modelNumberText], ["--dataLimit=<number>", "Limit for training data (<number> >= {} | <number> == 0 (no limit)).".format(c.MIN_DATA_SIZE)], ["--testDataLimit=<number>", "Limit for testing data (<number> >= {} | <number> == 0 (no limit)).".format(c.MIN_TEST_DATA_SIZE)], [
"--testing=[{}|{}]".format(c.TEXT_ENABLE, c.TEXT_DISABLE), "Enable{}/disable{} testing each training epoch.".format(defaultText if c.DEFAULT_TESTING else "", defaultText if not c.DEFAULT_TESTING else "")], ["--usw=[{}|{}]".format(c.TEXT_ENABLE, c.TEXT_DISABLE), "Train model using{}/without using{} saved model weights.".format(defaultText if c.DEFAULT_USW else "", defaultText if not c.DEFAULT_USW else "")], ["--gui=[{}|{}]".format(c.TEXT_ENABLE, c.TEXT_DISABLE), "Chatting using{}/without using{} GUI.".format(defaultText if c.DEFAULT_USE_CHAT_GUI else "", defaultText if not c.DEFAULT_USE_CHAT_GUI else "")]]
col0Width = max(len(row[0]) for row in rows) + 2
for row in rows:
print("{}{}{}".format(indent, row[0].ljust(col0Width), row[1]))
print("")
print("Examples:")
print("{}py chatbot.py --help".format(indent))
print("{}py chatbot.py --model=1 --dataLimit=1000 --testing={} --usw={}".format(
indent, c.TEXT_DISABLE, c.TEXT_DISABLE))
print("{}py chatbot.py --mode={} --model=1".format(indent, c.MODE_CHAT))
print("{}py chatbot.py --mode={} --model=1 --gui={}".format(indent,
c.MODE_CHAT, c.TEXT_DISABLE))
print("{}py chatbot.py --mode={} --model=1 --testDataLimit=100".format(indent, c.MODE_TEST))
sys.exit()
def fileExistsAndNotEmpty(fileName):
"""Checks if file exists and is not empty.
Args:
fileName: path to file for check
Returns:
True if file exists and is not empty, False otherwise.
"""
return os.path.isfile(fileName) and os.stat(fileName).st_size > 0
def clearConsole():
"""Clears console."""
os.system("cls")
def timestampToTime(timestamp):
"""Returns seconds converted to hours, minutes and seconds (HH:mm:ss).
Args:
timestamp: seconds
Returns:
Hours, minutes and seconds in HH:mm:ss format.
"""
return str(timedelta(seconds=timestamp)).split(".")[0]
```
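A few illustrative calls for the helpers above, assuming the module's own imports (`data`, `constants`) resolve; expected outputs are noted in comments:
```python
print(timestampToTime(3725.6))   # "1:02:05" – fractional seconds are dropped
print(timestampToTime(59))       # "0:00:59"
oneLinePrint("working... ")
clearConsoleLine()               # wipes the partial line so the next print starts clean
```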
{
"source": "jirkadanek/quiver",
"score": 2
} |
#### File: quiver/python/brokerlib.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import collections as _collections
import os as _os
import proton as _proton
import proton.handlers as _handlers
import proton.reactor as _reactor
import uuid as _uuid
import shutil as _shutil
import subprocess as _subprocess
import sys as _sys
import time as _time
import tempfile as _tempfile
import pathlib as _pathlib
class Broker(object):
    def __init__(self, scheme, host, port, id=None, user=None, password=None, ready_file=None,
cert=None, key=None, key_password=None, trusted_db=None):
self.scheme = scheme
self.host = host
self.port = port
self.id = id
self.user = user
self.password = password
self.ready_file = ready_file
self.cert = cert
self.key = key
self.key_password = key_password
self.trusted_db = trusted_db
if self.id is None:
self.id = "broker-{0}".format(_uuid.uuid4())
self.container = _reactor.Container(_Handler(self), self.id)
self._config_dir = None
def init(self):
if self.user is not None:
if self.password is None:
self.fail("A password is required for user authentication")
self._init_sasl_config()
if self.scheme == "amqps":
if self.key is None or self.cert is None:
self.fail("if scheme is amqps, key and cert files must be specified")
if not _pathlib.Path(self.key).is_file():
self.fail("key file %s does not exist" % (self.key))
if not _pathlib.Path(self.cert).is_file():
self.fail("cert file %s does not exist" % (self.cert))
if self.trusted_db and not _pathlib.Path(self.trusted_db).is_file():
self.fail("trusted db file %s does not exist" % (self.trusted_db))
def _init_sasl_config(self):
self._config_dir = _tempfile.mkdtemp(prefix="brokerlib-", suffix="")
config_file = _os.path.join(self._config_dir, "proton-server.conf")
sasldb_file = _os.path.join(self._config_dir, "users.sasldb")
_os.environ["PN_SASL_CONFIG_PATH"] = self._config_dir
with open(config_file, "w") as f:
f.write("sasldb_path: {0}\n".format(sasldb_file))
f.write("mech_list: PLAIN SCRAM-SHA-1\n")
command = "echo '{0}' | saslpasswd2 -p -f {1} '{2}'".format \
(self.password, sasldb_file, self.user)
try:
_subprocess.check_call(command, shell=True)
except _subprocess.CalledProcessError as e:
self.fail("Failed adding user to SASL database: {0}", e)
def info(self, message, *args):
pass
def notice(self, message, *args):
pass
def warn(self, message, *args):
pass
def error(self, message, *args):
_sys.stderr.write("{0}\n".format(message.format(*args)))
_sys.stderr.flush()
def fail(self, message, *args):
self.error(message, *args)
_sys.exit(1)
def run(self):
self.container.run()
        if self._config_dir is not None and _os.path.exists(self._config_dir):
            _shutil.rmtree(self._config_dir, ignore_errors=True)
class _Queue(object):
def __init__(self, broker, address):
self.broker = broker
self.address = address
self.messages = _collections.deque()
self.consumers = _collections.deque()
self.broker.info("Created {0}", self)
def __repr__(self):
return "queue '{0}'".format(self.address)
def add_consumer(self, link):
assert link.is_sender
assert link not in self.consumers
self.consumers.append(link)
self.broker.info("Added consumer for {0} to {1}", link.connection, self)
def remove_consumer(self, link):
assert link.is_sender
try:
self.consumers.remove(link)
except ValueError:
return
self.broker.info("Removed consumer for {0} from {1}", link.connection, self)
def store_message(self, delivery, message):
self.messages.append(message)
self.broker.notice("Stored {0} from {1} on {2}", message, delivery.connection, self)
def forward_messages(self):
credit = sum([x.credit for x in self.consumers])
sent = 0
if credit == 0:
return
while sent < credit:
for consumer in self.consumers:
if consumer.credit == 0:
continue
try:
message = self.messages.popleft()
except IndexError:
self.consumers.rotate(sent)
return
consumer.send(message)
sent += 1
self.broker.notice("Forwarded {0} on {1} to {2}", message, self, consumer.connection)
self.consumers.rotate(sent)
class _Handler(_handlers.MessagingHandler):
def __init__(self, broker):
super(_Handler, self).__init__()
self.broker = broker
self.queues = dict()
self.verbose = False
def on_start(self, event):
interface = "{0}://{1}:{2}".format(self.broker.scheme, self.broker.host, self.broker.port)
if self.broker.scheme == "amqps":
server_ssl_domain = event.container.ssl.server
server_ssl_domain.set_credentials(self.broker.cert, self.broker.key, self.broker.key_password)
if self.broker.trusted_db:
server_ssl_domain.set_trusted_ca_db(self.broker.trusted_db)
server_ssl_domain.set_peer_authentication(_proton.SSLDomain.VERIFY_PEER, self.broker.trusted_db)
else:
server_ssl_domain.set_peer_authentication(_proton.SSLDomain.ANONYMOUS_PEER)
self.acceptor = event.container.listen(interface)
self.broker.notice("Listening for connections on '{0}'", interface)
if self.broker.ready_file is not None:
_time.sleep(0.1) # XXX
with open(self.broker.ready_file, "w") as f:
f.write("ready\n")
def get_queue(self, address):
try:
queue = self.queues[address]
except KeyError:
queue = self.queues[address] = _Queue(self.broker, address)
return queue
def on_link_opening(self, event):
if event.link.is_sender:
if event.link.remote_source.dynamic:
address = "{0}/{1}".format(event.connection.remote_container, event.link.name)
else:
address = event.link.remote_source.address
assert address is not None
event.link.source.address = address
queue = self.get_queue(address)
queue.add_consumer(event.link)
if event.link.is_receiver:
address = event.link.remote_target.address
event.link.target.address = address
def on_link_closing(self, event):
if event.link.is_sender:
queue = self.queues[event.link.source.address]
queue.remove_consumer(event.link)
def on_connection_init(self, event):
event.transport.sasl().allow_insecure_mechs=True
def on_connection_opening(self, event):
# XXX I think this should happen automatically
event.connection.container = event.container.container_id
def on_connection_opened(self, event):
self.broker.notice("Opened connection from {0}", event.connection)
def on_connection_closing(self, event):
self.remove_consumers(event.connection)
def on_connection_closed(self, event):
self.broker.notice("Closed connection from {0}", event.connection)
def on_disconnected(self, event):
self.broker.notice("Disconnected from {0}", event.connection)
self.remove_consumers(event.connection)
def remove_consumers(self, connection):
link = connection.link_head(_proton.Endpoint.REMOTE_ACTIVE)
while link is not None:
if link.is_sender:
queue = self.queues[link.source.address]
queue.remove_consumer(link)
link = link.next(_proton.Endpoint.REMOTE_ACTIVE)
def on_link_flow(self, event):
if event.link.is_sender and event.link.drain_mode:
event.link.drained()
def on_sendable(self, event):
queue = self.get_queue(event.link.source.address)
queue.forward_messages()
def on_settled(self, event):
template = "Container '{0}' {1} {2} to {3}"
container = event.connection.remote_container
source = event.link.source
delivery = event.delivery
if delivery.remote_state == delivery.ACCEPTED:
self.broker.info(template, container, "accepted", delivery, source)
elif delivery.remote_state == delivery.REJECTED:
self.broker.warn(template, container, "rejected", delivery, source)
elif delivery.remote_state == delivery.RELEASED:
self.broker.notice(template, container, "released", delivery, source)
elif delivery.remote_state == delivery.MODIFIED:
self.broker.notice(template, container, "modified", delivery, source)
def on_message(self, event):
message = event.message
delivery = event.delivery
address = event.link.target.address
if address is None:
address = message.address
queue = self.get_queue(address)
queue.store_message(delivery, message)
queue.forward_messages()
#
# def on_unhandled(self, name, event):
# _sys.stderr.write("{0} {1}\n".format(name, event))
# _sys.stderr.flush()
if __name__ == "__main__":
def _print(message, *args):
message = message.format(*args)
_sys.stderr.write("{0}\n".format(message))
_sys.stderr.flush()
class _Broker(Broker):
def info(self, message, *args): _print(message, *args)
def notice(self, message, *args): _print(message, *args)
def warn(self, message, *args): _print(message, *args)
try:
host, port = _sys.argv[1:3]
except IndexError:
_print("Usage: brokerlib <host> <port>")
_sys.exit(1)
try:
port = int(port)
except ValueError:
_print("The port must be an integer")
_sys.exit(1)
    # "amqp" scheme assumed for standalone use; Broker.__init__ requires it as the first argument
    broker = _Broker("amqp", host, port)
try:
broker.run()
except KeyboardInterrupt:
pass
```
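To exercise the broker end to end, a Proton client can send itself a message through a queue; a minimal sketch, assuming the broker above is already listening on localhost:5672 and the `python-qpid-proton` package is installed:
```python
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container

class SendReceive(MessagingHandler):
    """Send one message to an address on the broker and read it back."""

    def __init__(self, url, address):
        super(SendReceive, self).__init__()
        self.url = url
        self.address = address
        self.sent = False

    def on_start(self, event):
        conn = event.container.connect(self.url)
        event.container.create_receiver(conn, self.address)
        self.sender = event.container.create_sender(conn, self.address)

    def on_sendable(self, event):
        if not self.sent and self.sender.credit:
            self.sender.send(Message(body="hello broker"))
            self.sent = True

    def on_message(self, event):
        print("received:", event.message.body)
        event.connection.close()

Container(SendReceive("localhost:5672", "examples")).run()
```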
{
"source": "Jirka-Lhotka/emb2emb",
"score": 2
} |
#### File: emb2emb/autoencoders/data_loaders.py
```python
import sys
sys.path.append("../")
import os
import numpy as np
import h5py
from pathlib import Path
import torch
from torch.utils import data
from collections import defaultdict
from progress.bar import Bar
import argparse
from tokenizers import (CharBPETokenizer, SentencePieceBPETokenizer)
TOKENIZER_LIST = ["CharBPETokenizer",
"SentencePieceBPETokenizer"]
def get_params():
parser = argparse.ArgumentParser()
parser.add_argument("input_text_file", type=str,
help="The text file to load dataset from.")
parser.add_argument("output_file", type=str,
help="The .h5 file to output the dataset to.")
parser.add_argument("batch_size", type=int,
help="The batch size which the dataset will batched into.")
# From-Scratch Data Loading
parser.add_argument("-v", "--vocab_file", default="vocab", type=str,
help="The file to output the dataset vocab / tokenizer model to.")
parser.add_argument("-mf", "--min_freq", type=int, default=5,
help="The min frequency to accept a word in vocab.")
parser.add_argument("-mw", "--max_words", type=int, default=30000,
help="The max number of words to have in the vocab.")
# Pre-Trained Tokenizer
parser.add_argument("-t", "--tokenizer", required=True, type=str,
help="Specify the tokenizer to use.", choices=TOKENIZER_LIST)
parser.add_argument("--location", type=str,
help="Path where to find the tokenizer", default=None)
params, _ = parser.parse_known_args()
return params
def generate_dataset_with_tokenizer(TEXT_FILE,
DATASET_FILE,
TOKENIZER,
MAX_SENTENCE_LENGTH,
BATCH_SIZE=64,
MIN_FREQ=5,
MAX_FILE_SIZE_BATCHES=2000000,
                                    MAX_WORDS=30000):  # note: with a batch size of 64 and MAX_FILE_SIZE_BATCHES of 200k, each file comes to roughly 1.5-2 GB
TOKENIZER.train([TEXT_FILE], vocab_size=MAX_WORDS, special_tokens=[
"[PAD]", "<unk>", "<SOS>", "<EOS>"], min_frequency=MIN_FREQ)
TOKENIZER.save("/".join(DATASET_FILE.split("/")[:-1]), "tokenizer")
###### Save sequences to dataset #####
file_counter = 0
dataset = h5py.File(DATASET_FILE + str(file_counter) + ".h5", 'w')
sent_counter = 0
batch_counter = 0
ided_sentences_by_length = defaultdict(list)
with Bar('Writing sentences to hdf5') as bar:
with open(TEXT_FILE, 'r') as f:
def save_to_h5(sentlist, length):
nonlocal dataset, batch_counter, file_counter, MAX_FILE_SIZE_BATCHES
lengths_batch = np.array(
[length] * len(sentlist), dtype=np.uint32)
sentences_batch = np.zeros(
(len(sentlist), length), dtype=np.uint32)
for i, s in enumerate(sentlist):
for j, index in enumerate(s):
sentences_batch[i, j] = index
g = dataset.create_group("BATCH" + str(batch_counter))
g.create_dataset('data', data=sentences_batch)
g.create_dataset('length', data=lengths_batch)
batch_counter += 1
if (batch_counter % MAX_FILE_SIZE_BATCHES) == 0:
dataset.close()
file_counter += 1
dataset = h5py.File(
DATASET_FILE + str(file_counter) + ".h5", 'w')
for line in f:
ided = TOKENIZER.encode(
"<SOS>" + line.rstrip() + "<EOS>").ids
ided_len = len(ided)
if ided_len >= 2 and ided_len <= MAX_SENTENCE_LENGTH:
ided_sentences_by_length[ided_len].append(ided)
# ided_sentences.append(sentence_ids)
sent_counter += 1
n_sent_by_len = len(ided_sentences_by_length[ided_len])
if n_sent_by_len == BATCH_SIZE:
save_to_h5(
ided_sentences_by_length[ided_len], ided_len)
ided_sentences_by_length[ided_len] = []
bar.next()
# push out all remaining sentences
for k, v in ided_sentences_by_length.items():
if len(v) > 0:
save_to_h5(v, k)
dataset.close()
def _tokens_to_index(token_list, word2index):
index_list = [word2index["<SOS>"]]
for t in token_list:
if t in word2index:
index_list.append(word2index[t])
index_list.append(word2index["<EOS>"])
return index_list
class HDF5Dataset(data.Dataset):
"""Represents an abstract HDF5 dataset.
Input params:
file_path: Path to the folder containing the dataset (one or multiple HDF5 files).
recursive: If True, searches for h5 files in subdirectories.
load_data: If True, loads all the data immediately into RAM. Use this if
            the dataset fits into memory. Otherwise, leave this at false and
the data will load lazily.
data_cache_size: Number of HDF5 files that can be cached in the cache (default=3).
transform: PyTorch transform to apply to every data instance (default=None).
"""
def __init__(self, file_path, recursive, load_data, data_cache_size=3, transform=None):
super().__init__()
self.data_info_type = {}
self.data_info = []
self.data_cache = {}
self.data_cache_size = data_cache_size
self.transform = transform
# Search for all h5 files
p = Path(file_path)
assert(p.is_dir())
if recursive:
files = sorted(p.glob('**/*.h5'))
else:
files = sorted(p.glob('*.h5'))
if len(files) < 1:
raise RuntimeError('No hdf5 datasets found')
for h5dataset_fp in files:
self._add_data_infos(str(h5dataset_fp.resolve()), load_data)
def __getitem__(self, index):
# get data
x = self.get_data("data", index).astype("int64")
if self.transform:
x = self.transform(x)
else:
x = torch.from_numpy(x)
# get length
y = self.get_data("length", index).astype("int64")
y = torch.from_numpy(y)
return (x, y)
def __len__(self):
return len(self.get_data_infos('data'))
def _add_data_infos(self, file_path, load_data):
with h5py.File(file_path, 'r') as h5_file:
# Walk through all groups, extracting datasets
for gname, group in h5_file.items():
for dname, ds in group.items():
# if data is not loaded its cache index is -1
idx = -1
if load_data:
# add data to the data cache
idx = self._add_to_cache(ds.value, file_path)
# type is derived from the name of the dataset; we expect the dataset
# name to have a name such as 'data' or 'label' to identify its type
# we also store the shape of the data in case we need it
self.data_info.append(
{'file_path': file_path, 'type': dname, 'shape': ds[()].shape, 'cache_idx': idx})
def _load_data(self, file_path):
"""Load data to the cache given the file
path and update the cache index in the
data_info structure.
"""
with h5py.File(file_path, 'r') as h5_file:
for gname, group in h5_file.items():
for dname, ds in group.items():
# add data to the data cache and retrieve
# the cache index
idx = self._add_to_cache(ds[()], file_path)
# find the beginning index of the hdf5 file we are looking
# for
file_idx = next(i for i, v in enumerate(
self.data_info) if v['file_path'] == file_path)
# the data info should have the same index since we loaded
# it in the same way
self.data_info[file_idx + idx]['cache_idx'] = idx
# remove an element from data cache if size was exceeded
if len(self.data_cache) > self.data_cache_size:
# remove one item from the cache at random
removal_keys = list(self.data_cache)
removal_keys.remove(file_path)
self.data_cache.pop(removal_keys[0])
# remove invalid cache_idx
self.data_info = [{'file_path': di['file_path'], 'type': di['type'], 'shape': di['shape'],
'cache_idx': -1} if di['file_path'] == removal_keys[0] else di for di in self.data_info]
def _add_to_cache(self, data, file_path):
"""Adds data to the cache and returns its index. There is one cache
list for every file_path, containing all datasets in that file.
"""
if file_path not in self.data_cache:
self.data_cache[file_path] = [data]
else:
self.data_cache[file_path].append(data)
return len(self.data_cache[file_path]) - 1
def get_data_infos(self, type):
"""Get data infos belonging to a certain type of data.
"""
if type not in self.data_info_type:
data_info_type = [
di for di in self.data_info if di['type'] == type]
self.data_info_type[type] = data_info_type
else:
data_info_type = self.data_info_type[type]
return data_info_type
def get_data(self, type, i):
"""Call this function anytime you want to access a chunk of data from the
dataset. This will make sure that the data is loaded in case it is
not part of the data cache.
"""
fp = self.get_data_infos(type)[i]['file_path']
if fp not in self.data_cache:
self._load_data(fp)
# get new cache_idx assigned by _load_data_info
cache_idx = self.get_data_infos(type)[i]['cache_idx']
return self.data_cache[fp][cache_idx]
def get_tokenizer(tokenizer, location='bert-base-uncased'):
if tokenizer == "BERT":
return BertTokenizer.from_pretrained(location)
else:
if location is not None:
return eval(tokenizer)(vocab_file=location + '-vocab.json',
merges_file=location + '-merges.txt')
else:
return eval(tokenizer)()
if __name__ == "__main__":
params = get_params()
os.makedirs(os.path.dirname(params.output_file), exist_ok=True)
if params.tokenizer:
generate_dataset_with_tokenizer(params.input_text_file,
params.output_file,
get_tokenizer(
params.tokenizer, location=params.location),
100,
BATCH_SIZE=params.batch_size,
MIN_FREQ=params.min_freq,
MAX_WORDS=params.max_words)
```
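Since every HDF5 group written by `generate_dataset_with_tokenizer` already holds a full, length-homogeneous batch, the dataset is typically wrapped with `batch_size=1`; a sketch with a placeholder directory path:
```python
# Hypothetical consumption of the generated .h5 files; "datasets/my_corpus/" is a placeholder.
from torch.utils import data

dataset = HDF5Dataset("datasets/my_corpus/", recursive=False, load_data=False)
loader = data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=0)

for X, lengths in loader:
    X = X.squeeze(0)              # (batch, seq_len) token ids
    lengths = lengths.squeeze(0)  # (batch,) sequence lengths
    print(X.shape, lengths.max().item())
    break
```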
#### File: emb2emb/autoencoders/rnn_decoder.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import random
from .autoencoder import Decoder
class RNNDecoder(Decoder):
def __init__(self, config):
super(RNNDecoder, self).__init__(config)
self.teacher_forcing_ratio = config.teacher_forcing_ratio
self.unit_sphere = config.unit_sphere
self.teacher_forcing_batchwise = config.teacher_forcing_batchwise
self.config = config
self.device = config.device
self.vocab_size = config.vocab_size + 1
self.type = config.type
self.hidden_size = config.hidden_size
self.max_sequence_len = config.max_sequence_len
self.input_size = config.hidden_size
self.embedding = nn.Embedding(
self.vocab_size, config.input_size, padding_idx=0) # let 0 denote padding
self.eos_idx = config.eos_idx
self.sos_idx = config.sos_idx
self.unk_idx = config.unk_idx
self.word_dropout = config.word_dropout
self.layers = config.layers
# Consider using GRU?
if self.type == "LSTM":
self.decoder = nn.LSTM(
input_size=config.input_size,
hidden_size=self.hidden_size,
num_layers=self.layers,
batch_first=True
)
elif self.type == "GRU":
self.decoder = nn.GRU(
input_size=config.input_size,
hidden_size=self.hidden_size,
num_layers=self.layers,
batch_first=True
)
# let 0 denote padding
self.out = nn.Linear(self.hidden_size, config.vocab_size + 1)
def init_hidden(self, x):
if self.type == "LSTM":
return x.repeat(self.layers, 1, 1), torch.zeros(self.layers, x.shape[0], self.hidden_size, device=self.device)
elif self.type == "GRU":
return x.repeat(self.layers, 1, 1)
def decode(self, x, train=False, actual=None, lengths=None, beam_width=1):
if self.unit_sphere:
            x = x / x.norm(p=None, dim=-1, keepdim=True)  # normalize the input embedding (the original referenced an undefined h)
if not train:
if beam_width != 1:
return self.beam_decode(x, beam_width)
else:
return self.greedy_decode(x)
else:
h = self.init_hidden(x)
embedded_input = self.embedding(torch.tensor(
[[self.sos_idx]], device=self.device).repeat(x.shape[0], 1))
predictions = []
for t in range(1, lengths.max()):
output, h = self.decoder(embedded_input, h)
# lstm input: (batch, seq_len, input_size)
# lstm output: (batch, seq_len, hidden_size)
res = self.out(output.squeeze(1))
ret = res.clone()
ret *= torch.gt(lengths.reshape(-1, 1), t).float()
predictions.append(ret)
if random.random() < self.teacher_forcing_ratio:
next_token = actual[:, t].reshape(-1, 1)
else:
topv, topi = res.topk(1)
next_token = topi.detach()
if train and random.random() < self.word_dropout:
next_token = torch.tensor(
[[self.unk_idx]], device=self.device).repeat(x.shape[0], 1)
embedded_input = self.embedding(next_token)
predictions = torch.stack(predictions).permute(1, 0, 2)
# is: seq, batch, pred
# want: batch, seq, pred
# Add SOS prediction to the output
sos_padding = torch.zeros(
(x.shape[0], 1, self.vocab_size), device=self.device)
sos_padding[:, :, self.sos_idx] = 1
return torch.cat((sos_padding, predictions), 1)
def decode_teacher_forcing(self, x, actual, lengths):
h = self.init_hidden(x)
# We want to feed everything but the last element (so the network can
# predict the <EOS> token). We copy the actual sequence, remove <EOS>
# token, then reshape the seq_len.
teacher_input = actual.clone()
teacher_input[torch.arange(
teacher_input.shape[0], device=self.device), lengths - 1] = 0
        if self.training and self.word_dropout > 0.:
mask = torch.rand_like(
teacher_input, device=teacher_input.device) < self.word_dropout
teacher_input[mask] = self.unk_idx
embedded_teacher = self.embedding(
teacher_input[:, :teacher_input.shape[1] - 1])
packed_teacher = pack_padded_sequence(
embedded_teacher, lengths - 1, batch_first=True)
packed_output, h = self.decoder(packed_teacher, h)
output, _ = pad_packed_sequence(packed_output, batch_first=True)
# A "hacky" way to run the dense layer per timestep
predictions = self.out(
output.contiguous().view(
-1, output.shape[2])).reshape(
output.shape[0], output.shape[1], self.vocab_size)
# Add SOS prediction to the output
sos_padding = torch.zeros(
(x.shape[0], 1, self.vocab_size), device=self.device)
sos_padding[:, :, self.sos_idx] = 1
return torch.cat((sos_padding, predictions), 1)
# return self.softmax(predictions) # Commented since cross entropy
# does a softmax
def decode_train_greedy(self, x, lengths):
h = self.init_hidden(x)
embedded_input = self.embedding(torch.tensor(
[[self.sos_idx]], device=self.device).repeat(x.shape[0], 1))
predictions = []
for t in range(1, lengths.max()):
output, h = self.decoder(embedded_input, h)
# lstm input: (batch, seq_len, input_size)
# lstm output: (batch, seq_len, hidden_size)
res = self.out(output.squeeze(1))
ret = res.clone()
ret *= torch.gt(lengths.reshape(-1, 1), t).float()
predictions.append(ret)
topv, topi = res.topk(1)
embedded_input = self.embedding(topi.detach())
predictions = torch.stack(predictions).permute(1, 0, 2)
# is: seq, batch, pred
# want: batch, seq, pred
# Add SOS prediction to the output
sos_padding = torch.zeros(
(x.shape[0], 1, self.vocab_size), device=self.device)
sos_padding[:, :, self.sos_idx] = 1
return torch.cat((sos_padding, predictions), 1)
# Removes the extra EOS tokens added
def clip_predictions(self, pred):
results = []
for s in pred:
curr = []
for idx in s:
curr.append(idx)
if idx == self.eos_idx:
break
results.append(curr)
return results
class BeamNode:
def __init__(self, hidden_state, previous_node, word_id, log_prob, length):
self.hidden_state = hidden_state
self.previous_node = previous_node
self.word_id = word_id
self.log_prob = log_prob
self.length = length
# Greedy decode for LSTMAE and LSTMAE
def greedy_decode(self, x):
h = self.init_hidden(x)
embedded_input = self.embedding(torch.tensor(
[[self.sos_idx]], device=self.device).repeat(x.shape[0], 1))
predictions = [[self.sos_idx] for _ in range(x.shape[0])]
for t in range(1, self.max_sequence_len):
output, h = self.decoder(embedded_input, h)
res = self.out(output.squeeze(1))
topv, topi = res.topk(1)
done_count = 0
for b in range(x.shape[0]):
if predictions[b][-1] != self.eos_idx:
predictions[b].append(topi[b].cpu().item())
# if last token placed, and not eos, just cut off
if t == self.max_sequence_len - 1 and predictions[b][-1] != self.eos_idx:
predictions[b].append(self.eos_idx)
else:
done_count += 1
if done_count == x.shape[0]:
break
embedded_input = self.embedding(topi.detach())
return self.clip_predictions(predictions)
# Only works for LSTM
def beam_decode(self, x, beam_width=10):
# x = (batch, hidden_size)
# hidden_lstm = (layers, batch, hidden)
h = self.init_hidden(x)
decoded = [None for i in range(x.shape[0])]
# beam_width nodes per batch
incomplete = {ba: [
self.BeamNode(h, None, torch.tensor(self.sos_idx, device=self.device), 0, 1) for be in range(beam_width)
] for ba in range(x.shape[0])}
# create first hypotheses:
# lstm input: (batch, seq_len, input_size)
# lstm output: (batch, seq_len, hidden_size)
embedded_input = self.embedding(torch.tensor(
[[self.sos_idx]], device=self.device).repeat(x.shape[0], 1))
decoder_output, h = self.decoder(embedded_input, h)
# h_n of shape (num_layers, batch, hidden_size)
for b in range(x.shape[0]):
# decoder_output[b] shape: (1, hidden_size)
log_probs = F.log_softmax(
self.out(decoder_output[b]), dim=1).squeeze(0)
k_log_probs, k_indices = torch.topk(log_probs, beam_width)
for i in range(beam_width):
prev_node = incomplete[b][i]
if self.type == "LSTM":
incomplete[b][i] = self.BeamNode(
(h[0][:, b], h[1][:, b]), prev_node, k_indices[i], k_log_probs[i], 2)
elif self.type == "GRU":
incomplete[b][i] = self.BeamNode(
h[:, b], prev_node, k_indices[i], k_log_probs[i], 2)
for t in range(2, self.max_sequence_len):
if len(incomplete) == 0:
break
# Prepare step [ batch1_beams | batch2_beams | | ]
embedding_input = torch.tensor(
[beam.word_id for batch in incomplete for beam in incomplete[batch]], device=self.device)
# keep track of the order which beams are put in
input_order = [batch for batch in incomplete]
# embedding_input shape: (batch * beam_len)
embedding_input = embedding_input.reshape(-1, 1)
# embedding_input shape: (batch*beam_len, 1[seq_len])
embedded_input = self.embedding(embedding_input)
# embedded_input shape: (batch*beam_len, 1, input_size)
# want: h_prev of (num_layers, batch*beam_len, input_size)
# Do (batch*beam_len, num_layers, input_size) then move axis
if self.type == "LSTM":
h_prev = torch.stack(
[beam.hidden_state[0] for batch in incomplete for beam in incomplete[batch]]).permute(1, 0, 2)
c_prev = torch.stack(
[beam.hidden_state[1] for batch in incomplete for beam in incomplete[batch]]).permute(1, 0, 2)
h = (h_prev.contiguous(), c_prev.contiguous())
elif self.type == "GRU":
h = torch.stack(
[beam.hidden_state for batch in incomplete for beam in incomplete[batch]]).permute(1, 0, 2).contiguous()
decoder_output, h = self.decoder(embedded_input, h)
# lstm output: (batch*beam_len, 1, hidden_size)
for batch_index, batch in enumerate(input_order):
# Each batch is a seperate beam search.
# Get the probabilites from each beam
log_probs = F.log_softmax(self.out(
decoder_output[batch_index * beam_width:(batch_index + 1) * beam_width].squeeze(1)), dim=1)
# Put all the beam probabilities in a single vector, with the
# full seq prob
seq_probs = torch.cat(
[incomplete[batch][i].log_prob + log_probs[i] for i in range(beam_width)])
# Get the top k
k_seq_probs, k_indices = torch.topk(seq_probs, beam_width)
new_beams = []
for seq_prob, index in zip(k_seq_probs, k_indices):
beam_index = index // self.vocab_size
word_index = index % self.vocab_size
prev_beam = incomplete[batch][beam_index]
if word_index == self.eos_idx:
# we hit the end of the sequence! Therefore, this element
# of the batch is now complete.
# Since we wont be training, we will turn these into regular
# values, rather than tensors.
seq = [self.eos_idx]
prev = prev_beam
while prev != None:
seq.append(prev.word_id.cpu().item())
prev = prev.previous_node
seq = seq[::-1]
decoded[batch] = seq
del incomplete[batch]
break
if self.type == "LSTM":
new_beams.append(
self.BeamNode(
(h[0][:, batch_index * beam_width + beam_index],
h[1][:, batch_index * beam_width + beam_index]),
prev_beam,
word_index,
seq_prob,
prev_beam.length + 1))
elif self.type == "GRU":
new_beams.append(
self.BeamNode(
h[:, batch_index * beam_width + beam_index],
prev_beam,
word_index,
seq_prob,
prev_beam.length + 1))
# if we didn't complete the sequence
if batch in incomplete:
incomplete[batch] = new_beams
# For elements which hit the max seq length, we will cut them off at the
# most probable sequence so far.
for batch in incomplete:
seq = [self.eos_idx]
# The first beam will be the most probable sequence so far
prev = incomplete[batch][0]
while prev != None:
seq.append(prev.word_id.cpu().item())
prev = prev.previous_node
seq = seq[::-1]
decoded[batch] = seq
return self.clip_predictions(decoded)
```
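The only non-obvious bookkeeping in `beam_decode` is how a finished hypothesis is turned back into a token sequence: each `BeamNode` points at its predecessor, so the code walks the `previous_node` links and reverses the result. A stripped-down stand-in (not the real class) showing just that step:
```python
# Isolated illustration of the backtracking used in beam_decode above.
class Node:
    def __init__(self, previous, word_id):
        self.previous_node = previous
        self.word_id = word_id

root = Node(None, 2)    # <SOS>
mid = Node(root, 17)    # some token
leaf = Node(mid, 3)     # <EOS>

seq = []
node = leaf
while node is not None:
    seq.append(node.word_id)
    node = node.previous_node
print(seq[::-1])        # [2, 17, 3]
```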
#### File: Jirka-Lhotka/emb2emb/emb2emb_autoencoder.py
```python
from torch.nn.utils.rnn import pad_sequence
from autoencoders.autoencoder import AutoEncoder
from autoencoders.rnn_encoder import RNNEncoder
from autoencoders.rnn_decoder import RNNDecoder
from emb2emb.encoding import Encoder, Decoder
from tokenizers import CharBPETokenizer, SentencePieceBPETokenizer
from emb2emb.utils import Namespace
import torch
import os
import json
import copy
HUGGINGFACE_TOKENIZERS = ["CharBPETokenizer", "SentencePieceBPETokenizer"]
def tokenize(s):
# TODO: more sophisticated tokenization
return s.split()
def get_tokenizer(tokenizer, location='bert-base-uncased'):
# TODO: do we need to pass more options to the file?
tok = eval(tokenizer)(vocab_file=location + '-vocab.json',
merges_file=location + '-merges.txt')
tok.add_special_tokens(["[PAD]", "<unk>", "<SOS>", "<EOS>"])
return tok
def get_autoencoder(config):
if os.path.exists(config["default_config"]):
with open(config["default_config"]) as f:
model_config_dict = json.load(f)
else:
model_config_dict = {}
with open(os.path.join(config["modeldir"], "config.json")) as f:
orig_model_config = json.load(f)
model_config_dict.update(orig_model_config)
model_config = Namespace()
model_config.__dict__.update(model_config_dict)
tokenizer = get_tokenizer(
model_config.tokenizer, model_config.tokenizer_location)
model_config.__dict__["vocab_size"] = tokenizer.get_vocab_size()
model_config.__dict__["sos_idx"] = tokenizer.token_to_id("<SOS>")
model_config.__dict__["eos_idx"] = tokenizer.token_to_id("<EOS>")
model_config.__dict__["unk_idx"] = tokenizer.token_to_id("<unk>")
model_config.__dict__["device"] = config["device"]
encoder_config, decoder_config = copy.deepcopy(
model_config), copy.deepcopy(model_config)
encoder_config.__dict__.update(model_config.__dict__[model_config.encoder])
encoder_config.__dict__["tokenizer"] = tokenizer
decoder_config.__dict__.update(model_config.__dict__[model_config.decoder])
if model_config.encoder == "RNNEncoder":
encoder = RNNEncoder(encoder_config)
if model_config.decoder == "RNNDecoder":
decoder = RNNDecoder(decoder_config)
model = AutoEncoder(encoder, decoder, tokenizer, model_config)
checkpoint = torch.load(os.path.join(
config["modeldir"], model_config.model_file), map_location=config["device"])
model.load_state_dict(checkpoint["model_state_dict"])
return model
class AEEncoder(Encoder):
def __init__(self, config):
super(AEEncoder, self).__init__(config)
self.device = config["device"]
self.model = get_autoencoder(config)
self.use_lookup = self.model.encoder.variational
def _prepare_batch(self, indexed, lengths):
X = pad_sequence([torch.tensor(index_list, device=self.device)
for index_list in indexed], batch_first=True, padding_value=0)
lengths, idx = torch.sort(torch.tensor(
lengths, device=self.device).long(), descending=True)
return X[idx], lengths, idx
def _undo_batch(self, encoded, sort_idx):
ret = [[] for _ in range(encoded.shape[0])]
for i, c in zip(sort_idx, range(encoded.shape[0])):
ret[i] = encoded[c]
return torch.stack(ret)
def encode(self, S_list):
indexed = [self.model.tokenizer.encode(
"<SOS>" + s + "<EOS>").ids for s in S_list]
lengths = [len(i) for i in indexed]
X, X_lens, sort_idx = self._prepare_batch(indexed, lengths)
encoded = self.model.encode(X, X_lens)
# Since _prepare_batch sorts by length, we will need to undo this.
return self._undo_batch(encoded, sort_idx)
class AEDecoder(Decoder):
def __init__(self, config):
super(AEDecoder, self).__init__()
self.device = config["device"]
self.model = get_autoencoder(config)
def _prepare_batch(self, indexed, lengths):
X = pad_sequence([torch.tensor(index_list, device=self.device)
for index_list in indexed], batch_first=True, padding_value=0)
#lengths, idx = torch.sort(torch.tensor(lengths, device=self.device).long(), descending=True)
# return X[idx], lengths, idx
lengths = torch.tensor(lengths, device=self.device).long()
return X, lengths
def _encode(self, S_list):
indexed = [self.model.tokenizer.encode(
"<SOS>" + s + "<EOS>").ids for s in S_list]
lengths = [len(i) for i in indexed]
X, X_lens = self._prepare_batch(indexed, lengths)
return X, X_lens
def predict(self, S_batch, target_batch=None):
if self.training:
target_batch, target_length = self._encode(target_batch)
out = self.model.decode_training(
S_batch, target_batch, target_length)
return out, target_batch
else:
return self.model.decode(S_batch, beam_width=15)
def prediction_to_text(self, predictions):
predictions = [self.model.tokenizer.decode(
p, skip_special_tokens=True) for p in predictions]
return predictions
```
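`get_autoencoder`, `AEEncoder` and `AEDecoder` only read three keys from the plain-dict config; a sketch with placeholder paths (the model directory must contain the `config.json` and checkpoint written at training time):
```python
# Hypothetical wiring; both paths are placeholders.
import torch

config = {
    "default_config": "autoencoders/config/default.json",
    "modeldir": "checkpoints/my_autoencoder/",
    "device": torch.device("cuda" if torch.cuda.is_available() else "cpu"),
}
encoder = AEEncoder(config)
embeddings = encoder.encode(["this is a sentence .", "and another one ."])
print(embeddings.shape)   # one fixed-size embedding per input sentence
```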
#### File: emb2emb/emb2emb/utils.py
```python
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def word_index_mapping(vocab):
word2index = {}
index2word = {}
for index, word in enumerate(vocab):
word2index[word] = index + 1 # plus 1, since the 0th index is padding
index2word[index + 1] = word
return word2index, index2word
```
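A tiny illustration of `word_index_mapping`: indices start at 1 because 0 is reserved for padding elsewhere in the codebase:
```python
vocab = ["<SOS>", "<EOS>", "hello", "world"]
word2index, index2word = word_index_mapping(vocab)
assert word2index["<SOS>"] == 1 and index2word[1] == "<SOS>"
assert word2index["world"] == 4
assert 0 not in index2word        # padding index stays unused
```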
{
"source": "Jirka-Mayer/BachelorThesis",
"score": 4
} |
#### File: BachelorThesis/app/Dataset.py
```python
import numpy as np
from typing import Optional, List
class Dataset:
"""
Abstract class representing a dataset
Handles the data feeding logic and exposes abstract methods that are used
as the data source.
"""
def __init__(self):
# permutation used for data retrieval (when training)
self.permutation: Optional[np.ndarray] = None
###########################
# Internal data interface #
###########################
@property
def size(self) -> int:
raise NotImplementedError("Override me")
def get_annotation(self, index: int) -> str:
raise NotImplementedError("Override me")
def get_image(self, index: int) -> np.ndarray:
raise NotImplementedError("Override me")
###########################
# External data interface #
###########################
def prepare_epoch(self):
"""Call this before you start training an epoch"""
self.permutation = np.random.permutation(self.size)
def has_batch(self):
"""Returns true if there is at least one more batch to be returned"""
if self.permutation is None:
return False
elif len(self.permutation) == 0:
return False
return True
def next_batch(self, batch_size=1):
"""Returns the next batch for training"""
# take batch of indices
take = min(batch_size, len(self.permutation))
indices = self.permutation[0:take]
self.permutation = self.permutation[take:]
# resolve indices to data
picked_images: List[np.ndarray] = []
picked_annotations: List[str] = []
for i in indices:
picked_images.append(self.get_image(i))
picked_annotations.append(self.get_annotation(i))
return picked_images, picked_annotations
def count_batches(self, batch_size):
"""Returns the number of batches, with respect to a given batch size"""
return self.size // batch_size
```
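A minimal in-memory subclass is enough to see the feeding contract in action (the class and the toy data below are illustrative only, not part of the thesis code):
```python
import numpy as np

class ListDataset(Dataset):
    """Toy dataset backed by two parallel Python lists."""

    def __init__(self, images, annotations):
        super().__init__()
        self._images = images
        self._annotations = annotations

    @property
    def size(self) -> int:
        return len(self._images)

    def get_annotation(self, index: int) -> str:
        return self._annotations[index]

    def get_image(self, index: int) -> np.ndarray:
        return self._images[index]

dataset = ListDataset([np.zeros((64, 128))] * 4, ["C4 D4", "E4", "F4 G4", "A4"])
dataset.prepare_epoch()
while dataset.has_batch():
    images, annotations = dataset.next_batch(batch_size=2)
    print(len(images), annotations)
```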
#### File: BachelorThesis/app/Network.py
```python
import tensorflow as tf
import os
import cv2
import numpy as np
import datetime
import shutil
from typing import List, Optional
from app.sparse_tensor_from_sequences import sparse_tensor_from_sequences
from app.vocabulary import VOCABULARY
from app.Dataset import Dataset
class Network:
"""
In goes a grayscale image normalized to 0.0 - 1.0
and out goes a sequence of classes (int[]).
Number of classes is specified by a constructor parameter.
Image height is a constant determined by the architecture.
"""
IMAGE_HEIGHT = 64 # fixed by the CNN block architecture
IMAGE_PADDING_COLOR = 0.0 # color to put after image end in a batch tensor
NETWORK_SCOPE = "network"
def __init__(
self,
name: str = None,
vocabulary: Optional[List[str]] = None,
continual_saving: bool = False,
create_logdir: bool = False,
threads: int = 1,
):
# name of the model (for continual saving)
self.name: str = name
if self.name is None:
raise Exception("Network name has to be specified")
# vocabulary used for output encoding
self.vocabulary: List[str] = VOCABULARY if vocabulary is None else vocabulary
# number of output classes
self.num_classes: int = len(self.vocabulary)
# does the network save itself on improvement during dev evaluation?
self.continual_saving: bool = continual_saving
# whether summaries are logged or not
self._has_summaries: bool = create_logdir
# Create an empty graph and a session
self.graph = tf.Graph()
self.session = tf.Session(
graph=self.graph,
config=tf.ConfigProto(
inter_op_parallelism_threads=threads,
intra_op_parallelism_threads=threads
)
)
# List fields that will be initialized during network construction
self.images = None
self.image_widths = None
self.labels = None
self.is_training = None
self.learning_rate = None
self.training = None
self.reset_metrics = None
self.saver = None
# Construct the network
logdir = None
if create_logdir:
logdir = Network.create_logdir(self.name)
self.construct(logdir=logdir)
################################
# Network structure definition #
################################
def construct(self, logdir=None):
"""
        Constructs the computation TF graph.
Needs to be called before anything is done with the network
(right after the constructor).
"""
with self.session.graph.as_default():
# === input part ===
# dataset data input
self.images = tf.placeholder(
tf.float32,
[None, Network.IMAGE_HEIGHT, None],
name="images"
)
self.image_widths = tf.placeholder(
tf.int32,
[None],
name="widths"
)
self.labels = tf.sparse_placeholder(
tf.int32,
name="labels"
)
# metadata input
self.is_training = tf.placeholder(
tf.bool, [], name="is_training"
)
self.learning_rate = tf.placeholder(
tf.float32, [], name="learning_rate"
)
# === trainable part ===
with tf.variable_scope(Network.NETWORK_SCOPE):
# CNN
cnn_out_4d, widths = self._construct_cnn(
self.images,
self.image_widths
)
# RNN
logits = self._construct_rnn(cnn_out_4d)
# CTC
losses = self._construct_ctc(
logits,
self.labels,
widths
)
# === training logic part ===
self.training = self._construct_training(
self.learning_rate,
self.loss
)
# === summaries, metrics and others part ===
self._construct_metrics(losses, logdir)
self.reset_metrics = tf.variables_initializer(
tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
)
# Initialize variables
self.session.run(tf.global_variables_initializer())
# Saver
self.saver = tf.train.Saver(
var_list=tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
#scope=Network.NETWORK_SCOPE # nope, save it with global_step
)
)
def _construct_cnn(self, cnn_in_3d, widths):
"""Creates CNN layers and returns output of these layers"""
cnn_in_4d = tf.expand_dims(input=cnn_in_3d, axis=3)
# list of parameters for the layers
kernel_vals = [5, 5, 5, 3, 3, 3]
feature_vals = [1, 16, 32, 64, 128, 128, 256]
stride_vals = pool_vals = [(2, 2), (2, 2), (2, 1), (2, 1), (2, 1), (2, 1)]
num_layers = len(stride_vals)
# create layers
pool = cnn_in_4d # input to the first CNN layer
for i in range(num_layers):
kernel = tf.Variable(
tf.truncated_normal(
[
kernel_vals[i],
kernel_vals[i],
feature_vals[i],
feature_vals[i + 1]
],
stddev=0.1
)
)
conv = tf.nn.conv2d(
pool,
kernel,
padding="SAME",
strides=(1, 1, 1, 1)
)
# TODO: possible batch normalization here
relu = tf.nn.relu(conv)
pool = tf.nn.max_pool(
relu,
(1, pool_vals[i][0], pool_vals[i][1], 1),
(1, stride_vals[i][0], stride_vals[i][1], 1),
"VALID"
)
# update widths of the images
# (for RNN layers and CTC to know how wide is meaningful data)
if pool_vals[i][1] == 2:
widths = tf.floor_div(widths, tf.fill(tf.shape(widths), 2))
return pool, widths
def _construct_rnn(self, rnn_in_4d):
"""Creates RNN layers and returns output of these layers"""
rnn_in_3d = tf.squeeze(rnn_in_4d, axis=[1])
self.dropout = tf.placeholder(dtype=tf.float32, name="dropout")
# basic cells which are used to build RNN
num_hidden = 256
num_layers = 1
cells = [
tf.nn.rnn_cell.DropoutWrapper(
tf.contrib.rnn.LSTMCell(
num_units=num_hidden,
state_is_tuple=True
),
input_keep_prob=1 - self.dropout
)
for _ in range(num_layers)
]
# stack basic cells
stacked = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
# bidirectional RNN
# BxTxF -> BxTx2H
((fw, bw), _) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=stacked,
cell_bw=stacked,
inputs=rnn_in_3d,
dtype=rnn_in_3d.dtype
)
fully_num_hidden = 256
fully_layers = 0 # no fully connected layers after the RNN block
# BxTxH + BxTxH -> BxTx2H
rnn_outputs = tf.concat([fw, bw], 2)
fully_hidden = rnn_outputs
for _ in range(fully_layers):
fully_hidden = tf.contrib.layers.fully_connected(
fully_hidden,
fully_num_hidden,
activation_fn=None,
)
# reshape to output classes with a single fully connected layer
return tf.contrib.layers.fully_connected(
fully_hidden,
self.num_classes + 1,
activation_fn=None,
)
def _construct_ctc(self, logits, labels, logit_widths):
"""Creates the CTC loss and returns individual losses and their mean"""
# time major
logits = tf.transpose(logits, [1, 0, 2])
# WARNING:
        # my version of tensorflow (1.12.0) uses "num_classes - 1" as the blank
        # index; however, newer tensorflow versions use "0"
# if tf.__version__ not in ["1.12.0", "1.5.0"]: # these have been tested to work
# raise Exception("Make sure you know, how your blank is encoded!")
# loss
losses = tf.nn.ctc_loss(
labels,
logits,
logit_widths
)
self.loss = tf.reduce_mean(losses)
# beam predictions
top_beam_predictions, _ = tf.nn.ctc_beam_search_decoder(
logits,
logit_widths,
merge_repeated=False
)
self.predictions = top_beam_predictions[0]
# greedy predictions
top_greedy_predictions, _ = tf.nn.ctc_greedy_decoder(
logits,
logit_widths
)
self.greedy_predictions = top_greedy_predictions[0]
# edit distance
self.edit_distance = tf.reduce_mean(
tf.edit_distance(
self.predictions,
tf.cast(labels, tf.int64)
)
)
self.greedy_edit_distance = tf.reduce_mean(
tf.edit_distance(
self.greedy_predictions,
tf.cast(labels, tf.int64)
)
)
return losses
def _construct_training(self, learning_rate, loss):
"""Creates an optimizer"""
self.global_step = tf.train.create_global_step()
return tf.train.AdamOptimizer().minimize(
loss, global_step=self.global_step, name="training"
)
# return tf.train.RMSPropOptimizer(learning_rate).minimize(
# loss,
# global_step=self.global_step,
# name="training"
# )
def _construct_metrics(self, losses, logdir):
"""Creates summaries"""
self.current_edit_distance, self.update_edit_distance = tf.metrics.mean(self.edit_distance)
self.current_greedy_edit_distance, self.update_greedy_edit_distance = tf.metrics.mean(self.greedy_edit_distance)
self.current_loss, self.update_loss = tf.metrics.mean(losses)
# the following code handles logging
# so continue only if we have a logdir specified
if logdir is None:
return
summary_writer = tf.contrib.summary.create_file_writer(logdir, flush_millis=10 * 1000)
self.summaries = {}
with summary_writer.as_default(), tf.contrib.summary.record_summaries_every_n_global_steps(10):
self.summaries["train"] = [
tf.contrib.summary.scalar("train/loss", self.update_loss),
tf.contrib.summary.scalar("train/edit_distance", self.update_greedy_edit_distance)
]
with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
for dataset in ["dev", "test"]:
self.summaries[dataset] = [
tf.contrib.summary.scalar(dataset + "/loss", self.current_loss),
tf.contrib.summary.scalar(dataset + "/edit_distance", self.current_edit_distance)
]
with summary_writer.as_default():
tf.contrib.summary.initialize(
session=self.session,
graph=self.session.graph
)
###################
# Utility methods #
###################
@staticmethod
def create_logdir(model_name: str):
if not os.path.exists("tf-logs"):
os.mkdir("tf-logs")
return "tf-logs/{}-{}".format(
model_name,
datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")
)
@staticmethod
def normalize_image(img: np.ndarray):
# fix up image format
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
if img.max(initial=0) > 1.0:
img = img / 255
# normalize height
target = Network.IMAGE_HEIGHT
ratio = target / img.shape[0]
w = int(img.shape[1] * ratio)
return cv2.resize(img, (w, target), interpolation=cv2.INTER_AREA)
def encode_model_output(self, annotation: str) -> List[int]:
return [self.vocabulary.index(s) for s in annotation.split()]
def decode_model_output(self, model_output: List[int]) -> str:
return " ".join([self.vocabulary[i] for i in model_output])
def get_next_batch_from(self, dataset: Dataset, batch_size: int):
images, annotations = dataset.next_batch(batch_size)
take = len(images)
assert len(images) == len(annotations)
# pull data from dataset and normalize
norm_images = [Network.normalize_image(img) for img in images]
labels = [self.encode_model_output(a) for a in annotations]
# convert the data into tensors
max_image_width = max([i.shape[1] for i in norm_images])
image_tensor = np.empty(
shape=(take, Network.IMAGE_HEIGHT, max_image_width),
dtype=np.float32
)
image_widths = np.empty(shape=(take,), dtype=np.int32)
for i in range(take):
w = norm_images[i].shape[1]
image_tensor[i, :, 0:w] = norm_images[i]
image_tensor[i, :, w:] = Network.IMAGE_PADDING_COLOR
image_widths[i] = w
return (
image_tensor,
image_widths,
labels
)
#########################
# Training & prediction #
#########################
def train(self, train_dataset, dev_dataset, epochs, batch_size):
"""
Train the model for given number of epochs over a given training dataset
and evaluate after each epoch on a given testing dataset.
"""
for epoch in range(epochs):
self.train_epoch(
train_dataset,
dev_dataset,
epoch + 1,
epochs,
batch_size
)
def train_epoch(self, train_dataset, dev_dataset, epoch, epochs, batch_size):
batches = train_dataset.count_batches(batch_size)
batch = 1
train_dataset.prepare_epoch()
while train_dataset.has_batch():
images, widths, labels = self.get_next_batch_from(train_dataset, batch_size)
rate = self._calculate_learning_rate(self.get_global_step())
# vars to evaluate
evaluate = [self.loss, self.greedy_edit_distance, self.training]
if self._has_summaries:
evaluate.append(self.summaries["train"])
# train
self.session.run(self.reset_metrics)
evaluated = self.session.run(evaluate, {
self.images: images,
self.image_widths: widths,
self.labels: sparse_tensor_from_sequences(labels),
self.is_training: True,
self.learning_rate: rate,
self.dropout: 0.5
})
loss = evaluated[0]
greedy_edit_distance = evaluated[1]
print("Epoch: %d/%s Batch: %d/%d Loss: %f ED: %f" % (
epoch, str(epochs), batch, batches, loss, greedy_edit_distance
))
batch += 1
# and evaluate the performance after the epoch
return self._evaluate(dev_dataset, batch_size)
def _evaluate(self, dataset, batch_size, dataset_name="dev"):
batches = dataset.count_batches(batch_size)
batch = 1
self.session.run(self.reset_metrics)
dataset.prepare_epoch()
right_items = 0
all_items = 0
wrong_examples = []
while dataset.has_batch():
images, widths, labels = self.get_next_batch_from(dataset, batch_size)
predictions, _, _ = self.session.run([
self.predictions,
self.update_edit_distance,
self.update_loss
], {
self.images: images,
self.image_widths: widths,
self.labels: sparse_tensor_from_sequences(labels),
self.is_training: False,
self.dropout: 0.0
})
all_items += batch_size
offset = 0
for i in range(len(labels)):
indices = predictions.indices[predictions.indices[:, 0] == i, 1]
l = 0 if len(indices) == 0 else indices.max() + 1
label: List[int] = labels[i]
pred: List[int] = list(predictions.values[offset:offset+l])
ok = "[ok]" if label == pred else "[err]"
if label == pred:
right_items += 1
print(
ok,
self.decode_model_output(label),
" ==> ",
self.decode_model_output(pred)
)
offset += l
if label != pred:
wrong_examples.append((label, pred))
print("Batch: %d / %d" % (batch, batches))
batch += 1
word_accuracy = (right_items / all_items) * 100
edit_distance, loss = self.session.run([
self.current_edit_distance,
self.current_loss
])
print("Edit distance: %f Word accuracy: %f%% Loss: %f" % (edit_distance, word_accuracy, loss))
if word_accuracy >= 10: # do not show completely terrible results
print("Some wrong examples:")
for i in range(min(10, len(wrong_examples))):
print(
self.decode_model_output(wrong_examples[i][0]),
" ==> ",
self.decode_model_output(wrong_examples[i][1])
)
# save validation loss and edit distance to summaries
if self._has_summaries:
self.session.run(self.summaries[dataset_name])
# perform continual saving
if self.continual_saving:
self.save_if_better(edit_distance)
# return loss and edit distance
return loss, edit_distance
def predict(self, img):
"""Predicts symbols in a single image"""
img = Network.normalize_image(img)
width = img.shape[1]
predictions = self.session.run(self.predictions, {
self.images: [img],
self.image_widths: [width],
self.is_training: False,
self.dropout: 0.0
})
annotation: str = self.decode_model_output(predictions.values)
return annotation
def get_global_step(self):
"""Returns value of the global step"""
return self.session.run(self.global_step)
def _calculate_learning_rate(self, batches_trained):
# return 0.01
if batches_trained > 10000:
return 0.0001
elif batches_trained > 10:
return 0.001
else:
return 0.01
#####################
# Model persistence #
#####################
"""
Models are persisted in the following files:
- trained-models/{model-name}/model.index
- trained-models/{model-name}/model.meta
- trained-models/{model-name}/model.data-...
- trained-models/{model-name}/checkpoint
        - trained-models/{model-name}/model.edit_distance
- trained-models/{model-name}/model.vocabulary
"""
@staticmethod
def load(name: str, **kwargs):
"""Loads the model of a given name"""
if not Network.exists(name):
raise Exception("Model %s does not exist" % (name,))
vocabulary = Network._load_vocabulary(name)
network = Network(
name=name,
vocabulary=vocabulary,
**kwargs
)
network.saver.restore(
network.session,
network._get_model_path(network.name)
)
return network
def save(self, edit_distance=None):
"""Saves the model and also saves edit distance if provided"""
dirname = self._get_model_directory(self.name)
if not os.path.exists(dirname):
os.mkdir(dirname)
self.saver.save(
self.session,
self._get_model_path(self.name)
)
if edit_distance is not None:
self._save_edit_distance(self.name, edit_distance)
self._save_vocabulary(self.name)
def _save_edit_distance(self, model_name: str, edit_distance: float):
"""Saves the edit distance"""
dirname = self._get_model_directory(model_name)
if not os.path.exists(dirname):
os.mkdir(dirname)
with open(self._get_model_path(model_name) + ".edit_distance", "w") as file:
file.write(str(edit_distance))
def _save_vocabulary(self, model_name: str):
dirname = self._get_model_directory(model_name)
if not os.path.exists(dirname):
os.mkdir(dirname)
with open(self._get_model_path(model_name) + ".vocabulary", "w") as file:
file.write(str("\n".join(self.vocabulary)))
def save_if_better(self, edit_distance):
"""Saves the model only if it has smaller edit distance, than the saved"""
if self._get_saved_edit_distance(self.name) > edit_distance:
print("Saving...")
self.save(edit_distance)
def _get_saved_edit_distance(self, model_name: str) -> float:
"""Returns edit distance of the saved model"""
if not self.exists(model_name):
return float("inf")
with open(self._get_model_path(model_name) + ".edit_distance", "r") as file:
ed = float(file.read())
return ed
@staticmethod
def delete_model(model_name: str):
if Network.exists(model_name):
shutil.rmtree(Network._get_model_directory(model_name))
@staticmethod
def _load_vocabulary(model_name: str) -> List[str]:
with open(Network._get_model_path(model_name) + ".vocabulary", "r") as file:
return [l.strip() for l in file.readlines()]
@staticmethod
def exists(model_name: str) -> bool:
"""Returns true if a given model exists"""
return os.path.isdir(Network._get_model_directory(model_name))
@staticmethod
def _get_model_directory(model_name: str) -> str:
"""Returns directory path of a given model name"""
return os.path.dirname(os.path.realpath(__file__)) + \
"/../trained-models/" + model_name
@staticmethod
def _get_model_path(model_name: str) -> str:
"""Returns path for tensorflow saver to save the model to"""
return Network._get_model_directory(model_name) + "/model"
```
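A minimal usage sketch of the `Network` class above. The model name, dataset variables and the predicted image are placeholders for illustration, not part of the original repository; `train_dataset` and `dev_dataset` would be `app.Dataset` instances.
```python
# Hypothetical usage of the Network class above; "demo_model", the datasets and
# the input image are placeholders, not names from the original repository.
from app.Network import Network

network = Network(
    name="demo_model",        # example name only
    continual_saving=True,    # save automatically when dev edit distance improves
    create_logdir=True,       # write summaries to tf-logs/
    threads=4
)

# training (train_dataset / dev_dataset would be app.Dataset instances):
# network.train(train_dataset, dev_dataset, epochs=10, batch_size=10)

# later: restore the persisted model and predict on a single grayscale image
# network = Network.load("demo_model")
# annotation = network.predict(img)   # img: np.ndarray, returns a token string
```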
#### File: BachelorThesis/app/ParallelFeedingDataset.py
```python
import numpy as np
from app.Dataset import Dataset
from app.AnnotationsDataset import AnnotationsDataset
from threading import Thread
from queue import Queue
PRINT_DEBUG_INFO = False
class ParallelFeedingDataset(Dataset):
def __init__(self, source: Dataset, queue_size=100):
super().__init__()
# the source dataset that contains the actual data
self.source = source
# swap out the get_image method
self.original_source_get_image = self.source.get_image
self.source.get_image = self.get_image_source_replacement
# copy of self.permutation
self._indices_to_prepare: np.ndarray = None
# preparing thread
self._worker_thread: Thread = None
# queue of prepared images
self._queue = Queue(
maxsize=queue_size # buffer only so many images
# to keep the memory overhead constant
)
######################################
# Redirect API to the source dataset #
######################################
def check_dataset_visually(self, example_count=10):
if isinstance(self.source, AnnotationsDataset):
self.source.get_image = self.original_source_get_image
self.source.check_dataset_visually(example_count)
self.source.get_image = self.get_image_source_replacement
else:
raise Exception("Source dataset does not allow inspection")
@property
def size(self) -> int:
return self.source.size
def get_annotation(self, index: int) -> str:
return self.source.get_annotation(index)
def has_batch(self):
return self.source.has_batch()
def count_batches(self, batch_size):
return self.source.count_batches(batch_size)
######################################################
# Hook into important methods and not quite redirect #
######################################################
def prepare_epoch(self):
self.source.prepare_epoch()
self._indices_to_prepare = self.source.permutation.copy()
self._worker_thread = Thread(
target=self._worker_thread_loop,
daemon=True
)
self._worker_thread.start()
def next_batch(self, batch_size=1):
batch = self.source.next_batch(batch_size)
if not self.source.has_batch():
self._last_batch_prepared()
return batch
######################################################
# Replacement for the get_image method of the source #
######################################################
def get_image_source_replacement(self, index: int) -> np.ndarray:
if self._worker_thread is None:
raise Exception("prepare_epoch() hasn't been called")
given_img, given_index = self._queue.get(
block=True # wait for an item
)
assert given_index == index
return given_img
#######################
# Worker thread logic #
#######################
def _worker_thread_loop(self):
if PRINT_DEBUG_INFO:
print("[worker] Started.")
n = len(self._indices_to_prepare)
for i in range(n):
index = self._indices_to_prepare[i]
if PRINT_DEBUG_INFO:
print("[worker] Preparing item " + str(i))
img = self.original_source_get_image(index)
self._queue.put(
(img, index),
block=True # wait for a free slot (when queue full)
)
if PRINT_DEBUG_INFO:
print("[worker] Done.")
def _last_batch_prepared(self):
if PRINT_DEBUG_INFO:
print("Joining on the worker...")
self._worker_thread.join()
if PRINT_DEBUG_INFO:
print("Worker joined.")
```
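A short sketch of how this wrapper is meant to be used (the same pattern appears in `prepare_dataset` in `experiment_utils.py` below): wrap a dataset whose `get_image` is expensive, then iterate batches as usual while the worker thread pre-renders images into the bounded queue. `source_dataset` is a placeholder, not a name from the repository.
```python
# `source_dataset` is assumed to be any app.Dataset whose get_image() is costly
# (for example the Mashcima-backed AnnotationsDataset).
from app.ParallelFeedingDataset import ParallelFeedingDataset

def iterate_one_epoch(source_dataset, batch_size=10):
    dataset = ParallelFeedingDataset(source_dataset, queue_size=100)
    dataset.prepare_epoch()          # starts the prefetching worker thread
    while dataset.has_batch():
        images, annotations = dataset.next_batch(batch_size)
        # ... feed (images, annotations) to a model here ...
```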
#### File: Jirka-Mayer/BachelorThesis/confusion_matrix.py
```python
import json
from app.muscima_annotations import MUSCIMA_RAW_ANNOTATIONS
from app.editops_levenshtein import editops_levenshtein_sequenced
from app.vocabulary import repair_annotation
from app.vocabulary import trim_non_repeat_barlines
from app.vocabulary import to_generic
from app.vocabulary import get_pitch
from typing import List
GROUP_BY_KIND_WHEN_PRINTING = True
class Statistics:
JOINER = " → "
EMPTY_SEQUENCE = "∅"
def __init__(self, title, transformer=None):
# what to print on the report
self.title = title
# transforms token sequences before aggregation
self.transformer = transformer or (lambda x: x)
# internal statistics "seq": count
self.stats = {}
def add_replacements(self, replacements):
"""Adds replacements right from the modified Levenshtein func"""
for r in replacements:
self.add_replacement(r[0], r[1])
def add_replacement(self, pred, gold):
pred = self.transformer(pred)
gold = self.transformer(gold)
if len(pred) == 0:
pred_str = Statistics.EMPTY_SEQUENCE
else:
pred_str = " ".join(pred)
if len(gold) == 0:
gold_str = Statistics.EMPTY_SEQUENCE
else:
gold_str = " ".join(gold)
key = pred_str + Statistics.JOINER + gold_str
if key not in self.stats:
self.stats[key] = 0
self.stats[key] += 1
def print_report(self, grouped=False):
count = len(self.stats)
def key_by_count(i):
return i[1]
def compare_grouped(i, j):
ia, ib = i[0].split(Statistics.JOINER)
ja, jb = j[0].split(Statistics.JOINER)
# pure insertions first
if ia == Statistics.EMPTY_SEQUENCE and ja != Statistics.EMPTY_SEQUENCE: return 1
if ia != Statistics.EMPTY_SEQUENCE and ja == Statistics.EMPTY_SEQUENCE: return -1
# pure deletions second
if ib == Statistics.EMPTY_SEQUENCE and jb != Statistics.EMPTY_SEQUENCE: return 1
if ib != Statistics.EMPTY_SEQUENCE and jb == Statistics.EMPTY_SEQUENCE: return -1
            # token count (inverted)
if len(i[0].split()) < len(j[0].split()): return 1
if len(i[0].split()) > len(j[0].split()): return -1
            # occurrence count
if i[1] < j[1]: return -1
if i[1] > j[1]: return 1
# string lexi
if i[0] < j[0]: return -1
if i[0] > j[0]: return 1
return 0
import functools
key = functools.cmp_to_key(compare_grouped) if grouped else key_by_count
print()
print()
print("Replacement statistics: " + self.title)
print("-------------------------------------------------")
print("<count>: <replacement>")
for trans, count in sorted(self.stats.items(), key=key, reverse=True):
print(str(count).rjust(7) + ": " + trans)
def main():
print("We are investigating operations required to turn PREDICTION to GOLD")
print("(it's the inverse of the mistakes the model made)")
with open("experiment-predictions.json") as f:
prediction_sheet = json.load(f)
# TODO: transformer needs to run before levenshtein, not after
stats = [
Statistics("Basic stats"),
Statistics("Generic tokens", lambda seq: [to_generic(t) for t in seq]),
Statistics("Pitch only", lambda seq: [str(get_pitch(t) or "no") for t in seq]),
]
for writer, parts in MUSCIMA_RAW_ANNOTATIONS.items():
for part, staves in parts.items():
for i in range(len(staves)):
gold = MUSCIMA_RAW_ANNOTATIONS[writer][part][i]
prediction = prediction_sheet[str(writer)][str(part)][i]
gold = trim_non_repeat_barlines(repair_annotation(gold)[0])
prediction = trim_non_repeat_barlines(repair_annotation(prediction)[0])
for s in stats:
replacements = editops_levenshtein_sequenced(
s.transformer(gold.split()),
s.transformer(prediction.split())
)
s.add_replacements(replacements)
for s in stats:
s.print_report(GROUP_BY_KIND_WHEN_PRINTING)
main()
```
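A tiny illustration of the `Statistics` aggregation above; the token sequences are made up and serve only to show that replacements are keyed as `pred → gold` strings and counted. Since this script runs `main()` at import time, the snippet assumes the class definition is simply in scope.
```python
# Made-up token sequences, only to illustrate the aggregation.
stats = Statistics("Demo")
stats.add_replacement(["q4"], ["h4"])   # prediction "q4" should have been "h4"
stats.add_replacement(["q4"], ["h4"])   # same replacement again -> count becomes 2
stats.add_replacement([], ["#4"])       # empty prediction side = pure insertion
stats.print_report(grouped=True)
```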
#### File: Jirka-Mayer/BachelorThesis/experiment_04.py
```python
import argparse
import sys
USAGE_TEXT = '''python experiment_04.py <command> [<args>]
The available commands are:
inspect Inspects datasets that will be used for training and validation
train Train new model for the experiment
evaluate Evaluate the trained model against annotated muscima parts
evaluate_on_primus Evaluate trained model against some primus incipits
evaluate_on_real Evaluate trained model against real scanned sheet music
Experiment 04:
- Train on 63K Primus incipits and 63K generated incipits
- Validate on 1K Primus incipits
- Generates images with staves above and below
- Use symbols from all writers except for the evaluating ones
- Minimal staff width of 1200px
'''
class Experiment04(object):
def __init__(self):
parser = argparse.ArgumentParser(usage=USAGE_TEXT)
parser.add_argument('command', help='Command to run')
# parse_args defaults to [1:] for args, but you need to
# exclude the rest of the args too, or validation will fail
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Unrecognized command')
parser.print_help()
exit(1)
# use dispatch pattern to invoke method with same name
getattr(self, args.command)()
def _prepare_datasets(self):
from mashcima import Mashcima
mc = Mashcima(use_cache=True, skip_writers=[13, 17, 20, 34, 41, 49])
from experiment_utils import prepare_annotations
training_annotations = prepare_annotations(
primus_skip=0, primus_take=63127, generated_take=63127
)
validation_annotations = prepare_annotations(
primus_skip=63127, primus_take=1000, generated_take=0
)
from experiment_utils import prepare_dataset
training_dataset = prepare_dataset(
mc, training_annotations, min_staff_with=1200, single_staff=False
)
validation_dataset = prepare_dataset(
mc, validation_annotations, min_staff_with=1200, single_staff=False
)
return training_dataset, validation_dataset
def inspect(self):
training_dataset, validation_dataset = self._prepare_datasets()
print("\n\nInspecting TRAINING dataset: (20 items)")
training_dataset.check_dataset_visually(example_count=20)
print("\n\nInspecting VALIDATION dataset: (20 items)")
validation_dataset.check_dataset_visually(example_count=20)
def train(self):
parser = argparse.ArgumentParser()
parser.add_argument('--model', default="experiment_04")
parser.add_argument('--epochs', default=10, type=int)
parser.add_argument('--batch_size', default=10, type=int)
parser.add_argument('--threads', default=4, type=int)
parser.add_argument('--load_model', action="store_true", help="continue training a model")
parser.add_argument('--seed_offset', default=0, type=int)
args = parser.parse_args(sys.argv[2:])
# set seed
import tensorflow as tf
import numpy as np
import random
tf.set_random_seed(20200524 + args.seed_offset)
np.random.seed(20200524 + args.seed_offset)
random.seed(20200524 + args.seed_offset)
training_dataset, validation_dataset = self._prepare_datasets()
# train
from app.Network import Network
# continue training
if args.load_model:
# load old one
print("Loading old model...")
network = Network.load(args.model)
else:
# delete old one
if Network.exists(args.model):
if input("Type 'yes' to delete the old, trained model.") != "yes":
exit("No model will be overwritten")
print("Deleting old model...")
Network.delete_model(args.model)
# create new one
print("Creating new model...")
network = Network(
name=args.model,
continual_saving=True,
create_logdir=True,
threads=args.threads
)
network.train(
training_dataset,
validation_dataset,
epochs=args.epochs,
batch_size=args.batch_size
)
def evaluate(self):
parser = argparse.ArgumentParser()
parser.add_argument('--model', default="experiment_04")
parser.add_argument('--writers', type=str, help="writers to evaluate on e.g. '1,2,3'")
parser.add_argument('--pages', type=str, help="pages to evaluate on e.g. '1,2,3'")
args = parser.parse_args(sys.argv[2:])
from experiment_evaluation import evaluate_model
evaluate_model(args.model, args.writers, args.pages)
def evaluate_on_real(self):
parser = argparse.ArgumentParser()
parser.add_argument('--model', default="experiment_04")
args = parser.parse_args(sys.argv[2:])
from experiment_evaluation import evaluate_on_real
evaluate_on_real(args.model)
def evaluate_on_primus(self):
parser = argparse.ArgumentParser()
parser.add_argument('--model', default="experiment_04")
args = parser.parse_args(sys.argv[2:])
from experiment_evaluation import evaluate_on_primus
evaluate_on_primus(args.model)
if __name__ == '__main__':
Experiment04()
```
#### File: Jirka-Mayer/BachelorThesis/experiment_utils.py
```python
from typing import List
from app.AnnotationsDataset import AnnotationsDataset
from app.ParallelFeedingDataset import ParallelFeedingDataset
from mashcima import Mashcima
from mashcima.annotation_to_image import multi_staff_annotation_to_image
from mashcima.primus_adapter import load_primus_as_mashcima_annotations
from app.generate_random_annotation import generate_random_annotation
import numpy as np
PRIMUS = load_primus_as_mashcima_annotations()
def prepare_annotations(
primus_skip=0,
primus_take=0,
generated_take=0
) -> List[str]:
"""Prepares annotations as a mix of primus and generated items"""
primus_annotations = [
item["mashcima"]
for item in PRIMUS[primus_skip:primus_skip+primus_take]
]
generated_annotations = [
generate_random_annotation()
for _ in range(generated_take)
]
return primus_annotations + generated_annotations
def _complex_image_generator(
mc: Mashcima,
annotation_index: int,
annotations: List[str],
single_staff: bool,
min_width: int
) -> np.ndarray:
n = len(annotations)
above_index = (11 + annotation_index * 13) % n
below_index = (5 + annotation_index * 17) % n
if above_index % 4 in [1, 3]:
above_index = None
if below_index % 4 in [2, 3]:
below_index = None
if single_staff:
above_index = None
below_index = None
return multi_staff_annotation_to_image(
mc,
main_annotation=annotations[annotation_index],
above_annotation=None if above_index is None else annotations[above_index],
below_annotation=None if below_index is None else annotations[below_index],
transform_image=True,
min_width=min_width
)
def prepare_dataset(
mc: Mashcima,
annotations: List[str],
min_staff_with: int,
single_staff=False
):
"""Prepares image dataset from a list of annotations"""
def _image_generator(annotation_index: int, _: List[str]) -> np.ndarray:
return _complex_image_generator(
mc, annotation_index, annotations, single_staff, min_staff_with
)
dataset = AnnotationsDataset(annotations, _image_generator)
dataset = ParallelFeedingDataset(dataset) # make batch preparation parallel
return dataset
```
#### File: Jirka-Mayer/BachelorThesis/extract_symbols.py
```python
from mashcima import Mashcima
from mashcima.Sprite import Sprite
from mashcima.SpriteGroup import SpriteGroup
from typing import List, Optional
import cv2
import sys
import os
import shutil
import numpy as np
def store_sprite(directory: str, symbol_type: str, sprite: Sprite, index: int) -> str:
"""Stores a sprite and returns the corresponding csv line"""
file_name = "{}.png".format(str(index).rjust(5, "0"))
path = os.path.join(directory, symbol_type, file_name)
cv2.imwrite(path, (1.0 - sprite.mask) * 255)
return "{},{},{}".format(
str(index),
str(-sprite.x), # invert since we're moving from the global frame
str(-sprite.y) # to the sprite's frame of reference
)
def store_sprites(directory: str, symbol_type: str, sprites: List[Sprite]):
print("Writing {}...".format(symbol_type))
os.mkdir(os.path.join(directory, symbol_type))
with open(os.path.join(directory, symbol_type + ".csv"), "w") as f:
f.write("index,origin_x,origin_y\n")
for i in range(len(sprites)):
line = store_sprite(directory, symbol_type, sprites[i], i)
f.write(line + "\n")
def convert_sprite_group_to_sprite(sprite_group: SpriteGroup) -> Sprite:
# single sprite groups are easy to handle
if len(sprite_group.sprites) == 1:
return list(sprite_group.sprites.values())[0]
# multi-sprite groups need to be rendered
sprite_group.recalculate_bounding_box()
sprite_group.position_x = -sprite_group.left
sprite_group.position_y = -sprite_group.top
mask = np.zeros(
shape=(sprite_group.height, sprite_group.width),
dtype=np.float32
)
sprite_group.render(mask)
return Sprite(sprite_group.left, sprite_group.top, mask)
def store_sprite_groups(directory: str, symbol_type: str, sprite_groups: List[SpriteGroup]):
sprites = [convert_sprite_group_to_sprite(g) for g in sprite_groups]
store_sprites(directory, symbol_type, sprites)
def print_usage_and_exit(error: Optional[str] = None):
if error is not None:
print("Error: " + error)
print()
print("Usage:")
print("\t" + "extract_symbols.py [symbols-directory]")
print()
print("symbols-directory\tThe directory where to place the extracted symbols")
exit()
def main():
if len(sys.argv) != 2:
print_usage_and_exit()
directory = sys.argv[1]
if not os.path.isdir(directory):
print_usage_and_exit("Given path is not a directory")
# clear the directory
shutil.rmtree(directory)
os.mkdir(directory)
# skip all the writers present in the evaluation dataset
mc = Mashcima(use_cache=True, skip_writers=[13, 17, 20, 34, 41, 49])
store_sprite_groups(directory, "whole_note", mc.WHOLE_NOTES)
store_sprite_groups(directory, "half_note", mc.HALF_NOTES)
store_sprite_groups(directory, "quarter_note", mc.QUARTER_NOTES)
store_sprite_groups(directory, "eighth_note", mc.EIGHTH_NOTES)
store_sprite_groups(directory, "sixteenth_note", mc.SIXTEENTH_NOTES)
store_sprite_groups(directory, "longa_rest", mc.LONGA_RESTS)
store_sprite_groups(directory, "breve_rest", mc.BREVE_RESTS)
store_sprite_groups(directory, "whole_rest", mc.WHOLE_RESTS)
store_sprite_groups(directory, "half_rest", mc.HALF_RESTS)
store_sprite_groups(directory, "quarter_rest", mc.QUARTER_RESTS)
store_sprite_groups(directory, "eighth_rest", mc.EIGHTH_RESTS)
store_sprite_groups(directory, "sixteenth_rest", mc.SIXTEENTH_RESTS)
store_sprites(directory, "sharp", mc.SHARPS)
store_sprites(directory, "flat", mc.FLATS)
store_sprites(directory, "natural", mc.NATURALS)
store_sprites(directory, "dot", mc.DOTS)
store_sprites(directory, "ledger_line", mc.LEDGER_LINES)
store_sprite_groups(directory, "bar_line", mc.BAR_LINES)
store_sprite_groups(directory, "tall_bar_line", mc.TALL_BAR_LINES)
store_sprite_groups(directory, "g_clef", mc.G_CLEFS)
store_sprite_groups(directory, "f_clef", mc.F_CLEFS)
store_sprite_groups(directory, "c_clef", mc.C_CLEFS)
for key in mc.TIME_MARKS:
store_sprite_groups(
directory,
"time_mark_" + key[len("time_"):],
mc.TIME_MARKS[key]
)
main()
```
#### File: Jirka-Mayer/BachelorThesis/inspect_mashcima_symbols.py
```python
from mashcima import Mashcima
from mashcima.Sprite import Sprite
from mashcima.SpriteGroup import SpriteGroup
from mashcima.debug import show_images
from typing import List
import numpy as np
# mc = Mashcima([
# "CVC-MUSCIMA_W-01_N-10_D-ideal.xml",
# "CVC-MUSCIMA_W-01_N-14_D-ideal.xml",
# "CVC-MUSCIMA_W-01_N-19_D-ideal.xml",
#
# # "CVC-MUSCIMA_W-02_N-06_D-ideal.xml",
# # "CVC-MUSCIMA_W-02_N-13_D-ideal.xml",
# # "CVC-MUSCIMA_W-02_N-17_D-ideal.xml",
# ])
mc = Mashcima(use_cache=True)
def inspect(items: List):
batch: List[np.ndarray] = []
BATCH_SIZE = 50
for index, item in enumerate(items):
if isinstance(item, Sprite):
batch.append(item.inspect())
elif isinstance(item, SpriteGroup):
batch.append(item.inspect())
if len(batch) == BATCH_SIZE:
print("Showing indices:", index - BATCH_SIZE + 1, "-", index, "/", len(items))
show_images(batch, row_length=10)
batch = []
if len(batch) != 0:
print("Showing indices:", len(items) - len(batch), "-", len(items) - 1, "/", len(items))
show_images(batch, row_length=10)
###############
# INSPECTIONS #
###############
# DEFAULT SYMBOL SAVING:
# import cv2, os
# s = mc.BREVE_RESTS[0].sprite("rest")
# p = os.path.join(os.path.dirname(__file__), "mashcima/default_symbols/rest_breve")
# cv2.imwrite(p + ".png", s.mask * 255)
# with open(p + ".txt", "w") as f:
# f.write(str(-s.x) + " " + str(-s.y))
# inspect(mc.WHOLE_NOTES)
# inspect(mc.HALF_NOTES)
# inspect(mc.QUARTER_NOTES)
# inspect(mc.EIGHTH_NOTES)
# inspect(mc.SIXTEENTH_NOTES)
# inspect(mc.LONGA_RESTS)
# inspect(mc.BREVE_RESTS)
# inspect(mc.WHOLE_RESTS)
# inspect(mc.HALF_RESTS)
# inspect(mc.QUARTER_RESTS)
# inspect(mc.EIGHTH_RESTS)
# inspect(mc.SIXTEENTH_RESTS)
#
# inspect(mc.FLATS)
# inspect(mc.SHARPS)
# inspect(mc.NATURALS)
#
# inspect(mc.DOTS)
# inspect(mc.LEDGER_LINES)
# inspect(mc.BAR_LINES)
#
# inspect(mc.G_CLEFS)
# inspect(mc.F_CLEFS)
# inspect(mc.C_CLEFS)
#
# inspect(mc.TIME_MARKS["time_0"])
# inspect(mc.TIME_MARKS["time_1"])
# inspect(mc.TIME_MARKS["time_2"])
# inspect(mc.TIME_MARKS["time_3"])
# inspect(mc.TIME_MARKS["time_4"])
# inspect(mc.TIME_MARKS["time_5"])
# inspect(mc.TIME_MARKS["time_6"])
# inspect(mc.TIME_MARKS["time_7"])
# inspect(mc.TIME_MARKS["time_8"])
# inspect(mc.TIME_MARKS["time_9"])
# inspect(mc.TIME_MARKS["time_c"])
```
#### File: mashcima/canvas_items/KeySignature.py
```python
from mashcima import Mashcima
from mashcima.canvas_items.CanvasItem import CanvasItem
from mashcima.Sprite import Sprite
import random
import copy
from typing import Dict, List
class KeySignature(CanvasItem):
def __init__(self, types: List[str], pitches: List[int], **kwargs):
super().__init__(**kwargs)
assert len(types) == len(pitches)
for t in types:
assert t in ["#", "b", "N"]
self.types = types
self.pitches = pitches
self.item_sprites: List[Sprite] = []
def get_annotation_tokens(self):
tokens = []
for i in range(len(self.pitches)):
tokens.append(self.types[i] + str(self.pitches[i]))
return tokens
def select_sprites(self, mc: Mashcima):
for i, t in enumerate(self.types):
s = None
if t == "#":
s = copy.deepcopy(random.choice(mc.SHARPS))
if t == "b":
s = copy.deepcopy(random.choice(mc.FLATS))
if t == "N":
s = copy.deepcopy(random.choice(mc.NATURALS))
assert s is not None
self.sprites.add("item_" + str(i), s)
self.item_sprites.append(s)
super().select_sprites(mc)
def place_item(self, head: int, pitch_positions: Dict[int, int]) -> int:
self.sprites.position_x = head
self.sprites.position_y = 0
local_head = 0
for i, s in enumerate(self.item_sprites):
s.x += s.width // 2 + local_head
s.y += pitch_positions[self.pitches[i]]
local_head += s.width
if i < len(self.item_sprites) - 1:
local_head += random.randint(0, 5) # padding between items
self.sprites.recalculate_bounding_box()
return local_head
```
#### File: mashcima/canvas_items/StemNote.py
```python
from mashcima import Mashcima
from mashcima.canvas_items.Note import Note
from mashcima.debug import draw_cross
import numpy as np
import random
class StemNote(Note):
def __init__(self, pitch: int, **kwargs):
super().__init__(pitch, **kwargs)
# is the note flipped upside-down?
# decided in select_sprites(...)
self.flipped = False
@property
def stem_head_x(self):
return self.sprites.point("stem_head")[0]
@property
def stem_head_y(self):
return self.sprites.point("stem_head")[1]
def select_sprites(self, mc: Mashcima):
super().select_sprites(mc)
# decide whether to flip or not
self.flipped = self.pitch > 0
if self.pitch in self.canvas_options.randomize_stem_flips_for_pitches:
self.flipped = random.choice([True, False])
def place_sprites(self):
if self.flipped:
self.sprites = self.sprites.create_flipped_copy(
[
"notehead", "stem", "stem_head",
"flag_8", "flag_16", "flag_32"
]
)
super().place_sprites()
def render(self, img: np.ndarray):
super().render(img)
if self.DEBUG_RENDER:
draw_cross(
img,
self.sprites.position_x + self.stem_head_x,
self.sprites.position_y + self.stem_head_y,
5
)
```
#### File: BachelorThesis/mashcima/Slur.py
```python
import numpy as np
import cv2
import random
from mashcima.canvas_items.SlurableItem import SlurableItem
from mashcima.canvas_items.StemNote import StemNote
class Slur:
def __init__(self, start_item: SlurableItem, end_item: SlurableItem):
self.start_item = start_item
self.end_item = end_item
# True: /\ False: \/
self.flipped = False
# whether to use the simple tail-to-tail attachment
# or below-note to below-note
self.tail_to_tail = True
def _set_is_flipped(self):
# both ends have a stem
if isinstance(self.start_item, StemNote) and isinstance(self.end_item, StemNote):
# both not flipped
if not self.start_item.flipped and not self.end_item.flipped:
self.flipped = False
self.tail_to_tail = False
return
# both flipped
if self.start_item.flipped and self.end_item.flipped:
self.flipped = True
self.tail_to_tail = False
return
# otherwise randomize
self.flipped = random.choice([True, False])
return
# start has a stem
if isinstance(self.start_item, StemNote):
self.flipped = self.start_item.flipped
self.tail_to_tail = False
return
# end has a stem
if isinstance(self.end_item, StemNote):
self.flipped = self.end_item.flipped
self.tail_to_tail = False
return
# otherwise randomize
self.flipped = random.choice([True, False])
return
def render(self, img: np.ndarray):
slur_thickness = 3
# NOTE: the slur is rendered as a parabola going through 3 points
# (two attachments and one center point)
self._set_is_flipped()
start_attachment = self.start_item.get_slur_start_attachment_point(self)
end_attachment = self.end_item.get_slur_end_attachment_point(self)
width = end_attachment[0] - start_attachment[0]
# calculate center point
center_point = [
(start_attachment[0] + end_attachment[0]) // 2,
(start_attachment[1] + end_attachment[1]) // 2
]
center_point[1] += (-1 if self.flipped else 1) * min(int(width / 5), 20)
center_point = tuple(center_point)
# calculate coefficients a of: y = ax^2 +bx +c
A = np.array([
[start_attachment[0] ** 2, start_attachment[0], 1],
[center_point[0] ** 2, center_point[0], 1],
[end_attachment[0] ** 2, end_attachment[0], 1]
])
v = np.array([
[start_attachment[1]],
[center_point[1]],
[end_attachment[1]]
])
try:
abc = np.linalg.inv(A).dot(v)
except:
print("Slur didn't render - singular matrix")
return
f = lambda x: abc[0] * x**2 + abc[1] * x + abc[2]
for x in range(start_attachment[0], end_attachment[0]):
cv2.line(
img,
(x, f(x)),
(x + 1, f(x + 1)),
thickness=slur_thickness,
color=1
)
``` |
{
"source": "Jirka-Mayer/Mashcima",
"score": 2
} |
#### File: Mashcima/mashcima/annotation_to_image.py
```python
from mashcima.CanvasOptions import CanvasOptions
import numpy as np
from typing import List, Optional
from mashcima.vocabulary import get_pitch, to_generic
from mashcima.vocabulary import is_accidental
from mashcima.vocabulary import parse_annotation_into_token_groups
from mashcima.vocabulary import KeySignatureTokenGroup, TimeSignatureTokenGroup, TokenGroup
from mashcima.SymbolRepository import SymbolRepository
from mashcima.Canvas import Canvas
from mashcima.canvas_items.Barline import Barline
from mashcima.canvas_items.Clef import Clef
from mashcima.canvas_items.Rest import Rest
from mashcima.canvas_items.WholeNote import WholeNote
from mashcima.canvas_items.HalfNote import HalfNote
from mashcima.canvas_items.QuarterNote import QuarterNote
from mashcima.canvas_items.FlagNote import FlagNote
from mashcima.canvas_items.BeamedNote import BeamedNote
from mashcima.canvas_items.WholeTimeSignature import WholeTimeSignature
from mashcima.canvas_items.TimeSignature import TimeSignature
from mashcima.canvas_items.KeySignature import KeySignature
ITEM_CONSTRUCTORS = {
"|": Barline,
"clef.G": lambda **kwargs: Clef(clef="G", **kwargs),
"clef.F": lambda **kwargs: Clef(clef="F", **kwargs),
"clef.C": lambda **kwargs: Clef(clef="C", **kwargs),
"time.C": lambda **kwargs: WholeTimeSignature(crossed=False, **kwargs),
"time.C/": lambda **kwargs: WholeTimeSignature(crossed=True, **kwargs),
# numeric time signatures are created in a special way
"w": WholeNote,
"h": HalfNote,
"q": QuarterNote,
"e": lambda **kwargs: FlagNote(flag_kind="e", **kwargs),
"s": lambda **kwargs: FlagNote(flag_kind="s", **kwargs),
"lr": lambda **kwargs: Rest(rest_kind="lr", **kwargs),
"br": lambda **kwargs: Rest(rest_kind="br", **kwargs),
"wr": lambda **kwargs: Rest(rest_kind="wr", **kwargs),
"hr": lambda **kwargs: Rest(rest_kind="hr", **kwargs),
"qr": lambda **kwargs: Rest(rest_kind="qr", **kwargs),
"er": lambda **kwargs: Rest(rest_kind="er", **kwargs),
"sr": lambda **kwargs: Rest(rest_kind="sr", **kwargs),
"e=": lambda **kwargs: BeamedNote(beams=1, left_beamed=False, right_beamed=True, **kwargs),
"=e=": lambda **kwargs: BeamedNote(beams=1, left_beamed=True, right_beamed=True, **kwargs),
"=e": lambda **kwargs: BeamedNote(beams=1, left_beamed=True, right_beamed=False, **kwargs),
"s=": lambda **kwargs: BeamedNote(beams=2, left_beamed=False, right_beamed=True, **kwargs),
"=s=": lambda **kwargs: BeamedNote(beams=2, left_beamed=True, right_beamed=True, **kwargs),
"=s": lambda **kwargs: BeamedNote(beams=2, left_beamed=True, right_beamed=False, **kwargs),
"t=": lambda **kwargs: BeamedNote(beams=3, left_beamed=False, right_beamed=True, **kwargs),
"=t=": lambda **kwargs: BeamedNote(beams=3, left_beamed=True, right_beamed=True, **kwargs),
"=t": lambda **kwargs: BeamedNote(beams=3, left_beamed=True, right_beamed=False, **kwargs),
}
def token_groups_to_canvas(canvas: Canvas, groups: List[TokenGroup]):
"""Appends token groups to a canvas instance"""
for group in groups:
# special token groups
if isinstance(group, TimeSignatureTokenGroup):
canvas.add(TimeSignature(
top=int(group.first_token[len("time."):]),
bottom=int(group.second_token[len("time."):])
))
continue
if isinstance(group, KeySignatureTokenGroup):
canvas.add(KeySignature(
types=[to_generic(a) for a in group.before_attachments],
pitches=[get_pitch(a) for a in group.before_attachments]
))
continue
# default token group
accidental = None
accidentals = [b for b in group.before_attachments if is_accidental(b)]
if len(accidentals) > 0:
accidental = to_generic(accidentals[0])
duration_dots = None
if "*" in group.after_attachments:
duration_dots = "*"
elif "**" in group.after_attachments:
duration_dots = "**"
canvas.add(ITEM_CONSTRUCTORS[to_generic(group.token)](**{
"pitch": get_pitch(group.token),
"accidental": accidental,
"duration_dots": duration_dots,
"staccato": "." in group.after_attachments,
"slur_start": "(" in group.after_attachments,
"slur_end": ")" in group.before_attachments,
}))
def annotation_to_canvas(canvas: Canvas, annotation: str, print_warnings=True):
"""Appends symbols in annotation to the canvas"""
groups, warnings = parse_annotation_into_token_groups(annotation)
if print_warnings and len(warnings) > 0:
print("Warnings when parsing: " + annotation)
print("\t" + "\t\n".join(warnings))
token_groups_to_canvas(canvas, groups)
# make sure the canvas produced what it was supposed to produce
given_annotation = " ".join(annotation.split())
generated_annotation = " ".join(canvas.get_annotations())
if given_annotation != generated_annotation:
print("Canvas generated different annotation from the one given:")
print("Given: ", given_annotation)
print("Generated: ", generated_annotation)
assert given_annotation == generated_annotation # kill the program
def annotation_to_image(repo: SymbolRepository, annotation: str) -> np.ndarray:
"""Generates an image from an annotation string"""
canvas = Canvas()
annotation_to_canvas(canvas, annotation)
img = canvas.render(repo)
return img
def multi_staff_annotation_to_image(
repo: SymbolRepository,
main_annotation: str,
above_annotation: Optional[str],
below_annotation: Optional[str],
main_canvas_options: Optional[CanvasOptions] = None,
above_canvas_options: Optional[CanvasOptions] = None,
below_canvas_options: Optional[CanvasOptions] = None,
min_width=0, # keep some empty staff lines after the end
crop_horizontally=True,
crop_vertically=True,
transform_image=True
) -> np.ndarray:
"""
Advanced function that creates image of a staff with staves above and
below and applies transformations if requested.
"""
from mashcima.generate_staff_lines import generate_staff_lines
from mashcima.transform_image import transform_image as transform_image_function
staff_img, pitch_positions = generate_staff_lines(repo.CONFIG)
staff_height = staff_img.shape[0] // 3
staff_width = staff_img.shape[1]
img = np.zeros(
shape=(staff_height * 9, staff_width),
dtype=np.float32
)
# draw staff lines
if above_annotation is not None:
img[staff_height * 1:staff_height * 4, :] = staff_img
img[staff_height * 3:staff_height * 6, :] = staff_img
if below_annotation is not None:
img[staff_height * 5:staff_height * 8, :] = staff_img
# draw above staff symbols
if above_annotation is not None:
canvas = Canvas()
canvas.options.barlines_up = False
canvas.options.barlines_down = False
canvas.options.override_values_from(above_canvas_options)
annotation_to_canvas(canvas, above_annotation)
canvas.render_onto_image(
repo, img,
{pitch: y + staff_height * 1 for pitch, y in pitch_positions.items()},
head_start=0
)
# draw main staff symbols
canvas = Canvas()
canvas.options.barlines_up = above_annotation is not None
canvas.options.barlines_down = below_annotation is not None
canvas.options.override_values_from(main_canvas_options)
annotation_to_canvas(canvas, main_annotation)
head_end = canvas.render_onto_image(
repo, img,
{pitch: y + staff_height * 3 for pitch, y in pitch_positions.items()},
head_start=0
)
# draw below staff symbols
if below_annotation is not None:
canvas = Canvas()
canvas.options.barlines_up = False
canvas.options.barlines_down = False
canvas.options.override_values_from(below_canvas_options)
annotation_to_canvas(canvas, below_annotation)
canvas.render_onto_image(
repo, img,
{pitch: y + staff_height * 5 for pitch, y in pitch_positions.items()},
head_start=0
)
# minimal length
if head_end < min_width:
head_end = min_width
# cropping box
box = [
0, # x
staff_height * 3 if crop_vertically else 0, # y
head_end if crop_horizontally else staff_width, # width
staff_height * 3 if crop_vertically else img.shape[0] # height
]
# transform or just crop
if transform_image:
img = transform_image_function(img, box)
else:
img = img[box[1]:box[1]+box[3], box[0]:box[0]+box[2]]
return img
```
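A hedged sketch of calling the two entry points above. `repo` is assumed to be an already constructed `SymbolRepository`, and the annotation strings are placeholders whose exact vocabulary is defined in `mashcima.vocabulary`.
```python
# `repo` and the annotation strings are assumptions for illustration only.
from mashcima.annotation_to_image import (
    annotation_to_image,
    multi_staff_annotation_to_image,
)

def render_examples(repo, main_annotation, above_annotation=None):
    # single cropped staff, no transformations
    single = annotation_to_image(repo, main_annotation)

    # main staff with an optional staff above it, padded to at least 1200 px
    # and passed through the random image transformations
    multi = multi_staff_annotation_to_image(
        repo,
        main_annotation=main_annotation,
        above_annotation=above_annotation,
        below_annotation=None,
        min_width=1200,
        transform_image=True,
    )
    return single, multi
```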
#### File: mashcima/canvas_items/WholeNote.py
```python
from mashcima.SymbolRepository import SymbolRepository
from mashcima.canvas_items.Note import Note
import random
import copy
class WholeNote(Note):
def get_note_generic_annotation(self) -> str:
return "w"
def select_sprites(self, repo: SymbolRepository):
self.sprites = copy.deepcopy(random.choice(repo.WHOLE_NOTES))
super().select_sprites(repo)
```
#### File: Mashcima/mashcima/CanvasOptions.py
```python
class CanvasOptions:
def __init__(self):
# what pitches should have stem orientation randomized 50:50
self.randomize_stem_flips_for_pitches = [-2, -1, 0, 1, 2]
# do barlines point up, above the staff?
self.barlines_up = False
# do barlines point down, below the staff?
self.barlines_down = False
# probability that a random space between symbols will be inserted
# (a relatively large space meant to make the trained model more robust)
self.random_space_probability = 0.03
# size range for the random space in pixels
# (size in addition to the regular horizontal padding space)
self.random_space_size = (50, 300)
@staticmethod
def get_empty():
"""Returns empty options, ready for overriding"""
opts = CanvasOptions()
for key in vars(opts).keys():
setattr(opts, key, None)
return opts
def override_values_from(self, other):
"""
Overrides local values by those in the other object that
are not set to None.
"""
if other is None:
return
for key, val in vars(other).items():
if val is not None:
setattr(self, key, val)
```
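A small illustration of the override pattern above: `get_empty()` yields an all-`None` options object, so only the fields that are explicitly set get copied over the defaults.
```python
from mashcima.CanvasOptions import CanvasOptions

defaults = CanvasOptions()                 # full default options
overrides = CanvasOptions.get_empty()      # every field None = "do not override"
overrides.barlines_up = True               # only the fields set here are applied
overrides.random_space_probability = 0.0

defaults.override_values_from(overrides)
print(defaults.barlines_up)                # True (overridden)
print(defaults.random_space_size)          # (50, 300), untouched default
```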
#### File: Mashcima/mashcima/Canvas.py
```python
from typing import List, Optional, Dict
from mashcima.SymbolRepository import SymbolRepository
from mashcima.canvas_items.CanvasItem import CanvasItem
from mashcima.canvas_items.SlurableItem import SlurableItem
from mashcima.canvas_items.InvisibleSlurEnd import InvisibleSlurEnd
from mashcima.canvas_items.BeamedNote import BeamedNote
from mashcima.Slur import Slur
from mashcima.Beam import Beam
from mashcima.CanvasOptions import CanvasOptions
import numpy as np
import random
class Canvas:
def __init__(self, options: Optional[CanvasOptions] = None):
# items on the canvas
self.items: List[CanvasItem] = []
# beams between beamed notes
self.beams: List[Beam] = []
# slurs between items
self.slurs: List[Slur] = []
# was the construction finished or not
self._construction_finished = False
# options that alter how is the staff printed
self.options = CanvasOptions() if options is None else options
def add(self, item: CanvasItem):
if self._construction_finished:
raise Exception("Cannot add item, construction has been finished")
self.items.append(item)
item.set_canvas_options(self.options)
def get_annotations(self) -> List[str]:
out: List[str] = []
for item in self.items:
out += item.get_annotation_tokens()
return out
def finish_construction(self):
"""Creates additional data structures around canvas items"""
if self._construction_finished:
raise Exception("Construction has been already finished")
self._create_beams()
self._create_slurs()
def _create_beams(self):
self.beams = []
in_beam = False
beam_items = []
for i in self.items:
if not isinstance(i, BeamedNote):
continue
if in_beam:
# append item to a built beam
beam_items.append(i)
# end the beam
if not i.right_beamed:
self.beams.append(Beam(beam_items))
in_beam = False
beam_items = []
else:
# start new beam
if i.right_beamed:
beam_items.append(i)
in_beam = True
assert not in_beam, "Unfinished beam group added to the canvas"
def _create_slurs(self):
self.slurs = []
slur_stack: List[SlurableItem] = []
def add_slur(start: SlurableItem, end: SlurableItem):
self.slurs.append(Slur(start, end))
def create_invisible_slur_end(at_index: int, start_here: bool) -> SlurableItem:
ise = InvisibleSlurEnd(
slur_end=not start_here,
slur_start=start_here
)
self.items.insert(at_index, ise)
return ise
# iterate over slurable items
i = 0
while i < len(self.items):
item = self.items[i]
if isinstance(item, SlurableItem):
if item.slur_end:
if len(slur_stack) == 0: # slur ending out of nowhere
slur_stack.append(create_invisible_slur_end(i, True))
i += 1
add_slur(slur_stack.pop(), item)
if item.slur_start:
slur_stack.append(item)
pass # here do something
i += 1
# slurs not ending anywhere
while len(slur_stack) != 0:
start = slur_stack.pop()
end = create_invisible_slur_end(self.items.index(start) + 1, False)
add_slur(start, end)
def render(self, repo: SymbolRepository):
"""Simple rendering that creates a single cropped staff"""
from mashcima.generate_staff_lines import generate_staff_lines
img, pitch_positions = generate_staff_lines(repo.CONFIG)
head = self.render_onto_image(
repo,
img,
pitch_positions,
0
)
# crop the result
img = img[:, 0:head]
return img
def render_onto_image(
self,
repo: SymbolRepository,
img: np.ndarray,
pitch_positions: Dict[int, int],
head_start: int
) -> int:
"""More advanced rendering that renders onto a given staff image"""
if not self._construction_finished:
self.finish_construction()
# select sprites
for item in self.items:
item.select_sprites(repo)
# place sprites and place items
head = self._place_items(pitch_positions, head_start)
# place beams
for b in self.beams:
b.place()
# render
for item in self.items:
item.render(img)
for b in self.beams:
b.render(img)
for s in self.slurs:
s.render(img)
return head
def _place_items(self, pitch_positions, head_start):
"""Move items to proper places in the pixel space"""
for item in self.items:
item.place_sprites()
def generate_random_space():
if self.options.random_space_probability == 0:
return 0
if random.random() < self.options.random_space_probability:
return random.randint(*self.options.random_space_size)
return 0
def generate_padding():
return random.randint(5, 25)
head = head_start
for i, item in enumerate(self.items):
head += generate_random_space()
head += generate_padding() # left padding
head += item.place_item(head, pitch_positions)
head += generate_padding() # right padding
return head
``` |
{
"source": "Jirka-Mayer/Unisave",
"score": 3
} |
#### File: Unisave/scripts/create-templates.py
```python
from typing import List
def create_template(source_file: str, template_file: str):
print("Creating template:")
print(source_file)
print(" -->", template_file)
lines = open(source_file).read().splitlines()
lines = remove_marked_lines(lines)
lines = mark_namespace(lines)
with open(template_file, "w") as f:
for line in lines:
f.write(line + "\n")
def mark_namespace(lines: List[str]):
namespace_found = False
i = 0
while i < len(lines):
if lines[i].startswith("namespace"):
lines[i] = "$NAMESPACE_BEGIN$"
if lines[i + 1] != "{":
raise Exception("Namespace lacks opening brace.")
del lines[i + 1]
namespace_found = True
break
i += 1
if not namespace_found:
raise Exception("No namespace found.")
while lines[-1] == "":
del lines[-1]
if lines[-1] != "}":
raise Exception("Last line is not a closing brace.")
lines[-1] = "$NAMESPACE_END$\n"
return lines
def remove_marked_lines(lines: List[str]):
i = 0
while i < len(lines):
if lines[i].endswith("// $$ REMOVE_FROM_TEMPLATE"):
del lines[i]
i -= 1
i += 1
return lines
########
# Main #
########
fixture_path = "../Assets/UnisaveFixture/"
templates_path = "../Assets/Unisave/Templates/"
# Email authentication
print("\nEMAIL AUTHENTICATION")
create_template(
fixture_path + "Backend/EmailAuthentication/EmailAuthUtils.cs",
templates_path + "EmailAuthentication/EmailAuthUtils.txt"
)
create_template(
fixture_path + "Backend/EmailAuthentication/EmailLoginFacet.cs",
templates_path + "EmailAuthentication/EmailLoginFacet.txt"
)
create_template(
fixture_path + "Backend/EmailAuthentication/EmailRegisterFacet.cs",
templates_path + "EmailAuthentication/EmailRegisterFacet.txt"
)
create_template(
fixture_path + "Backend/EmailAuthentication/EmailRegisterResponse.cs",
templates_path + "EmailAuthentication/EmailRegisterResponse.txt"
)
create_template(
fixture_path + "Scripts/EmailAuthentication/EmailLoginForm.cs",
templates_path + "EmailAuthentication/EmailLoginForm.txt"
)
create_template(
fixture_path + "Scripts/EmailAuthentication/EmailRegisterForm.cs",
templates_path + "EmailAuthentication/EmailRegisterForm.txt"
)
# Steam authentication
print("\nSTEAM AUTHENTICATION")
create_template(
fixture_path + "Backend/SteamAuthentication/SteamLoginFacet.cs",
templates_path + "SteamAuthentication/SteamLoginFacet.txt"
)
create_template(
fixture_path + "Scripts/SteamAuthentication/SteamLoginClient.cs",
templates_path + "SteamAuthentication/SteamLoginClient.txt"
)
# Steam microtransactions
print("\nSTEAM MICROTRANSACTIONS")
create_template(
fixture_path + "Backend/SteamMicrotransactions/VirtualProducts/ExampleVirtualProduct.cs",
templates_path + "SteamMicrotransactions/ExampleVirtualProduct.txt"
)
create_template(
fixture_path + "Backend/SteamMicrotransactions/IVirtualProduct.cs",
templates_path + "SteamMicrotransactions/IVirtualProduct.txt"
)
create_template(
fixture_path + "Backend/SteamMicrotransactions/SteamPurchasingServerFacet.cs",
templates_path + "SteamMicrotransactions/SteamPurchasingServerFacet.txt"
)
create_template(
fixture_path + "Backend/SteamMicrotransactions/SteamTransactionEntity.cs",
templates_path + "SteamMicrotransactions/SteamTransactionEntity.txt"
)
create_template(
fixture_path + "Scripts/SteamMicrotransactions/SteamPurchasingClient.cs",
templates_path + "SteamMicrotransactions/SteamPurchasingClient.txt"
)
``` |
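A quick, self-contained demonstration of the two helpers above on a made-up C# snippet: `remove_marked_lines` drops lines tagged with `// $$ REMOVE_FROM_TEMPLATE`, and `mark_namespace` replaces the namespace braces with the `$NAMESPACE_BEGIN$` / `$NAMESPACE_END$` placeholders. The sample lines are invented and the functions are assumed to be in scope (the script has no `__main__` guard, so importing it would also run the template generation).
```python
# Invented sample input, assuming the helper functions above are in scope.
sample = [
    "using UnityEngine;  // $$ REMOVE_FROM_TEMPLATE",
    "namespace Unisave.Examples",
    "{",
    "    public class Foo {}",
    "}",
    "",
]
lines = mark_namespace(remove_marked_lines(sample))
# lines is now:
# ["$NAMESPACE_BEGIN$", "    public class Foo {}", "$NAMESPACE_END$\n"]
```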
{
"source": "jirka-tribi/async-cache",
"score": 2
} |
#### File: async-cache/cache/async_ttl.py
```python
from typing import Any
from collections import OrderedDict
import datetime
class AsyncTTL:
class _TTL(OrderedDict):
def __init__(self, time_to_live, min_cleanup_interval, *args, **kwargs):
super().__init__(*args, **kwargs)
self.time_to_live = datetime.timedelta(seconds=time_to_live)
self.min_cleanup_interval = datetime.timedelta(seconds=min_cleanup_interval)
self.last_expiration_cleanup_datetime = datetime.datetime.now()
def __contains__(self, key):
if key not in self.keys():
return False
else:
key_values = super().__getitem__(key)
key_expiration = key_values[0]
if key_expiration < datetime.datetime.now():
del self[key]
return False
else:
return True
def __getitem__(self, key):
value = super().__getitem__(key)
return value
def __setitem__(self, key, value):
ttl_value = datetime.datetime.now() + self.time_to_live
values = [ttl_value, value]
super().__setitem__(key, values)
def cleanup_expired_keys(self):
current_datetime = datetime.datetime.now()
if current_datetime - self.last_expiration_cleanup_datetime < self.min_cleanup_interval:
return
self.last_expiration_cleanup_datetime = current_datetime
for key in list(self.keys()):
key_expiration = self[key][0]
if key_expiration < current_datetime:
del self[key]
else:
break
class _Key:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __eq__(self, obj):
return hash(self) == hash(obj)
def __hash__(self):
def _hash(param: Any):
if isinstance(param, tuple):
return tuple(map(_hash, param))
if isinstance(param, dict):
return tuple(map(_hash, param.items()))
elif hasattr(param, '__dict__'):
return str(vars(param))
else:
return str(param)
return hash(_hash(self.args) + _hash(self.kwargs))
def __init__(self, time_to_live=1, min_cleanup_interval=5):
self.ttl = self._TTL(time_to_live=time_to_live, min_cleanup_interval=min_cleanup_interval)
def __call__(self, func):
async def wrapper(*args, **kwargs):
key = self._Key(args, kwargs)
if key in self.ttl:
val = self.ttl[key][1]
else:
self.ttl[key] = await func(*args, **kwargs)
val = self.ttl[key][1]
self.ttl.cleanup_expired_keys()
return val
wrapper.__name__ += func.__name__
return wrapper
``` |
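A minimal usage sketch for the decorator above (not part of the repository); the `cache.async_ttl` import path is assumed from the file header:
```python
import asyncio
from cache.async_ttl import AsyncTTL  # assumed import path, per the file header above
@AsyncTTL(time_to_live=60, min_cleanup_interval=5)
async def fetch_user(user_id: int) -> dict:
    await asyncio.sleep(0.1)  # stand-in for a slow I/O call worth caching
    return {"id": user_id}
async def main() -> None:
    first = await fetch_user(1)   # computed, then stored with a 60 s TTL
    second = await fetch_user(1)  # served from the cache until the entry expires
    assert first == second
asyncio.run(main())
```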
{
"source": "JiroKokubunji/players",
"score": 3
} |
#### File: JiroKokubunji/players/players.py
```python
import importlib
import argparse
from multiprocessing import Pool
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, \
recall_score, \
f1_score, \
mean_squared_error, \
r2_score, \
mean_absolute_error
import pandas as pd
from io import StringIO
from models.models import ProjectDatumColumns, \
TrainingRequests, \
TrainingRequestQueues, \
ClassificationTrainingResults, \
RegressionTrainingResults
from db import setup_db
ANALYSES_COLLECTION_NAME = 'analyses'
PROJECTS_COLLECTION_NAME = 'projects'
COLUMNS_COLLECTION_NAME = 'columns'
MODELS_COLLECTION = 'models'
ALGORITHMS_COLLECTION_NAME = 'algorithms'
CLASSIFICATION_RESULTS_COLLECTION_NAME = 'classification_results'
PREPROCESSED_DATA_COLLECTION_NAME = 'preprocess_data'
PREPROCESS_ORDER_COLLECTION_NAME = 'preprocess_order'
NUMBER_OF_MAX_PROCESS = 4
class Dispacher:
""" dispathcer of playsers, dispatch tasks to players """
def __init__(self, db):
self.db = db
def dispatch(self):
while True:
players = self.__prepare_players()
            pool = Pool(processes=NUMBER_OF_MAX_PROCESS)
results = pool.map(wrap_players, players)
for result in results:
self.__record_result(result)
pool.close()
pool.join()
break
def __record_result(self, result):
if result['category'] == 'classification':
ClassificationTrainingResults(
training_request_id=result['training_request_id'],
accuracy=result['accuracy'],
recall=result['recall'],
f1=result['f1'],
).save()
elif result['category'] == 'regression':
RegressionTrainingResults(
training_request_id=result['training_request_id'],
mse=result['mse'],
mae=result['mae'],
r2=result['r2'],
).save()
trgr_queue = TrainingRequests.objects.raw({"_id": result['training_request_id']}).first()
trgr_queue.status = "completed"
trgr_queue.save()
def __prepare_players(self):
trgr_queues = TrainingRequestQueues.objects.raw({"status": "pendding"})
players = []
for q in trgr_queues:
            # prepare the data and algorithm for this training request
trgr = q.training_request_id
project_data = trgr.project_datum_id
data = project_data.data
algorithm = trgr.machine_learning_algorithm_id
# get valid columns and data
trc = ProjectDatumColumns.objects.raw({
"project_datum_id" : trgr.project_datum_id._id,
"active" : True,
"target" : False
})
columns = list(map(lambda x: x.name, list(trc)))
tgc = ProjectDatumColumns.objects.raw({
"project_datum_id" : trgr.project_datum_id._id,
"active" : True,
"target" : True
}).first().name
players.append(Player(trgr._id
, algorithm.category
, algorithm.module_name
, algorithm.class_name
, data
, columns
, tgc))
return players
class Player:
TEST_SIZE = 0.33
def __init__(self, training_request_id, category, package_name, class_name, data, train_columns, target_columns):
self.training_request_id = training_request_id
self.category = category
self.package_name = package_name
self.class_name = class_name
self.data = data
self.train_columns = train_columns
self.target_columns = target_columns
def play(self):
module = importlib.import_module(self.package_name)
instance = getattr(module, self.class_name)()
df = pd.read_csv(StringIO(self.data))
train = df.loc[:, self.train_columns]
target = df.loc[:, self.target_columns]
x_train, x_test, y_train, y_test = train_test_split(train, target, test_size=self.TEST_SIZE)
instance.fit(x_train, y_train)
y_pred = instance.predict(x_test)
if self.category == 'classification':
return {"training_request_id": self.training_request_id
, "category": self.category
, "accuracy": accuracy_score(y_test, y_pred)
, "recall": recall_score(y_test, y_pred, average='macro')
, "f1": f1_score(y_test, y_pred, average='macro')}
elif self.category == 'regression':
return {"training_request_id": self.training_request_id
, "category": self.category
, "mse": mean_squared_error(y_test, y_pred)
, "mae": mean_absolute_error(y_test, y_pred)
, "r2": r2_score(y_test, y_pred)}
else:
pass
def wrap_players(args):
return args.play()
def main(db):
dispatcher = Dispacher(db)
dispatcher.dispatch()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='The players is an app that runs sklearn machine learning algorithms one by one. Data is provided by the Orchestra.')
parser.add_argument('-e', '--environment', help='specify environment', default='development')
args = parser.parse_args()
environment = args.environment
main(setup_db(environment))
```
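A hypothetical standalone sketch that exercises the `Player` class above on a tiny in-memory CSV, bypassing the MongoDB models; the column names and the sklearn estimator are illustrative choices, not part of the repository:
```python
from players import Player  # the module defined above
csv_data = "a,b,label\n1.0,2.0,0\n2.0,1.0,1\n3.0,4.0,0\n4.0,3.0,1\n5.0,5.0,0\n6.0,2.0,1\n"
player = Player(
    training_request_id=None,               # no DB-backed request in this sketch
    category="classification",
    package_name="sklearn.tree",
    class_name="DecisionTreeClassifier",
    data=csv_data,
    train_columns=["a", "b"],
    target_columns="label",
)
print(player.play())  # dict with accuracy/recall/f1 for the held-out split
```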
#### File: players/tests/test_players.py
```python
import unittest
from players import *
from db import *
class TestPlayers(unittest.TestCase):
@classmethod
def __initialize(cls):
cls.con[PROJECTS_COLLECTION_NAME].drop()
cls.con[COLUMNS_COLLECTION_NAME].drop()
cls.con[ALGORITHMS_COLLECTION_NAME].drop()
cls.con[ANALYSES_COLLECTION_NAME].drop()
cls.con[CLASSIFICATION_RESULTS_COLLECTION_NAME].drop()
cls.con[PREPROCESSED_DATA_COLLECTION_NAME].drop()
@classmethod
def __prepare_mongodb_db(cls, file_name):
with open(file_name, 'r') as f:
header = f.readline().split(',')
mongo_data = []
for row in f.readlines():
mongo_row_data = {}
row_clumns = row.split(',')
for column, r in zip(header, row_clumns):
mongo_row_data[column.strip()] = r.strip()
mongo_data.append(mongo_row_data)
return mongo_data
@classmethod
def __prepare_analses(cls):
preprocessed_data = list(cls.con[PREPROCESSED_DATA_COLLECTION_NAME].find())
algorithms = list(cls.con[ALGORITHMS_COLLECTION_NAME].find())
data = [{'preprocessed_data_id': p['_id'], 'algorithm_id': a['_id']} for p, a in zip(preprocessed_data, algorithms)]
cls.con[ANALYSES_COLLECTION_NAME].insert_many(data)
@classmethod
def __prepare_projects(cls):
        # prepare project data; each project has multiple processed data sets, the first being the original data
cls.con[PROJECTS_COLLECTION_NAME].insert_many(cls.__prepare_mongodb_db('./tests/data/projects.csv'))
cursor = cls.con[PROJECTS_COLLECTION_NAME].find()
for c in cursor:
file_name = c['file_name']
with open(file_name, 'r') as f:
file = f.read()
cls.con[PROJECTS_COLLECTION_NAME].update_one({'_id': c['_id']}, {"$set": {'file': file}})
            # create the first processed-data entry; this first one is effectively unprocessed
result = cls.con[PREPROCESSED_DATA_COLLECTION_NAME].insert_one({'project_id': c['_id'], 'data': file})
# create columns data of first processed data
header = StringIO(file).readline().strip().split(',')
            # treat the last column as the target in this test
train_header = [{'preprocessed_data_id': result.inserted_id, 'name': th, 'target': False} for th in header[:-1]]
            target_header = [{'preprocessed_data_id': result.inserted_id, 'name': th, 'target': True} for th in header[-1:]]
train_header.extend(target_header)
cls.con[COLUMNS_COLLECTION_NAME].insert_many(train_header)
"""
@classmethod
def __prepare_columns(cls):
cursor = cls.con[PREPROCESSED_DATA_COLLECTION_NAME].find()
data = []
for c in cursor:
project = cls.con[PROJECTS_COLLECTION_NAME].find_one({'_id': c['project_id']})
with open(project['file_name'], 'r') as f:
header = f.readline().split(',')
# consider last columns is target in this test
train_columns = header[:-1]
target_columns = header[-1]
for train in train_columns:
data.append({'preprocessed_data_id': c['_id'], 'name': train, 'target': False})
for target in target_columns:
data.append({'preprocessed_data_id': c['_id'], 'name': target, 'target': True})
cls.con[COLUMNS_COLLECTION_NAME].insert_many(data)
"""
@classmethod
def __prepare_algorithms(cls):
cls.con[ALGORITHMS_COLLECTION_NAME].insert_many(cls.__prepare_mongodb_db('./tests/data/algorithms.csv'))
@classmethod
def __prepare_preprocesses(cls):
cursor = cls.con[PREPROCESSED_DATA_COLLECTION_NAME].find()
for c in cursor:
cls.con[PREPROCESS_ORDER_COLLECTION_NAME].insert_one({'preprocessed_data_id': c['_id'], 'type': 'LabelEncoder', 'column': 'A', 'order': 1})
cls.con[PREPROCESS_ORDER_COLLECTION_NAME].insert_one({'preprocessed_data_id': c['_id'], 'type': 'OneHotEncoder', 'column': 'A.0', 'order': 2})
@classmethod
def setUpClass(cls):
cls.config = parse_db_config('development')
cls.con = db(cls.config)
cls.__initialize()
cls.__prepare_algorithms()
cls.__prepare_projects()
cls.__prepare_analses()
cls.__prepare_preprocesses()
# cls.__prepare_columns()
@classmethod
def tearDownClass(cls):
# make sure no data remains
cls.con[PREPROCESS_ORDER_COLLECTION_NAME].drop()
cls.con[PREPROCESSED_DATA_COLLECTION_NAME].drop()
cls.con[CLASSIFICATION_RESULTS_COLLECTION_NAME].drop()
cls.con[ANALYSES_COLLECTION_NAME].drop()
cls.con[ALGORITHMS_COLLECTION_NAME].drop()
cls.con[COLUMNS_COLLECTION_NAME].drop()
cls.con[PROJECTS_COLLECTION_NAME].drop()
def setUp(self):
pass
def test_00_preprocess(self):
print("test preprocess")
ppds = self.con[PREPROCESSED_DATA_COLLECTION_NAME].find()
for ppd in ppds:
p = Preprocessor(self.con, ppd['_id'])
p.preprocess()
upd = self.con[PREPROCESSED_DATA_COLLECTION_NAME].find()
upd = upd[0]
df = pd.read_csv(StringIO(upd['data']))
df.drop('A', axis=1, inplace=True)
data_buf = StringIO()
df.to_csv(data_buf, index=False)
self.con[PREPROCESSED_DATA_COLLECTION_NAME].update_one({'_id': upd['_id']}, {"$set" : { "data": data_buf.getvalue()}})
c = self.con[COLUMNS_COLLECTION_NAME].find_one({'preprocessed_data_id': upd['_id']})
self.con[COLUMNS_COLLECTION_NAME].delete_one({'_id': c['_id'], 'name': 'A'})
def test_01_dispatcher(self):
print("test_dispatcher")
d = Dispacher(self.con)
d.dispatch()
# check results
# print(list(self.con[CLASSIFICATION_RESULTS_COLLECTION_NAME].find()))
def test_02_player(self):
print("test_players start")
analysis = list(self.con[ANALYSES_COLLECTION_NAME].find())[0]
algorithm = self.con[ALGORITHMS_COLLECTION_NAME].find_one({'_id': analysis['algorithm_id']})
preprocess_data = self.con[PREPROCESSED_DATA_COLLECTION_NAME].find_one({'_id': analysis['preprocessed_data_id']})
data = preprocess_data['data']
df = pd.read_csv(StringIO(data))
        player = Player(analysis['_id'], algorithm['category'], algorithm['module_name'], algorithm['class_name'], data, df.drop('E', axis=1).columns, ['E'])
player.play()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jirouette/pascal",
"score": 3
} |
#### File: jirouette/pascal/pascal.py
```python
import os
from discord.ext import commands
from voice import Voice
from temp import Temp
from rss import Rss
from vote import Vote
from radio import Radio
class Pascal(commands.Bot):
async def on_ready(self):
print('Now connected as {0.user.name}'.format(self))
voice = Voice(bot)
print("Initialized Voice")
temp = Temp(bot)
print("Initialized Temp")
rss = Rss(bot)
print("Initialized RSS")
vote = Vote(bot)
print("Initialized Vote.")
radio = Radio(voice)
print("Initialized Radio.")
print('Ready. ')
async def on_command_error(self, ctx, error):
pass
if __name__ == '__main__':
print("Starting Pascal...")
bot = Pascal(command_prefix="!")
bot.run(os.environ.get('DISCORD_TOKEN'))
```
#### File: jirouette/pascal/voice.py
```python
import discord
import youtube_dl
import time
import asyncio
import typing
import os
from discord.ext import commands
from googleapiclient.discovery import build
TEMP_BANLIST = dict()
ADMIN_ROLE = int(os.environ.get('ADMIN_ROLE'))
youtube = build('youtube', 'v3', developerKey=os.environ.get('YOUTUBE_TOKEN'))
# Suppress noise about console usage from errors
youtube_dl.utils.bug_reports_message = lambda: ''
ytdl_format_options = {
'format': 'bestaudio/best',
'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0' # bind to ipv4 since ipv6 addresses cause issues sometimes
}
ffmpeg_options = {
'options': '-vn'
}
ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
class YTDLSource(discord.PCMVolumeTransformer):
def __init__(self, source, *, data, volume=0.5):
super().__init__(source, volume)
self.data = data
self.title = data.get('title')
self.url = data.get('url')
@classmethod
async def from_url(cls, url: str, *, loop=None, stream=True):
loop = loop or asyncio.get_event_loop()
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
if 'entries' in data:
# take first item from a playlist
data = data['entries'][0]
filename = data['url'] if stream else ytdl.prepare_filename(data)
data['url'] = url
return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data)
class Request(object):
def __init__(self, music: YTDLSource, user: discord.Member, query: str):
self.music = music
self.user = user
self.query = query
class Voice(object):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.client = None
self.blocked = False
self.queue = []
if not discord.opus.is_loaded():
discord.opus.load_opus("libopus.so")
bot.voice = self
init(bot)
def block(self) -> None:
self.blocked = True
def unblock(self) -> None:
self.blocked = False
def is_blocked(self) -> bool:
return self.blocked
def search(self, text: str) -> typing.Optional[str]:
response = youtube.search().list(q=text, part='id,snippet', maxResults=1).execute()
for result in response.get('items', []):
return "https://www.youtube.com/watch?v=%s" % result['id']['videoId']
return None
def play(self) -> None:
if not self.queue or not self.client:
return
self.client.play(self.queue[0].music, after=self.next)
self.bot.loop.create_task(self.reload_activity())
async def reload_activity(self) -> None:
track = self.get_current_track()
activity = None
if track is not None:
activity = discord.Activity(
type=discord.ActivityType.listening,
name=f"!track − {track.music.title}"
)
await self.bot.change_presence(activity=activity)
def next(self, e) -> None:
if not self.queue:
self.bot.loop.create_task(self.reload_activity())
return
self.queue.pop(0)
self.play()
def get_current_track(self) -> typing.Optional[Request]:
if not self.queue:
return None
return self.queue[0]
def has_next_track(self) -> bool:
return len(self.queue) > 1
async def youtube(self, ctx: typing.Optional[commands.Context], URL: str, query: typing.Optional[str] = None) -> None:
if ctx is not None:
if self.client is None or not self.client.is_connected():
for channel in ctx.channel.guild.voice_channels:
if channel.id == int(os.environ.get('VOICE_CHANNEL')):
self.client = await channel.connect()
break
player = await YTDLSource.from_url(URL, loop=self.bot.loop)
request = Request(player, ctx.author if ctx else Ellipsis, query or URL)
self.queue.append(request)
if len(self.queue) == 1:
self.play()
async def stop(self, *_, **__) -> None:
if self.client is None or not self.client.is_connected():
return
self.client.stop()
async def add_ban(ctx: commands.Context, author: discord.Member, duration = -1) -> None:
if duration < 0:
duration = int(os.environ.get('TEMP_BAN_DURATION', 300))
TEMP_BANLIST[author.id] = time.time()+duration
async def is_in_voice_channel(ctx: commands.Context) -> bool:
if not ctx.bot.voice.client:
return True
if ctx.author.id in [member.id for member in ctx.bot.voice.client.channel.members]:
return True
await add_ban(ctx, ctx.author)
await ctx.send("<:ban:792391057706975272>")
return False
async def is_coming_from_text_channel(ctx: commands.Context) -> bool:
if ctx.channel.id == int(os.environ.get('TEXT_CHANNEL')):
return True
if not ctx.guild:
await ctx.send('🤫')
return False
async def is_not_banned(ctx: commands.Context) -> bool:
if TEMP_BANLIST.get(ctx.author.id, 0) >= time.time():
await ctx.send("<:ban:792391057706975272>")
return False
banned_role = int(os.environ.get('BANNED_ROLE'))
if banned_role in [role.id for role in ctx.author.roles]:
await ctx.send("<:ban:792391057706975272>")
return False
return True
async def is_not_blocked(ctx: commands.Context) -> bool:
if ctx.bot.voice.is_blocked():
await ctx.send('❌')
return False
return True
async def is_owner_of_current_track(ctx: commands.Context) -> bool:
is_admin = discord.utils.get(ctx.author.roles, id=ADMIN_ROLE)
if not ctx.bot.voice.has_next_track():
await ctx.send('🤷')
return False
current_track = ctx.bot.voice.get_current_track()
return is_admin or current_track.user is Ellipsis or ctx.author.id == current_track.user.id
def init(bot: commands.Bot) -> None:
@bot.command()
@commands.check(is_coming_from_text_channel)
@commands.check(is_in_voice_channel)
@commands.check(is_not_banned)
@commands.check(is_not_blocked)
async def music(ctx: commands.Context, *args) -> None:
text = " ".join(args)
print(text)
if (text.startswith("https://www.youtube.com/") or
text.startswith("https://youtube.com/") or
text.startswith("http://www.youtube.com/") or
text.startswith("http://youtube.com/") or
text.startswith("https://youtu.be/") or
text.startswith("http://youtu.be/")):
await ctx.bot.voice.youtube(ctx, text)
return await ctx.send('🎵')
URL = ctx.bot.voice.search(text)
if URL:
await ctx.bot.voice.youtube(ctx, URL, f"{text} ({URL})")
return await ctx.send('🎵 '+URL)
await ctx.send('😔')
@bot.command()
@commands.check(is_coming_from_text_channel)
@commands.has_role(ADMIN_ROLE)
async def unban(ctx: commands.Context, member: typing.Optional[discord.Member]) -> None:
if not member:
TEMP_BANLIST.clear()
else:
del TEMP_BANLIST[member.id]
await ctx.send('👌')
@bot.command()
@commands.check(is_coming_from_text_channel)
@commands.has_role(ADMIN_ROLE)
async def ban(ctx: commands.Context, member: discord.Member, duration: typing.Optional[int] = -1) -> None:
await add_ban(ctx, member, duration*60)
await ctx.send('🔨')
@bot.command()
@commands.check(is_coming_from_text_channel)
@commands.has_role(ADMIN_ROLE)
async def block(ctx: commands.Context) -> None:
ctx.bot.voice.block()
await ctx.send('👌')
@bot.command()
@commands.check(is_coming_from_text_channel)
@commands.has_role(ADMIN_ROLE)
async def unblock(ctx: commands.Context) -> None:
ctx.bot.voice.unblock()
await ctx.send('👌')
@bot.command()
@commands.check(is_coming_from_text_channel)
@commands.has_role(ADMIN_ROLE)
async def stop(ctx: commands.Context) -> None:
await ctx.bot.voice.stop()
ctx.bot.voice.queue = []
await ctx.send('👌')
@bot.command()
@commands.check(is_coming_from_text_channel)
async def track(ctx: commands.Context) -> None:
track = ctx.bot.voice.get_current_track()
if track is None:
await ctx.send('🙅♂️')
return
await ctx.send(f"**{track.music.title}** ({track.music.url})")
@bot.command()
@commands.check(is_coming_from_text_channel)
@commands.check(is_owner_of_current_track)
async def skip(ctx: commands.Context) -> None:
        await ctx.send('⏭️'+ctx.bot.voice.queue[1].query)
        # VoiceClient.stop() triggers the `after` callback (Voice.next), which already pops the
        # queue and starts the next track, so no explicit next() call is needed here.
        await ctx.bot.voice.stop()
```
#### File: jirouette/pascal/vote.py
```python
import os
import redis
from discord.ext import commands
class Vote(object):
def __init__(self, bot: commands.Bot):
self.bot = bot
@bot.command()
@commands.dm_only()
async def token(ctx: commands.Context, token: str):
r = redis.Redis(host='redis')
r.set('TOKEN_'+str(ctx.message.author.id), token)
await ctx.send('Token enregistré ! 😌')
@bot.event
async def on_raw_reaction_add(payload):
if payload.channel_id != int(os.environ.get('VOTE_CHANNEL_ID')):
return
channel = await bot.fetch_channel(payload.channel_id)
message = await channel.fetch_message(payload.message_id)
file_id = message.content.split('/')[-1]
r = redis.Redis(host='redis')
token = r.get('TOKEN_'+str(payload.member.id))
if token is None:
return
token = token.decode('utf-8')
            if str(payload.emoji) == '👍' or str(payload.emoji) == '👎':
pass # TODO implement vote
            elif str(payload.emoji) == '✅' or str(payload.emoji) == '❌':
pass # TODO implement selection
@bot.event
        async def on_raw_reaction_remove(payload):
if payload.channel_id != int(os.environ.get('VOTE_CHANNEL_ID')):
return
channel = await bot.fetch_channel(payload.channel_id)
message = await channel.fetch_message(payload.message_id)
file_id = message.content.split('/')[-1]
r = redis.Redis(host='redis')
            token = r.get('TOKEN_'+str(payload.user_id))  # payload.member is not populated on removal events
            if token is None:
                return
            token = token.decode('utf-8')
            if str(payload.emoji) == '👍' or str(payload.emoji) == '👎':
pass # TODO implement vote
@bot.event
async def on_message(message):
await bot.process_commands(message)
if message.channel.id != int(os.environ.get('VOTE_CHANNEL_ID')):
return
await message.add_reaction('👍')
await message.add_reaction('👎')
await message.add_reaction('✅')
await message.add_reaction('❌')
``` |
{
"source": "jirouette/richirc",
"score": 2
} |
#### File: richirc/richirc/web.py
```python
import tornado.ioloop
import tornado.web
import tornado.websocket
import uuid
from mq import RedisMQ
import os
import json
HOST = os.environ.get('RICHIRC_DEFAULT_SERVER', 'chat.freenode.net')
PORT = int(os.environ.get('RICHIRC_DEFAULT_PORT', 6697))
NICKNAME = os.environ.get('RICHIRC_DEFAULT_NICKNAME', 'richirc_user1')
CHANNEL = os.environ.get('RICHIRC_DEFAULT_CHANNEL', '#richirc')
class WebBridge(RedisMQ):
BRIDGE_NAME = 'web'
def __init__(self):
super().__init__(self.BRIDGE_NAME, self.invoke)
def invoke(self, ID, method, *args, **kwargs):
client = app.user_list.get(ID)
if client and not method.startswith('_'):
return getattr(client.irc, method)(*args, **kwargs)
class MainHandler(tornado.web.RequestHandler):
def get(self):
data = dict(host=HOST,
port=PORT,
nickname=NICKNAME,
channel=CHANNEL)
self.render("front/templates/index.html", **data)
class IRCProxyClient(object):
def __init__(self, ID, callback):
self.ID = ID
self.bridge = WebBridge()
self.callback = callback
def __getattr__(self, method):
def execute(*args, **kwargs):
if method.startswith('on_'):
self.callback(self.ID, method, *args, **kwargs)
else:
self.bridge.send(self.ID, method, *args, **kwargs)
return execute
class IRCWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
self.ID = str(uuid.uuid4())
self.application.user_list[self.ID] = self
self.irc = IRCProxyClient(self.ID, self.ws_send)
print("[WS] Welcome", self.ID)
def on_close(self):
del self.application.user_list[self.ID]
print("[WS] Bye", self.ID)
def ws_send(self, ID, method, *args, **kwargs):
payload = dict(method=method,
args=args,
kwargs=kwargs,
ID=ID,
source=WebBridge.BRIDGE_NAME)
self.write_message(json.dumps(payload))
def on_message(self, message):
payload = json.loads(message)
method = payload.get('method')
args = payload.get('args', list())
kwargs = payload.get('kwargs', dict())
return getattr(self.irc, method)(*args, **kwargs)
def make_app():
return tornado.web.Application([
(r"/", MainHandler),
(r"/irc", IRCWebSocket),
(r'/js/(.*)', tornado.web.StaticFileHandler, {'path': 'front/js'}),
])
if __name__ == "__main__":
app = make_app()
app.user_list = dict()
app.listen(int(os.environ.get('RICHIRC_WEB_PORT', 1993)))
WebBridge().start()
print("[!] RichIRC web platform started")
tornado.ioloop.IOLoop.current().start()
``` |
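A hypothetical client-side sketch (not part of the repository) of the JSON protocol that `IRCWebSocket.on_message` expects; the `connect` method name on the IRC bridge side and the third-party `websockets` package are assumptions:
```python
import asyncio
import json
import websockets  # third-party client library, assumed for this sketch
async def main() -> None:
    async with websockets.connect("ws://localhost:1993/irc") as ws:
        # Any method not starting with "on_" is forwarded to the IRC bridge by IRCProxyClient.
        await ws.send(json.dumps({
            "method": "connect",  # assumed to exist on the bridge side
            "args": ["chat.freenode.net", 6697],
            "kwargs": {},
        }))
        print(await ws.recv())  # an "on_*" event relayed back by ws_send
asyncio.run(main())
```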
{
"source": "jirsat/PlanarAlly",
"score": 2
} |
#### File: socket/asset_manager/ddraft.py
```python
import base64
import json
import hashlib
from typing import List
from typing_extensions import TypedDict
from app import sio
from models import Asset
from state.asset import asset_state
from ..constants import ASSET_NS
from .common import ASSETS_DIR, UploadData
class Coord(TypedDict):
x: int
y: int
class DDraftPortal(TypedDict):
position: Coord
bounds: List[Coord]
rotation: int
closed: bool
freestanding: bool
class DDraftResolution(TypedDict):
map_origin: Coord
map_size: Coord
pixels_per_grid: int
class DDraftData(TypedDict):
format: str
resolution: DDraftResolution
line_of_sight: List[Coord]
portals: List[DDraftPortal]
image: str
async def handle_ddraft_file(upload_data: UploadData, data: bytes, sid: str):
ddraft_file: DDraftData = json.loads(data)
image = base64.b64decode(ddraft_file["image"])
sh = hashlib.sha1(image)
hashname = sh.hexdigest()
if not (ASSETS_DIR / hashname).exists():
with open(ASSETS_DIR / hashname, "wb") as f:
f.write(image)
template = {
"version": "0",
"shape": "assetrect",
"templates": {
"default": {
"options": json.dumps(
[[f"ddraft_{k}", v] for k, v in ddraft_file.items() if k != "image"]
)
}
},
}
user = asset_state.get_user(sid)
asset = Asset.create(
name=upload_data["name"],
file_hash=hashname,
owner=user,
parent=upload_data["directory"],
options=json.dumps(template),
)
await sio.emit("Asset.Upload.Finish", asset.as_dict(), room=sid, namespace=ASSET_NS)
```
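For reference, a hypothetical minimal payload matching the `DDraftData` typed dict above; field values are illustrative and `image` must be base64-encoded picture bytes:
```python
import base64
import json
ddraft_payload = {
    "format": "0.2",  # illustrative value
    "resolution": {
        "map_origin": {"x": 0, "y": 0},
        "map_size": {"x": 10, "y": 10},
        "pixels_per_grid": 70,
    },
    "line_of_sight": [{"x": 0, "y": 0}, {"x": 10, "y": 0}],
    "portals": [],
    "image": base64.b64encode(b"<binary image bytes>").decode(),
}
data = json.dumps(ddraft_payload).encode()  # what handle_ddraft_file receives as `data`
```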
#### File: api/socket/marker.py
```python
import auth
from api.socket.constants import GAME_NS
from app import app, sio
from models import Marker, PlayerRoom
from state.game import game_state
@sio.on("Marker.New", namespace=GAME_NS)
@auth.login_required(app, sio)
async def new_marker(sid: str, data):
pr: PlayerRoom = game_state.get(sid)
marker = Marker.get_or_none(shape=data, user=pr.player)
if marker is not None:
return
Marker.create(shape=data, user=pr.player, location=pr.active_location)
@sio.on("Marker.Remove", namespace=GAME_NS)
@auth.login_required(app, sio)
async def delete_marker(sid: str, uuid: str):
pr: PlayerRoom = game_state.get(sid)
marker = Marker.get_or_none(shape_id=uuid, user=pr.player)
if not marker:
return
marker.delete_instance()
```
#### File: socket/shape/utils.py
```python
from typing import Generator, Union
from models import PlayerRoom, Shape
from models.shape.access import has_ownership
from state.game import game_state
from utils import logger
def get_shape_or_none(pr: PlayerRoom, shape_id: str, action: str) -> Union[Shape, None]:
try:
shape: Shape = Shape.get(uuid=shape_id)
except Shape.DoesNotExist as exc:
logger.warning(
f"Attempt by {pr.player.name} on unknown shape. {{method: {action}, shape id: {shape_id}}}"
)
raise exc
if not has_ownership(shape, pr):
logger.warning(
f"Attempt by {pr.player.name} on shape they do not own. {{method: {action}, shape id: {shape_id}}}"
)
return None
return shape
def get_owner_sids(
pr: PlayerRoom, shape: Shape, skip_sid=None
) -> Generator[str, None, None]:
for psid in game_state.get_sids(
active_location=pr.active_location, skip_sid=skip_sid
):
if has_ownership(shape, game_state.get(psid)):
yield psid
``` |
{
"source": "jirufengyu/ode",
"score": 2
} |
#### File: jirufengyu/ode/ffjord_tabular.py
```python
import argparse
import collections
import os
import pickle
import sys
import time
import haiku as hk
import jax
import jax.numpy as jnp
from jax import lax
from jax.config import config
from jax.experimental import optimizers
from jax.experimental.jet import jet
from jax.flatten_util import ravel_pytree
from jax.tree_util import tree_flatten
import datasets
from lib.ode import odeint, odeint_sepaux, odeint_grid, odeint_grid_sepaux, odeint_grid_sepaux_one
float_64 = False
config.update("jax_enable_x64", float_64)
parser = argparse.ArgumentParser('FFJORD Tabular')
parser.add_argument('--batch_size', type=int, default=1000)
parser.add_argument('--test_batch_size', type=int, default=1000)
parser.add_argument('--nepochs', type=int, default=500)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--lam', type=float, default=0)
parser.add_argument('--lam_w', type=float, default=1e-6)
parser.add_argument('--atol', type=float, default=1.4e-8) # 1e-8 (original values)
parser.add_argument('--rtol', type=float, default=1.4e-8) # 1e-6
parser.add_argument('--method', type=str, default="dopri5")
parser.add_argument('--vmap', action="store_true")
parser.add_argument('--reg', type=str, choices=['none', 'r2', 'r3', 'r4'], default='none')
parser.add_argument('--test_freq', type=int, default=300)
parser.add_argument('--save_freq', type=int, default=300)
parser.add_argument('--dirname', type=str, default='tmp')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--no_count_nfe', action="store_true")
parser.add_argument('--ckpt_freq', type=int, default=60) # divide test and save, and be divisible by num_batches
parser.add_argument('--ckpt_path', type=str, default="./ck.pt")
parser.add_argument('--lam_fro', type=float, default=0)
parser.add_argument('--lam_kin', type=float, default=0)
parser.add_argument('--reg_type', type=str, choices=['our', 'fin'], default='our')
parser.add_argument('--num_steps', type=int, default=2)
parser.add_argument('--num_layers', type=int, default=2)
parser.add_argument('--hdim_factor', type=int, default=20)
parser.add_argument('--nonlinearity', type=str, default="softplus")
parse_args = parser.parse_args()
if not os.path.exists(parse_args.dirname):
os.makedirs(parse_args.dirname)
# set up config
reg = parse_args.reg
lam = parse_args.lam
lam_fro = parse_args.lam_fro
lam_kin = parse_args.lam_kin
reg_type = parse_args.reg_type
lam_w = parse_args.lam_w
seed = parse_args.seed
rng = jax.random.PRNGKey(seed)
dirname = parse_args.dirname
count_nfe = not parse_args.no_count_nfe
vmap = parse_args.vmap
grid = False
if grid:
_odeint = odeint_grid
_odeint_aux2 = odeint_grid_sepaux_one # finlay trick w/ 2 augmented states
_odeint_aux3 = odeint_grid_sepaux # finlay trick w/ 3 augmented states
ode_kwargs = {
"step_size": 1 / parse_args.num_steps
}
else:
_odeint = odeint
_odeint_aux2 = odeint_sepaux
_odeint_aux3 = odeint_sepaux # TODO: this will break for fin, but we shouldn't use it anyway
ode_kwargs = {
"atol": parse_args.atol,
"rtol": parse_args.rtol
}
def _logaddexp(x1, x2):
"""
Logaddexp while ignoring the custom_jvp rule.
"""
amax = lax.max(x1, x2)
delta = lax.sub(x1, x2)
return lax.select(jnp.isnan(delta),
lax.add(x1, x2), # NaNs or infinities of the same sign.
lax.add(amax, lax.log1p(lax.exp(-lax.abs(delta)))))
softplus = lambda x: _logaddexp(x, jnp.zeros_like(x))
def sigmoid(z):
"""
Numerically stable sigmoid.
"""
return 1/(1 + jnp.exp(-z))
nonlinearity = softplus if parse_args.nonlinearity == "softplus" else jnp.tanh
def sol_recursive(f, z, t):
"""
Recursively compute higher order derivatives of dynamics of ODE.
"""
z_shape = z.shape
z_t = jnp.concatenate((jnp.ravel(z), jnp.array([t])))
def g(z_t):
"""
Closure to expand z.
"""
z, t = jnp.reshape(z_t[:-1], z_shape), z_t[-1]
dz = jnp.ravel(f(z, t))
dt = jnp.array([1.])
dz_t = jnp.concatenate((dz, dt))
return dz_t
(y0, [y1h]) = jet(g, (z_t, ), ((jnp.ones_like(z_t), ), ))
(y0, [y1, y2h]) = jet(g, (z_t, ), ((y0, y1h,), ))
return (jnp.reshape(y0[:-1], z_shape), [jnp.reshape(y1[:-1], z_shape)])
# set up modules
class ConcatSquashLinear(hk.Module):
"""
ConcatSquash Linear layer.
"""
def __init__(self, dim_out):
super(ConcatSquashLinear, self).__init__()
self._layer = hk.Linear(dim_out)
self._hyper_bias = hk.Linear(dim_out, with_bias=False)
self._hyper_gate = hk.Linear(dim_out)
def __call__(self, x, t):
return self._layer(x) * sigmoid(self._hyper_gate(jnp.reshape(t, (1, 1)))) \
+ self._hyper_bias(jnp.reshape(t, (1, 1)))
def get_epsilon(key, shape):
"""
Sample epsilon from the desired distribution.
"""
# normal
return jax.random.normal(key, shape)
class NN_Dynamics(hk.Module):
"""
NN_Dynamics of the ODENet.
"""
def __init__(self,
hidden_dims,
input_shape):
super(NN_Dynamics, self).__init__()
self.input_shape = input_shape
layers = []
activation_fns = []
base_layer = ConcatSquashLinear
for dim_out in hidden_dims + (input_shape[-1], ):
layer = base_layer(dim_out)
layers.append(layer)
activation_fns.append(nonlinearity)
self.layers = layers
self.activation_fns = activation_fns[:-1]
def __call__(self, x, t):
x = jnp.reshape(x, (-1, *self.input_shape))
dx = x
for l, layer in enumerate(self.layers):
dx = layer(dx, t)
# if not last layer, use nonlinearity
if l < len(self.layers) - 1:
dx = self.activation_fns[l](dx)
return dx
def wrap_module(module, *module_args, **module_kwargs):
"""
Wrap the module in a function to be transformed.
"""
def wrap(*args, **kwargs):
"""
Wrapping of module.
"""
model = module(*module_args, **module_kwargs)
return model(*args, **kwargs)
return wrap
def initialization_data(input_shape):
"""
Data for initializing the modules.
"""
input_shape = (parse_args.test_batch_size, ) + input_shape[1:]
data = {
"ode": aug_init(jnp.zeros(input_shape))[:1] + (0., ) # (z, t)
}
return data
def init_model(n_dims):
"""
Instantiates transformed submodules of model and their parameters.
"""
ts = jnp.array([0., 1.])
input_shape = (-1, n_dims)
initialization_data_ = initialization_data(input_shape)
dynamics = hk.without_apply_rng(hk.transform(wrap_module(NN_Dynamics,
input_shape=input_shape[1:],
hidden_dims=(n_dims * parse_args.hdim_factor, )
* parse_args.num_layers)))
dynamics_params = dynamics.init(rng, *initialization_data_["ode"])
dynamics_wrap = lambda x, t, params: dynamics.apply(params, x, t)
def reg_dynamics(y, t, params):
"""
NN_Dynamics of regularization for ODE integration.
"""
if reg == "none":
y = jnp.reshape(y, input_shape)
return jnp.zeros((y.shape[0], 1))
else:
# do r3 regularization
y0, y_n = sol_recursive(lambda _y, _t: dynamics_wrap(_y, _t, params), y, t)
r = y_n[-1]
return jnp.mean(jnp.square(r), axis=[axis_ for axis_ in range(1, r.ndim)])
def ffjord_dynamics(yp, t, eps, params):
"""
Dynamics of augmented ffjord state.
"""
y, p = yp
f = lambda y: dynamics_wrap(y, t, params)
dy, eps_dy = jax.jvp(f, (y,), (eps,))
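        # Hutchinson trace estimator: E[eps^T (∂f/∂y) eps] = Tr(∂f/∂y), so the divergence is
        # estimated from a single JVP with the sampled eps.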
div = jnp.sum(jnp.reshape(eps_dy * eps, (y.shape[0], -1)), axis=1, keepdims=True)
return dy, -div
def ffjord2_dynamics(yp, t, eps, params):
"""
Dynamics of augmented ffjord state.
"""
y, p = yp
f = lambda y: dynamics_wrap(y, t, params)
dy, eps_dy = jax.jvp(f, (y,), (eps,))
div = jnp.sum(jnp.reshape(eps_dy * eps, (y.shape[0], -1)), axis=1, keepdims=True)
return dy, -div, eps_dy
def aug_dynamics(ypr, t, eps, params):
"""
NN_Dynamics augmented with logp and regularization.
"""
y, p, *_ = ypr
dy, dp, eps_dy = ffjord2_dynamics((y, p), t, eps, params)
if reg_type == "our":
dr = reg_dynamics(y, t, params)
return dy, dp, dr
else:
dfro = jnp.mean(jnp.square(eps_dy), axis=[axis_ for axis_ in range(1, dy.ndim)])
dkin = jnp.mean(jnp.square(dy), axis=[axis_ for axis_ in range(1, dy.ndim)])
return dy, dp, dfro, dkin
def all_aug_dynamics(ypr, t, eps, params):
"""
NN_Dynamics augmented with logp and regularization.
"""
y, p, *_ = ypr
dy, dp, eps_dy = ffjord2_dynamics((y, p), t, eps, params)
dr = reg_dynamics(y, t, params)
dfro = jnp.mean(jnp.square(eps_dy), axis=[axis_ for axis_ in range(1, dy.ndim)])
dkin = jnp.mean(jnp.square(dy), axis=[axis_ for axis_ in range(1, dy.ndim)])
return dy, dp, dr, dfro, dkin
if reg_type == 'our':
_odeint_aux = _odeint_aux2
else:
_odeint_aux = _odeint_aux3
nodeint_aux = lambda y0, ts, eps, params: _odeint_aux(lambda y, t, eps, params: dynamics_wrap(y, t, params),
aug_dynamics, y0, ts, eps, params, **ode_kwargs)[0]
all_nodeint = lambda y0, ts, eps, params: _odeint(all_aug_dynamics, y0, ts, eps, params, **ode_kwargs)[0]
def ode_aux(params, y, delta_logp, eps):
"""
Apply the ODE block.
"""
ys, delta_logps, *rs = nodeint_aux(reg_init(y, delta_logp), ts, eps, params)
return (ys[-1], delta_logps[-1], *(rs_[-1] for rs_ in rs))
def all_ode(params, y, delta_logp, eps):
"""
Apply the ODE block.
"""
ys, delta_logps, *rs = all_nodeint(all_reg_init(y, delta_logp), ts, eps, params)
return (ys[-1], delta_logps[-1], *(rs_[-1] for rs_ in rs))
if count_nfe:
if vmap:
unreg_nodeint = jax.vmap(lambda z, delta_logp, t, eps, params:
_odeint(ffjord_dynamics, (z, delta_logp), t, eps, params, **ode_kwargs)[1],
(0, 0, None, 0, None))
else:
unreg_nodeint = lambda z, delta_logp, t, eps, params: \
_odeint(ffjord_dynamics, (z, delta_logp), t, eps, params, **ode_kwargs)[1]
@jax.jit
def nfe_fn(key, params, _x):
"""
Function to return NFE.
"""
eps = get_epsilon(key, _x.shape)
f_nfe = unreg_nodeint(*aug_init(_x)[:2], ts, eps, params["ode"])
return jnp.mean(f_nfe)
else:
nfe_fn = None
def forward_aux(key, params, _x):
"""
Forward pass of the model.
"""
eps = get_epsilon(key, _x.shape)
return ode_aux(params["ode"], *aug_init(_x)[:2], eps)
def forward_all(key, params, _x):
"""
Forward pass of the model.
"""
eps = get_epsilon(key, _x.shape)
return all_ode(params["ode"], *aug_init(_x)[:2], eps)
model = {
"model": {
"ode": all_ode
},
"params": {
"ode": dynamics_params
}, "nfe": nfe_fn,
"forward_all": forward_all
}
return forward_aux, model
def aug_init(y):
"""
Initialize dynamics with 0 for logpx and regs.
"""
batch_size = y.shape[0]
if reg_type == "our":
return y, jnp.zeros((batch_size, 1)), jnp.zeros((batch_size, 1))
else:
return y, jnp.zeros((batch_size, 1)), jnp.zeros((batch_size, 1)), jnp.zeros((batch_size, 1))
def reg_init(y, delta_logp):
"""
Initialize dynamics with 0 for and regs.
"""
batch_size = y.shape[0]
if reg_type == "our":
return y, delta_logp, jnp.zeros((batch_size, 1))
else:
return y, delta_logp, jnp.zeros((batch_size, 1)), jnp.zeros((batch_size, 1))
def all_reg_init(y, delta_logp):
"""
Initialize dynamics with 0 for and regs.
"""
batch_size = y.shape[0]
return y, delta_logp, jnp.zeros((batch_size, 1)), jnp.zeros((batch_size, 1)), jnp.zeros((batch_size, 1))
def standard_normal_logprob(z):
"""
Log probability of standard normal.
"""
logz = -0.5 * jnp.log(2 * jnp.pi)
return logz - jnp.square(z) / 2
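# Continuous change of variables: log p(x) = log p(z(1)) + ∫ Tr(∂f/∂z) dt. The ODE above accumulates
# the negated (Hutchinson-estimated) divergence in delta_logp, so logpx = logpz - delta_logp below,
# and the loss is the mean negative log-likelihood in nats.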
def _loss_fn(z, delta_logp):
logpz = jnp.sum(jnp.reshape(standard_normal_logprob(z), (z.shape[0], -1)), axis=1, keepdims=True) # logp(z)
logpx = logpz - delta_logp
return -jnp.mean(logpx) # likelihood in nats
def _reg_loss_fn(reg):
return jnp.mean(reg)
def _weight_fn(params):
flat_params, _ = ravel_pytree(params)
return 0.5 * jnp.sum(jnp.square(flat_params))
def loss_fn(forward, params, images, key):
"""
The loss function for training.
"""
if reg_type == "our":
z, delta_logp, regs = forward(key, params, images)
loss_ = _loss_fn(z, delta_logp)
reg_ = _reg_loss_fn(regs)
weight_ = _weight_fn(params)
return loss_ + lam * reg_ + lam_w * weight_
else:
z, delta_logp, fro_regs, kin_regs = forward(key, params, images)
loss_ = _loss_fn(z, delta_logp)
fro_reg_ = _reg_loss_fn(fro_regs)
kin_reg_ = _reg_loss_fn(kin_regs)
weight_ = _weight_fn(params)
return loss_ + lam_fro * fro_reg_ + lam_kin * kin_reg_ + lam_w * weight_
def init_data():
"""
Initialize data.
"""
data = datasets.MINIBOONE()
num_train = data.trn.N
# num_test = data.trn.N
num_test = data.val.N
if float_64:
convert = jnp.float64
else:
convert = jnp.float32
data.trn.x = convert(data.trn.x)
data.val.x = convert(data.val.x)
data.tst.x = convert(data.tst.x)
num_batches = num_train // parse_args.batch_size + 1 * (num_train % parse_args.batch_size != 0)
    num_test_batches = num_test // parse_args.test_batch_size + 1 * (num_test % parse_args.test_batch_size != 0)
# make sure we always save the model on the last iteration
assert num_batches * parse_args.nepochs % parse_args.save_freq == 0
def gen_train_data():
"""
Generator for train data.
"""
key = rng
inds = jnp.arange(num_train)
while True:
key, = jax.random.split(key, num=1)
epoch_inds = jax.random.shuffle(key, inds)
for i in range(num_batches):
batch_inds = epoch_inds[i * parse_args.batch_size: min((i + 1) * parse_args.batch_size, num_train)]
yield data.trn.x[batch_inds]
def gen_val_data():
"""
        Generator for validation data.
"""
inds = jnp.arange(num_test)
while True:
for i in range(num_test_batches):
batch_inds = inds[i * parse_args.test_batch_size: min((i + 1) * parse_args.test_batch_size, num_test)]
yield data.val.x[batch_inds]
def gen_test_data():
"""
        Generator for test data.
"""
inds = jnp.arange(num_test)
while True:
for i in range(num_test_batches):
batch_inds = inds[i * parse_args.test_batch_size: min((i + 1) * parse_args.test_batch_size, num_test)]
yield data.tst.x[batch_inds]
ds_train = gen_train_data()
ds_test = gen_val_data()
meta = {
"dims": data.n_dims,
"num_batches": num_batches,
"num_test_batches": num_test_batches
}
return ds_train, ds_test, meta
def run():
"""
Run the experiment.
"""
# init the model first so that jax gets enough GPU memory before TFDS
    forward, model = init_model(43)  # MINIBOONE has 43 input features (dimension hard-coded here)
grad_fn = jax.grad(lambda *args: loss_fn(forward, *args))
ds_train, ds_test_eval, meta = init_data()
num_batches = meta["num_batches"]
num_test_batches = meta["num_test_batches"]
lr_schedule = optimizers.piecewise_constant(boundaries=[9000, 12750], # 300 epochs, 425 epochs
values=[1e-3, 1e-4, 1e-5])
opt_init, opt_update, get_params = optimizers.adam(step_size=lr_schedule)
unravel_opt = ravel_pytree(opt_init(model["params"]))[1]
if os.path.exists(parse_args.ckpt_path):
outfile = open(parse_args.ckpt_path, 'rb')
state_dict = pickle.load(outfile)
outfile.close()
opt_state = unravel_opt(state_dict["opt_state"])
load_itr = state_dict["itr"]
else:
init_params = model["params"]
opt_state = opt_init(init_params)
load_itr = 0
@jax.jit
def update(_itr, _opt_state, _key, _batch):
"""
Update the params based on grad for current batch.
"""
return opt_update(_itr, grad_fn(get_params(_opt_state), _batch, _key), _opt_state)
@jax.jit
def sep_losses(_opt_state, _batch, _key):
"""
Convenience function for calculating losses separately.
"""
z, delta_logp, r2_regs, fro_regs, kin_regs = model["forward_all"](_key, get_params(_opt_state), _batch)
loss_ = _loss_fn(z, delta_logp)
r2_reg_ = _reg_loss_fn(r2_regs)
fro_reg_ = _reg_loss_fn(fro_regs)
kin_reg_ = _reg_loss_fn(kin_regs)
total_loss_ = loss_ + lam * r2_reg_ + lam_fro * fro_reg_ + lam_kin * kin_reg_
return total_loss_, loss_, r2_reg_, fro_reg_, kin_reg_
def evaluate_loss(opt_state, _key, ds_eval):
"""
Convenience function for evaluating loss over train set in smaller batches.
"""
sep_loss_aug_, sep_loss_, sep_loss_r2_reg_, sep_loss_fro_reg_, sep_loss_kin_reg_, nfe, bs = \
[], [], [], [], [], [], []
for test_batch_num in range(num_test_batches):
_key, = jax.random.split(_key, num=1)
test_batch = next(ds_eval)
test_batch_loss_aug_, test_batch_loss_, \
test_batch_loss_r2_reg_, test_batch_loss_fro_reg_, test_batch_loss_kin_reg_ = \
sep_losses(opt_state, test_batch, _key)
if count_nfe:
nfe.append(model["nfe"](_key, get_params(opt_state), test_batch))
else:
nfe.append(0)
sep_loss_aug_.append(test_batch_loss_aug_)
sep_loss_.append(test_batch_loss_)
sep_loss_r2_reg_.append(test_batch_loss_r2_reg_)
sep_loss_fro_reg_.append(test_batch_loss_fro_reg_)
sep_loss_kin_reg_.append(test_batch_loss_kin_reg_)
bs.append(len(test_batch))
sep_loss_aug_ = jnp.array(sep_loss_aug_)
sep_loss_ = jnp.array(sep_loss_)
sep_loss_r2_reg_ = jnp.array(sep_loss_r2_reg_)
sep_loss_fro_reg_ = jnp.array(sep_loss_fro_reg_)
sep_loss_kin_reg_ = jnp.array(sep_loss_kin_reg_)
nfe = jnp.array(nfe)
bs = jnp.array(bs)
return jnp.average(sep_loss_aug_, weights=bs), \
jnp.average(sep_loss_, weights=bs), \
jnp.average(sep_loss_r2_reg_, weights=bs), \
jnp.average(sep_loss_fro_reg_, weights=bs), \
jnp.average(sep_loss_kin_reg_, weights=bs), \
jnp.average(nfe, weights=bs)
itr = 0
info = collections.defaultdict(dict)
key = rng
for epoch in range(parse_args.nepochs):
for i in range(num_batches):
key, = jax.random.split(key, num=1)
batch = next(ds_train)
itr += 1
if itr <= load_itr:
continue
update_start = time.time()
opt_state = update(itr, opt_state, key, batch)
tree_flatten(opt_state)[0][0].block_until_ready()
update_end = time.time()
time_str = "%d %.18f %d\n" % (itr, update_end - update_start, load_itr)
outfile = open("%s/reg_%s_%s_lam_%.18e_lam_fro_%.18e_lam_kin_%.18e_time.txt"
% (dirname, reg, reg_type, lam, lam_fro, lam_kin), "a")
outfile.write(time_str)
outfile.close()
if itr % parse_args.test_freq == 0:
loss_aug_, loss_, loss_r2_reg_, loss_fro_reg_, loss_kin_reg_, nfe_ = \
evaluate_loss(opt_state, key, ds_test_eval)
print_str = 'Iter {:04d} | Total (Regularized) Loss {:.6f} | Loss {:.6f} | ' \
'r {:.6f} | fro {:.6f} | kin {:.6f} | ' \
'NFE {:.6f}'.format(itr, loss_aug_, loss_, loss_r2_reg_, loss_fro_reg_, loss_kin_reg_, nfe_)
print(print_str)
outfile = open("%s/reg_%s_%s_lam_%.18e_lam_fro_%.18e_lam_kin_%.18e_info.txt"
% (dirname, reg, reg_type, lam, lam_fro, lam_kin), "a")
outfile.write(print_str + "\n")
outfile.close()
info[itr]["loss_aug"] = loss_aug_
info[itr]["loss"] = loss_
info[itr]["loss_r2_reg"] = loss_r2_reg_
info[itr]["loss_fro_reg"] = loss_fro_reg_
info[itr]["loss_kin_reg"] = loss_kin_reg_
info[itr]["nfe"] = nfe_
if itr % parse_args.save_freq == 0:
param_filename = "%s/reg_%s_%s_lam_%.18e_lam_fro_%.18e_lam_kin_%.18e_%d_fargs.pickle" \
% (dirname, reg, reg_type, lam, lam_fro, lam_kin, itr)
fargs = get_params(opt_state)
outfile = open(param_filename, "wb")
pickle.dump(fargs, outfile)
outfile.close()
if itr % parse_args.ckpt_freq == 0:
state_dict = {
"opt_state": ravel_pytree(opt_state)[0],
"itr": itr,
}
# only save ckpts if a directory has been made for them (allow easy switching between v1 and v2)
try:
outfile = open(parse_args.ckpt_path, 'wb')
pickle.dump(state_dict, outfile)
outfile.close()
except IOError:
print("Unable to save ck.pt %d" % itr, file=sys.stderr)
meta = {
"info": info,
"args": parse_args
}
outfile = open("%s/reg_%s_%s_lam_%.18e_lam_fro_%.18e_lam_kin_%.18e_%d_meta.pickle"
% (dirname, reg, reg_type, lam, lam_fro, lam_kin, itr), "wb")
pickle.dump(meta, outfile)
outfile.close()
if __name__ == "__main__":
run()
```
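A small self-contained illustration (not from the repository) of the Taylor-mode recursion that `sol_recursive` performs with `jax.experimental.jet`: the coefficients from one pass are fed back as the series seed for the next pass:
```python
import jax.numpy as jnp
from jax.experimental.jet import jet
f = lambda z: jnp.sin(z)  # toy autonomous dynamics dz/dt = f(z)
z0 = jnp.array(1.0)
# First pass: a unit seed yields f(z0) plus a first-order Taylor coefficient.
(y0, [y1]) = jet(f, (z0,), ((jnp.ones_like(z0),),))
# Second pass: feeding (y0, y1) back yields the next-order coefficient,
# mirroring the recursion used for the r3 regularizer above.
(y0, [y1, y2]) = jet(f, (z0,), ((y0, y1),))
print(y0, y1, y2)
```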
#### File: jirufengyu/ode/mnist.py
```python
import argparse
import collections
import os
import pickle
import time
import haiku as hk
import jax
import jax.numpy as jnp
import tensorflow_datasets as tfds
from jax import lax
from jax.config import config
from jax.experimental import optimizers
from jax.experimental.jet import jet
from jax.flatten_util import ravel_pytree
from jax.tree_util import tree_flatten
from lib.ode import odeint, odeint_aux_one, odeint_sepaux, odeint_grid, odeint_grid_sepaux_one, odeint_grid_aux
float64 = False
config.update("jax_enable_x64", float64)
REGS = ["r2", "r3", "r4", "r5", "r6"]
parser = argparse.ArgumentParser('Neural ODE MNIST')
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--test_batch_size', type=int, default=1000)
parser.add_argument('--nepochs', type=int, default=160)
parser.add_argument('--lr', type=float, default=1e-2)
parser.add_argument('--lam', type=float, default=0)
parser.add_argument('--lam_w', type=float, default=0)
parser.add_argument('--atol', type=float, default=1.4e-8)
parser.add_argument('--rtol', type=float, default=1.4e-8)
parser.add_argument('--vmap', action="store_true")
parser.add_argument('--reg', type=str, choices=['none'] + REGS, default='none')
parser.add_argument('--test_freq', type=int, default=3000)
parser.add_argument('--save_freq', type=int, default=3000)
parser.add_argument('--dirname', type=str, default='tmp')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--no_count_nfe', action="store_true")
parser.add_argument('--load_ckpt', type=str, default=None)
parser.add_argument('--ckpt_path', type=str, default="./ck.pt")
parser.add_argument('--lam_fro', type=float, default=0)
parser.add_argument('--lam_kin', type=float, default=0)
parser.add_argument('--reg_type', type=str, choices=['our', 'fin'], default='our')
parser.add_argument('--num_steps', type=int, default=2)
parse_args = parser.parse_args()
if not os.path.exists(parse_args.dirname):
os.makedirs(parse_args.dirname)
# set up config
reg = parse_args.reg
lam = parse_args.lam
lam_fro = parse_args.lam_fro
lam_kin = parse_args.lam_kin
reg_type = parse_args.reg_type
lam_w = parse_args.lam_w
seed = parse_args.seed
rng = jax.random.PRNGKey(seed)
dirname = parse_args.dirname
count_nfe = not parse_args.no_count_nfe
vmap = parse_args.vmap
grid = False
if grid:
all_odeint = odeint_grid
odeint_aux1 = odeint_grid_aux # finlay trick w/ 1 augmented state
odeint_aux2 = odeint_grid_sepaux_one # odeint_grid_sepaux_onefinlay trick w/ 2 augmented states
ode_kwargs = {
"step_size": 1 / parse_args.num_steps
}
else:
all_odeint = odeint
odeint_aux1 = odeint_aux_one
odeint_aux2 = odeint_sepaux
ode_kwargs = {
"atol": parse_args.atol,
"rtol": parse_args.rtol
}
# some primitive functions
def sigmoid(z):
"""
Numerically stable sigmoid.
"""
return 1/(1 + jnp.exp(-z))
def softmax_cross_entropy(logits, labels):
"""
Cross-entropy loss applied to softmax.
"""
one_hot = hk.one_hot(labels, logits.shape[-1])
return -jnp.sum(jax.nn.log_softmax(logits) * one_hot, axis=-1)
def sol_recursive(f, z, t):
"""
Recursively compute higher order derivatives of dynamics of ODE.
"""
if reg == "none":
return f(z, t), jnp.zeros_like(z)
z_shape = z.shape
z_t = jnp.concatenate((jnp.ravel(z), jnp.array([t])))
def g(z_t):
"""
Closure to expand z.
"""
z, t = jnp.reshape(z_t[:-1], z_shape), z_t[-1]
dz = jnp.ravel(f(z, t))
dt = jnp.array([1.])
dz_t = jnp.concatenate((dz, dt))
return dz_t
reg_ind = REGS.index(reg)
(y0, [*yns]) = jet(g, (z_t, ), ((jnp.ones_like(z_t), ), ))
for _ in range(reg_ind + 1):
(y0, [*yns]) = jet(g, (z_t, ), ((y0, *yns), ))
return (jnp.reshape(y0[:-1], z_shape), jnp.reshape(yns[-2][:-1], z_shape))
# set up modules
class Flatten(hk.Module):
"""
Flatten all dimensions except batch dimension.
"""
def __init__(self):
super(Flatten, self).__init__()
def __call__(self, x):
return jnp.reshape(x, (x.shape[0], -1))
class ConcatConv2D(hk.Module):
"""
Convolution with extra channel and skip connection for time.
"""
def __init__(self, **kwargs):
super(ConcatConv2D, self).__init__()
self._layer = hk.Conv2D(**kwargs)
def __call__(self, x, t):
tt = jnp.ones_like(x[:, :, :, :1]) * t
ttx = jnp.concatenate([tt, x], axis=-1)
return self._layer(ttx)
def get_epsilon(key, shape):
"""
Sample epsilon from the desired distribution.
"""
# rademacher
if float64:
return jax.random.randint(key, shape, minval=0, maxval=2).astype(jnp.float64) * 2 - 1
else:
return jax.random.randint(key, shape, minval=0, maxval=2).astype(jnp.float32) * 2 - 1
class PreODE(hk.Module):
"""
Module applied before the ODE layer.
"""
def __init__(self):
super(PreODE, self).__init__()
if float64:
self.model = hk.Sequential([
lambda x: x.astype(jnp.float64) / 255.,
Flatten()
])
else:
self.model = hk.Sequential([
lambda x: x.astype(jnp.float32) / 255.,
Flatten()
])
def __call__(self, x):
return self.model(x)
class MLPDynamics(hk.Module):
"""
Dynamics for ODE as an MLP.
"""
def __init__(self, input_shape):
super(MLPDynamics, self).__init__()
self.input_shape = input_shape
self.dim = jnp.prod(input_shape[1:])
self.hidden_dim = 100
self.lin1 = hk.Linear(self.hidden_dim)
self.lin2 = hk.Linear(self.dim)
def __call__(self, x, t):
# vmapping means x will be a single batch element, so need to expand dims at 0
x = jnp.reshape(x, (-1, self.dim))
out = sigmoid(x)
tt = jnp.ones_like(x[:, :1]) * t
t_out = jnp.concatenate([tt, out], axis=-1)
out = self.lin1(t_out)
out = sigmoid(out)
tt = jnp.ones_like(out[:, :1]) * t
t_out = jnp.concatenate([tt, out], axis=-1)
out = self.lin2(t_out)
return out
class PostODE(hk.Module):
"""
Module applied after the ODE layer.
"""
def __init__(self):
super(PostODE, self).__init__()
self.model = hk.Sequential([
sigmoid,
hk.Linear(10)
])
def __call__(self, x):
return self.model(x)
def wrap_module(module, *module_args, **module_kwargs):
"""
    Wrap the module in a function to be transformed.
"""
def wrap(*args, **kwargs):
"""
Wrapping of module.
"""
model = module(*module_args, **module_kwargs)
return model(*args, **kwargs)
return wrap
def initialization_data(input_shape, ode_shape):
"""
Data for initializing the modules.
"""
    # ode_shape = (1, ) + ode_shape[1:]
    ode_dim = 784  # = jnp.prod(ode_shape[1:]) for 28x28x1 MNIST inputs
data = {
"pre_ode": jnp.zeros(input_shape),
"ode": (jnp.zeros(ode_dim), 0.),
"post_ode": jnp.zeros(ode_dim)
}
return data
def init_model():
"""
Instantiates transformed submodules of model and their parameters.
"""
ts = jnp.array([0., 1.])
input_shape = (1, 28, 28, 1)
ode_shape = (-1, 28, 28, 1)
    ode_shape = jnp.asarray(ode_shape)
    ode_dim = 784  # = jnp.prod(ode_shape[1:]) for 28x28x1 MNIST inputs
initialization_data_ = initialization_data(input_shape, ode_shape)
pre_ode = hk.without_apply_rng(hk.transform(wrap_module(PreODE)))
pre_ode_params = pre_ode.init(rng, initialization_data_["pre_ode"])
pre_ode_fn = pre_ode.apply
dynamics = hk.without_apply_rng(hk.transform(wrap_module(MLPDynamics, ode_shape)))
dynamics_params = dynamics.init(rng, *initialization_data_["ode"])
dynamics_wrap = lambda x, t, params: dynamics.apply(params, x, t)
def reg_dynamics(y, t, params):
"""
Dynamics of regularization for ODE integration.
"""
y0, r = sol_recursive(lambda _y, _t: dynamics_wrap(_y, _t, params), y, t)
return y0, jnp.mean(jnp.square(r), axis=[axis_ for axis_ in range(1, r.ndim)])
def fin_dynamics(y, t, eps, params):
"""
Dynamics of finlay reg.
"""
f = lambda y: dynamics_wrap(y, t, params)
dy, eps_dy = jax.jvp(f, (y,), (eps,))
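        # With Rademacher eps, E[||eps_dy||^2] = ||∂f/∂y||_F^2, i.e. the Frobenius-norm
        # regularizer of the "finlay trick" accumulated in aug_dynamics below.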
return dy, eps_dy
def aug_dynamics(yr, t, eps, params):
"""
Dynamics augmented with regularization.
"""
y, *_ = yr
if reg_type == "our":
return reg_dynamics(y, t, params)
else:
dy, eps_dy = fin_dynamics(y, t, eps, params)
dfro = jnp.mean(jnp.square(eps_dy), axis=[axis_ for axis_ in range(1, dy.ndim)])
dkin = jnp.mean(jnp.square(dy), axis=[axis_ for axis_ in range(1, dy.ndim)])
return dy, dfro, dkin
def all_aug_dynamics(yr, t, eps, params):
"""
Dynamics augmented with all regularizations for tracking.
"""
y, *_ = yr
dy, eps_dy = fin_dynamics(y, t, eps, params)
_, drdt = reg_dynamics(y, t, params)
dfro = jnp.mean(jnp.square(eps_dy), axis=[axis_ for axis_ in range(1, dy.ndim)])
dkin = jnp.mean(jnp.square(dy), axis=[axis_ for axis_ in range(1, dy.ndim)])
return dy, drdt, dfro, dkin
if reg_type == "our":
_odeint = odeint_aux1
else:
_odeint = odeint_aux2
nodeint_aux = lambda y0, ts, eps, params: \
_odeint(lambda y, t, eps, params: dynamics_wrap(y, t, params),
aug_dynamics, y0, ts, eps, params, **ode_kwargs)[0]
all_nodeint = lambda y0, ts, eps, params: all_odeint(all_aug_dynamics,
y0, ts, eps, params, **ode_kwargs)[0]
def ode(params, out_pre_ode, eps):
"""
Apply the ODE block.
"""
out_ode, *out_ode_rs = nodeint_aux(aug_init(out_pre_ode), ts, eps, params)
return (out_ode[-1], *(out_ode_r[-1] for out_ode_r in out_ode_rs))
def all_ode(params, out_pre_ode, eps):
"""
Apply ODE block for all regularizations.
"""
out_ode, *out_ode_rs = all_nodeint(all_aug_init(out_pre_ode), ts, eps, params)
return (out_ode[-1], *(out_ode_r[-1] for out_ode_r in out_ode_rs))
if count_nfe:
if vmap:
unreg_nodeint = jax.vmap(lambda y0, t, params: all_odeint(dynamics_wrap, y0, t, params, **ode_kwargs)[1],
(0, None, None))
else:
unreg_nodeint = lambda y0, t, params: all_odeint(dynamics_wrap, y0, t, params, **ode_kwargs)[1]
#<EMAIL>
def nfe_fn(params, _images, _labels):
"""
Function to return NFE.
"""
in_ode = pre_ode_fn(params["pre_ode"], _images)
f_nfe = unreg_nodeint(in_ode, ts, params["ode"])
return jnp.mean(f_nfe)
else:
nfe_fn = None
post_ode = hk.without_apply_rng(hk.transform(wrap_module(PostODE)))
post_ode_params = post_ode.init(rng, initialization_data_["post_ode"])
post_ode_fn = post_ode.apply
# return a dictionary of the three components of the model
model = {"model": {
"pre_ode": pre_ode_fn,
"ode": ode,
"post_ode": post_ode_fn,
"all_ode": all_ode
}, "params": {
"pre_ode": pre_ode_params,
"ode": dynamics_params,
"post_ode": post_ode_params
}, "nfe": nfe_fn
}
def forward(key, params, _images):
"""
Forward pass of the model.
"""
model_ = model["model"]
out_pre_ode = model_["pre_ode"](params["pre_ode"], _images)
out_ode, *regs = model_["ode"](params["ode"], out_pre_ode, get_epsilon(key, out_pre_ode.shape))
logits = model_["post_ode"](params["post_ode"], out_ode)
return (logits, *regs)
def forward_all(key, params, _images):
"""
Forward pass of the model.
"""
model_ = model["model"]
out_pre_ode = model_["pre_ode"](params["pre_ode"], _images)
out_ode, *regs = model_["all_ode"](params["ode"], out_pre_ode, get_epsilon(key, out_pre_ode.shape))
logits = model_["post_ode"](params["post_ode"], out_ode)
return (logits, *regs)
model["model"]["forward_all"] = forward_all
model["model"]["forward"] = forward
return forward, model
def aug_init(y, batch_size=-1):
"""
Flatten the dynamics and append regularization dynamics.
We need to flatten the dynamics first since they may be convolutional
(has width, height, and channels).
"""
if batch_size == -1:
batch_size = y.shape[0]
if reg_type == "our":
return y, jnp.zeros(batch_size)
else:
return y, jnp.zeros(batch_size), jnp.zeros(batch_size)
def all_aug_init(y, batch_size=-1):
"""
Flatten the dynamics and append regularization dynamics.
We need to flatten the dynamics first since they may be convolutional
(has width, height, and channels).
"""
if batch_size == -1:
batch_size = y.shape[0]
return y, jnp.zeros(batch_size), jnp.zeros(batch_size), jnp.zeros(batch_size)
def _acc_fn(logits, labels):
"""
Classification accuracy of the model.
"""
predicted_class = jnp.argmax(logits, axis=1)
return jnp.mean(predicted_class == labels)
def _loss_fn(logits, labels):
return jnp.mean(softmax_cross_entropy(logits, labels))
def _reg_loss_fn(reg):
return jnp.mean(reg)
def _weight_fn(params):
flat_params, _ = ravel_pytree(params)
return 0.5 * jnp.sum(jnp.square(flat_params))
def loss_fn(forward, params, images, labels, key):
"""
The loss function for training.
"""
if reg_type == "our":
logits, regs = forward(key, params, images)
loss_ = _loss_fn(logits, labels)
reg_ = _reg_loss_fn(regs)
weight_ = _weight_fn(params)
return loss_ + lam * reg_ + lam_w * weight_
else:
logits, fro_regs, kin_regs = forward(key, params, images)
loss_ = _loss_fn(logits, labels)
fro_reg_ = _reg_loss_fn(fro_regs)
kin_reg_ = _reg_loss_fn(kin_regs)
weight_ = _weight_fn(params)
return loss_ + lam_fro * fro_reg_ + lam_kin * kin_reg_ + lam_w * weight_
def init_data():
"""
Initialize data.
"""
(ds_train,), ds_info = tfds.load('mnist',
split=['train'],
shuffle_files=True,
as_supervised=True,
with_info=True,
read_config=tfds.ReadConfig(shuffle_seed=parse_args.seed))
print(ds_train)
print(type(ds_train))
num_train = ds_info.splits['train'].num_examples
assert num_train % parse_args.batch_size == 0
num_batches = num_train // parse_args.batch_size
test_batch_size = parse_args.test_batch_size
assert num_train % test_batch_size == 0
num_test_batches = num_train // test_batch_size
# make sure we always save the model on the last iteration
assert num_batches * parse_args.nepochs % parse_args.save_freq == 0
ds_train = ds_train.cache()
ds_train = ds_train.repeat()
ds_train = ds_train.shuffle(1000, seed=seed)
ds_train, ds_train_eval = ds_train.batch(parse_args.batch_size), ds_train.batch(test_batch_size)
ds_train, ds_train_eval = tfds.as_numpy(ds_train), tfds.as_numpy(ds_train_eval)
meta = {
"num_batches": num_batches,
"num_test_batches": num_test_batches
}
return ds_train, ds_train_eval, meta
def run():
"""
Run the experiment.
"""
ds_train, ds_train_eval, meta = init_data()
num_batches = meta["num_batches"]
num_test_batches = meta["num_test_batches"]
forward, model = init_model()
forward_all = model["model"]["forward_all"]
grad_fn = jax.grad(lambda *args: loss_fn(forward, *args))
def lr_schedule(train_itr):
"""
The learning rate schedule.
"""
_epoch = train_itr // num_batches
id = lambda x: x
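# piecewise-constant schedule: 1e-1 until epoch 60, 1e-2 until epoch 100,
# 1e-3 until epoch 140, then 1e-4 (appears to use the older five-argument lax.cond signature)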
return lax.cond(_epoch < 60, 1e-1, id, 0,
lambda _: lax.cond(_epoch < 100, 1e-2, id, 0,
lambda _: lax.cond(_epoch < 140, 1e-3, id, 1e-4, id)))
opt_init, opt_update, get_params = optimizers.momentum(step_size=lr_schedule, mass=0.9)
if parse_args.load_ckpt:
file_ = open(parse_args.load_ckpt, 'rb')
init_params = pickle.load(file_)
file_.close()
# parse itr from the checkpoint
load_itr = int(os.path.basename(parse_args.load_ckpt).split("_")[-2])
else:
init_params = model["params"]
load_itr = 0
opt_state = opt_init(init_params)
#@jax.jit
def update(_itr, _opt_state, _key, _batch):
"""
Update the params based on grad for current batch.
"""
images, labels = _batch
return opt_update(_itr, grad_fn(get_params(_opt_state), images, labels, _key), _opt_state)
# @jax.jit
def sep_losses(_opt_state, _batch, key):
"""
Convenience function for calculating losses separately.
"""
params = get_params(_opt_state)
images, labels = _batch
logits, r2_regs, fro_regs, kin_regs = forward_all(key, params, images)
loss_ = _loss_fn(logits, labels)
r2_reg_ = _reg_loss_fn(r2_regs)
fro_reg_ = _reg_loss_fn(fro_regs)
kin_reg_ = _reg_loss_fn(kin_regs)
total_loss_ = loss_ + lam * r2_reg_ + lam_fro * fro_reg_ + lam_kin * kin_reg_
acc_ = _acc_fn(logits, labels)
return acc_, total_loss_, loss_, r2_reg_, fro_reg_, kin_reg_
def evaluate_loss(opt_state, _key, ds_train_eval):
"""
Convenience function for evaluating loss over train set in smaller batches.
"""
sep_acc_, sep_loss_aug_, sep_loss_, \
sep_loss_r2_reg_, sep_loss_fro_reg_, sep_loss_kin_reg_, nfe = [], [], [], [], [], [], []
for test_batch_num in range(num_test_batches):
test_batch = next(ds_train_eval)
_key, = jax.random.split(_key, num=1)
test_batch_acc_, test_batch_loss_aug_, test_batch_loss_, \
test_batch_loss_r2_reg_, test_batch_loss_fro_reg_, test_batch_loss_kin_reg_ = \
sep_losses(opt_state, test_batch, _key)
if count_nfe:
nfe.append(model["nfe"](get_params(opt_state), *test_batch))
else:
nfe.append(0)
sep_acc_.append(test_batch_acc_)
sep_loss_aug_.append(test_batch_loss_aug_)
sep_loss_.append(test_batch_loss_)
sep_loss_r2_reg_.append(test_batch_loss_r2_reg_)
sep_loss_fro_reg_.append(test_batch_loss_fro_reg_)
sep_loss_kin_reg_.append(test_batch_loss_kin_reg_)
sep_acc_ = jnp.array(sep_acc_)
sep_loss_aug_ = jnp.array(sep_loss_aug_)
sep_loss_ = jnp.array(sep_loss_)
sep_loss_r2_reg_ = jnp.array(sep_loss_r2_reg_)
sep_loss_fro_reg_ = jnp.array(sep_loss_fro_reg_)
sep_loss_kin_reg_ = jnp.array(sep_loss_kin_reg_)
nfe = jnp.array(nfe)
return jnp.mean(sep_acc_), jnp.mean(sep_loss_aug_), jnp.mean(sep_loss_), \
jnp.mean(sep_loss_r2_reg_), jnp.mean(sep_loss_fro_reg_), jnp.mean(sep_loss_kin_reg_), jnp.mean(nfe)
itr = 0
info = collections.defaultdict(dict)
key = rng
# create an iterator over the training dataset
iterator=iter(ds_train)
for epoch in range(parse_args.nepochs):
for i in range(num_batches):
batch = next(iterator)
key, = jax.random.split(key, num=1)
itr += 1
if parse_args.load_ckpt:
if itr <= load_itr:
continue
update_start = time.time()
opt_state = update(itr, opt_state, key, batch)
tree_flatten(opt_state)[0][0].block_until_ready()
update_end = time.time()
time_str = "%d %.18f %d\n" % (itr, update_end - update_start, load_itr)
outfile = open("%s/reg_%s_%s_lam_%.18e_lam_fro_%.18e_lam_kin_%.18e_time.txt"
% (dirname, reg, reg_type, lam, lam_fro, lam_kin), "a")
outfile.write(time_str)
outfile.close()
if itr % parse_args.test_freq == 0:
acc_, loss_aug_, loss_, \
loss_r2_reg_, loss_fro_reg_, loss_kin_reg_, nfe_ = evaluate_loss(opt_state, key, ds_train_eval)
print_str = 'Iter {:04d} | Total (Regularized) Loss {:.6f} | Loss {:.6f} | ' \
'r {:.6f} | fro {:.6f} | kin {:.6f} | ' \
'NFE {:.6f}'.format(itr, loss_aug_, loss_, loss_r2_reg_, loss_fro_reg_, loss_kin_reg_, nfe_)
print(print_str)
outfile = open("%s/reg_%s_%s_lam_%.18e_lam_fro_%.18e_lam_kin_%.18e_info.txt"
% (dirname, reg, reg_type, lam, lam_fro, lam_kin), "a")
outfile.write(print_str + "\n")
outfile.close()
info[itr]["acc"] = acc_
info[itr]["loss_aug"] = loss_aug_
info[itr]["loss"] = loss_
info[itr]["loss_r2_reg"] = loss_r2_reg_
info[itr]["loss_fro_reg"] = loss_fro_reg_
info[itr]["loss_kin_reg"] = loss_kin_reg_
info[itr]["nfe"] = nfe_
if itr % parse_args.save_freq == 0:
param_filename = "%s/reg_%s_%s_lam_%.18e_lam_fro_%.18e_lam_kin_%.18e_%d_fargs.pickle" \
% (dirname, reg, reg_type, lam, lam_fro, lam_kin, itr)
fargs = get_params(opt_state)
outfile = open(param_filename, "wb")
pickle.dump(fargs, outfile)
outfile.close()
meta = {
"info": info,
"args": parse_args
}
outfile = open("%s/reg_%s_%s_lam_%.18e_lam_fro_%.18e_lam_kin_%.18e_%d_meta.pickle"
% (dirname, reg, reg_type, lam, lam_fro, lam_kin, itr), "wb")
pickle.dump(meta, outfile)
outfile.close()
if __name__ == "__main__":
run()
``` |
{
"source": "jiruifu-jerry0219/DRLND_Jerry",
"score": 2
} |
#### File: DRLND_Jerry/CrossEntropy/CEM.py
```python
import gym
import math
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
env = gym.make('MountainCarContinuous-v0')
# env = gym.make('CartPole-v1')
env.seed(101)
np.random.seed(101)
print('observation space:', env.observation_space)
print('action space:', env.action_space)
print(' - low:', env.action_space.low)
print(' - high:', env.action_space.high)
class Agent(nn.Module):
def __init__(self, env, h_size=16):
super(Agent, self).__init__()
self.env = env
# state, hidden layer, action sizes
self.s_size = env.observation_space.shape[0]
self.h_size = h_size
self.a_size = env.action_space.shape[0]
# define layers
self.fc1 = nn.Linear(self.s_size, self.h_size)
self.fc2 = nn.Linear(self.h_size, self.a_size)
def set_weights(self, weights):
s_size = self.s_size
h_size = self.h_size
a_size = self.a_size
# separate the weights for each layer
fc1_end = (s_size*h_size)+h_size
fc1_W = torch.from_numpy(weights[:s_size*h_size].reshape(s_size, h_size))
fc1_b = torch.from_numpy(weights[s_size*h_size:fc1_end])
fc2_W = torch.from_numpy(weights[fc1_end:fc1_end+(h_size*a_size)].reshape(h_size, a_size))
fc2_b = torch.from_numpy(weights[fc1_end+(h_size*a_size):])
# set the weights for each layer
self.fc1.weight.data.copy_(fc1_W.view_as(self.fc1.weight.data))
self.fc1.bias.data.copy_(fc1_b.view_as(self.fc1.bias.data))
self.fc2.weight.data.copy_(fc2_W.view_as(self.fc2.weight.data))
self.fc2.bias.data.copy_(fc2_b.view_as(self.fc2.bias.data))
def get_weights_dim(self):
return (self.s_size+1)*self.h_size + (self.h_size+1)*self.a_size
def forward(self, x):
x = F.relu(self.fc1(x))
x = torch.tanh(self.fc2(x))
action = x.cpu().data
return action
def evaluate(self, weights, gamma=1.0, max_t=5000):
self.set_weights(weights)
episode_return = 0.0
state = self.env.reset()
for t in range(max_t):
state = torch.from_numpy(state).float().to(device)
action = self.forward(state)
state, reward, done, _ = self.env.step(action)
episode_return += reward * math.pow(gamma, t)
if done:
break
return episode_return
agent = Agent(env).to(device)
def cem(n_iterations=500, max_t=1000, gamma=1.0, print_every=10, pop_size=50, elite_frac=0.2, sigma=0.5):
"""PyTorch implementation of the cross-entropy method.
Params
======
n_iterations (int): maximum number of training iterations
max_t (int): maximum number of timesteps per episode
gamma (float): discount rate
print_every (int): how often to print average score (over last 100 episodes)
pop_size (int): size of population at each iteration
elite_frac (float): percentage of top performers to use in update
sigma (float): standard deviation of additive noise
"""
n_elite=int(pop_size*elite_frac)
scores_deque = deque(maxlen=100)
scores = []
best_weight = sigma*np.random.randn(agent.get_weights_dim())
for i_iteration in range(1, n_iterations+1):
weights_pop = [best_weight + (sigma*np.random.randn(agent.get_weights_dim())) for i in range(pop_size)]
rewards = np.array([agent.evaluate(weights, gamma, max_t) for weights in weights_pop])
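# cross-entropy update: keep the top-n_elite weight vectors by episode return
# and recenter the sampling distribution on the mean of those elite weights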
elite_idxs = rewards.argsort()[-n_elite:]
elite_weights = [weights_pop[i] for i in elite_idxs]
best_weight = np.array(elite_weights).mean(axis=0)
reward = agent.evaluate(best_weight, gamma=1.0)
scores_deque.append(reward)
scores.append(reward)
torch.save(agent.state_dict(), 'checkpoint.pth')
if i_iteration % print_every == 0:
print('Episode {}\tAverage Score: {:.2f}'.format(i_iteration, np.mean(scores_deque)))
if np.mean(scores_deque)>=90.0:
print('\nEnvironment solved in {:d} iterations!\tAverage Score: {:.2f}'.format(i_iteration-100, np.mean(scores_deque)))
break
return scores
scores = cem()
# plot the scores
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores)+1), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
fig.savefig('result.png')
#shut down computer after training
# import os
# os.system("shutdown /s /t 1")
``` |
{
"source": "jiruifu-jerry0219/EMG_Deep_Features",
"score": 3
} |
#### File: EMG_Deep_Features/model/cnn.py
```python
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
from torch.autograd import Variable
class CNNModel(nn.Module):
def __init__(self):
super(CNNModel, self).__init__()
# Convolution 1
self.cnn1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=1, padding=1)
self.relu1 = nn.ReLU()
# Max pool 1
self.maxpool1 = nn.MaxPool2d(kernel_size=2)
# Convolution 2
self.cnn2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=0)
self.relu2 = nn.ReLU()
# Max pool 2
self.maxpool2 = nn.MaxPool2d(kernel_size=2)
# Fully connected 1 (readout)
self.fc1 = nn.Linear(32 * 4 * 4, 10)
def forward(self, x):
# Convolution 1
out = self.cnn1(x)
out = self.relu1(out)
# Max pool 1
out = self.maxpool1(out)
# Convolution 2
out = self.cnn2(out)
out = self.relu2(out)
# Max pool 2
out = self.maxpool2(out)
# Flatten the feature maps to shape (batch_size, features) before the readout layer.
# Note: cnn2 outputs 64 channels, so fc1's in_features must equal 64 * H_out * W_out
# for the actual input size; the 32 * 4 * 4 value used in __init__ appears to be
# carried over from a different example and may need adjusting.
out = out.view(out.size(0), -1)
# Linear function (readout)
out = self.fc1(out)
return out
```
#### File: EMG_Deep_Features/utils/data_loader.py
```python
import torch
import numpy as np
from os.path import dirname, join as pjoin
import scipy.io as sio
import os
from collections import defaultdict
from torch.utils.data import Dataset
# class MyDataset(Dataset):
# """custom dataset for .mat images"""
#
# def __init__(self, list_of_urls):
# self.list_of_urls = list_of_urls
#
# def __len__(self):
# return len(self.list_of_urls)
#
# def __getitem__(self, index):
# image_url = self.list_of_urls[index]
# image = scipy.io.loadmat(image_url)
# label = ...
# ...
class EmgMatDataset(Dataset):
"""Create custom dataset from .mat files"""
def __init__(self, path_of_root):
path_check = os.listdir(path_of_root)
assert len(path_check) != 0, "The path for the data loader must not be empty! Please check your path"
self.root_path = path_of_root
def label(self, classes):
self.label_list = classes
def matloader(self, variable_name):
assert type(variable_name) == str, 'The name of MATLAB matrix must be a string'
assert type(self.label_list) == list, 'The label must be wrapped as list'
assert len(self.label_list) != 0, "The list for labels must be provided!"
#create an empty dictionay with labels as the key
pool = {key: [] for key in self.label_list}
root = sorted(os.listdir(self.root_path))
for i in range(len(root)):
train_path = os.path.join(self.root_path, root[i])
for files in sorted(os.listdir(train_path)):
EMG = sio.loadmat(train_path + '/' + files)
data = EMG[variable_name]
pool[list(pool)[i]].append(data)
return pool
``` |
{
"source": "jiruifu-jerry0219/EMG_Process_Toolkit",
"score": 3
} |
#### File: signal_processing/Filters/butterworth_filter.py
```python
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import math
# def butter_highpass_filter(f_s, f_pass, f_stop, fs = 0.5, td = 1, g_pass = 1, g_stop = 50, wc = None):
# """
# Return explanation:
# N: number of orders
# b: numerator of the Filter
# a: denominator of the filter
# """
# wp = f_pass / (f_s / 2)
# ws = f_stop / (f_s / 2)
# omega_p = (2 / td) * np.tan(wp / 2)
# omega_s = (2 / td) * np.tan(ws / 2)
# # Find the order and natural frequency of the highpass filter
# N, wn = signal.buttord(omega_p, omega_s, g_pass, g_stop, analog = True)
# # Find the Gain of the highpass filter
# if wc:
# b, a = signal.butter(N, wc, btype = 'high', analog = True)
# wn = wc
# else:
# b, a = signal.butter(N, wn, btype = 'high', analog = True)
# return N, b, a, wn
def butter_highpass_filter(fs, fc, order = 5):
"""
Function explanation:
fs: sampling rate
fc: cutoff frequency
order: Number of orders
Return: denominator and numerator of the filter's transfer function
"""
nyq = 0.5 * fs
normal_fc = fc / nyq
b, a = signal.butter(order, normal_fc, btype = 'high', analog = False)
return b, a
def butter_bandpass_filter(fs, lowcut, highcut, order = 5):
"""
Function explanation:
fs: sampling rate
lowcut: lowcut Frequency
highcut:highcut Frequency
order: Number of orders
Return: denominator and numerator of the filter's transfer function
"""
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = signal.butter(order, [low, high], btype = 'band', analog = False)
return b, a
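# Illustrative usage sketch (not part of the original module): design a band-pass
# filter for a synthetic signal and apply it with zero-phase filtering. The
# sampling rate and cutoff values below are assumed, not taken from the project.
if __name__ == "__main__":
    fs_demo = 1000.0  # assumed sampling rate in Hz
    t_demo = np.linspace(0, 1.0, int(fs_demo), endpoint=False)
    x_demo = np.sin(2 * np.pi * 5 * t_demo) + 0.5 * np.sin(2 * np.pi * 120 * t_demo)
    b_demo, a_demo = butter_bandpass_filter(fs_demo, lowcut=20.0, highcut=450.0, order=4)
    y_demo = signal.filtfilt(b_demo, a_demo, x_demo)  # zero-phase band-pass output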
```
#### File: signal_processing/Filters/highpass_filter.py
```python
from butterworth_filter import butter_highpass_filter as bhpf
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import math
def hpf(data, fs, fc, order):
"""
Apply the Butterworth high pass Filter
data: raw signal for processing
fs: sampling rate
fc: cutoff Frequency
order: order of Filter
"""
b, a = bhpf(fs, fc, order)
y = signal.filtfilt(b, a, data)
return y
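# Illustrative usage sketch (not part of the original module): remove low-frequency
# drift from a synthetic signal. The sampling rate and cutoff are assumed values.
if __name__ == "__main__":
    fs_demo = 2000.0  # assumed sampling rate in Hz
    t_demo = np.linspace(0, 1.0, int(fs_demo), endpoint=False)
    raw_demo = np.sin(2 * np.pi * 1 * t_demo) + 0.2 * np.sin(2 * np.pi * 80 * t_demo)
    clean_demo = hpf(raw_demo, fs=fs_demo, fc=20.0, order=4)  # keeps the 80 Hz component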
``` |
{
"source": "jiruifu-jerry0219/MQTT-Communication",
"score": 3
} |
#### File: mqtt/IMAGE_MQTT/ImageRec.py
```python
import paho.mqtt.client as mqtt
import time
import os
# The callback for when the client receives a CONNACK response from the server.
"""You can use this to transmit images between devices"""
class Subscriber:
def __init__(self, host = '192.168.0.179', topic = 'camera'):
self.hostname = host
self.channel = topic
self.save_path = './image/'
self.num = 0
self.msg = None
def receive(self):
def on_connect(client, userdata, flags, rc):
client.subscribe(self.channel)
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
# The callback for when a PUBLISH message is received from the server.
def on_message(client, obj, msg):
print("<-------Received Image"+str(self.num)+"------->")
self.num +=1
name = 'fname' + str(self.num)
with open(os.path.join(self.save_path, name + '.jpg'), 'wb') as fd:
fd.write(msg.payload)
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(self.hostname, 1883, 60)
client.loop_forever()
try:
receiver = Subscriber()
msg = receiver.receive()
except KeyboardInterrupt:
print("Receive ended")
```
#### File: mqtt/mqtt_test/agent_publish.py
```python
import paho.mqtt.client as mqtt
import random
##publisher part
def Publish(msg, broker_sever, topic, client_name):
MQTT_Server = broker_sever
MQTT_PATH = topic
client = mqtt.Client(client_name)
client.connect(MQTT_Server, 1883, 60)
client.loop_start()
client.publish(topic=MQTT_PATH, payload = msg, qos = 1)
##generate random message to publish
def msg_generator():
msg_topic_set = ["distance", "angle", "speed"]
msg_content = random.randint(10,50)
msg_header = random.choice(msg_topic_set)
msg = msg_header + str(msg_content)
return msg
while True:
    ## to test the publishing function, randomly generate a message and a receiver id,
    ## then publish the message to the broker
    ## (the broker address, topic name, and client name below are assumed placeholders)
    receive_id = random.randint(1, 3)
    msg = msg_generator()
    Publish(msg, broker_sever="localhost", topic="agent/%d" % receive_id, client_name="test_publisher")
``` |
{
"source": "jiruifu-jerry0219/UpperLimbEstimator",
"score": 2
} |
#### File: UpperLimbEstimator/ArtificialNeuralNetwork/agent.py
```python
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torch.utils.data as Data
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler
class Agent:
def __init__(self):
pass
```
#### File: UpperLimbEstimator/data_process/getFeaturesTD.py
```python
import numpy as np # to handle datas
import math # to handle mathematical stuff (example power of 2)
from scipy.signal import butter, lfilter, welch, square # for signal filtering
def getfeaturesTD(emg, windows, step):
pool = []
col = emg.shape
for i in range(col[-1]):
s = emg[:, i]
assert len(s) != 0, "The length of input vector is zero!"
# print('The length of current vector is:', len(s))
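# Time-domain EMG features per channel: MAV (mean absolute value),
# SSI (simple square integral), VAR (variance), RMS (root mean square),
# LOG (here computed as the exponential of the mean absolute value)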
MAV = (1 / len(s)) * np.sum([abs(x) for x in s])
SSI = np.sum([x ** 2 for x in s])
VAR = (1 / (len(s) - 1)) * np.sum([x ** 2 for x in s])
RMS = np.sqrt((1 / len(s)) * np.sum([x ** 2 for x in s]))
LOG = math.exp((1 / len(s)) * sum([abs(x) for x in s]))
cln = np.vstack((MAV, SSI, VAR, RMS, LOG))
pool.append(cln)
featureSet = np.vstack(pool)
# print('The shape of feature set in this iteration is', featureSet.T.shape)
return featureSet.T
``` |
{
"source": "jiru/kakaodecrypt",
"score": 3
} |
#### File: jiru/kakaodecrypt/guess_user_id.py
```python
import sys
import argparse
import sqlite3
import json
from collections import Counter
class KakaoDbGuessUserId:
@staticmethod
def run(db_file):
con = sqlite3.connect(db_file)
cur = con.cursor()
cur.execute('SELECT id, members FROM chat_rooms')
chat_members = { row[0]: [] if row[1] is None else json.loads(row[1]) for row in cur.fetchall()}
found = []
for chat_id in chat_members:
if len(chat_members[chat_id]) > 0:
exclude = ','.join(list(map(str, chat_members[chat_id])))
cur.execute('SELECT DISTINCT user_id FROM chat_logs WHERE chat_id = %d AND user_id NOT IN (%s)' % (chat_id, exclude))
for row in cur.fetchall():
found.append(row[0])
total = len(found)
if total > 0:
print('Possible value(s) for user_id:')
found = Counter(found)
for user_id in found:
prob = found[user_id]*100/total
print(' %20d (prob %5.2f%%)' % (user_id, prob))
else:
print('Unable to find user_id.')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Guess the account's user_id value by analysing chat logs and chatrooms membership.")
parser.add_argument('db_file', metavar='KakaoTalk.db')
args = parser.parse_args()
KakaoDbGuessUserId.run(args.db_file)
``` |
{
"source": "jirvingphd/starskope",
"score": 3
} |
#### File: jirvingphd/starskope/functions_JMI.py
```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib as mpl
from IPython.display import display
# import additional libraries for keras
import keras
from keras.utils.np_utils import to_categorical
# from keras.preprocessing.text import Tokenizer
from keras import models, layers, optimizers
from keras.models import Sequential, Model
from keras.layers import Conv1D, MaxPool1D, Dense, Dropout, Flatten, \
BatchNormalization, Input, concatenate, Activation
from keras.optimizers import Adam
def star_signals(signal, label_col=None, classes=None,
class_names=None, figsize=(15,5), y_units=None, x_units=None):
"""
Plots a scatter plot and line plot of time series signal values.
**ARGS
signal: pandas series or numpy array
label_col: name of the label column if using labeled pandas series
-use default None for numpy array or unlabeled series.
-this is simply for customizing plot Title to include classification
classes: (optional- req labeled data) tuple if binary, array if multiclass
class_names: tuple or array of strings denoting what the classes mean
figsize: size of the figures (default = (15,5))
******
Ex1: Labeled timeseries passing 1st row of pandas dataframe
> first create the signal:
star_signal_alpha = train.iloc[0, :]
> then plot:
star_signals(star_signal_alpha, label_col='LABEL',classes=[1,2],
class_names=['No Planet', 'Planet']), figsize=(15,5))
Ex2: numpy array without any labels
> first create the signal:
>then plot:
star_signals(signal, figsize=(15,5))
"""
# pass None to label_col if unlabeled data, creates generic title
if label_col is None:
label = None
title_scatter = "Scatterplot of Star Flux Signals"
title_line = "Line Plot of Star Flux Signals"
color='black'
# store target column as variable
elif label_col is not None:
label = signal[label_col]
# for labeled timeseries
if label == 1:
cls = classes[0]
cn = class_names[0]
color='red'
elif label == 2:
cls = classes[1]
cn = class_names[1]
color='blue'
#create appropriate title acc to class_names
title_scatter = f"Scatterplot for Star Flux Signal: {cn}"
title_line = f"Line Plot for Star Flux Signal: {cn}"
# Set x and y axis labels according to units
# if the units are unknown, we will default to "Flux"
if y_units == None:
y_units = 'Flux'
else:
y_units = y_units
# it is assumed this is a timeseries, default to "time"
if x_units == None:
x_units = 'Time'
else:
x_units = x_units
# Scatter Plot
plt.figure(figsize=figsize)
plt.scatter(pd.Series([i for i in range(1, len(signal))]),
signal[1:], marker=4, color=color, alpha=0.7)
plt.ylabel(y_units)
plt.xlabel(x_units)
plt.title(title_scatter)
plt.show();
# Line Plot
plt.figure(figsize=figsize)
plt.plot(pd.Series([i for i in range(1, len(signal))]),
signal[1:], color=color, alpha=0.7)
plt.ylabel(y_units)
plt.xlabel(x_units)
plt.title(title_line)
plt.show();
# Using Numpy instead of Pandas to create the 1-dimensional arrays
def numpy_train_test_split(data_folder, train_set, test_set):
"""
create target classes for training and test data using numpy
"""
import numpy as np
train = np.loadtxt(data_folder+train_set, skiprows=1, delimiter=',')
x_train = train[:, 1:]
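# column 0 of the CSV holds the class label (1 or 2); subtracting 1 below maps it
# to {0, 1} for binary classification, and np.newaxis keeps a 2-D column shape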
y_train = train[:, 0, np.newaxis] - 1.
test = np.loadtxt(data_folder+test_set, skiprows=1, delimiter=',')
x_test = test[:, 1:]
y_test = test[:, 0, np.newaxis] - 1.
return x_train, y_train, x_test, y_test
def zero_scaler(x_train, x_test):
"""
Scales each observation of an array to zero mean and unit variance.
Takes array for train and test data separately.
"""
import numpy as np
x_train = ((x_train - np.mean(x_train, axis=1).reshape(-1,1)) /
np.std(x_train, axis=1).reshape(-1,1))
x_test = ((x_test - np.mean(x_test, axis=1).reshape(-1,1)) /
np.std(x_test, axis=1).reshape(-1,1))
return x_train, x_test
def time_filter(x_train, x_test, step_size=None, axis=2):
"""
Adds an input corresponding to the running average over a set number
of time steps. This helps the neural network to ignore high frequency
noise by passing in a uniform 1-D filter and stacking the arrays.
**ARGS
step_size: integer, # timesteps for 1D filter. defaults to 200
axis: which axis to stack the arrays
"""
import numpy as np
from scipy.ndimage.filters import uniform_filter1d
if step_size is None:
step_size=200
train_filter = uniform_filter1d(x_train, axis=1, size=step_size)
test_filter = uniform_filter1d(x_test, axis=1, size=step_size)
x_train = np.stack([x_train, train_filter], axis=2)
x_test = np.stack([x_test, test_filter], axis=2)
# x_train = np.stack([x_train, uniform_filter1d(x_train, axis=1,
# size=time_steps)], axis=2)
# x_test = np.stack([x_test, uniform_filter1d(x_test, axis=1,
# size=time_steps)], axis=2)
return x_train, x_test
def batch_maker(x_train, y_train, batch_size=32):
"""
Gives equal number of positive and negative samples rotating randomly
generator: A generator or an instance of `keras.utils.Sequence`
The output of the generator must be either
- a tuple `(inputs, targets)`
- a tuple `(inputs, targets, sample_weights)`.
This tuple (a single output of the generator) makes a single
batch. Therefore, all arrays in this tuple must have the same
length (equal to the size of this batch). Different batches may have
different sizes.
For example, the last batch of the epoch
is commonly smaller than the others, if the size of the dataset
is not divisible by the batch size.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
"""
half_batch = batch_size // 2
# Returns a new array of given shape and type, without initializing entries.
# x_train.shape = (5087, 3197, 2)
x_batch = np.empty((batch_size, x_train.shape[1], x_train.shape[2]), dtype='float32')
#y_train.shape = (5087, 1)
y_batch = np.empty((batch_size, y_train.shape[1]), dtype='float32')
pos_idx = np.where(y_train[:,0] == 1.)[0]
neg_idx = np.where(y_train[:,0] == 0.)[0]
# rotating each of the samples randomly
while True:
np.random.shuffle(pos_idx)
np.random.shuffle(neg_idx)
x_batch[:half_batch] = x_train[pos_idx[:half_batch]]
x_batch[half_batch:] = x_train[neg_idx[half_batch:batch_size]]
y_batch[:half_batch] = y_train[pos_idx[:half_batch]]
y_batch[half_batch:] = y_train[neg_idx[half_batch:batch_size]]
for i in range(batch_size):
sz = np.random.randint(x_batch.shape[1])
x_batch[i] = np.roll(x_batch[i], sz, axis = 0)
yield x_batch, y_batch
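# Illustrative (commented-out) usage with Keras fit_generator; the model, x_train,
# y_train, and x_test/y_test names below are assumed to be defined elsewhere:
# history = model.fit_generator(batch_maker(x_train, y_train, batch_size=32),
#                               steps_per_epoch=x_train.shape[0] // 32,
#                               validation_data=(x_test, y_test),
#                               epochs=5, verbose=2)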
# def scikit_keras(build_fn=None, compiler=None, params=None, batch_size=32):
# """
# Builds, compiles and fits a keras model
# Takes in dictionaries of parameters for both compiler and
# fit_generator.
# *ARGS
# build_fn: build function for creating model, can also pass in a model
# compiler : dict of paramaters for model.compile()
# params : dict of parameters for model.fit_generator
# note: batch
# """
# # set default parameters if not made explicit
# # BUILD vars
# if build_fn:
# model=build_fn
# else:
# model = keras_1D(model=Sequential(), kernel_size=11, activation='relu',
# input_shape=x_train.shape[1:], strides=4)
# # COMPILE vars
# if compiler:
# optimizer=compiler['optimizer']
# learning_rate=compiler['learning_rate']
# loss=compiler['loss']
# metrics=compiler['metrics']
# else:
# optimizer=Adam
# learning_rate=1e-5
# loss='binary_crossentropy'
# metrics=['accuracy']
# ##### COMPILE AND FIT #####
# model.compile(optimizer=optimizer(learning_rate), loss=loss,
# metrics=metrics)
# # HISTORY vars
# # if generator is None:
# # generator = batch_maker(x_train, y_train, batch_size)
# if params:
# validation_data = params['validation_data']
# verbose = params['verbose']
# epochs = params['epochs']
# steps_per_epoch = params['steps_per_epoch']
# else:
# validation_data = (x_test, y_test)
# verbose=0
# epochs=5
# steps_per_epoch=x_train.shape[1]//32
# history = model.fit_generator(batch_maker(x_train, y_train, batch_size),
# validation_data=validation_data,
# verbose=verbose, epochs=epochs,
# steps_per_epoch=steps_per_epoch)
# return model, history
# Build these values into a function for efficiency in next model iterations:
def get_preds(x_test,y_test,model=None,**kwargs):
#y_true = (y_test[:, 0] + 0.5).astype("int") # flatten and make integer
#y_hat = model.predict(x_test)[:,0]
y_true = y_test.flatten()
y_pred = model.predict_classes(x_test).flatten() # class predictions
yhat_val = pd.Series(y_pred).value_counts(normalize=False)
yhat_pct = pd.Series(y_pred).value_counts(normalize=True)*100
print(f"y_hat_vals:\n {yhat_val}")
print("\n")
print(f"y_pred:\n {yhat_pct}")
from sklearn import metrics
from sklearn.metrics import accuracy_score
acc = accuracy_score(y_true, y_pred)
print('\nAccuracy Score:', acc)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_true, y_pred, labels=[0,1])
print("\nConfusion Matrix")
display(cm)
return y_true,y_pred
def plot_keras_history(history,figsize=(10,4),subplot_kws={}):
if hasattr(history,'history'):
history=history.history
acc_keys = list(filter(lambda x: 'acc' in x,history.keys()))
loss_keys = list(filter(lambda x: 'loss' in x,history.keys()))
fig,axes=plt.subplots(ncols=2,figsize=figsize,**subplot_kws)
axes = axes.flatten()
y_labels= ['Accuracy','Loss']
for a, metric in enumerate([acc_keys,loss_keys]):
for i in range(len(metric)):
ax = pd.Series(history[metric[i]],
name=metric[i]).plot(ax=axes[a],label=metric[i])
[ax.legend() for ax in axes]
[ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(integer=True)) for ax in axes]
[ax.set(xlabel='Epochs') for ax in axes]
plt.suptitle('Model Training Results',y=1.01)
plt.tight_layout()
plt.show()
# PLOT Confusion Matrices
def plot_confusion_matrix(cm, classes=None,
normalize=False,
title='Confusion matrix',cmap=plt.cm.Blues):
import itertools
# Check if normalize is set to True
# If so, normalize the raw confusion matrix before visualizing
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
fig, ax = plt.subplots(figsize=(10,10))
#mask = np.zeros_like(cm, dtype=np.bool)
#idx = np.triu_indices_from(mask)
#mask[idx] = True
plt.imshow(cm, cmap=cmap, aspect='equal')
# Add title and axis labels
plt.title('Confusion Matrix')
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Add appropriate axis scales
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
#ax.set_ylim(len(cm), -.5,.5)
# Text formatting
fmt = '.2f' if normalize else 'd'
# Add labels to each cell
thresh = cm.max() / 2.
# iterate thru matrix and append labels
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment='center',
color='darkgray' if cm[i, j] > thresh else 'black',
size=14, weight='bold')
# Add a legend
plt.colorbar()
plt.show()
def roc_plots(y_test, y_hat):
from sklearn import metrics
from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score
y_true = (y_test[:, 0] + 0.5).astype("int")
fpr, tpr, thresholds = roc_curve(y_true, y_hat)
# Threshold Cutoff for predictions
crossover_index = np.min(np.where(1.-fpr <= tpr))
crossover_cutoff = thresholds[crossover_index]
crossover_specificity = 1.-fpr[crossover_index]
#print("Crossover at {0:.2f} with specificity {1:.2f}".format(crossover_cutoff, crossover_specificity))
plt.plot(thresholds, 1.-fpr)
plt.plot(thresholds, tpr)
plt.title("Crossover at {0:.2f} with specificity {1:.2f}".format(crossover_cutoff, crossover_specificity))
plt.show()
plt.plot(fpr, tpr)
plt.title("ROC area under curve is {0:.2f}".format(roc_auc_score(y_true, y_hat)))
plt.show()
score = roc_auc_score(y_true,y_hat)
print("ROC_AUC SCORE:",score)
#print("ROC area under curve is {0:.2f}".format(roc_auc_score(y_true, y_hat)))
def evaluate_model(x_test, y_test, history=None):
# make predictons using test set
y_true = (y_test[:, 0] + 0.5).astype("int") # flatten and make integer
y_hat = model.predict(x_test)[:,0]
y_pred = model.predict_classes(x_test).flatten() # class predictions
#Plot Model Training Results (PLOT KERAS HISTORY)
from sklearn import metrics
if y_true.ndim>1:
y_true = y_true.argmax(axis=1)
if y_pred.ndim>1:
y_pred = y_pred.argmax(axis=1)
try:
if history is not None:
plot_keras_history(history)
except:
pass
# Print CLASSIFICATION REPORT
num_dashes=20
print('\n')
print('---'*num_dashes)
print('\tCLASSIFICATION REPORT:')
print('---'*num_dashes)
# try:
# print(metrics.classification_report(y_true,y_pred))
#fig = plot_confusion_matrix((y_true,y_pred))
# except Exception as e:
# print(f"[!] Error during model evaluation:\n\t{e}")
from sklearn import metrics
report = metrics.classification_report(y_true,y_pred)
print(report)
# Adding additional metrics not in sklearn's report
from sklearn.metrics import jaccard_score
jaccard = jaccard_score(y_true, y_pred)
print('Jaccard Similarity Score:',jaccard)
# CONFUSION MATRIX
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_true, y_pred, labels=[0,1])
# Plot the (non-normalized) confusion matrix
fig = plot_confusion_matrix(cm, classes=['No Planet', 'Planet'],
normalize=False,
title='Confusion matrix')
plt.show()
# ROC Area Under Curve
roc_plots(y_test, y_hat)
```
#### File: starskope/pyFunc/symmetry_plots.py
```python
from bokeh.plotting import figure
from bokeh.io import output_notebook, push_notebook, show
output_notebook()
plot = figure()
plot.circle([1,2,3], [4,6,5])
handle = show(plot, notebook_handle=True)
# Update the plot title in the earlier cell
plot.title.text = "New Title"
push_notebook(handle=handle)
### OUTPUT NOTEBOOK
output_notebook(notebook_type='jupyter')
#Configure output to a standalone HTML file.
### OUTPUT FILE (HTML)
# Reference notes (copied from the Bokeh docs):
# output_file(filename, title='Bokeh Plot', mode=None, root_dir=None)
# property document: a default Document to use for all output operations.
# property file: a dict with the default configuration for file output (READ ONLY),
# of the form {'filename': ..., 'resources': ..., 'title': ...}
from math import pi
import pandas as pd
from bokeh.io import show
from bokeh.models import BasicTicker, ColorBar, LinearColorMapper, PrintfTickFormatter
from bokeh.plotting import figure
from bokeh.sampledata.unemployment1948 import data
data['Year'] = data['Year'].astype(str)
data = data.set_index('Year')
data.drop('Annual', axis=1, inplace=True)
data.columns.name = 'Month'
years = list(data.index)
months = list(data.columns)
# reshape to 1D array or rates with a month and year for each row.
df = pd.DataFrame(data.stack(), columns=['rate']).reset_index()
# this is the colormap from the original NYTimes plot
colors = ["#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce", "#ddb7b1", "#cc7878", "#933b41", "#550b1d"]
mapper = LinearColorMapper(palette=colors, low=df.rate.min(), high=df.rate.max())
TOOLS = "hover,save,pan,box_zoom,reset,wheel_zoom"
p = figure(title="US Unemployment ({0} - {1})".format(years[0], years[-1]),
x_range=years, y_range=list(reversed(months)),
x_axis_location="above", plot_width=900, plot_height=400,
tools=TOOLS, toolbar_location='below',
tooltips=[('date', '@Month @Year'), ('rate', '@rate%')])
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "5pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = pi / 3
p.rect(x="Year", y="Month", width=1, height=1,
source=df,
fill_color={'field': 'rate', 'transform': mapper},
line_color=None)
color_bar = ColorBar(color_mapper=mapper, major_label_text_font_size="5pt",
ticker=BasicTicker(desired_num_ticks=len(colors)),
formatter=PrintfTickFormatter(format="%d%%"),
label_standoff=6, border_line_color=None, location=(0, 0))
p.add_layout(color_bar, 'right')
show(p) # show the plot
#### MARKERS
from numpy.random import random
from bokeh.plotting import figure, output_file, show
def mscatter(p, x, y, marker):
p.scatter(x, y, marker=marker, size=15,
line_color="navy", fill_color="orange", alpha=0.5)
def mtext(p, x, y, text):
p.text(x, y, text=[text],
text_color="firebrick", text_align="center", text_font_size="10pt")
p = figure(title="Bokeh Markers", toolbar_location=None)
p.grid.grid_line_color = None
p.background_fill_color = "#eeeeee"
p.axis.visible = False
N = 10
mscatter(p, random(N)+2, random(N)+1, "circle")
mscatter(p, random(N)+4, random(N)+1, "square")
mscatter(p, random(N)+6, random(N)+1, "triangle")
mscatter(p, random(N)+8, random(N)+1, "asterisk")
mscatter(p, random(N)+2, random(N)+4, "circle_x")
mscatter(p, random(N)+4, random(N)+4, "square_x")
mscatter(p, random(N)+6, random(N)+4, "inverted_triangle")
mscatter(p, random(N)+8, random(N)+4, "x")
mscatter(p, random(N)+2, random(N)+7, "circle_cross")
mscatter(p, random(N)+4, random(N)+7, "square_cross")
mscatter(p, random(N)+6, random(N)+7, "diamond")
mscatter(p, random(N)+8, random(N)+7, "cross")
mtext(p, 2.5, 0.5, "circle / o")
mtext(p, 4.5, 0.5, "square")
mtext(p, 6.5, 0.5, "triangle")
mtext(p, 8.5, 0.5, "asterisk / *")
mtext(p, 2.5, 3.5, "circle_x / ox")
mtext(p, 4.5, 3.5, "square_x")
mtext(p, 6.5, 3.5, "inverted_triangle")
mtext(p, 8.5, 3.5, "x")
mtext(p, 2.5, 6.5, "circle_cross / o+")
mtext(p, 4.5, 6.5, "square_cross")
mtext(p, 6.5, 6.5, "diamond")
mtext(p, 8.5, 6.5, "cross / +")
output_file("markers.html", title="markers.py example")
show(p) # open a browser
# ========== HEXTILES
# Bokeh can plot hexagonal tiles, which are often used for showing binned aggregations.
# The hex_tile() method takes a size parameter to define the size of the hex grid,
# and axial coordinates to specify which tiles are present.
import numpy as np
from bokeh.io import output_file, show
from bokeh.plotting import figure
from bokeh.util.hex import axial_to_cartesian
output_file("hex_coords.html")
q = np.array([0, 0, 0, -1, -1, 1, 1])
r = np.array([0, -1, 1, 0, 1, -1, 0])
p = figure(plot_width=400, plot_height=400, toolbar_location=None)
p.grid.visible = False
p.hex_tile(q, r, size=1, fill_color=["firebrick"]*3 + ["navy"]*4,
line_color="white", alpha=0.5)
x, y = axial_to_cartesian(q, r, 1, "pointytop")
p.text(x, y, text=["(%d, %d)" % (q,r) for (q, r) in zip(q, r)],
text_baseline="middle", text_align="center")
show(p)
# ==========
# computes counts per bin using the hexbin() function and plots the colormapped counts:
import numpy as np
from bokeh.io import output_file, show
from bokeh.plotting import figure
from bokeh.transform import linear_cmap
from bokeh.util.hex import hexbin
n = 50000
x = np.random.standard_normal(n)
y = np.random.standard_normal(n)
bins = hexbin(x, y, 0.1)
p = figure(tools="wheel_zoom,reset", match_aspect=True, background_fill_color='#440154')
p.grid.visible = False
p.hex_tile(q="q", r="r", size=0.1, line_color=None, source=bins,
fill_color=linear_cmap('counts', 'Viridis256', 0, max(bins.counts)))
output_file("hex_tile.html")
show(p)
import numpy as np
from bokeh.io import output_file, show
from bokeh.models import HoverTool
from bokeh.plotting import figure
n = 500
x = 2 + 2*np.random.standard_normal(n)
y = 2 + 2*np.random.standard_normal(n)
p = figure(title="Hexbin for 500 points", match_aspect=True,
tools="wheel_zoom,reset", background_fill_color='#440154')
p.grid.visible = False
r, bins = p.hexbin(x, y, size=0.5, hover_color="pink", hover_alpha=0.8)
p.circle(x, y, color="white", size=1)
p.add_tools(HoverTool(
tooltips=[("count", "@c"), ("(q,r)", "(@q, @r)")],
mode="mouse", point_policy="follow_mouse", renderers=[r]
))
output_file("hexbin.html")
show(p)
###### LINE SEGMENTS - GROUPS
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.segment(x0=[1, 2, 3], y0=[1, 2, 3],
x1=[1, 2, 3], y1=[1.2, 2.5, 3.7],
color="#F4A582", line_width=3)
show(plot)
##### SQUARE CROSSES
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.square_cross(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#7FC97F",fill_color=None, line_width=2)
show(plot)
#### TRIANGLES
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.triangle(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", line_width=2)
show(plot)
### CIRCLE CROSS
# Reference signature (from the Bokeh docs):
# circle_cross(x, y, size=4, angle=0.0, *, angle_units='rad',
#              fill_alpha=1.0, fill_color='gray',
#              line_alpha=1.0, line_cap='butt', line_color='black',
#              line_dash=[], line_dash_offset=0, line_join='bevel',
#              line_width=1, name=None, tags=[], **kwargs)
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.circle_cross(x=[1,2,3], y=[4,5,6], size=20,
color="#FB8072", fill_alpha=0.2, line_width=2)
show(plot)
#### CIRCLE X
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.circle_x(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#DD1C77", fill_alpha=0.2)
show(plot)
# CROSS
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#E6550D", line_width=2)
show(plot)
#### COLOR THEME: DARK MINIMAL
from bokeh.plotting import figure, output_file, show
from bokeh.themes import built_in_themes
from bokeh.io import curdoc
x = [1, 2, 3, 4, 5]
y = [6, 7, 6, 4, 5]
output_file("caliber.html")
curdoc().theme = 'caliber'
p = figure(title='caliber', plot_width=300, plot_height=300)
p.line(x, y)
show(p)
# Reference (from the Bokeh docs):
# cosine(w: float, A: float = 1, phi: float = 0, offset: float = 0) -> partial[Callable[[], None]]
# Return a driver function that can advance a sequence of cosine values.
# value = A * cos(w*i + phi) + offset
# Parameters
# w (float) – a frequency for the cosine driver
# A (float) – an amplitude for the cosine driver
# phi (float) – a phase offset to start the cosine driver with
# offset (float) – a global offset to add to the driver values
#### SINE
# sine(w: float, A: float = 1, phi: float = 0, offset: float = 0) -> partial[Callable[[], None]]
# Return a driver function that can advance a sequence of sine values.
# value = A * sin(w*i + phi) + offset
# Parameters
# w (float) – a frequency for the sine driver
# A (float) – an amplitude for the sine driver
# phi (float) – a phase offset to start the sine driver with
# offset (float) – a global offset to add to the driver values
# SINE WAVE
# https://demo.bokeh.org/sliders
# https://docs.bokeh.org/en/latest/docs/gallery/slider.html
import numpy as np
from bokeh.layouts import column, row
from bokeh.models import CustomJS, Slider
from bokeh.plotting import ColumnDataSource, figure, output_file, show
x = np.linspace(0, 10, 500)
y = np.sin(x)
source = ColumnDataSource(data=dict(x=x, y=y))
plot = figure(y_range=(-10, 10), plot_width=400, plot_height=400)
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)
amp_slider = Slider(start=0.1, end=10, value=1, step=.1, title="Amplitude")
freq_slider = Slider(start=0.1, end=10, value=1, step=.1, title="Frequency")
phase_slider = Slider(start=0, end=6.4, value=0, step=.1, title="Phase")
offset_slider = Slider(start=-5, end=5, value=0, step=.1, title="Offset")
callback = CustomJS(args=dict(source=source, amp=amp_slider, freq=freq_slider, phase=phase_slider, offset=offset_slider),
code="""
const data = source.data;
const A = amp.value;
const k = freq.value;
const phi = phase.value;
const B = offset.value;
const x = data['x']
const y = data['y']
for (var i = 0; i < x.length; i++) {
y[i] = B + A*Math.sin(k*x[i]+phi);
}
source.change.emit();
""")
amp_slider.js_on_change('value', callback)
freq_slider.js_on_change('value', callback)
phase_slider.js_on_change('value', callback)
offset_slider.js_on_change('value', callback)
layout = row(
plot,
column(amp_slider, freq_slider, phase_slider, offset_slider),
)
output_file("slider.html", title="slider.py example")
show(layout)
import numpy as np
from bokeh.layouts import gridplot
from bokeh.plotting import figure, output_file, show
x = np.linspace(0, 4*np.pi, 100)
y = np.sin(x)
TOOLS = "pan,wheel_zoom,box_zoom,reset,save,box_select"
p1 = figure(title="Legend Example", tools=TOOLS)
p1.circle(x, y, legend_label="sin(x)")
p1.circle(x, 2*y, legend_label="2*sin(x)", color="orange")
p1.circle(x, 3*y, legend_label="3*sin(x)", color="green")
p1.legend.title = 'Example Title'
p2 = figure(title="Another Legend Example", tools=TOOLS)
p2.circle(x, y, legend_label="sin(x)")
p2.line(x, y, legend_label="sin(x)")
p2.line(x, 2*y, legend_label="2*sin(x)", line_dash=(4, 4), line_color="orange", line_width=2)
p2.square(x, 3*y, legend_label="3*sin(x)", fill_color=None, line_color="green")
p2.line(x, 3*y, legend_label="3*sin(x)", line_color="green")
output_file("legend.html", title="legend.py example")
show(gridplot([p1, p2], ncols=2, plot_width=400, plot_height=400)) # open a browser
# https://attractors.pyviz.demo.anaconda.com/attractors_panel
# https://demo.bokeh.org/stocks
# Interactive Weather Statistics DEMO
# https://demo.bokeh.org/weather
# https://docs.bokeh.org/en/latest/docs/gallery/burtin.html
from collections import OrderedDict
from io import StringIO
from math import log, sqrt
import numpy as np
import pandas as pd
from bokeh.plotting import figure, output_file, show
antibiotics = """
bacteria, penicillin, streptomycin, neomycin, gram
Mycobacterium tuberculosis, 800, 5, 2, negative
Salmonella schottmuelleri, 10, 0.8, 0.09, negative
Proteus vulgaris, 3, 0.1, 0.1, negative
Klebsiella pneumoniae, 850, 1.2, 1, negative
Brucella abortus, 1, 2, 0.02, negative
Pseudomonas aeruginosa, 850, 2, 0.4, negative
Escherichia coli, 100, 0.4, 0.1, negative
Salmonella (Eberthella) typhosa, 1, 0.4, 0.008, negative
Aerobacter aerogenes, 870, 1, 1.6, negative
Brucella antracis, 0.001, 0.01, 0.007, positive
Streptococcus fecalis, 1, 1, 0.1, positive
Staphylococcus aureus, 0.03, 0.03, 0.001, positive
Staphylococcus albus, 0.007, 0.1, 0.001, positive
Streptococcus hemolyticus, 0.001, 14, 10, positive
Streptococcus viridans, 0.005, 10, 40, positive
Diplococcus pneumoniae, 0.005, 11, 10, positive
"""
drug_color = OrderedDict([
("Penicillin", "#0d3362"),
("Streptomycin", "#c64737"),
("Neomycin", "black" ),
])
gram_color = OrderedDict([
("negative", "#e69584"),
("positive", "#aeaeb8"),
])
df = pd.read_csv(StringIO(antibiotics),
skiprows=1,
skipinitialspace=True,
engine='python')
width = 800
height = 800
inner_radius = 90
outer_radius = 300 - 10
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr
def rad(mic):
return a * np.sqrt(np.log(mic * 1E4)) + b
big_angle = 2.0 * np.pi / (len(df) + 1)
small_angle = big_angle / 7
p = figure(plot_width=width, plot_height=height, title="",
x_axis_type=None, y_axis_type=None,
x_range=(-420, 420), y_range=(-420, 420),
min_border=0, outline_line_color="black",
background_fill_color="#f0e1d2")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
# annular wedges
angles = np.pi/2 - big_angle/2 - df.index.to_series()*big_angle
colors = [gram_color[gram] for gram in df.gram]
p.annular_wedge(
0, 0, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,
)
# small wedges
p.annular_wedge(0, 0, inner_radius, rad(df.penicillin),
-big_angle+angles+5*small_angle, -big_angle+angles+6*small_angle,
color=drug_color['Penicillin'])
p.annular_wedge(0, 0, inner_radius, rad(df.streptomycin),
-big_angle+angles+3*small_angle, -big_angle+angles+4*small_angle,
color=drug_color['Streptomycin'])
p.annular_wedge(0, 0, inner_radius, rad(df.neomycin),
-big_angle+angles+1*small_angle, -big_angle+angles+2*small_angle,
color=drug_color['Neomycin'])
# circular axes and lables
labels = np.power(10.0, np.arange(-3, 4))
radii = a * np.sqrt(np.log(labels * 1E4)) + b
p.circle(0, 0, radius=radii, fill_color=None, line_color="white")
p.text(0, radii[:-1], [str(r) for r in labels[:-1]],
text_font_size="8pt", text_align="center", text_baseline="middle")
# radial axes
p.annular_wedge(0, 0, inner_radius-10, outer_radius+10,
-big_angle+angles, -big_angle+angles, color="black")
# bacteria labels
xr = radii[0]*np.cos(np.array(-big_angle/2 + angles))
yr = radii[0]*np.sin(np.array(-big_angle/2 + angles))
label_angle=np.array(-big_angle/2+angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
p.text(xr, yr, df.bacteria, angle=label_angle,
text_font_size="9pt", text_align="center", text_baseline="middle")
# OK, these hand drawn legends are pretty clunky, will be improved in future release
p.circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)
p.text([-30, -30], [-370, -390], text=["Gram-" + gr for gr in gram_color.keys()],
text_font_size="7pt", text_align="left", text_baseline="middle")
p.rect([-40, -40, -40], [18, 0, -18], width=30, height=13,
color=list(drug_color.values()))
p.text([-15, -15, -15], [18, 0, -18], text=list(drug_color),
text_font_size="9pt", text_align="left", text_baseline="middle")
output_file("burtin.html", title="burtin.py example")
show(p)
import colorcet as cc
from numpy import linspace
from scipy.stats.kde import gaussian_kde
from bokeh.io import output_file, show
from bokeh.models import ColumnDataSource, FixedTicker, PrintfTickFormatter
from bokeh.plotting import figure
from bokeh.sampledata.perceptions import probly
output_file("ridgeplot.html")
def ridge(category, data, scale=20):
return list(zip([category]*len(data), scale*data))
cats = list(reversed(probly.keys()))
palette = [cc.rainbow[i*15] for i in range(17)]
x = linspace(-20,110, 500)
source = ColumnDataSource(data=dict(x=x))
p = figure(y_range=cats, plot_width=900, x_range=(-5, 105), toolbar_location=None)
for i, cat in enumerate(reversed(cats)):
pdf = gaussian_kde(probly[cat])
y = ridge(cat, pdf(x))
source.add(y, cat)
p.patch('x', cat, color=palette[i], alpha=0.6, line_color="black", source=source)
p.outline_line_color = None
p.background_fill_color = "#efefef"
p.xaxis.ticker = FixedTicker(ticks=list(range(0, 101, 10)))
p.xaxis.formatter = PrintfTickFormatter(format="%d%%")
p.ygrid.grid_line_color = None
p.xgrid.grid_line_color = "#dddddd"
p.xgrid.ticker = p.xaxis.ticker
p.axis.minor_tick_line_color = None
p.axis.major_tick_line_color = None
p.axis.axis_line_color = None
p.y_range.range_padding = 0.12
show(p)
### ANSCOMBES QUARTET
import numpy as np
import pandas as pd
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.layouts import column, gridplot
from bokeh.models import (Circle, ColumnDataSource, Div, Grid,
Line, LinearAxis, Plot, Range1d,)
from bokeh.resources import INLINE
from bokeh.util.browser import view
raw_columns=[
[10.0, 8.04, 10.0, 9.14, 10.0, 7.46, 8.0, 6.58],
[8.0, 6.95, 8.0, 8.14, 8.0, 6.77, 8.0, 5.76],
[13.0, 7.58, 13.0, 8.74, 13.0, 12.74, 8.0, 7.71],
[9.0, 8.81, 9.0, 8.77, 9.0, 7.11, 8.0, 8.84],
[11.0, 8.33, 11.0, 9.26, 11.0, 7.81, 8.0, 8.47],
[14.0, 9.96, 14.0, 8.10, 14.0, 8.84, 8.0, 7.04],
[6.0, 7.24, 6.0, 6.13, 6.0, 6.08, 8.0, 5.25],
[4.0, 4.26, 4.0, 3.10, 4.0, 5.39, 19.0, 12.5],
[12.0, 10.84, 12.0, 9.13, 12.0, 8.15, 8.0, 5.56],
[7.0, 4.82, 7.0, 7.26, 7.0, 6.42, 8.0, 7.91],
[5.0, 5.68, 5.0, 4.74, 5.0, 5.73, 8.0, 6.89]]
quartet = pd.DataFrame(data=raw_columns, columns=
['Ix','Iy','IIx','IIy','IIIx','IIIy','IVx','IVy'])
circles_source = ColumnDataSource(
data = dict(
xi = quartet['Ix'],
yi = quartet['Iy'],
xii = quartet['IIx'],
yii = quartet['IIy'],
xiii = quartet['IIIx'],
yiii = quartet['IIIy'],
xiv = quartet['IVx'],
yiv = quartet['IVy'],
)
)
x = np.linspace(-0.5, 20.5, 10)
y = 3 + 0.5 * x
lines_source = ColumnDataSource(data=dict(x=x, y=y))
xdr = Range1d(start=-0.5, end=20.5)
ydr = Range1d(start=-0.5, end=20.5)
def make_plot(title, xname, yname):
plot = Plot(x_range=xdr, y_range=ydr, plot_width=400, plot_height=400,
background_fill_color='#efefef')
plot.title.text = title
xaxis = LinearAxis(axis_line_color=None)
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis(axis_line_color=None)
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
line = Line(x='x', y='y', line_color="#666699", line_width=2)
plot.add_glyph(lines_source, line)
circle = Circle(
x=xname, y=yname, size=12,
fill_color="#cc6633", line_color="#cc6633", fill_alpha=0.5
)
plot.add_glyph(circles_source, circle)
return plot
#where will this comment show up
I = make_plot('I', 'xi', 'yi')
II = make_plot('II', 'xii', 'yii')
III = make_plot('III', 'xiii', 'yiii')
IV = make_plot('IV', 'xiv', 'yiv')
grid = gridplot([[I, II], [III, IV]], toolbar_location=None)
div = Div(text="""
<h1>Anscombe's Quartet</h1>
<p>Anscombe's quartet is a collection of four small datasets that have nearly
identical simple descriptive statistics (mean, variance, correlation, and linear
regression lines), yet appear very different when graphed.
</p>
""")
doc = Document()
doc.add_root(column(div, grid, sizing_mode="scale_width"))
if __name__ == "__main__":
doc.validate()
filename = "anscombe.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Anscombe's Quartet"))
print("Wrote %s" % filename)
view(filename)
### CATEGORICAL DOT PLOT
from bokeh.layouts import row
from bokeh.plotting import figure, output_file, show
factors = ["a", "b", "c", "d", "e", "f", "g", "h"]
x = [50, 40, 65, 10, 25, 37, 80, 60]
dot = figure(title="Categorical Dot Plot", tools="", toolbar_location=None,
y_range=factors, x_range=[0,100])
dot.segment(0, factors, x, factors, line_width=2, line_color="green", )
dot.circle(x, factors, size=15, fill_color="orange", line_color="green", line_width=3, )
factors = ["foo 123", "bar:0.2", "baz-10"]
x = ["foo 123", "foo 123", "foo 123", "bar:0.2", "bar:0.2", "bar:0.2", "baz-10", "baz-10", "baz-10"]
y = ["foo 123", "bar:0.2", "baz-10", "foo 123", "bar:0.2", "baz-10", "foo 123", "bar:0.2", "baz-10"]
colors = [
"#0B486B", "#79BD9A", "#CFF09E",
"#79BD9A", "#0B486B", "#79BD9A",
"#CFF09E", "#79BD9A", "#0B486B"
]
hm = figure(title="Categorical Heatmap", tools="hover", toolbar_location=None,
x_range=factors, y_range=factors)
hm.rect(x, y, color=colors, width=1, height=1)
output_file("categorical.html", title="categorical.py example")
show(row(hm, dot, sizing_mode="scale_width")) # open a browser
#### RGBA SQUARE
import numpy as np
from bokeh.plotting import figure, output_file, show
N = 20
img = np.empty((N,N), dtype=np.uint32)
view = img.view(dtype=np.uint8).reshape((N, N, 4))
for i in range(N):
for j in range(N):
view[i, j, 0] = int(i/N*255)
view[i, j, 1] = 158
view[i, j, 2] = int(j/N*255)
view[i, j, 3] = 255
p = figure(tooltips=[("x", "$x"), ("y", "$y"), ("value", "@image")])
p.x_range.range_padding = p.y_range.range_padding = 0
# must give a vector of images
p.image_rgba(image=[img], x=0, y=0, dw=10, dh=10)
output_file("image_rgba.html", title="image_rgba.py example")
show(p) # open a browser
### PERIODIC TABLE
from bokeh.io import output_file, show
from bokeh.plotting import figure
from bokeh.sampledata.periodic_table import elements
from bokeh.transform import dodge, factor_cmap
output_file("periodic.html")
periods = ["I", "II", "III", "IV", "V", "VI", "VII"]
groups = [str(x) for x in range(1, 19)]
df = elements.copy()
df["atomic mass"] = df["atomic mass"].astype(str)
df["group"] = df["group"].astype(str)
df["period"] = [periods[x-1] for x in df.period]
df = df[df.group != "-"]
df = df[df.symbol != "Lr"]
df = df[df.symbol != "Lu"]
cmap = {
"alkali metal" : "#a6cee3",
"alkaline earth metal" : "#1f78b4",
"metal" : "#d93b43",
"halogen" : "#999d9a",
"metalloid" : "#e08d49",
"noble gas" : "#eaeaea",
"nonmetal" : "#f1d4Af",
"transition metal" : "#599d7A",
}
TOOLTIPS = [
("Name", "@name"),
("Atomic number", "@{atomic number}"),
("Atomic mass", "@{atomic mass}"),
("Type", "@metal"),
("CPK color", "$color[hex, swatch]:CPK"),
("Electronic configuration", "@{electronic configuration}"),
]
p = figure(title="Periodic Table (omitting LA and AC Series)", plot_width=1000, plot_height=450,
x_range=groups, y_range=list(reversed(periods)),
tools="hover", toolbar_location=None, tooltips=TOOLTIPS)
r = p.rect("group", "period", 0.95, 0.95, source=df, fill_alpha=0.6, legend_field="metal",
color=factor_cmap('metal', palette=list(cmap.values()), factors=list(cmap.keys())))
text_props = {"source": df, "text_align": "left", "text_baseline": "middle"}
x = dodge("group", -0.4, range=p.x_range)
p.text(x=x, y="period", text="symbol", text_font_style="bold", **text_props)
p.text(x=x, y=dodge("period", 0.3, range=p.y_range), text="atomic number",
text_font_size="8pt", **text_props)
p.text(x=x, y=dodge("period", -0.35, range=p.y_range), text="name",
text_font_size="5pt", **text_props)
p.text(x=x, y=dodge("period", -0.2, range=p.y_range), text="atomic mass",
text_font_size="5pt", **text_props)
p.text(x=["3", "3"], y=["VI", "VII"], text=["LA", "AC"], text_align="center", text_baseline="middle")
p.outline_line_color = None
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_standoff = 0
p.legend.orientation = "horizontal"
p.legend.location ="top_center"
p.hover.renderers = [r] # only hover element boxes
show(p)
from typing import Any, List, Tuple
import numpy as np
from bokeh.layouts import gridplot
from bokeh.plotting import figure, output_file, show
def streamlines(x: np.ndarray, y, u, v, density: float = 1) -> Tuple[List[Any], List[Any]]:
''' Return streamlines of a vector flow.
* x and y are 1d arrays defining an *evenly spaced* grid.
* u and v are 2d arrays (shape [y,x]) giving velocities.
* density controls the closeness of the streamlines.
'''
## Set up some constants - size of the grid used.
NGX = len(x)
NGY = len(y)
## Constants used to convert between grid index coords and user coords.
DX = x[1]-x[0]
DY = y[1]-y[0]
XOFF = x[0]
YOFF = y[0]
## Now rescale velocity onto axes-coordinates
u = u / (x[-1]-x[0])
v = v / (y[-1]-y[0])
speed = np.sqrt(u*u+v*v)
## s (path length) will now be in axes-coordinates, but we must
## rescale u for integrations.
u *= NGX
v *= NGY
## Now u and v in grid-coordinates.
NBX = int(30*density)
NBY = int(30*density)
blank = np.zeros((NBY,NBX))
bx_spacing = NGX/float(NBX-1)
by_spacing = NGY/float(NBY-1)
def blank_pos(xi, yi):
return int((xi / bx_spacing) + 0.5), \
int((yi / by_spacing) + 0.5)
def value_at(a, xi, yi):
        if isinstance(xi, np.ndarray):
            x = xi.astype(int)
            y = yi.astype(int)
        else:
            x = int(xi)
            y = int(yi)
a00 = a[y,x]
a01 = a[y,x+1]
a10 = a[y+1,x]
a11 = a[y+1,x+1]
xt = xi - x
yt = yi - y
a0 = a00*(1-xt) + a01*xt
a1 = a10*(1-xt) + a11*xt
return a0*(1-yt) + a1*yt
def rk4_integrate(x0, y0):
## This function does RK4 forward and back trajectories from
## the initial conditions, with the odd 'blank array'
## termination conditions. TODO tidy the integration loops.
def f(xi, yi):
dt_ds = 1./value_at(speed, xi, yi)
ui = value_at(u, xi, yi)
vi = value_at(v, xi, yi)
return ui*dt_ds, vi*dt_ds
def g(xi, yi):
dt_ds = 1./value_at(speed, xi, yi)
ui = value_at(u, xi, yi)
vi = value_at(v, xi, yi)
return -ui*dt_ds, -vi*dt_ds
check = lambda xi, yi: xi>=0 and xi<NGX-1 and yi>=0 and yi<NGY-1
bx_changes = []
by_changes = []
## Integrator function
def rk4(x0, y0, f):
ds = 0.01 #min(1./NGX, 1./NGY, 0.01)
stotal = 0
xi = x0
yi = y0
xb, yb = blank_pos(xi, yi)
xf_traj = []
yf_traj = []
while check(xi, yi):
# Time step. First save the point.
xf_traj.append(xi)
yf_traj.append(yi)
# Next, advance one using RK4
try:
k1x, k1y = f(xi, yi)
k2x, k2y = f(xi + .5*ds*k1x, yi + .5*ds*k1y)
k3x, k3y = f(xi + .5*ds*k2x, yi + .5*ds*k2y)
k4x, k4y = f(xi + ds*k3x, yi + ds*k3y)
except IndexError:
# Out of the domain on one of the intermediate steps
break
xi += ds*(k1x+2*k2x+2*k3x+k4x) / 6.
yi += ds*(k1y+2*k2y+2*k3y+k4y) / 6.
# Final position might be out of the domain
if not check(xi, yi): break
stotal += ds
# Next, if s gets to thres, check blank.
new_xb, new_yb = blank_pos(xi, yi)
if new_xb != xb or new_yb != yb:
# New square, so check and colour. Quit if required.
if blank[new_yb,new_xb] == 0:
blank[new_yb,new_xb] = 1
bx_changes.append(new_xb)
by_changes.append(new_yb)
xb = new_xb
yb = new_yb
else:
break
if stotal > 2:
break
return stotal, xf_traj, yf_traj
integrator = rk4
sf, xf_traj, yf_traj = integrator(x0, y0, f)
sb, xb_traj, yb_traj = integrator(x0, y0, g)
stotal = sf + sb
x_traj = xb_traj[::-1] + xf_traj[1:]
y_traj = yb_traj[::-1] + yf_traj[1:]
## Tests to check length of traj. Remember, s in units of axes.
if len(x_traj) < 1: return None
if stotal > .2:
initxb, inityb = blank_pos(x0, y0)
blank[inityb, initxb] = 1
return x_traj, y_traj
else:
for xb, yb in zip(bx_changes, by_changes):
blank[yb, xb] = 0
return None
## A quick function for integrating trajectories if blank==0.
trajectories = []
def traj(xb, yb):
if xb < 0 or xb >= NBX or yb < 0 or yb >= NBY:
return
if blank[yb, xb] == 0:
t = rk4_integrate(xb*bx_spacing, yb*by_spacing)
if t is not None:
trajectories.append(t)
## Now we build up the trajectory set. I've found it best to look
## for blank==0 along the edges first, and work inwards.
for indent in range((max(NBX,NBY))//2):
for xi in range(max(NBX,NBY)-2*indent):
traj(xi+indent, indent)
traj(xi+indent, NBY-1-indent)
traj(indent, xi+indent)
traj(NBX-1-indent, xi+indent)
xs = [np.array(t[0])*DX+XOFF for t in trajectories]
ys = [np.array(t[1])*DY+YOFF for t in trajectories]
return xs, ys
xx = np.linspace(-3, 3, 100)
yy = np.linspace(-3, 3, 100)
Y, X = np.meshgrid(xx, yy)
U = -1 - X**2 + Y
V = 1 + X - Y**2
speed = np.sqrt(U*U + V*V)
theta = np.arctan(V/U)
x0 = X[::2, ::2].flatten()
y0 = Y[::2, ::2].flatten()
length = speed[::2, ::2].flatten()/40
angle = theta[::2, ::2].flatten()
x1 = x0 + length * np.cos(angle)
y1 = y0 + length * np.sin(angle)
xs, ys = streamlines(xx, yy, U.T, V.T, density=2)
cm = np.array(["#C7E9B4", "#7FCDBB", "#41B6C4", "#1D91C0", "#225EA8", "#0C2C84"])
ix = ((length-length.min())/(length.max()-length.min())*5).astype('int')
colors = cm[ix]
p1 = figure(x_range=(-3,3 ), y_range=(-3, 3))
p1.segment(x0, y0, x1, y1, color=colors, line_width=2)
p2 = figure(x_range=p1.x_range, y_range=p1.y_range)
p2.multi_line(xs, ys, color="#ee6666", line_width=2, line_alpha=0.8)
output_file("vector.html", title="vector.py example")
show(gridplot([[p1,p2]], plot_width=400, plot_height=400)) # open a browser
import numpy as np
from bokeh.plotting import figure, output_file, show
from bokeh.sampledata.les_mis import data
nodes = data['nodes']
names = [node['name'] for node in sorted(data['nodes'], key=lambda x: x['group'])]
N = len(nodes)
counts = np.zeros((N, N))
for link in data['links']:
counts[link['source'], link['target']] = link['value']
counts[link['target'], link['source']] = link['value']
colormap = ["#444444", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99",
"#e31a1c", "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a"]
xname = []
yname = []
color = []
alpha = []
for i, node1 in enumerate(nodes):
for j, node2 in enumerate(nodes):
xname.append(node1['name'])
yname.append(node2['name'])
alpha.append(min(counts[i,j]/4.0, 0.9) + 0.1)
if node1['group'] == node2['group']:
color.append(colormap[node1['group']])
else:
color.append('lightgrey')
data=dict(
xname=xname,
yname=yname,
colors=color,
alphas=alpha,
count=counts.flatten(),
)
p = figure(title="Les Mis Occurrences",
x_axis_location="above", tools="hover,save",
x_range=list(reversed(names)), y_range=names,
tooltips = [('names', '@yname, @xname'), ('count', '@count')])
p.plot_width = 800
p.plot_height = 800
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "5pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = np.pi/3
p.rect('xname', 'yname', 0.9, 0.9, source=data,
color='colors', alpha='alphas', line_color=None,
hover_line_color='black', hover_color='colors')
output_file("les_mis.html", title="les_mis.py example")
show(p) # show the plot
``` |
{
"source": "jiry17/PolyGen",
"score": 2
} |
#### File: PolyGen/exp/eusolver_runner.py
```python
import os
import random
from parser import sexp_from_string
from config import KMemoryLimit, KTimeLimit, KExampleLimit
import time
from util import verify, flush_line
eusolver_path = "../recommend/my-euphony"
def parse_result(result_file):
if not os.path.exists(result_file): return None
with open(result_file, "r") as inp:
oup_lines = inp.readlines()
if len(oup_lines) <= 1: return None
#print(oup_lines)
#input()
example_num = int(oup_lines[0][:-1])
program = sexp_from_string("".join(oup_lines[1:]))
var_map = {name: i for i, (name, _) in enumerate(program[2])}
program = program[4]
return example_num, program, var_map
def run_eusolver_with_file(file_path, oup_name=None):
if oup_name is None:
oup_name = str(random.randint(0, 10 ** 9)) + ".out"
oup_file = "/tmp/" + oup_name
command = ["cd", eusolver_path, ";", ". bin/setenv;" ,'ulimit -v ' + str(KMemoryLimit) + ';' + "timeout " + str(KTimeLimit), "./bin/run_int_eusolver", file_path, ">", oup_file]
command = " ".join(command)
start_time = time.time()
os.system(command)
#exit(0)
end_time = time.time()
result = parse_result(oup_file)
os.system("rm " + oup_file)
return end_time - start_time, result
def run_eusolver_cegis(ps_result, is_cegis=True):
assert is_cegis
bm = ps_result["bm"]
name = str(random.randint(0, 10 ** 9))
inp_name = name + ".in"
oup_name = name + ".out"
inp_path = "/tmp/" + inp_name
with open(inp_path, "w") as oup:
oup.write(bm)
time_cost, result = run_eusolver_with_file(inp_path, oup_name)
os.system("rm " + inp_path)
if result is None: return None
return time_cost, result[0], result[1]
def run_eusolver_with_example(p_res, example_space):
name = str(random.randint(0, 10 ** 9))
inp_name = name + ".in"
oup_name = name + ".out"
inp_path = "/tmp/" + inp_name
with open(inp_path, "w") as oup:
oup.write(p_res["builder"](example_space) + "\n")
time_cost, result = run_eusolver_with_file(inp_path, oup_name)
os.system("rm " + inp_path)
if result is None: return None
return time_cost, result[0], result[1], result[2]
def run_eusolver_random(ps_result, is_cegis = False):
if is_cegis: return 1 #return run_eusolver_cegis(ps_result)
l = 1
r = 1
example_space = []
g = ps_result["gen"]
def evaluate(n):
while len(example_space) < n: example_space.append(g())
return run_eusolver_with_example(ps_result, example_space[:n])
def check(res):
if res is None: return 0
program, param = res[2], res[3]
if verify(program, param, ps_result["cons"], False)[0] is None: return 1
return 2
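    # Search strategy: exponentially grow the number of random examples (r)
    # until the synthesized program verifies, then binary-search in (l, r]
    # for the smallest example count that still yields a correct program.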
while True:
flush_line()
print("\rtest %d" % (r), end="\n")
res = evaluate(r)
status = check(res)
if status == 1: break
if r == KExampleLimit or status == 0: return None
l = r + 1
r = min(r * 2, KExampleLimit)
ans = r
while l < r:
flush_line()
print("\rsearch %d %d" % (l, r), end="\n")
mid = (l + r) // 2
current_res = evaluate(mid)
status = check(current_res)
if status == 1:
r = mid
ans = mid
res = current_res
else:
l = mid + 1
return res[0], ans, res[2]
```
#### File: PolyGen/exp/runner.py
```python
from util import *
from config import KRepeatNum
from parser import benchmark_parser
from example_model import *
import json
import time
import os
def _collect_benchmark(folder):
benchmark_list = os.listdir(folder)
return [name for name in benchmark_list if ".sl" in name]
def run_cegis(parse_result, run_with_example):
example_space = []
while True:
program, param = run_with_example(parse_result, example_space)
inp, oup = verify(program, param, parse_result["cons"])
if inp is None: return program, len(example_space)
example_space.append((inp, oup))
def _check_all_fail(result):
if result is None:
return True
for info in result:
if info["status"] != "fail": return False
return True
def run(benchmark_folder, runner, name, clear_cache, skip_failed_cegis):
print("test : " + name)
cache_file = "result_cache/" + name + ".json"
cache = load_cache(cache_file)
random_and_skip = False
if(name[-6:] == "random" and skip_failed_cegis):
random_and_skip = True
cegis_cache_file = "result_cache/" + name[:-6] + "cegis.json"
cegis_cache = load_cache(cegis_cache_file)
if cache is None: cache = {}
if clear_cache:
cache = {}
benchmark_list = reorder_benchmark(_collect_benchmark(benchmark_folder))
is_changed = False
for ind, (type_name, size, benchmark) in enumerate(benchmark_list):
print("run %d/%d: %s" % (ind, len(benchmark_list), benchmark))
if benchmark in cache:
assert len(cache[benchmark]) == KRepeatNum
continue
cache[benchmark] = []
if(random_and_skip and _check_all_fail(cegis_cache.get(benchmark))):
for _ in range(KRepeatNum): cache[benchmark].append({"status": "fail"})
print("skip for cegis fail")
continue
if (ind > 0 and benchmark_list[ind - 1][0] == type_name and _check_all_fail(cache[benchmark_list[ind - 1][2]])):
for _ in range(KRepeatNum): cache[benchmark].append({"status": "fail"})
print("skip for weaker benchmark fail")
continue
parse_result = benchmark_parser(os.path.join(benchmark_folder, benchmark))
parse_result["gen"] = generate_random_example(get_input_generator(type_name, parse_result["param_num"], parse_result["bool_id"]), parse_result["oup"])
parse_result["benchmark_name"] = benchmark
init_res = runner(parse_result, True)
print("finish init")
print(init_res)
if init_res is None:
for _ in range(KRepeatNum): cache[benchmark].append({"status": "fail"})
continue
for _ in range(KRepeatNum):
result = runner(parse_result)
if result is None:
cache[benchmark].append({"status": "fail"})
else:
print(result)
time_cost, example_num, prog = result
cache[benchmark].append({"status": "succeed", "example": example_num, "time": time_cost, "program": prog})
for status in cache[benchmark]:
if status["status"] == "fail":
print(status["status"])
else:
print(status["status"], status["example"], status["time"])
save_cache(cache_file, cache, not is_changed)
is_changed = True
save_cache(cache_file, cache, not is_changed)
return cache
```
#### File: PolyGen/exp/util.py
```python
import random
from config import *
from z3 import *
import time, json
def getRandom():
return random.randint(-KIntMin, KIntMax)
z3_semantics_map = {
"=": lambda x: x[0] == x[1],
"<": lambda x: x[0] < x[1],
">": lambda x: x[0] > x[1],
"<=": lambda x: x[0] <= x[1],
">=": lambda x: x[0] >= x[1],
"=>": lambda x: Implies(x[0], x[1]),
"or": lambda x: Or(*x),
"and": lambda x: And(*x),
"not": lambda x: Not(x[0]),
"+": lambda x: x[0] + x[1],
"-": lambda x: (x[0] - x[1]) if len(x) > 1 else -x[0],
"*": lambda x: x[0] * x[1],
"ite": lambda x: If(x[0], x[1], x[2]),
"let" : lambda x: x[0]
}
z3_type_map = {
"=": "Bool",
"<": "Bool",
">": "Bool",
"<=": "Bool",
">=": "Bool",
"=>": "Bool",
"or": "Bool",
"and": "Bool",
"not": "Bool",
"+": "Int",
"-": "Int",
"*": "Int"
}
from functools import reduce
value_semantics_map = {
"=": lambda x: x[0] == x[1],
"<": lambda x: x[0] < x[1],
">": lambda x: x[0] > x[1],
"<=": lambda x: x[0] <= x[1],
">=": lambda x: x[0] >= x[1],
"=>": lambda x: (not(x[0]) or x[1]),
"or": lambda x: reduce(lambda a, b: a or b, x),
"and": lambda x: reduce(lambda a, b: a and b, x),
"not": lambda x: not(x[0]),
"+": lambda x: x[0] + x[1],
"-": lambda x: (x[0] - x[1]) if len(x) > 1 else -x[0],
"*": lambda x: x[0] * x[1],
"ite": lambda x: x[1] if x[0] else x[2],
"let" : lambda x: x[0]
}
err_oup = open("log.out", "w")
def expr_to_z3(cons, param_map, func_name = None):
#print(cons, type(cons))
#print(param_map)
if type(cons) == tuple:
if cons[0] == "Int": return cons[1]
if cons[0] == "Bool": return False if cons[1] == "false" else True
assert False
if type(cons) == str:
assert cons in param_map
if type(param_map[cons]) == int:
return Int("Param" + str(param_map[cons]))
elif len(param_map[cons]) == 2:
para_id, para_type = param_map[cons]
if para_type == "Int":
return Int("Param" + str(para_id))
else:
return Bool("Param" + str(para_id))
elif len(param_map[cons]) == 3:
return param_map[cons][2]
else:
assert False
if type(cons) == list:
op = cons[0]
if op == "int" :
if(type(cons[1])==str):
return Int("Param" + str(param_map[cons[1]]))
else: return cons[1][1]
if op == func_name:
return Int("Result")
if op == "let":
var,val = cons[1][0][0],cons[1][0][1]
var_type = z3_type_map[val[0]]
#print("var =" +str(var))
#print("val =" +str(val) +" vartype =" + var_type)
new_param_map = param_map
new_param_map[var] = [len(new_param_map),var_type]
val = expr_to_z3(val, new_param_map, func_name)
new_param_map[var] = [len(new_param_map),var_type , val]
#print(new_param_map)
sub_list = [expr_to_z3(sub_cons, new_param_map, func_name) for sub_cons in cons[2:]]
else:
sub_list = [expr_to_z3(sub_cons, param_map, func_name) for sub_cons in cons[1:]]
err_oup.write(str(cons) + "\n")
err_oup.write(str(sub_list) + "\n")
#if op == "or" or op =="and":
# print(op)
# print(sub_list)
#print()
#print(op, sub_list)
return z3_semantics_map[op](sub_list)
assert False
_S = Solver()
import sys
def expr_to_val(cons, param_map):
if type(cons) == tuple:
if cons[0] == "Int": return cons[1]
if cons[0] == "Bool": return False if cons[1] == "false" else True
assert False
if type(cons) == str:
assert cons in param_map
return param_map[cons]
if type(cons) == list:
op = cons[0]
if op == "let":
var,val = cons[1][0][0],cons[1][0][1]
new_param_map = param_map
val = expr_to_val(val, new_param_map)
new_param_map[var] = val
#print(new_param_map)
sub_list = [expr_to_val(sub_cons, new_param_map) for sub_cons in cons[2:]]
else:
sub_list = [expr_to_val(sub_cons, param_map) for sub_cons in cons[1:]]
return value_semantics_map[op](sub_list)
assert False
def verify_by_example(program, param_map, example_space):
sys.setrecursionlimit(10000000)
z3expr = expr_to_val(program, param_map)
for point in example_space:
for var,val in zip(param_map,point[0]):
param_map[var] = val
myoup = expr_to_val(program, param_map)
if point[1] == myoup :
#print("success")
continue
#print("fail")
inp,oup = point[0], point[1]
break
return inp,oup
def verify(program, param_map, cons, is_random = True):
#print(program)
#print(param_map)
#print(cons)
#input()
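    # Ask z3 for an input on which the candidate program violates the spec:
    # unsat means no counterexample exists (the program is correct), otherwise
    # the model gives a counterexample input (optionally randomized), and the
    # expected output is recomputed under the original constraints.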
_S.push()
_S.add(Not(And(cons)))
_S.add(Int("Result") == expr_to_z3(program, param_map))
#print(program)
#print(expr_to_z3(program, param_map))
#input()
# print(_S)
sys.setrecursionlimit(10000000)
if _S.check() == unsat:
_S.pop()
return None, None
model = _S.model()
if is_random:
id_list = list(range(len(param_map)))
random.shuffle(id_list)
for id in id_list:
param = Int("Param" + str(id))
bound = getRandom()
if random.randint(0, 1) == 0:
_S.add(param >= bound)
else: _S.add(param <= bound)
if _S.check() == unsat: break
model = _S.model()
_S.pop()
inp = [parse_int_variable(model, Int("Param" + str(id))) for id in range(len(param_map))]
_S.push()
_S.add(And(cons))
for id in range(len(param_map)):
_S.add(Int("Param" + str(id)) == inp[id])
assert _S.check() == sat
oup = parse_int_variable(_S.model(), Int("Result"))
_S.pop()
return inp, oup
def parse_int_variable(model, var):
try:
return model.eval(var).as_long()
except:
return getRandom()
def load_cache(cache_path):
if os.path.exists(cache_path):
with open(cache_path, "r") as inp:
cache = json.load(inp)
return cache
return None
def save_cache(cache_path, cache, is_backup = False):
if os.path.exists(cache_path) and is_backup:
t = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
os.system("mv " + cache_path + " " + cache_path + t)
with open(cache_path, "w") as oup:
json.dump(cache, oup)
def _get_last_int(benchmark_name):
result = ""
while len(benchmark_name) > 0 and ord(benchmark_name[-1]) in range(ord('0'), ord('9') + 1):
result = benchmark_name[-1:] + result
benchmark_name = benchmark_name[:-1]
if len(result) == 0: return 0
return int(result)
def _prefix(name_1, name_2):
return name_2[:len(name_1)] == name_1
def _get_benchmark_info(benchmark_name):
if ".sl" in benchmark_name: benchmark_name = benchmark_name[:-3]
if _prefix("array_sum", benchmark_name):
num = int(benchmark_name.split('_')[-1])
size = int(benchmark_name.split('_')[-2])
return "array_sum" + str(num), size
if _prefix("max", benchmark_name) or _prefix("array_search", benchmark_name):
size = _get_last_int(benchmark_name)
return benchmark_name[:-len(str(size))], size
return benchmark_name, 0
def reorder_benchmark(benchmark_list):
info_list = []
for benchmark_name in benchmark_list:
type_name, ind = _get_benchmark_info(benchmark_name)
info_list.append((type_name, ind, benchmark_name))
return sorted(info_list)
def flush_line():
print("\r" + " " * 50, end="")
```
#### File: src/extra/util.py
```python
import random
from extra.config import *
from z3 import *
def getRandom():
return random.randint(-KIntMin, KIntMax)
z3_semantics_map = {
"=": lambda x: x[0] == x[1],
"<": lambda x: x[0] < x[1],
">": lambda x: x[0] > x[1],
"<=": lambda x: x[0] <= x[1],
">=": lambda x: x[0] >= x[1],
"=>": lambda x: Implies(x[0], x[1]),
"or": lambda x: Or(x[0], x[1]),
"and": lambda x: And(x[0], x[1]),
"not": lambda x: Not(x[0]),
"+": lambda x: x[0] + x[1],
"-": lambda x: x[0] - x[1],
"*": lambda x: x[0] * x[1],
"ite": lambda x: If(x[0], x[1], x[2])
}
def expr_to_z3(cons, param_map, func_name = None):
if type(cons) == tuple:
return cons[1]
if type(cons) == str:
assert cons in param_map
return Int("Param" + str(param_map[cons]))
if type(cons) == list:
op = cons[0]
if op == func_name:
return Int("Result")
sub_list = [expr_to_z3(sub_cons, param_map, func_name) for sub_cons in cons[1:]]
print(cons, z3_semantics_map[op](sub_list))
return z3_semantics_map[op](sub_list)
assert False
_S = Solver()
def verify(program, param_map, cons, is_random = True):
_S.push()
_S.add(Not(And(cons)))
_S.add(Int("Result") == expr_to_z3(program, param_map))
# print(_S)
if _S.check() == unsat:
_S.pop()
return None, None
model = _S.model()
if is_random:
id_list = list(range(len(param_map)))
random.shuffle(id_list)
for id in id_list:
param = Int("Param" + str(id))
bound = getRandom()
if random.randint(0, 1) == 0:
_S.add(param >= bound)
else: _S.add(param <= bound)
if _S.check() == unsat: break
model = _S.model()
_S.pop()
inp = [parse_int_variable(model, Int("Param" + str(id))) for id in range(len(param_map))]
_S.push()
_S.add(And(cons))
for id in range(len(param_map)):
_S.add(Int("Param" + str(id)) == inp[id])
assert _S.check() == sat
oup = parse_int_variable(_S.model(), Int("Result"))
_S.pop()
return inp, oup
def parse_int_variable(model, var):
try:
return model.eval(var).as_long()
except:
return getRandom()
def _get_last_int(benchmark_name):
result = ""
while len(benchmark_name) > 0 and ord(benchmark_name[-1]) in range(ord('0'), ord('9') + 1):
result = benchmark_name[-1:] + result
benchmark_name = benchmark_name[:-1]
if len(result) == 0: return 0
return int(result)
def _prefix(name_1, name_2):
return name_2[:len(name_1)] == name_1
def _get_benchmark_info(benchmark_name):
if ".sl" in benchmark_name: benchmark_name = benchmark_name[:-3]
if _prefix("array_sum", benchmark_name):
num = int(benchmark_name.split('_')[-1])
size = int(benchmark_name.split('_')[-2])
return "array_sum" + str(num), size
if _prefix("max", benchmark_name) or _prefix("array_search", benchmark_name):
size = _get_last_int(benchmark_name)
return benchmark_name[:-len(str(size))], size
return benchmark_name, 0
def reorder_benchmark(benchmark_list):
info_list = []
for benchmark_name in benchmark_list:
type_name, ind = _get_benchmark_info(benchmark_name)
info_list.append((type_name, ind, benchmark_name))
return sorted(info_list)
``` |
{
"source": "jis4nx/square-pass",
"score": 4
} |
#### File: square-pass/passwordManager/argaction.py
```python
import pyperclip
import string
import random
def generate_password(passlen,lc=50,nc=30,pc=20):
letters = list(string.ascii_letters)
puncs = ['$', '#', '_', '-', '!', '@', '+', '&', '(', ')', '?', '.', '*', '%']
digits = list(string.digits)
def percent(char,pers):
return int(pers*(char/100))
password = ""
password += "".join(random.choices(letters,k=percent(passlen,lc) ))
password += "".join(random.choices(puncs,k=percent(passlen,pc) ))
password += "".join(random.choices(digits,k=percent(passlen,nc)))
breh = list(password)
random.shuffle(breh)
return "".join(breh)
def copy_to_clipboard(*argv ,combo=False ,devider=":"):
"""
    Copy a string to the clipboard, or join gmail and password (separated by
    `devider`) into a combo and copy the full text to the clipboard.
`*argv` : List of arguments
`combo` ( options are either False or True ):
        False : Copies the first argument only.
        True : Copies the combo of all `argv` separated by `devider`
`devider` : devides different argument
e.g : copy_to_clipboard( "This_is_A_pass" )
copy_to_clipboard(
"<EMAIL>",
"This_is_A_pass",
combo=True ,
devider="-"
)
"""
if combo is False:
pyperclip.copy(argv[0])
else :
combined_text = ""
for arg in argv:
if arg != argv[-1]:
combined_text += str(arg)+devider
else:
combined_text += str(arg)
pyperclip.copy(combined_text)
def show_hints(text , text_type="password", security=3, jokes=False):
"""
text_types are :
password
gmail
    `security` : number of trailing characters of `text` to reveal in the hint
    `jokes` : special feature for the hint option; shows funny hints.
"""
if jokes is False :
amount_of_chars = len(text) - security + 1
hidden_chars = "*" * amount_of_chars
hint = text[0]+hidden_chars+text[-(security):]
return(hint)
else:
pass
        # development going on (shoaib islam)
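# --- Added usage sketch (not in the original module). Assumes a working
# clipboard backend for pyperclip; the e-mail address is a placeholder. ---
if __name__ == "__main__":
    demo_pw = generate_password(16)          # ~50% letters, 30% digits, 20% punctuation
    print(show_hints(demo_pw, security=3))   # first char, then '*'s, then last 3 chars
    copy_to_clipboard("[email protected]", demo_pw, combo=True, devider=":")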
```
#### File: square-pass/passwordManager/ciphers.py
```python
import base64, hashlib
from Crypto.Cipher import AES
import os
def encrypt(msg, masterkey):
masterkey = base64.b64encode(masterkey)
IV = os.urandom(16)
cipher = AES.new(masterkey, AES.MODE_CFB, IV)
return base64.b64encode(IV + cipher.encrypt(msg))
def decrypt(encMsg, masterkey):
masterkey = base64.b64encode(masterkey)
encMsg = base64.b64decode(encMsg)
IV = encMsg[:AES.block_size]
cipher = AES.new(masterkey, AES.MODE_CFB, IV)
return cipher.decrypt(encMsg[AES.block_size:])
def finalhash(msg, masterkey):
masterkey = base64.b64encode(masterkey)
IV = "agun".encode()*4
cipher = AES.new(masterkey, AES.MODE_CFB, IV)
encd = base64.b64encode(IV + cipher.encrypt(msg))
return hashlib.sha256(encd).hexdigest()
def hashuser(masterkey):
salt = '<PASSWORD>'
keysalt = (masterkey[-4:]+ salt) * 2
hashed_mpass = finalhash(masterkey.encode(), keysalt.encode())
return hashed_mpass
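# --- Added round-trip sketch (not in the original module). Assumption: a
# 12-byte random master key, so that its base64 encoding is 16 bytes, a valid
# AES key length for the AES.new() calls above. ---
if __name__ == "__main__":
    demo_key = os.urandom(12)
    token = encrypt(b"secret note", demo_key)
    assert decrypt(token, demo_key) == b"secret note"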
```
#### File: square-pass/passwordManager/reset.py
```python
from ciphers import decrypt, encrypt
import sqlite3
def resetpass():
    credlist = []
    conn = sqlite3.connect("../passwordmanager.db")
    cur = conn.cursor()
    query = """SELECT mspass from pass;"""
    try:
        cur.execute(query)
        for x in cur.fetchall():
            credlist.append(x)
    except Exception as err:
        print(err)
    finally:
        if conn:
            conn.close()
    # Derive the salt from the stored master password, not from the SQL text.
    mpass = credlist[0][0]
    keysalt = (mpass[-4:] + "xx01") * 2
    decpass = decrypt(mpass, keysalt)
    return decpass
print(resetpass())
```
#### File: square-pass/passwordManager/tools.py
```python
from prettytable import PrettyTable
from passwordManager.ciphers import encrypt, decrypt
from rich import box
from rich.panel import Panel
from rich.markdown import Markdown
from rich.align import Align
from rich.console import Console
from rich.layout import Layout
def print_note(note,title,sub,markdown=True):
console = Console()
layout = Layout()
note = Markdown(note) if markdown else note
styles = Panel(
note,
box=box.ROUNDED,
padding=(1, 2),
title=f"[b red]{title}",
border_style="bright_blue",
subtitle=sub
)
console.print(styles)
def print_box(lst, master_pass):
headers = ["Index","App Name", "Username", "Password"]
t = PrettyTable(headers)
for row in lst:
idx = row[0]
encPass = str(row[3]).encode()
username , app_name = row[1] , row[2]
decipher = decrypt(encPass, master_pass.encode()).decode()
conts = [idx, username, app_name, decipher]
t.add_row(conts)
return t
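# --- Added usage sketch (not in the original module; the note text is
# illustrative, and the module must be importable as part of the
# passwordManager package for its own imports to resolve) ---
if __name__ == "__main__":
    print_note("**Remember** to rotate old passwords.", title="Reminder",
               sub="square-pass")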
``` |
{
"source": "jisaacstone/odata-py",
"score": 2
} |
#### File: odata-py/odata/filter_grammar.py
```python
import parsley
import operator
from sqlalchemy import func
from sqlalchemy.sql import expression
from sqlalchemy.sql.functions import concat, char_length, operators
from odata.exc import RequestParseError
gops = {
'eq': operator.eq,
'ne': operator.ne,
'gt': operator.gt,
'lt': operator.lt,
'le': operator.le,
'and': operator.and_,
'or': operator.or_,
'not': operator.not_,
'add': operator.add,
'sub': operator.sub,
'mul': operator.mul,
'div': operator.truediv,
'mod': operator.mod}
def literals(fn):
def wrapped(*args):
largs = []
for arg in args:
if not isinstance(arg, expression.ClauseElement):
arg = expression.literal(arg)
largs.append(arg)
return fn(*largs)
return wrapped
@literals
def fun_substringof(searchstr, searchin):
return searchin.contains(searchstr)
@literals
def fun_indexof(searchin, searchstr):
raise NotImplementedError()
@literals
def fun_replace(base, find, replace):
raise NotImplementedError()
@literals
def fun_substring(string, pos, length=None):
raise NotImplementedError()
@literals
def fun_day(datetime):
raise NotImplementedError()
@literals
def fun_hour(datetime):
raise NotImplementedError()
@literals
def fun_minute(datetime):
raise NotImplementedError()
@literals
def fun_second(datetime):
raise NotImplementedError()
@literals
def fun_year(datetime):
raise NotImplementedError()
@literals
def fun_isof(obj, compareto=None):
raise NotImplementedError()
gfuns = dict(
substringof=fun_substringof,
endswith=literals(operators.endswith_op),
startswith=literals(operators.startswith_op),
length=literals(char_length),
index_of=fun_indexof,
replace=fun_replace,
tolower=literals(func.lower),
toupper=literals(func.toupper),
trim=literals(func.trim),
concat=literals(concat),
day=fun_day,
hour=fun_hour,
minute=fun_minute,
second=fun_second,
year=fun_year,
round=literals(func.round),
floor=literals(func.floor),
ceiling=literals(func.ceil),
isof=fun_isof)
filter_grammar = r'''
escapedChar = '\\' ('\'')
integer = <digit+>:ds -> int(ds)
float = (integer '.' integer):fs -> float(fs)
number = (integer | float)
boolean = ('true' | 'false'):value -> value == 'true'
string = '\'' (escapedChar | ~'\'' anything)*:c '\'' -> u''.join(c)
parens = '(' ws expr:e ws ')' -> e
column = <(letterOrDigit | '_')+>:name -> get_column(name)
null = 'null' -> None
atom = ws (parens | number | boolean | string | null | column)
op = ws <letter+>:name ws atom:e ?(name in ops) -> ops[name], e
expr = atom:left op*:right -> compose(left, right)
'''
def compose(left, right):
if not right:
return left
op, value = right.pop(0)
if op in (operator.and_, operator.or_):
return op(left, compose(value, right))
return compose(op(left, value), right)
def colgetter(sqlobj):
def getcol(name):
for tbl in sqlobj.froms:
for col in tbl.columns:
if col.name.lower() == name.lower():
return col
raise RequestParseError('{} is unknown'.format(name))
return getcol
def parse(context, value):
if not value:
raise RequestParseError('Must provide a value for $filter')
try:
grammar = parsley.makeGrammar(
filter_grammar,
dict(compose=compose,
ops=gops,
get_column=colgetter(context['sqlobj'])))
except EOFError:
raise RequestParseError('Bad $filter {}'.format(value))
context['sqlobj'] = context['sqlobj'].where(grammar(value).expr())
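# --- Added illustration (not in the original module). The table and column
# names are hypothetical; assumes a SQLAlchemy 1.x-style select([...]) and
# that parsley is installed. ---
if __name__ == "__main__":
    from sqlalchemy import Column, Integer, MetaData, String, Table, select
    products = Table("products", MetaData(),
                     Column("id", Integer),
                     Column("price", Integer),
                     Column("name", String))
    ctx = {"sqlobj": select([products])}
    parse(ctx, "price gt 20 and name eq 'widget'")
    # The $filter string is compiled into a WHERE clause, roughly:
    #   ... WHERE products.price > :price_1 AND products.name = :name_1
    print(ctx["sqlobj"])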
```
#### File: odata-py/odata/parser.py
```python
from sqlalchemy.sql import expression
from sqlalchemy import select, insert, update, delete
from odata import urlpath, urlquery, urlheaders, render
from odata.exc import RequestParseError, NoContent
from odata.shared import select_star
verbfuncs = dict(
GET=lambda table: select().select_from(table),
POST=insert,
PUT=update,
PATCH=update,
MERGE=update,
DELETE=delete)
def create_context(tables, http_verb, headers=None, payload=None):
if http_verb not in verbfuncs:
raise RequestParseError('Unknown verb')
return dict(
tables=tables,
http_verb=http_verb,
request_payload=payload,
request_headers=headers if headers is not None else {},
sqlobj=verbfuncs[http_verb],
response_headers={},
response_status=200)
def validate_and_cleanup(sqlobj, request_payload):
if isinstance(sqlobj, expression.Select):
if not sqlobj.columns:
return select_star(sqlobj)
if ((isinstance(sqlobj, expression.Update) or
isinstance(sqlobj, expression.Delete)) and
sqlobj._whereclause is None):
raise RequestParseError('Global collection modifications not allowed')
if ((isinstance(sqlobj, expression.Update) or
isinstance(sqlobj, expression.Insert)) and
not sqlobj.parameters):
if hasattr(request_payload, 'iteritems'):
return sqlobj.values(request_payload)
else:
raise RequestParseError('Invalid Payload')
return sqlobj
class RequestParser(object):
def __init__(self, tables, engine=None, dialect=None, connection=None):
self.engine = engine
self.tables = tables
self.dialect = dialect
self.connection = connection
def parse(self, path, http_verb,
headers=None, query_args=None, payload=None):
context = create_context(self.tables, http_verb, headers, payload)
urlpath.parse(path, context)
if query_args:
urlquery.parse(context, query_args)
if headers:
urlheaders.parse(context)
context['sqlobj'] = validate_and_cleanup(context['sqlobj'], payload)
try:
context['payload'] = self.query(context['sqlobj'])
except NoContent as e:
e.code = context['response_status']
raise e
return self.render(context)
def query(self, sqlobj):
if self.connection is None:
if self.engine is not None:
connection = self.engine.connect()
else:
raise AttributeError("No connection or engine found")
else:
connection = self.connection()
sqlobj = sqlobj.compile(self.engine, self.dialect)
result = connection.execute(sqlobj)
if not result.returns_rows:
raise NoContent()
return map(dict, result)
def render(self, context):
return dict(payload=render.payload(context),
headers=context['response_headers'],
status=context['response_status'])
```
#### File: odata-py/odata/render.py
```python
from json import JSONEncoder, dumps
from odata.exc import RequestParseError
class ReprEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, (list, dict, str, unicode, int,
float, bool, type(None))):
return JSONEncoder.default(self, obj)
return repr(obj)
def jsonify(payload):
return dumps(payload, cls=ReprEncoder)
def plaintext(payload):
if hasattr(payload, 'values'):
payload = payload.values()
if isinstance(payload, list) or isinstance(payload, tuple):
if len(payload) != 1:
raise RequestParseError('No plaintext support for this endpoint')
payload = payload[0]
return payload
#TODO: 'application/xml', 'application/xml+atom'
formatters = {
'application/json': jsonify,
'text/plain': plaintext}
def payload(context):
headers = context.get('headers', {})
code = context.get('response_code', 200)
ct = context.get('content_type',
headers.get('Content-Type', 'application/json'))
if ct not in formatters:
raise NotImplementedError
payload = formatters[ct](context.get('payload', ''))
return payload
``` |
{
"source": "jisantuc/HPDMinorityReport",
"score": 3
} |
#### File: jisantuc/HPDMinorityReport/loader.py
```python
import pandas as pd
class DataLoader(object):
def __init__(self, complaint_fname, weather_fname):
        self.complaint_fname = complaint_fname
        self.weather_fname = weather_fname
        self.cols = [c.title() for c in
                     ['unique key', 'created date', 'closed date', 'agency',
                      'agency name', 'complaint type', 'descriptor',
                      'location type', 'incident zip', 'incident address',
                      'street name', 'address type', 'city', 'facility type',
                      'status', 'resolution description', 'borough']]
def load_complaint_data(self):
chunks = pd.read_csv(
self.complaint_fname,
header=None,
names=self.cols,
usecols=self.cols,
iterator=True,
chunksize=10000
)
return pd.concat([chunk for chunk in chunks], ignore_index=True).rename(
            columns={'Created Date': 'date'}
)
def load_weather_data(self):
chunks = pd.read_csv(
            self.weather_fname,
iterator=True,
chunksize=10000
)
return pd.concat([chunk for chunk in chunks], ignore_index=True)
def load_data(self):
weather = self.load_weather_data()
complaints = self.load_complaint_data()
return complaints.join(
weather, on='date'
)
``` |
{
"source": "jisantuc/labeller",
"score": 2
} |
#### File: labeller/common/assignment_notification_daemon.py
```python
import sys
import time
from datetime import datetime
from MappingCommon import MappingCommon
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
mapc = MappingCommon()
# create log
logFilePath = mapc.projectRoot + "/log"
k = open(logFilePath + "/assignment_notification_daemon.log", "a+")
now = str(datetime.today())
k.write("\nassignment_notification: Daemon starting up at %s\n" % now)
k.close()
# workers = dict()
lastAssignableCount = dict()
def email_worker(message, subject, sender, receiver):
msg = MIMEMultipart('related')
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = receiver
body = MIMEText(message)
msg.attach(body)
s = smtplib.SMTP('localhost')
s.sendmail(sender, [receiver], msg.as_string())
s.quit()
while True:
hitPollingInterval = int(mapc.getConfiguration('Hit_PollingInterval'))
if(hitPollingInterval * 3 < 60):
notificationInterval = hitPollingInterval * 3
else:
notificationInterval = 60
sender = "<EMAIL>"
receiver = mapc.getConfiguration('Hit_NotificationEmailAddress')
k = open(logFilePath + "/assignment_notification_daemon.log", "a+")
now = str(datetime.today())
# Read worker_data
# Get currently qualified workers and their information
sql = """SELECT worker_id, first_name, last_name, email,
qualified FROM users
INNER JOIN worker_data ON users.id = worker_data.worker_id
where qualified='True' ORDER BY worker_id ASC;
"""
mapc.cur.execute(sql)
worker_data = mapc.cur.fetchall()
mapc.dbcon.commit()
# Read in worker data to collect
for worker in worker_data:
workerId = worker[0]
workerName = worker[1] + " " + worker[2]
workerEmail = worker[3]
if workerId not in lastAssignableCount:
lastAssignable = -1
else:
lastAssignable = lastAssignableCount[workerId]
assignable = 0
pending = 0
assigned = 0
# Loop through each each hit counting assignables, assigneds, pendings
for hitId, hit in mapc.getHitInfo().iteritems():
# check assignable F hits
if hit['kmlType'] != MappingCommon.KmlFQAQC:
continue
# 'else' clause below is executed if no match on workerId.
for asgmtId, asgmt in hit['assignments'].iteritems():
if asgmt['workerId'] == workerId:
if asgmt['status'] == MappingCommon.HITAssigned:
assigned += 1
elif asgmt['status'] == MappingCommon.HITPending:
pending += 1
break
else:
if hit['assignmentsRemaining'] > 0:
assignable += 1
lastAssignableCount[workerId] = assignable
# notify
# if worker has just 1 assignment left, log it
if assignable == 1 and lastAssignable > 1:
assignResults = ("worker %s: last assignable=%s; assignable=%s;" +
"pending=%s; assigned=%s\n") % (workerId,
lastAssignable, assignable, pending, assigned)
message = (workerName + " has just " + str(assignable) +
" assignable assignments on " + mapc.hostName +
". Of these, " + str(assigned) +
" are assigned and " + str(pending) +
" are pending. Please keep mapping to finish until" +
" all assignments and pendings are cleared")
subject = (workerName + " 1 assignable assignment on " +
mapc.hostName)
email_worker(message, subject, sender, receiver)
k.write("\nOne remaining assignment: datetime = %s\n" % now)
k.write("Worker %s (%s) notified on %s\n" %
(workerId, workerName, mapc.hostName))
k.write(assignResults)
# if worker has no assigned or pending assignments
if pending == 0 and assigned == 0:
# the first time no further assignable HITs are avaiable
            if lastAssignable != 0 and assignable == 0:
# tell them to map on a different instance
message = (workerName + " has finished all" +
" assignments on " + mapc.hostName + ", and should " +
" log in any other instance where there are " +
" available assignments")
subject = (workerName + " has 0 assignments left on " +
mapc.hostName)
email_worker(message, subject, sender, receiver)
k.write("Finished assignments: datetime = %s\n" % now)
k.write("Worker %s (%s) notified of 0 assignments on %s\n" %
(workerId, workerName, mapc.hostName))
# if new HITs become avaiable and previously there were none assignable
# (this includes cases where a worker might still have pending & assgnd)
if assignable > 0 and lastAssignable <= 0:
message = (workerName + " has " + str(assignable) +
" new assignments available for mapping on " +
mapc.home)
subject = (workerName + " has " + str(assignable) +
" new assignments on " + mapc.hostName)
email_worker(message, subject, sender, receiver)
k.write("New Assignments: datetime = %s\n" % now)
k.write("Worker %s (%s) notified of %s assignments on %s\n" %
(workerId, workerName, assignable, mapc.hostName))
# Sleep for specified polling interval
k.close()
time.sleep(notificationInterval)
```
#### File: labeller/common/fire_up_labeller.py
```python
import yaml
import re
import boto3
import sys
import os
import click
from math import floor
# check worker_mem_yarn at:
# https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-hadoop-task-config.html#emr-hadoop-task-config
def fire_up_labeller(initial=3,
ec2_instance="start",
run_id=0,
aoi_index=1,
aoi_name="aoi",
aoi_s3_object=None,
incoming_names_static_path=None,
github_branch="devel",
github_repo="agroimpacts/issues",
security_group_id="sg-ac924ee6",
worker_type="m4.2xlarge",
bid_price=0.16,
worker_count=50,
secret_key=None,
db_user=None,
db_pwd=<PASSWORD>,
github_token=None,
api_key=None,
aws_access=None,
aws_secret=None,
aws_region=None,
bucket="activemapper",
number_outgoing_names=10,
num_possibilities=20,
slack_url=None,
worker_vcpu=16,
worker_mem_yarn=24,
executor_cores=5,
image_catalog_predict=None):
# Set the config.yaml
# Parse the config template
home = os.environ['HOME']
projectRoot = '%s/labeller' % home
config_template = "%s/common/%s" % (projectRoot, "config_template.yaml")
with open(config_template, 'r') as yaml_file:
config = yaml.safe_load(yaml_file)
config_path = "%s/common/%s" % (projectRoot, "config.yaml")
# Get the incoming_names_file
if incoming_names_static_path is None:
print("You leave this blank, so just set it to incoming_names_static_cluster_blank.csv.")
incoming_names_file_path = "incoming_names_static_cluster_blank.csv"
else:
incoming_names_file_path = incoming_names_static_path
# Check the initial mode index
if initial not in [1, 2, 3]:
sys.exit("Invalid initial mode index! Should be in [1, 2, 3].")
# Set parameters
f_pool_file = "f_pool_%s_%d.csv" % (aoi_name, aoi_index)
qs_pool_file = "q_sites_%s_%d.csv" % (aoi_name, aoi_index)
incoming_names_file = "incoming_names_%s_%d.csv" % (aoi_name, aoi_index)
incoming_metrics_file = "incoming_metrics_%s_%d.csv" % (aoi_name, aoi_index)
image_catalog_file = "planet/planet_catalog_%s_%d.csv" % (aoi_name, aoi_index)
if image_catalog_predict is None:
image_catalog_predict = image_catalog_file
image_output_pattern = "s3://activemapper/classified-images/%s_%d/image_c{}_r{}_{}_run{}_iteration{}.tif" \
% (aoi_name, aoi_index)
outgoing_names_file = "s3://activemapper/planet/outgoing_names_%s_%d.csv" \
% (aoi_name, aoi_index)
# spark
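    # Common EMR/Spark sizing heuristic: leave one vCPU per worker for
    # overhead, reserve one executor slot for the driver, and set shuffle /
    # default parallelism to twice the total executor cores.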
worker_executor = floor((worker_vcpu - 1) / executor_cores)
executor_mem = floor((worker_mem_yarn / worker_executor) - 1)
executor = worker_executor * worker_count - 1
num_shuffle = executor * executor_cores * 2
# Set the values
# labeller
config['labeller']['DEBUG'] = True
config['labeller']['initial'] = initial
config['labeller']['SECRET_KEY'] = secret_key
config['labeller']['slack_url'] = slack_url
config['labeller']['db_production_name'] = 'Africa'
config['labeller']['db_username'] = db_user
config['labeller']['db_password'] = <PASSWORD>
config['labeller']['github_token'] = github_token
config['labeller']['github_repo'] = github_repo
config['labeller']['MAIL_SERVER'] = "localhost"
config['labeller']['MAIL_PORT'] = 25
config['labeller']['PL_API_KEY'] = api_key
config['labeller']['aws_access'] = aws_access
config['labeller']['aws_secret'] = aws_secret
config['labeller']['aws_region'] = aws_region
config['labeller']['mapping_category1'] = "field"
config['labeller']['consensus_directory'] = "/labels/%s/" % aoi_name
config['labeller']['consensus_heatmap_dir'] = "heatmaps/%s" % aoi_name
config['labeller']['s3_catalog_name'] = "planet_catalog_%s_full.csv" % aoi_name
config['labeller']['aoi_s3_object'] = "grid/%s" % aoi_s3_object
# learner
config['learner']['aws_access'] = aws_access
config['learner']['aws_secret'] = aws_secret
config['learner']['aws_region'] = aws_region
config['learner']['runid'] = int(run_id)
config['learner']['aoiid'] = aoi_index
config['learner']['aoiname'] = aoi_name
config['learner']['bucket'] = "activemapper"
config['learner']['prefix'] = "planet"
config['learner']['pool'] = f_pool_file
config['learner']['qs'] = qs_pool_file
config['learner']['incoming_names'] = incoming_names_file
config['learner']['incoming_names_static'] = incoming_names_file_path
config['learner']['metrics'] = incoming_metrics_file
config['learner']['image_catalog'] = image_catalog_file
config['learner']['image_catalog_predict'] = image_catalog_predict
config['learner']['image_output_pattern'] = image_output_pattern
config['learner']['outgoing'] = outgoing_names_file
config['learner']['number_outgoing_names'] = int(number_outgoing_names)
# Set the private ip of the instance
aws_session = boto3.session.Session(aws_access_key_id=config['learner']['aws_access'],
aws_secret_access_key=config['learner']['aws_secret'],
region_name=config['learner']['aws_region'])
ec2_instances = aws_session.resource('ec2').instances.filter(
Filters=[{
'Name': 'tag:Name',
'Values': [ec2_instance]}])
try:
private_ip = [instance.private_ip_address for instance in ec2_instances][0]
subnet = [instance.subnet_id for instance in ec2_instances][0]
except IndexError:
sys.exit("No such instance, please check this on AWS console.")
config['labeller']['db_host'] = private_ip
# Remove the null from the output file
def represent_none(self, _):
return self.represent_scalar(u'tag:yaml.org,2002:null', u'')
yaml.SafeDumper.add_representer(type(None), represent_none)
with open(config_path, "w") as f:
yaml.safe_dump(config, f, default_flow_style=False)
s3_client = aws_session.client('s3', region_name=config['learner']['aws_region'])
des_on_s3 = "config_%s_%d.yaml" % (aoi_name, aoi_index)
s3_client.upload_file(config_path, bucket, des_on_s3)
# Set the emr.tf and variables.tf
emr_path = "%s/terraform/%s" % (projectRoot, "emr_template.tf")
variables_path = "%s/terraform/%s" % (projectRoot, "variables_template.tf")
emr_path_new = "%s/terraform/%s" % (projectRoot, "emr.tf")
variables_path_new = "%s/terraform/%s" % (projectRoot, "variables.tf")
def change_variables(old_content, params, new_default):
lines = re.findall('variable "%s" {(?s).*?}' % params, old_content)
line = re.findall('default(?s).*?= ".*?"', lines[0])
old = re.findall('".*?"', line[0])
line_new = line[0].replace(old[0], '"%s"' % new_default)
lines_new = lines[0].replace(line[0], line_new)
new_content = old_content.replace(lines[0], lines_new)
return new_content
with open(variables_path, "r") as f:
variables = f.read()
variables = change_variables(old_content=variables, params="worker_count", new_default=worker_count)
variables = change_variables(old_content=variables, params="worker_type", new_default=worker_type)
variables = change_variables(old_content=variables, params="bid_price", new_default=bid_price)
variables = change_variables(old_content=variables, params="security_group", new_default=security_group_id)
variables = change_variables(old_content=variables, params="subnet", new_default=subnet)
with open(variables_path_new, "w+") as f:
f.write(variables)
def change_emr(old_content, params, step):
# run_geopyspark
param = params[step]
lines = re.findall('step {\n *name="%s"(?s).*?}' % step, old_content)[0]
line = re.findall('args = \[.*?\]', lines)[0] \
.replace('"]', '') \
.split('", "')
content = old_content
lines_new = lines
for key in param.keys():
if "spark" in key:
old = [m for m in line if m.startswith(key)][0]
pos_index = line.index(old)
new = "%s=%s" % (key, str(param[key]))
elif "-" in key:
new = param[key]
pos_index = line.index(key) + 1
else:
try:
pos_index, new = param[key]
except:
sys.exit("Please provide both pos_index and new value for %s!" % param[key])
lines_new = lines_new.replace('"%s"' % line[pos_index], '"%s"' % str(new))
content = content.replace(lines, lines_new)
return content
emr_params = {
"Clone Learner": {
"-b": github_branch
},
"run_geopyspark.py": {
"spark.executor.instances": executor,
"spark.executor.cores": executor_cores,
"spark.executor.memory": "%dg" % executor_mem,
"spark.driver.cores": executor_cores,
"spark.driver.memory": "%dg" % executor_mem,
"spark.sql.shuffle.partition": num_shuffle,
"spark.default.parallelism": num_shuffle,
"--config-filename": des_on_s3,
"--probability-images": num_possibilities,
"run": (-2, run_id),
"aoi": (-1, aoi_index),
},
"run_DB_insert.py": {
"--config-filename": des_on_s3
}
}
with open(emr_path, "r") as f:
emr = f.read()
emr = change_emr(old_content=emr, params=emr_params, step="Clone Learner")
emr = change_emr(old_content=emr, params=emr_params, step="run_geopyspark.py")
emr = change_emr(old_content=emr, params=emr_params, step="run_DB_insert.py")
with open(emr_path_new, "w+") as f:
f.write(emr)
@click.command()
@click.option('--initial', default=2, type=int, help='The labeller mode: 1 initial; 2 single; 3 regular.')
@click.option('--ec2_instance', default='start', help='The name of the labeller instance.')
@click.option('--run_id', default=0, type=int, help='The run id of the iteration.')
@click.option('--aoi_index', default=1, help='The index of the aoi in geojson the iteration will run on.')
@click.option('--aoi_name', default="aoi", help='The general name of the aoi.')
@click.option('--aoi_s3_object', default="image_target_aois.geojson", help='The name of AOI geojson in '
'S3/activemapper/grid.')
@click.option('--incoming_names_static_path', default='incoming_names_static_cluster_blank.csv', help='The S3 path of static '
'incoming names.')
@click.option('--github_branch', default="master", help='The branch name of learner to pull.')
@click.option('--github_repo', default="agroimpacts/issues", help='The repo to steer issues to.')
@click.option('--worker_type', default="m4.xlarge", help='The worker type of emr worker.')
@click.option('--bid_price', default=0.086, type=float, help='The bid price of emr worker.')
@click.option('--worker_count', default=200, type=int, help='The number of emr worker.')
@click.option('--bucket', default="activemapper", help='The name for S3 bucket.')
@click.option('--number_outgoing_names', default=10, type=int, help='The number of outgoing names.')
@click.option('--num_possibilities', default=20, type=int, help='The number of possibility maps to save out each '
'iteration.')
@click.option('--security_group_id', default=None, help='The security group id of learner.')
@click.option('--secret_key', default=None, help='The secret key for labeller.')
@click.option('--db_user', default=None, help='The name of database user.')
@click.option('--db_pwd', default=None, help='The password of database.')
@click.option('--github_token', default=None, help='The github token of maphelp.')
@click.option('--api_key', default=None, help='The api key for downloading planet.')
@click.option('--aws_access', default=None, help='The aws access key.')
@click.option('--aws_secret', default=None, help='The aws secret key.')
@click.option('--aws_region', default=None, help='The aws region.')
@click.option('--slack_url', default=None, help='The url of slack APP.')
@click.option('--worker_vcpu', default=16, type=int, help='The number of cup for workers.')
@click.option('--worker_mem_yarn', default=24, type=int, help='The size of memeory yarn for workers.')
@click.option('--executor_cores', default=5, type=int, help='The number of executor cores for workers.')
@click.option('--image_catalog_predict', default=None, help='The catalog of images to apply the model.')
def main(initial, ec2_instance, run_id, aoi_index, aoi_name, aoi_s3_object, incoming_names_static_path,
github_branch, github_repo, worker_type, bid_price, worker_count, bucket,
number_outgoing_names, num_possibilities, security_group_id,
secret_key, db_user, db_pwd, github_token, api_key,
aws_access, aws_secret, aws_region, slack_url,
worker_vcpu, worker_mem_yarn, executor_cores,
image_catalog_predict):
fire_up_labeller(initial, ec2_instance, run_id, aoi_index, aoi_name, aoi_s3_object, incoming_names_static_path,
github_branch, github_repo, security_group_id, worker_type, bid_price, worker_count,
secret_key, db_user, db_pwd, github_token, api_key, aws_access,
aws_secret, aws_region, bucket, number_outgoing_names, num_possibilities,
slack_url, worker_vcpu, worker_mem_yarn, executor_cores, image_catalog_predict)
if __name__ == "__main__":
main()
```
#### File: labeller/common/last_day_on_fly.py
```python
import os
import re
import yaml
import boto3
home = os.environ['HOME']
projectRoot = '%s/labeller' % home
config_path = "%s/common/%s" % (projectRoot, "config.yaml")
with open(config_path, 'r') as yaml_file:
config = yaml.safe_load(yaml_file)
config['learner']['image_output_pattern'] = "s3://activemapper/classified-images/%s_whole/" \
"image_c{}_r{}.tif" % config['learner']['aoiid']
config['learner']['image_catalog_fix'] = 'planet/planet_catalog_{}_fix.csv'.format(config['learner']['aoiid'])
# Remove the null from the output file
def represent_none(self, _):
return self.represent_scalar(u'tag:yaml.org,2002:null', u'')
yaml.SafeDumper.add_representer(type(None), represent_none)
with open(config_path, "w") as f:
yaml.safe_dump(config, f, default_flow_style=False)
aws_session = boto3.session.Session(aws_access_key_id=config['learner']['aws_access'],
aws_secret_access_key=config['learner']['aws_secret'],
region_name=config['learner']['aws_region'])
s3_client = aws_session.client('s3', region_name=config['learner']['aws_region'])
des_on_s3 = "config_%s_whole.yaml" % config['learner']['aoiid']
s3_client.upload_file(config_path, "activemapper", des_on_s3)
emr_path = "%s/terraform/%s" % (projectRoot, "emr.tf")
def change_emr(old_content, params, new_value, pos_index):
lines = re.findall('step {\n *name="%s"(?s).*?}' % params, old_content)
line = re.findall('args = \[.*?\]', lines[0])
old = line[0].split('", "')[pos_index]
if pos_index == -6:
line_new = line[0].replace('--probability-images", "%s"' % old,
'--probability-images", "%s"' % str(new_value))
elif pos_index == -2:
line_new = line[0].replace('activemapper", "%s"' % old, 'activemapper", "%s"' % str(new_value))
elif pos_index == -3:
line_new = line[0].replace('activemapper', '%s", "activemapper' % str(new_value))
else:
line_new = line[0].replace("%s" % old, "%s" % str(new_value))
lines_new = lines[0].replace(line[0], line_new)
new_content = old_content.replace(lines[0], lines_new)
return new_content
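# Illustration (a sketch; the emr.tf content shown here is hypothetical): given a
# step block such as
#   step { name="run_geopyspark.py" ... args = ["...", "old_aoi"] }
# calling change_emr(old_content=emr, params="run_geopyspark.py",
# new_value="new_aoi", pos_index=-1) rewrites the last quoted element of that
# args list to "new_aoi".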
with open(emr_path, "r") as f:
emr = f.read()
emr = change_emr(old_content=emr, params="run_geopyspark.py", new_value=str(config['learner']['aoiid']) + '"]',
pos_index=-1) # aoi_id
emr = change_emr(old_content=emr, params="run_geopyspark.py", new_value="--output-all-images", pos_index=-3)
emr = change_emr(old_content=emr, params="run_geopyspark.py",
new_value=des_on_s3, pos_index=-9)
try:
lines = re.findall('step {\n *name="%s"(?s).*?}' % "run_DB_insert.py", emr)[0]
emr = emr.replace(lines+"}", "")
except IndexError:
exit("Run_DB_insert already gone!")
with open(emr_path, "w") as f:
f.write(emr)
``` |
{
"source": "jisantuc/planetary-computer-apis-1",
"score": 2
} |
#### File: pcstac/pcstac/main.py
```python
import logging
import os
from typing import Any, Awaitable, Callable, Dict, List
from fastapi import FastAPI, Request, Response
from fastapi.exceptions import RequestValidationError, StarletteHTTPException
from fastapi.openapi.utils import get_openapi
from fastapi.responses import ORJSONResponse
from stac_fastapi.api.errors import DEFAULT_STATUS_CODES
from stac_fastapi.extensions.core import FieldsExtension, QueryExtension, SortExtension
from stac_fastapi.pgstac.config import Settings
from stac_fastapi.pgstac.db import close_db_connection, connect_to_db
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import PlainTextResponse
from pccommon.logging import init_logging
from pccommon.openapi import fixup_schema
from pcstac.api import PCStacApi
from pcstac.client import PCClient
from pcstac.config import API_DESCRIPTION, API_TITLE, API_VERSION, get_settings
from pcstac.errors import PC_DEFAULT_STATUS_CODES
from pcstac.middleware import count_collection_requests, trace_request
from pcstac.search import PCItemCollectionUri, PCSearch, PCSearchGetRequest
DEBUG: bool = os.getenv("DEBUG") == "TRUE" or False
# Initialize logging
init_logging("stac")
logger = logging.getLogger(__name__)
# Get the root path if set in the environment
APP_ROOT_PATH = os.environ.get("APP_ROOT_PATH", "")
logger.info(f"APP_ROOT_PATH: {APP_ROOT_PATH}")
INCLUDE_TRANSACTIONS = os.environ.get("INCLUDE_TRANSACTIONS", "") == "yes"
logger.info(f"INCLUDE_TRANSACTIONS: {INCLUDE_TRANSACTIONS}")
# Allow setting of SQLAlchemy connection pools
POOL_SIZE = int(os.environ.get("POOL_SIZE", "1"))
logger.info(f"POOL_SIZE: {POOL_SIZE}")
extensions = [QueryExtension(), SortExtension(), FieldsExtension()]
# Planetary Computer conformance classes differ from the default
# stac-fastapi case so they are manually specified
cql_conformance_classes: List[str] = [
"https://api.stacspec.org/v1.0.0-beta.3/item-search/#fields",
"https://api.stacspec.org/v1.0.0-beta.3/item-search#filter",
"https://api.stacspec.org/v1.0.0-beta.3/item-search#filter:cql-json",
"https://api.stacspec.org/v1.0.0-beta.3/item-search#filter:filter",
"https://api.stacspec.org/v1.0.0-beta.3/item-search#filter:item-search-filter",
"https://api.stacspec.org/v1.0.0-beta.3/item-search#filter:basic-spatial-operators",
(
"https://api.stacspec.org/v1.0.0-beta.3/item-search"
"#filter:basic-temporal-operators"
),
"https://api.stacspec.org/v1.0.0-beta.3/item-search/#sort",
"https://api.stacspec.org/v1.0.0-beta.3/item-search/#query",
]
collections_conformance_classes: List[str] = [
"http://www.opengis.net/spec/ogcapi-features-1/1.0/conf/oas30",
]
extra_conformance_classes = cql_conformance_classes + collections_conformance_classes
api = PCStacApi(
title=API_TITLE,
description=API_DESCRIPTION,
api_version=API_VERSION,
settings=Settings(debug=DEBUG),
client=PCClient.create(extra_conformance_classes=extra_conformance_classes),
extensions=extensions,
app=FastAPI(root_path=APP_ROOT_PATH, default_response_class=ORJSONResponse),
search_request_model=PCSearch,
search_get_request=PCSearchGetRequest,
item_collection_uri=PCItemCollectionUri,
response_class=ORJSONResponse,
exceptions={**DEFAULT_STATUS_CODES, **PC_DEFAULT_STATUS_CODES},
)
app: FastAPI = api.app
app.add_middleware(
CORSMiddleware,
allow_origins="*",
allow_methods=["GET", "POST"],
allow_headers=["*"],
)
@app.middleware("http")
async def _count_collection_requests(
request: Request, call_next: Callable[[Request], Awaitable[Response]]
) -> Response:
return await count_collection_requests(request, call_next)
@app.middleware("http")
async def _trace_request(
request: Request, call_next: Callable[[Request], Awaitable[Response]]
) -> Response:
return await trace_request(request, call_next)
@app.on_event("startup")
async def startup_event() -> None:
"""Connect to database on startup."""
await connect_to_db(app)
@app.on_event("shutdown")
async def shutdown_event() -> None:
"""Close database connection."""
await close_db_connection(app)
@app.exception_handler(StarletteHTTPException)
async def http_exception_handler(
request: Request, exc: StarletteHTTPException
) -> PlainTextResponse:
return PlainTextResponse(str(exc.detail), status_code=exc.status_code)
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(
request: Request, exc: RequestValidationError
) -> PlainTextResponse:
return PlainTextResponse(str(exc), status_code=400)
def custom_openapi() -> Dict[str, Any]:
if app.openapi_schema:
return app.openapi_schema
else:
schema = get_openapi(
title="Planetary Computer STAC API",
version=get_settings().api_version,
routes=app.routes,
)
app.openapi_schema = fixup_schema(app.root_path, schema)
import json
print(json.dumps(app.openapi_schema["paths"], indent=2))
return schema
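# FastAPI resolves the schema through app.openapi, so wiring the override onto
# the app is assumed to follow this definition (a sketch; not shown in the
# snippet above):
# app.openapi = custom_openapi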
``` |
{
"source": "jisantuc/pystac",
"score": 3
} |
#### File: tests/extensions/test_datacube.py
```python
import unittest
import pystac
from pystac import ExtensionTypeError
from pystac.extensions.datacube import DatacubeExtension
from tests.utils import TestCases
class DatacubeTest(unittest.TestCase):
def setUp(self) -> None:
self.maxDiff = None
self.example_uri = TestCases.get_path("data-files/datacube/item.json")
def test_validate_datacube(self) -> None:
item = pystac.Item.from_file(self.example_uri)
item.validate()
def test_extension_not_implemented(self) -> None:
# Should raise exception if Item does not include extension URI
item = pystac.Item.from_file(self.example_uri)
item.stac_extensions.remove(DatacubeExtension.get_schema_uri())
with self.assertRaises(pystac.ExtensionNotImplemented):
_ = DatacubeExtension.ext(item)
# Should raise exception if owning Item does not include extension URI
asset = item.assets["data"]
with self.assertRaises(pystac.ExtensionNotImplemented):
_ = DatacubeExtension.ext(asset)
# Should succeed if Asset has no owner
ownerless_asset = pystac.Asset.from_dict(asset.to_dict())
_ = DatacubeExtension.ext(ownerless_asset)
def test_item_ext_add_to(self) -> None:
item = pystac.Item.from_file(self.example_uri)
item.stac_extensions.remove(DatacubeExtension.get_schema_uri())
self.assertNotIn(DatacubeExtension.get_schema_uri(), item.stac_extensions)
_ = DatacubeExtension.ext(item, add_if_missing=True)
self.assertIn(DatacubeExtension.get_schema_uri(), item.stac_extensions)
def test_asset_ext_add_to(self) -> None:
item = pystac.Item.from_file(self.example_uri)
item.stac_extensions.remove(DatacubeExtension.get_schema_uri())
self.assertNotIn(DatacubeExtension.get_schema_uri(), item.stac_extensions)
asset = item.assets["data"]
_ = DatacubeExtension.ext(asset, add_if_missing=True)
self.assertIn(DatacubeExtension.get_schema_uri(), item.stac_extensions)
def test_should_raise_exception_when_passing_invalid_extension_object(
self,
) -> None:
self.assertRaisesRegex(
ExtensionTypeError,
r"^Datacube extension does not apply to type 'object'$",
DatacubeExtension.ext,
object(),
)
```
#### File: tests/extensions/test_version.py
```python
import datetime
import unittest
from typing import List, Optional
import pystac
from pystac import ExtensionTypeError
from pystac.extensions import version
from pystac.extensions.version import VersionExtension, VersionRelType
from tests.utils import TestCases
URL_TEMPLATE: str = "http://example.com/catalog/%s.json"
def make_item(year: int) -> pystac.Item:
"""Create basic test items that are only slightly different."""
asset_id = f"USGS/GAP/CONUS/{year}"
start = datetime.datetime(year, 1, 2)
item = pystac.Item(
id=asset_id, geometry=None, bbox=None, datetime=start, properties={}
)
item.set_self_href(URL_TEMPLATE % year)
VersionExtension.add_to(item)
return item
class VersionExtensionTest(unittest.TestCase):
def test_should_raise_exception_when_passing_invalid_extension_object(
self,
) -> None:
self.assertRaisesRegex(
ExtensionTypeError,
r"^Version extension does not apply to type 'object'$",
VersionExtension.ext,
object(),
)
class ItemVersionExtensionTest(unittest.TestCase):
version: str = "1.2.3"
def setUp(self) -> None:
super().setUp()
self.item = make_item(2011)
self.example_item_uri = TestCases.get_path("data-files/version/item.json")
def test_rel_types(self) -> None:
self.assertEqual(VersionRelType.LATEST.value, "latest-version")
self.assertEqual(VersionRelType.PREDECESSOR.value, "predecessor-version")
self.assertEqual(VersionRelType.SUCCESSOR.value, "successor-version")
def test_stac_extensions(self) -> None:
self.assertTrue(VersionExtension.has_extension(self.item))
def test_add_version(self) -> None:
VersionExtension.ext(self.item).apply(self.version)
self.assertEqual(self.version, VersionExtension.ext(self.item).version)
self.assertNotIn(version.DEPRECATED, self.item.properties)
self.assertFalse(VersionExtension.ext(self.item).deprecated)
self.item.validate()
def test_version_in_properties(self) -> None:
VersionExtension.ext(self.item).apply(self.version, deprecated=True)
self.assertIn(version.VERSION, self.item.properties)
self.assertIn(version.DEPRECATED, self.item.properties)
self.item.validate()
def test_add_not_deprecated_version(self) -> None:
VersionExtension.ext(self.item).apply(self.version, deprecated=False)
self.assertIn(version.DEPRECATED, self.item.properties)
self.assertFalse(VersionExtension.ext(self.item).deprecated)
self.item.validate()
def test_add_deprecated_version(self) -> None:
VersionExtension.ext(self.item).apply(self.version, deprecated=True)
self.assertIn(version.DEPRECATED, self.item.properties)
self.assertTrue(VersionExtension.ext(self.item).deprecated)
self.item.validate()
def test_latest(self) -> None:
year = 2013
latest = make_item(year)
VersionExtension.ext(self.item).apply(self.version, latest=latest)
latest_result = VersionExtension.ext(self.item).latest
self.assertIs(latest, latest_result)
expected_href = URL_TEMPLATE % year
link = self.item.get_links(VersionRelType.LATEST)[0]
self.assertEqual(expected_href, link.get_href())
self.item.validate()
def test_predecessor(self) -> None:
year = 2010
predecessor = make_item(year)
VersionExtension.ext(self.item).apply(self.version, predecessor=predecessor)
predecessor_result = VersionExtension.ext(self.item).predecessor
self.assertIs(predecessor, predecessor_result)
expected_href = URL_TEMPLATE % year
link = self.item.get_links(VersionRelType.PREDECESSOR)[0]
self.assertEqual(expected_href, link.get_href())
self.item.validate()
def test_successor(self) -> None:
year = 2012
successor = make_item(year)
VersionExtension.ext(self.item).apply(self.version, successor=successor)
successor_result = VersionExtension.ext(self.item).successor
self.assertIs(successor, successor_result)
expected_href = URL_TEMPLATE % year
link = self.item.get_links(VersionRelType.SUCCESSOR)[0]
self.assertEqual(expected_href, link.get_href())
self.item.validate()
def test_fail_validate(self) -> None:
with self.assertRaises(pystac.STACValidationError):
self.item.validate()
def test_all_links(self) -> None:
deprecated = True
latest = make_item(2013)
predecessor = make_item(2010)
successor = make_item(2012)
VersionExtension.ext(self.item).apply(
self.version, deprecated, latest, predecessor, successor
)
self.item.validate()
def test_full_copy(self) -> None:
cat = TestCases.test_case_1()
# Fetch two items from the catalog
item1 = cat.get_item("area-1-1-imagery", recursive=True)
item2 = cat.get_item("area-2-2-imagery", recursive=True)
assert item1 is not None
assert item2 is not None
# Enable the version extension on each, and link them
# as if they are different versions of the same Item
VersionExtension.add_to(item1)
VersionExtension.add_to(item2)
VersionExtension.ext(item1).apply(version="2.0", predecessor=item2)
VersionExtension.ext(item2).apply(version="1.0", successor=item1, latest=item1)
# Make a full copy of the catalog
cat_copy = cat.full_copy()
# Retrieve the copied version of the items
item1_copy = cat_copy.get_item("area-1-1-imagery", recursive=True)
assert item1_copy is not None
item2_copy = cat_copy.get_item("area-2-2-imagery", recursive=True)
assert item2_copy is not None
# Check to see if the version links point to the instances of the
# item objects as they should.
predecessor = item1_copy.get_single_link(VersionRelType.PREDECESSOR)
assert predecessor is not None
predecessor_target = predecessor.target
successor = item2_copy.get_single_link(VersionRelType.SUCCESSOR)
assert successor is not None
successor_target = successor.target
latest = item2_copy.get_single_link(VersionRelType.LATEST)
assert latest is not None
latest_target = latest.target
self.assertIs(predecessor_target, item2_copy)
self.assertIs(successor_target, item1_copy)
self.assertIs(latest_target, item1_copy)
def test_setting_none_clears_link(self) -> None:
deprecated = False
latest = make_item(2013)
predecessor = make_item(2010)
successor = make_item(2012)
VersionExtension.ext(self.item).apply(
self.version, deprecated, latest, predecessor, successor
)
VersionExtension.ext(self.item).latest = None
links = self.item.get_links(VersionRelType.LATEST)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.item).latest)
VersionExtension.ext(self.item).predecessor = None
links = self.item.get_links(VersionRelType.PREDECESSOR)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.item).predecessor)
VersionExtension.ext(self.item).successor = None
links = self.item.get_links(VersionRelType.SUCCESSOR)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.item).successor)
def test_multiple_link_setting(self) -> None:
deprecated = False
latest1 = make_item(2013)
predecessor1 = make_item(2010)
successor1 = make_item(2012)
VersionExtension.ext(self.item).apply(
self.version, deprecated, latest1, predecessor1, successor1
)
year = 2015
latest2 = make_item(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.item).latest = latest2
links = self.item.get_links(VersionRelType.LATEST)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
year = 2009
predecessor2 = make_item(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.item).predecessor = predecessor2
links = self.item.get_links(VersionRelType.PREDECESSOR)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
year = 2014
successor2 = make_item(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.item).successor = successor2
links = self.item.get_links(VersionRelType.SUCCESSOR)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
def test_extension_not_implemented(self) -> None:
# Should raise exception if Item does not include extension URI
item = pystac.Item.from_file(self.example_item_uri)
item.stac_extensions.remove(VersionExtension.get_schema_uri())
with self.assertRaises(pystac.ExtensionNotImplemented):
_ = VersionExtension.ext(item)
def test_ext_add_to(self) -> None:
item = pystac.Item.from_file(self.example_item_uri)
item.stac_extensions.remove(VersionExtension.get_schema_uri())
self.assertNotIn(VersionExtension.get_schema_uri(), item.stac_extensions)
_ = VersionExtension.ext(item, add_if_missing=True)
self.assertIn(VersionExtension.get_schema_uri(), item.stac_extensions)
def make_collection(year: int) -> pystac.Collection:
asset_id = f"my/collection/of/things/{year}"
start = datetime.datetime(2014, 8, 10)
end = datetime.datetime(year, 1, 3, 4, 5)
bboxes = [[-180.0, -90.0, 180.0, 90.0]]
spatial_extent = pystac.SpatialExtent(bboxes)
intervals: List[List[Optional[datetime.datetime]]] = [[start, end]]
temporal_extent = pystac.TemporalExtent(intervals)
extent = pystac.Extent(spatial_extent, temporal_extent)
collection = pystac.Collection(asset_id, "desc", extent)
collection.set_self_href(URL_TEMPLATE % year)
VersionExtension.add_to(collection)
return collection
class CollectionVersionExtensionTest(unittest.TestCase):
version: str = "1.2.3"
def setUp(self) -> None:
super().setUp()
self.collection = make_collection(2011)
self.example_collection_uri = TestCases.get_path(
"data-files/version/collection.json"
)
def test_stac_extensions(self) -> None:
self.assertTrue(VersionExtension.has_extension(self.collection))
def test_add_version(self) -> None:
VersionExtension.ext(self.collection).apply(self.version)
self.assertEqual(self.version, VersionExtension.ext(self.collection).version)
self.assertNotIn(version.DEPRECATED, self.collection.extra_fields)
self.assertFalse(VersionExtension.ext(self.collection).deprecated)
self.collection.validate()
def test_version_deprecated(self) -> None:
VersionExtension.ext(self.collection).apply(self.version, deprecated=True)
self.assertIn(version.VERSION, self.collection.extra_fields)
self.assertIn(version.DEPRECATED, self.collection.extra_fields)
self.collection.validate()
def test_add_not_deprecated_version(self) -> None:
VersionExtension.ext(self.collection).apply(self.version, deprecated=False)
self.assertIn(version.DEPRECATED, self.collection.extra_fields)
self.assertFalse(VersionExtension.ext(self.collection).deprecated)
self.collection.validate()
def test_add_deprecated_version(self) -> None:
VersionExtension.ext(self.collection).apply(self.version, deprecated=True)
self.assertIn(version.DEPRECATED, self.collection.extra_fields)
self.assertTrue(VersionExtension.ext(self.collection).deprecated)
self.collection.validate()
def test_latest(self) -> None:
year = 2013
latest = make_collection(year)
VersionExtension.ext(self.collection).apply(self.version, latest=latest)
latest_result = VersionExtension.ext(self.collection).latest
self.assertIs(latest, latest_result)
expected_href = URL_TEMPLATE % year
link = self.collection.get_links(VersionRelType.LATEST)[0]
self.assertEqual(expected_href, link.get_href())
self.collection.validate()
def test_predecessor(self) -> None:
year = 2010
predecessor = make_collection(year)
VersionExtension.ext(self.collection).apply(
self.version, predecessor=predecessor
)
predecessor_result = VersionExtension.ext(self.collection).predecessor
self.assertIs(predecessor, predecessor_result)
expected_href = URL_TEMPLATE % year
link = self.collection.get_links(VersionRelType.PREDECESSOR)[0]
self.assertEqual(expected_href, link.get_href())
self.collection.validate()
def test_successor(self) -> None:
year = 2012
successor = make_collection(year)
VersionExtension.ext(self.collection).apply(self.version, successor=successor)
successor_result = VersionExtension.ext(self.collection).successor
self.assertIs(successor, successor_result)
expected_href = URL_TEMPLATE % year
link = self.collection.get_links(VersionRelType.SUCCESSOR)[0]
self.assertEqual(expected_href, link.get_href())
self.collection.validate()
def test_fail_validate(self) -> None:
with self.assertRaises(pystac.STACValidationError):
self.collection.validate()
def test_validate_all(self) -> None:
deprecated = True
latest = make_collection(2013)
predecessor = make_collection(2010)
successor = make_collection(2012)
VersionExtension.ext(self.collection).apply(
self.version, deprecated, latest, predecessor, successor
)
self.collection.validate()
def test_full_copy(self) -> None:
cat = TestCases.test_case_1()
# Fetch two collections from the catalog
col1 = cat.get_child("area-1-1", recursive=True)
assert isinstance(col1, pystac.Collection)
col2 = cat.get_child("area-2-2", recursive=True)
assert isinstance(col2, pystac.Collection)
# Enable the version extension on each, and link them
# as if they are different versions of the same Collection
VersionExtension.add_to(col1)
VersionExtension.add_to(col2)
VersionExtension.ext(col1).apply(version="2.0", predecessor=col2)
VersionExtension.ext(col2).apply(version="1.0", successor=col1, latest=col1)
# Make a full copy of the catalog
cat_copy = cat.full_copy()
# Retrieve the copied version of the items
col1_copy = cat_copy.get_child("area-1-1", recursive=True)
assert col1_copy is not None
col2_copy = cat_copy.get_child("area-2-2", recursive=True)
assert col2_copy is not None
# Check to see if the version links point to the instances of the
# col objects as they should.
predecessor = col1_copy.get_single_link(VersionRelType.PREDECESSOR)
assert predecessor is not None
predecessor_target = predecessor.target
successor = col2_copy.get_single_link(VersionRelType.SUCCESSOR)
assert successor is not None
successor_target = successor.target
latest = col2_copy.get_single_link(VersionRelType.LATEST)
assert latest is not None
latest_target = latest.target
self.assertIs(predecessor_target, col2_copy)
self.assertIs(successor_target, col1_copy)
self.assertIs(latest_target, col1_copy)
def test_setting_none_clears_link(self) -> None:
deprecated = False
latest = make_collection(2013)
predecessor = make_collection(2010)
successor = make_collection(2012)
VersionExtension.ext(self.collection).apply(
self.version, deprecated, latest, predecessor, successor
)
VersionExtension.ext(self.collection).latest = None
links = self.collection.get_links(VersionRelType.LATEST)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.collection).latest)
VersionExtension.ext(self.collection).predecessor = None
links = self.collection.get_links(VersionRelType.PREDECESSOR)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.collection).predecessor)
VersionExtension.ext(self.collection).successor = None
links = self.collection.get_links(VersionRelType.SUCCESSOR)
self.assertEqual(0, len(links))
self.assertIsNone(VersionExtension.ext(self.collection).successor)
def test_multiple_link_setting(self) -> None:
deprecated = False
latest1 = make_collection(2013)
predecessor1 = make_collection(2010)
successor1 = make_collection(2012)
VersionExtension.ext(self.collection).apply(
self.version, deprecated, latest1, predecessor1, successor1
)
year = 2015
latest2 = make_collection(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.collection).latest = latest2
links = self.collection.get_links(VersionRelType.LATEST)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
year = 2009
predecessor2 = make_collection(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.collection).predecessor = predecessor2
links = self.collection.get_links(VersionRelType.PREDECESSOR)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
year = 2014
successor2 = make_collection(year)
expected_href = URL_TEMPLATE % year
VersionExtension.ext(self.collection).successor = successor2
links = self.collection.get_links(VersionRelType.SUCCESSOR)
self.assertEqual(1, len(links))
self.assertEqual(expected_href, links[0].get_href())
def test_extension_not_implemented(self) -> None:
# Should raise exception if Collection does not include extension URI
collection = pystac.Collection.from_file(self.example_collection_uri)
collection.stac_extensions.remove(VersionExtension.get_schema_uri())
with self.assertRaises(pystac.ExtensionNotImplemented):
_ = VersionExtension.ext(collection)
def test_ext_add_to(self) -> None:
collection = pystac.Collection.from_file(self.example_collection_uri)
collection.stac_extensions.remove(VersionExtension.get_schema_uri())
self.assertNotIn(VersionExtension.get_schema_uri(), collection.stac_extensions)
_ = VersionExtension.ext(collection, add_if_missing=True)
self.assertIn(VersionExtension.get_schema_uri(), collection.stac_extensions)
```
#### File: tests/serialization/test_identify.py
```python
import unittest
import pystac
from pystac.cache import CollectionCache
from pystac.serialization import (
identify_stac_object,
identify_stac_object_type,
merge_common_properties,
)
from pystac.serialization.identify import STACVersionRange, STACVersionID
from tests.utils import TestCases
class IdentifyTest(unittest.TestCase):
def setUp(self) -> None:
self.examples = TestCases.get_examples_info()
def test_identify(self) -> None:
collection_cache = CollectionCache()
for example in self.examples:
with self.subTest(example.path):
path = example.path
d = pystac.StacIO.default().read_json(path)
if identify_stac_object_type(d) == pystac.STACObjectType.ITEM:
merge_common_properties(
d, json_href=path, collection_cache=collection_cache
)
actual = identify_stac_object(d)
# Explicitly cover __repr__ functions in tests
str_info = str(actual)
self.assertIsInstance(str_info, str)
msg = "Failed {}:".format(path)
self.assertEqual(actual.object_type, example.object_type, msg=msg)
version_contained_in_range = actual.version_range.contains(
example.stac_version
)
self.assertTrue(version_contained_in_range, msg=msg)
self.assertEqual(
set(actual.extensions), set(example.extensions), msg=msg
)
def test_identify_non_stac_type(self) -> None:
plain_feature_dict = {
"type": "Feature",
"properties": {},
"geometry": {"type": "Point", "coordinates": [0, 0]},
}
self.assertIsNone(identify_stac_object_type(plain_feature_dict))
def test_identify_invalid_stac_object_with_version(self) -> None:
# Has stac_version but is not a valid STAC object
invalid_dict = {
"id": "concepts",
"title": "Concepts catalogs",
"links": [
{
"rel": "self",
"type": "application/json",
"href": "https://tamn.snapplanet.io/catalogs/concepts",
},
{
"rel": "root",
"type": "application/json",
"href": "https://tamn.snapplanet.io",
},
],
"stac_version": "1.0.0",
}
with self.assertRaises(pystac.STACTypeError) as ctx:
identify_stac_object(invalid_dict)
self.assertIn("JSON does not represent a STAC object", str(ctx.exception))
def test_identify_non_stac_raises_error(self) -> None:
plain_feature_dict = {
"type": "Feature",
"properties": {},
"geometry": {"type": "Point", "coordinates": [0, 0]},
}
with self.assertRaises(pystac.STACTypeError) as ctx:
identify_stac_object(plain_feature_dict)
self.assertIn("JSON does not represent a STAC object", str(ctx.exception))
def test_identify_invalid_with_stac_version(self) -> None:
not_stac = {"stac_version": "0.9.0", "type": "Custom"}
self.assertIsNone(identify_stac_object_type(not_stac))
class VersionTest(unittest.TestCase):
def test_version_ordering(self) -> None:
self.assertEqual(STACVersionID("0.9.0"), STACVersionID("0.9.0"))
self.assertFalse(STACVersionID("0.9.0") < STACVersionID("0.9.0"))
self.assertFalse(STACVersionID("0.9.0") != STACVersionID("0.9.0"))
self.assertFalse(STACVersionID("0.9.0") > STACVersionID("0.9.0"))
self.assertTrue(STACVersionID("1.0.0-beta.2") < "1.0.0")
self.assertTrue(STACVersionID("0.9.1") > "0.9.0")
self.assertFalse(STACVersionID("0.9.0") > "0.9.0")
self.assertTrue(STACVersionID("0.9.0") <= "0.9.0")
self.assertTrue(STACVersionID("1.0.0-beta.1") <= STACVersionID("1.0.0-beta.2"))
self.assertFalse(STACVersionID("1.0.0") < STACVersionID("1.0.0-beta.2"))
def test_version_range_ordering(self) -> None:
version_range = STACVersionRange("0.9.0", "1.0.0-beta.2")
self.assertIsInstance(str(version_range), str)
self.assertTrue(version_range.contains("1.0.0-beta.1"))
self.assertFalse(version_range.contains("1.0.0"))
self.assertTrue(version_range.is_later_than("0.8.9"))
version_range = STACVersionRange("0.9.0", "1.0.0-beta.1")
self.assertFalse(version_range.contains("1.0.0-beta.2"))
version_range = STACVersionRange(min_version="0.6.0-rc1", max_version="0.9.0")
self.assertTrue(version_range.contains("0.9.0"))
```
#### File: pystac/tests/test_item_collection.py
```python
from copy import deepcopy
import json
from pystac.item_collection import ItemCollection
import unittest
from os.path import relpath
import pystac
from tests.utils import TestCases
class TestItemCollection(unittest.TestCase):
SIMPLE_ITEM = TestCases.get_path("data-files/examples/1.0.0-RC1/simple-item.json")
CORE_ITEM = TestCases.get_path("data-files/examples/1.0.0-RC1/core-item.json")
EXTENDED_ITEM = TestCases.get_path(
"data-files/examples/1.0.0-RC1/extended-item.json"
)
ITEM_COLLECTION = TestCases.get_path(
"data-files/item-collection/sample-item-collection.json"
)
def setUp(self) -> None:
self.maxDiff = None
with open(self.ITEM_COLLECTION) as src:
self.item_collection_dict = json.load(src)
self.items = [
pystac.Item.from_dict(f) for f in self.item_collection_dict["features"]
]
self.stac_io = pystac.StacIO.default()
def test_item_collection_length(self) -> None:
item_collection = pystac.ItemCollection(items=self.items)
self.assertEqual(len(item_collection), len(self.items))
def test_item_collection_iter(self) -> None:
expected_ids = [item.id for item in self.items]
item_collection = pystac.ItemCollection(items=self.items)
actual_ids = [item.id for item in item_collection]
self.assertListEqual(expected_ids, actual_ids)
def test_item_collection_get_item_by_index(self) -> None:
expected_id = self.items[0].id
item_collection = pystac.ItemCollection(items=self.items)
self.assertEqual(item_collection[0].id, expected_id)
def test_item_collection_contains(self) -> None:
item = pystac.Item.from_file(self.SIMPLE_ITEM)
item_collection = pystac.ItemCollection(items=[item])
self.assertIn(item, item_collection)
def test_item_collection_extra_fields(self) -> None:
item_collection = pystac.ItemCollection(
items=self.items, extra_fields={"custom_field": "My value"}
)
self.assertEqual(item_collection.extra_fields.get("custom_field"), "My value")
def test_item_collection_to_dict(self) -> None:
item_collection = pystac.ItemCollection(
items=self.items, extra_fields={"custom_field": "My value"}
)
d = item_collection.to_dict()
self.assertEqual(len(d["features"]), len(self.items))
self.assertEqual(d.get("custom_field"), "My value")
def test_item_collection_from_dict(self) -> None:
features = [item.to_dict() for item in self.items]
d = {
"type": "FeatureCollection",
"features": features,
"custom_field": "My value",
}
item_collection = pystac.ItemCollection.from_dict(d)
expected = len(features)
self.assertEqual(expected, len(item_collection.items))
self.assertEqual(item_collection.extra_fields.get("custom_field"), "My value")
def test_clone_item_collection(self) -> None:
item_collection_1 = pystac.ItemCollection.from_file(self.ITEM_COLLECTION)
item_collection_2 = item_collection_1.clone()
item_ids_1 = [item.id for item in item_collection_1]
item_ids_2 = [item.id for item in item_collection_2]
# All items from the original collection should be in the clone...
self.assertListEqual(item_ids_1, item_ids_2)
# ... but they should not be the same objects
self.assertIsNot(item_collection_1[0], item_collection_2[0])
def test_raise_error_for_invalid_object(self) -> None:
item_dict = self.stac_io.read_json(self.SIMPLE_ITEM)
with self.assertRaises(pystac.STACTypeError):
_ = pystac.ItemCollection.from_dict(item_dict)
def test_from_relative_path(self) -> None:
_ = pystac.ItemCollection.from_file(
relpath(
TestCases.get_path(
"data-files/item-collection/sample-item-collection.json"
)
)
)
def test_from_list_of_dicts(self) -> None:
item_dict = self.stac_io.read_json(self.SIMPLE_ITEM)
item_collection = pystac.ItemCollection(items=[item_dict])
self.assertEqual(item_collection[0].id, item_dict.get("id"))
def test_add_item_collections(self) -> None:
item_1 = pystac.Item.from_file(self.SIMPLE_ITEM)
item_2 = pystac.Item.from_file(self.EXTENDED_ITEM)
item_3 = pystac.Item.from_file(self.CORE_ITEM)
item_collection_1 = pystac.ItemCollection(items=[item_1, item_2])
item_collection_2 = pystac.ItemCollection(items=[item_2, item_3])
combined = item_collection_1 + item_collection_2
self.assertEqual(len(combined), 3)
def test_add_other_raises_error(self) -> None:
item_collection = pystac.ItemCollection.from_file(self.ITEM_COLLECTION)
with self.assertRaises(TypeError):
_ = item_collection + 2
def test_identify_0_8_itemcollection_type(self) -> None:
itemcollection_path = TestCases.get_path(
"data-files/examples/0.8.1/item-spec/"
"examples/itemcollection-sample-full.json"
)
itemcollection_dict = pystac.StacIO.default().read_json(itemcollection_path)
self.assertTrue(
pystac.ItemCollection.is_item_collection(itemcollection_dict),
msg="Did not correctly identify valid STAC 0.8 ItemCollection.",
)
def test_identify_0_9_itemcollection(self) -> None:
itemcollection_path = TestCases.get_path(
"data-files/examples/0.9.0/item-spec/"
"examples/itemcollection-sample-full.json"
)
itemcollection_dict = pystac.StacIO.default().read_json(itemcollection_path)
self.assertTrue(
pystac.ItemCollection.is_item_collection(itemcollection_dict),
msg="Did not correctly identify valid STAC 0.9 ItemCollection.",
)
def test_from_dict_preserves_dict(self) -> None:
param_dict = deepcopy(self.item_collection_dict)
# test that the parameter is preserved
_ = ItemCollection.from_dict(param_dict)
self.assertEqual(param_dict, self.item_collection_dict)
# assert that the parameter is not preserved with
# non-default parameter
_ = ItemCollection.from_dict(param_dict, preserve_dict=False)
self.assertNotEqual(param_dict, self.item_collection_dict)
```
#### File: tests/utils/stac_io_mock.py
```python
from typing import Any, Union
from unittest.mock import Mock
import pystac
class MockStacIO(pystac.StacIO):
"""Creates a mock that records StacIO calls for testing and allows
clients to replace StacIO functionality, all within a context scope.
"""
def __init__(self) -> None:
self.mock = Mock()
def read_text(
self, source: Union[str, pystac.Link], *args: Any, **kwargs: Any
) -> str:
self.mock.read_text(source)
return pystac.StacIO.default().read_text(source)
def write_text(
self, dest: Union[str, pystac.Link], txt: str, *args: Any, **kwargs: Any
) -> None:
self.mock.write_text(dest, txt)
pystac.StacIO.default().write_text(dest, txt)
```
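A minimal sketch of exercising the recording behavior (the JSON path is hypothetical and must point to a real file for the pass-through read to succeed):
```python
from tests.utils.stac_io_mock import MockStacIO

mock_io = MockStacIO()
# the call delegates to the default StacIO and records the href on the mock
_ = mock_io.read_text("data-files/item-collection/sample-item-collection.json")
mock_io.mock.read_text.assert_called_once_with(
    "data-files/item-collection/sample-item-collection.json"
)
```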
#### File: tests/validation/test_schema_uri_map.py
```python
import unittest
import pystac
from pystac.validation.schema_uri_map import DefaultSchemaUriMap
class SchemaUriMapTest(unittest.TestCase):
def test_gets_schema_uri_for_old_version(self) -> None:
d = DefaultSchemaUriMap()
uri = d.get_object_schema_uri(pystac.STACObjectType.ITEM, "0.8.0")
self.assertEqual(
uri,
"https://raw.githubusercontent.com/radiantearth/stac-spec/v0.8.0/"
"item-spec/json-schema/item.json",
)
``` |
{
"source": "jisantuc/raster-foundry",
"score": 2
} |
#### File: rf/commands/update_aoi_project.py
```python
import logging
import subprocess
import click
from ..utils.exception_reporting import wrap_rollbar
logger = logging.getLogger(__name__)
@click.command(name='update-aoi-project')
@click.argument('project_id')
@wrap_rollbar
def update_aoi_project(project_id):
"""Search for and add any new scenes to a given project
Args:
project_id (str): ID of project to check for new scenes to add
"""
bash_cmd = [
'java', '-cp', '/opt/raster-foundry/jars/batch-assembly.jar',
'com.rasterfoundry.batch.Main', 'update_aoi_project', project_id
]
exit_code = subprocess.call(bash_cmd)
logger.info('Checking whether %s has updated scenes available', project_id)
is_success = exit_code == 0
if is_success:
logger.info('Successfully completed project %s update', project_id)
else:
raise Exception('Update of project %s failed', project_id)
return is_success
``` |
{
"source": "jisantuc/raster-vision",
"score": 3
} |
#### File: pytorch_backend/examples/test_utils.py
```python
import unittest
from shapely.geometry import Polygon
from rastervision.pytorch_backend.examples.utils import read_stac
from tests import data_file_path
class TestUtils(unittest.TestCase):
def test_read_stac(self):
expected_keys = {
'label_uri': str,
'image_uris': list,
'label_bbox': Polygon,
'image_bbox': (type(None), Polygon),
'bboxes_intersect': bool,
'aoi_geometry': (type(None), dict)
}
zip_path = data_file_path('catalog.zip')
out = read_stac(zip_path)
# check for correctness of format
self.assertIsInstance(out, list)
for o in out:
self.assertIsInstance(o, dict)
for k, v in o.items():
self.assertTrue(k in expected_keys)
self.assertIsInstance(v, expected_keys[k])
for uri in o['image_uris']:
self.assertIsInstance(uri, str)
# check for correctness of content (WRT the test catalog)
self.assertEqual(len(out), 1)
self.assertEqual(len(out[0]['image_uris']), 1)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jisantuc/redistricting-graph-sim",
"score": 3
} |
#### File: redistricting-graph-sim/src/io.py
```python
from collections import Counter
import random
from uuid import uuid4
import fiona
from fiona.crs import from_epsg
import numpy as np
from numpy import float64, int64, object_
import pandas as pd
from shapely.geometry import mapping
from adjacency_graphs.algorithms import TwoStepGraph
from unify_shapes import unify_shapes
visited = set()
def row_to_geojson(row, geometry_col='geometry'):
"""Convert a row of data into a geojson record
Args:
row (pd.Series): the row to write
geometry_col (str): the column name containing the geometry
Returns:
dict
"""
geom_dict = mapping(row[geometry_col])
properties = {
k: v
for k, v in row.to_dict().iteritems() if k != geometry_col
}
properties[u'GEOID'] = row.name
return {
'geometry': geom_dict,
'properties': properties,
# The index value of a series is the series' name
'id': row.name
}
def schema_from_graph(mggg_graph, geometry_col='geometry'):
"""Create a schema for fiona to use with output from a graph
Args:
mggg_graph (MgggGraph): the graph to create the schema from
geometry_col (str): the column name of the records' geometry column
Returns:
dict
"""
type_lookup = {float64: 'float:15.2', int64: 'int:64', object_: 'str'}
dtypes = mggg_graph.shape_df.dtypes
schema = {
'properties': {
k: type_lookup[v.type]
for k, v in dtypes.to_dict().iteritems() if k != geometry_col
}
}
schema['properties'][u'GEOID'] = 'str'
schema[geometry_col] = 'Polygon'
return schema
def write_graph(path, mggg_graph, geometry_col='geometry'):
"""Write an MgggGraph to a shapefile
Args:
path (str): path to write the output file to
mggg_graph (MgggGraph): the graph to write
Returns:
None
"""
records = [
row_to_geojson(row, geometry_col)
for _, row in mggg_graph.shape_df.iterrows()
]
schema = schema_from_graph(mggg_graph, geometry_col)
with fiona.open(
path, 'w', driver='ESRI Shapefile', schema=schema,
crs=from_epsg(4326)) as sink:
for record in records:
sink.write(record)
def load_graph(shape_path):
"""Return an adjacency graph from shape_path with geoid in GEOID
GEOID _must_ be unique (sort of obviously), otherwise the adjacency
graph construction will drop some records, which is bad.
Args:
shape_path (str): path to the input shapefile
"""
return TwoStepGraph(shape_path, 'GEOID')
def find_start(gr, x_column, y_column, iteration=0):
filtered = gr.shape_df[pd.isnull(gr.shape_df['DISTRICT'])
& ~(gr.shape_df.index.isin(visited))]
if iteration % 6 == 0:
ind = (filtered[x_column]**2 + filtered[y_column]**2).idxmin()
elif iteration % 6 == 1:
ind = (filtered[x_column]**2 + filtered[y_column]**2).idxmax()
elif iteration % 6 == 2:
ind = filtered[x_column].idxmax()
elif iteration % 6 == 3:
ind = filtered[x_column].idxmin()
elif iteration % 6 == 4:
ind = filtered[y_column].idxmax()
else:
ind = filtered[y_column].idxmin()
return ind
def fill_district(district_id, mggg_graph, df, start, target_pop, upper_bound,
population_col):
# Get all the neighbors for the district
neighbors = mggg_graph.neighbors[start]
# Get the start polygon at the "corner" index
record = mggg_graph.get_vertex_attrs(start)
record['DISTRICT'] = district_id
total_pop = record[population_col]
keep_going = True
while keep_going:
# Calculate available population neighboring this district
# .query('@pd.isnull(DISTRICT)') evaluates the dataframe object it's
# called on for whether the DISTRICT column is null without having
# had to name the intermediate filtered dataframe
available_pop = (df.loc[neighbors].query('@pd.isnull(DISTRICT)')[
population_col].sum())
# Add all the neighbors to this district if the population is below
# target
if total_pop + available_pop < target_pop:
df.loc[neighbors, 'DISTRICT'] = district_id
neighbor_ids = reduce(lambda x, y: x | y,
[mggg_graph.neighbors[x] for x in neighbors])
neighbors = df.loc[neighbor_ids].query(
'@pd.isnull(DISTRICT)').index.tolist()
total_pop += available_pop
# Otherwise, add some of the neighbors until the target population is
# approximately reached
else:
ordered = df.loc[neighbors].query(
'@pd.isnull(DISTRICT)').sort_values(population_col)
ordered_idx = ordered.index
for ind in ordered_idx:
this_pop = ordered.loc[ind, population_col]
# if under target population after adding this one, definitely
# add this one to the district
if this_pop + total_pop < target_pop:
df.loc[[ind], 'DISTRICT'] = district_id
total_pop += this_pop
# if over the target population after adding this one but
# within the tolerance, flip a coin to decide whether to add
# this one
elif (target_pop < this_pop + total_pop < upper_bound
and random.random() > 0.5):
df.loc[[ind], 'DISTRICT'] = district_id
total_pop += this_pop
keep_going = False
break
# This should only be reached in practice when we opt out of
# adding the district in the check above but would also fall
# through when the geometry in question would add too much
# population I guess
else:
keep_going = False
break
def build_districts(mggg_graph,
n_districts,
population_col='TOTAL_POP',
x_column='CENTROID_XCOORDINATES',
y_column='CENTROID_YCOORDINATES',
tolerance=0.01):
"""Assign all geometries in the loaded graph to a district
"""
# strategy
# x start in a corner
# get populations for all neighbors
# figure out closest it's possible to get to target population with
# available neighbors add units from bottom to top until either no more
# units or target population reached
# Set a more convenient reference for the shape dataframe
df = mggg_graph.shape_df
# Calculate the equipopulation targets
target_pop = df[population_col].sum() / n_districts
upper_bound = target_pop * (1 + tolerance)
# Initialize the DISTRICT column with nulls
df['DISTRICT'] = np.nan
# Find a corner by getting the index that's least far from 0
# It might not end up being a literal corner, but it will probably be a
# polygon on the edge of the set of polygons
counter = 0
tries_at_numbers = Counter([])
    while len(df['DISTRICT'].unique()) < n_districts:
# Choose a random ID
district_id = str(uuid4())
try:
corner = find_start(mggg_graph, x_column, y_column, counter)
except ValueError:
break
try:
fill_district(district_id, mggg_graph, df, corner, target_pop,
upper_bound, population_col)
except TypeError:
df['DISTRICT'] = df['DISTRICT'].map(
lambda x: np.nan if x == district_id else x)
length_of_dist = len(df['DISTRICT'].unique())
tries_at_numbers[length_of_dist] += 1
if tries_at_numbers[length_of_dist] > 50:
percentage_filled = '%.2f' % ((
1 - pd.isnull(df['DISTRICT']).mean()) * 100)
df['DISTRICT'] = np.nan
tries_at_numbers = Counter([])
                print 'Starting over after %s districts (%s%% filled)...' % (length_of_dist, percentage_filled)
global visited
visited |= {corner}
finally:
counter += 1
def main():
gr = unify_shapes(load_graph('data/unique_id.shp'))
try:
build_districts(gr, 15)
except KeyboardInterrupt:
return gr
return gr
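# Example usage (a sketch; the output shapefile path is hypothetical):
# gr = main()
# write_graph('data/districts.shp', gr)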
``` |
{
"source": "jisantuc/UselessMap",
"score": 2
} |
#### File: jisantuc/UselessMap/map_maker.py
```python
import time
import numpy as np
from bokeh.io import curdoc
from bokeh.models import HoverTool, HBox, VBox, Slider, Toggle
from bokeh.plotting import figure, show, ColumnDataSource
from bokeh.sampledata.us_states import data as states
from bokeh.palettes import Purples9
states = {
code: state for code, state in states.items() if
code not in ['HI', 'AK']
}
def gen_initial_rate(y):
return min(
np.random.choice([15, 40]) + np.random.uniform(-10, 10),
100
)
state_xs = [state['lons'] for state in states.values()]
state_ys = [state['lats'] for state in states.values()]
colors = Purples9[::-1]
names = [state['name'] for state in states.values()]
initial_rates = [gen_initial_rate(1) for _ in states.values()]
state_colors = [colors[int(rate / 20)] for rate in initial_rates]
source = ColumnDataSource(data=dict(
x=state_xs,
y=state_ys,
color=state_colors,
name=names,
rate=initial_rates
))
TOOLS=['hover']
p = figure(title='Algorithms Deployed, Iteration 0', tools=TOOLS,
plot_width=1440, plot_height=810)
patches = p.patches('x', 'y', source=source,
fill_color='color', fill_alpha=0.85,
line_color='white', line_width=0.5)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.yaxis.axis_line_color = None
p.xaxis.axis_line_color = None
hover = p.select_one(HoverTool)
hover.point_policy = 'follow_mouse'
hover.tooltips = [
('Name', '@name'),
('Score', '@rate')
]
counter = 0
def run(new):
global p, patches, colors, counter
for _ in range(slider.value):
counter += 1
data = patches.data_source.data.copy()
rates = np.random.uniform(0, 100, size=100).tolist()
color = [colors[2 + int(rate / 16.667)] for rate in rates]
p.title = 'Algorithms Deployed, Iteration: {}'.format(counter)
source.data['rate'] = rates
source.data['color'] = color
time.sleep(5)
toggle = Toggle(label='START')
toggle.on_click(run)
slider = Slider(name='N iterations to advance',
title='N iterations to advance',
start=5,
end=10000,
step=5,
value=500)
# set up layout
toggler = HBox(toggle)
inputs = VBox(toggler, slider)
# add to document
curdoc().add_root(HBox(inputs))
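# To serve the app (assumed filename map_maker.py):
#   bokeh serve --show map_maker.py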
``` |
{
"source": "jisantuc/workflow_examples",
"score": 2
} |
#### File: workflow_examples/wordcount/wordcount.py
```python
import os
from copy import deepcopy
import luigi
from luigi.parameter import Parameter
from luigi.s3 import S3Target, S3Client
from luigi.contrib.spark import SparkSubmitTask
import ssl
# allow bucket names with dots, see:
# https://github.com/boto/boto/issues/2836
_old_match_hostname = ssl.match_hostname
def _new_match_hostname(cert, hostname):
if hostname.endswith('.s3.amazonaws.com'):
pos = hostname.find('.s3.amazonaws.com')
hostname = hostname[:pos].replace('.', '') + hostname[pos:]
return _old_match_hostname(cert, hostname)
ssl.match_hostname = _new_match_hostname
VERSION = '0.0.1'
CLIENT = S3Client(os.getenv('AWS_ACCESS_KEY_ID'),
os.getenv('AWS_SECRET_ACCESS_KEY'))
class UploadFile(luigi.Task):
version = Parameter(default=VERSION)
input_file = Parameter(default='frankenstein.txt')
def run(self):
CLIENT.put(self.input_file,
's3://test.objects/' + self.input_file)
self.complete()
def complete(self):
if CLIENT.exists('s3://test.objects/' + self.input_file):
return True
return False
class CountWords(SparkSubmitTask):
version = Parameter(default=VERSION)
input_file = Parameter(default='frankenstein.txt')
# basic config options from
# https://github.com/spotify/luigi/blob/master/examples/pyspark_wc.py
driver_memory = '2g'
executor_memory = '3g'
total_executor_cores = luigi.IntParameter(100, significant=False)
name = 'SparkSubmit Word Count'
app = 'target/scala-2.10/word-count_2.10-1.0.jar'
def requires(self):
return UploadFile(version=self.version, input_file=self.input_file)
# define what to do with the output
def output(self):
return luigi.LocalTarget('wc_{}'.format(self.input_file))
def input(self):
return S3Target('s3n://test.objects/' + self.input_file)
def app_options(self):
return [self.input().path, self.output().path]
class RunAll(luigi.Task):
version = Parameter(default=VERSION)
input_file = Parameter(default='frankenstein.txt')
def requires(self):
return CountWords(version=self.version, input_file=self.input_file)
def output(self):
return S3Target('s3://test.objects/wc_' + self.input_file)
def run(self):
CLIENT.put('wc_' + self.input_file,
's3://test.objects/wc_' + self.input_file)
os.remove('wc_' + self.input_file)
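# Example invocation with the local scheduler (a sketch; assumes AWS
# credentials, the S3 bucket, and the Spark jar referenced above all exist):
#   python wordcount.py RunAll --local-scheduler --input-file frankenstein.txt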
if __name__ == '__main__':
luigi.run()
``` |
{
"source": "jisaw/termTime",
"score": 3
} |
#### File: jisaw/termTime/db_wrangler.py
```python
import psycopg2
class db_wrangler(object):
def __init__(self, dbname="", user=""):
self.dbname = dbname
self.user = user
        self.conn = psycopg2.connect("dbname=%s user=%s" % (dbname, user))
self.cur = self.conn.cursor()
    def initialize(self):
        self.cur.execute(
            "CREATE TABLE IF NOT EXISTS projects(id SERIAL, name varchar(64) not null, owner varchar(64) not null, pay money);"
        )
        self.cur.execute(
            "CREATE TABLE IF NOT EXISTS tSheets(id SERIAL, owner varchar(64), startDate date, endDate date, projectId int not null);"
        )
        self.cur.execute(
            "CREATE TABLE IF NOT EXISTS workSessions(id SERIAL, date date not null, hours float8 not null, startTime time not null, endTime time not null, tSheetId int not null);"
        )
        self.conn.commit()
def get_projects(self):
self.cur.execute(
"SELECT * FROM projects;"
)
return self.cur.fetchall()
def print_projects(self):
for i in self.get_projects():
print("id: %s\nname: %s\nowner: %s\npay: %s" % (i[0], i[1], i[2]))
print("*" * 10)
def get_tSheets(self):
self.cur.execute(
"SELECT * FROM tSheets;"
)
return self.cur.fetchall()
def print_tSheets(self):
for i in self.get_tSheets():
print("id: %s\nowner: %s\nstartDate: %s\nendDate: %s\nprojectId: %s" % (i[0], i[1], i[2], i[3], i[4]))
print("*" * 10)
def get_workSessions(self):
self.cur.execute(
"SELECT * FROM workSessions;"
)
return self.cur.fetchall()
def print_workSessions(self):
for i in self.get_workSessions():
print("id: %s\ndate: %s\nhours: %s\nstartTime: %s\nendTime: %s\ntSheetId: %s" % (i[0], i[1], i[2], i[3], i[4], i[5]))
print("*" * 10)
    def insert_project(self, name, owner, pay):
        self.cur.execute(
            "INSERT INTO projects (name, owner, pay) VALUES (%s, %s, %s)",
            (name, owner, pay)
        )
        self.conn.commit()
    def insert_tSheet(self, owner, startDate, endDate, projectId):
        self.cur.execute(
            "INSERT INTO tSheets (owner, startDate, endDate, projectId) VALUES (%s, %s, %s, %s)",
            (owner, startDate, endDate, projectId)
        )
        self.conn.commit()
    def insert_workSession(self, date, hours, startTime, endTime, tSheetId):
        self.cur.execute(
            "INSERT INTO workSessions (date, hours, startTime, endTime, tSheetId) VALUES (%s, %s, %s, %s, %s)",
            (date, hours, startTime, endTime, tSheetId)
        )
        self.conn.commit()
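# Example usage (a sketch; the PostgreSQL database name and role are
# hypothetical and must already exist):
# wrangler = db_wrangler(dbname="termtime", user="postgres")
# wrangler.initialize()
# wrangler.insert_project("side-project", "jisaw", 25.00)
# wrangler.print_projects()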
``` |
{
"source": "jisazaTappsi/mastermind",
"score": 4
} |
#### File: mastermind/shatter/comparison.py
```python
__author__ = '<NAME>'
class Comparison:
"""In charged of comparing variables and numbers."""
def __init__(self, a, b, operator):
self.a = a # number 1
self.b = b # number 2
self.operator = operator # '>=', '<=', '==' or 'and'
# TODO: reactivate this, but first fix bug when making intervals (check them they suck)
#self.simplify()
def __str__(self):
out = '{} {} {}'.format(self.a, self.operator, self.b)
if self.is_composite():
out = '({})'.format(out)
return out
def is_composite(self):
return isinstance(self.a, Comparison) and isinstance(self.b, Comparison) and self.operator == 'and'
def get_input(self):
"""
Returns the variable, it's a string
:return: string
"""
return self.a.a if self.is_composite() else self.a
def composite_has_opposing_operators(self):
"""
Composite Comparison Objects, that have opposing operators (<=, >=)
:return: Boolean
"""
return self.a.operator == '>=' and self.b.operator == '<=' or self.a.operator == '<=' and self.b.operator == '>='
def should_be_removed(self, my_range, percent_cut):
"""
Calculates whether the current variable is too particular to consider for the percent cut demanded.
:param my_range: absolute range of the variable
:param percent_cut: percentage of absolute range below which the variable should be dropped from dataframe.
:return: Boolean
"""
if self.is_composite() and self.composite_has_opposing_operators():
current_percentage = abs(self.a.b - self.b.b) / abs(my_range[0] - my_range[1])
return current_percentage < percent_cut
elif self.operator == '==' and percent_cut > 0: # if equality
return True
return False
def simplify(self):
"""
It simplifies expressions like: x2 >= 3.25 and x2 <= 3.25 >>> x2 == 3.25
Because this statement is just the single number x2 = 3.25
:return:
"""
# it simplifies to an equality
if self.is_composite() and self.a.b == self.b.b:
self.a = self.a.a
self.b = self.b.b
self.operator = '=='
```
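A minimal sketch of how `Comparison` objects compose (hand-written here; the variable name, bounds, and range are made up for illustration):
```python
from shatter.comparison import Comparison

low = Comparison('x2', 2.0, '>=')
high = Comparison('x2', 3.5, '<=')
interval = Comparison(low, high, 'and')  # composite: 2.0 <= x2 <= 3.5

print(interval)              # (x2 >= 2.0 and x2 <= 3.5)
print(interval.get_input())  # x2
# the interval spans 1.5 of the absolute range [0, 10], i.e. 15%,
# so it is dropped when the percent cut is 30%
print(interval.should_be_removed((0, 10), 0.3))  # True
```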
#### File: mastermind/shatter/float_input_helper.py
```python
from shatter.constants import *
from shatter.comparison import Comparison
__author__ = '<NAME>'
def get(l, idx, default):
try:
return l[idx]
except IndexError:
return default
def mean(a, b):
return (a+b)/2
def add_variable(variables, last_variable, input_var, output, last_input, last_output, input_name):
"""
Adds or append boolean variable
:param variables: array with boolean variables
:param last_variable: the previous variable on the outer for iteration.
:param input_var: input variable
:param output: output variable
:param last_input: the previous input variable on the outer for iteration.
:param last_output: the previous output variable on the outer for iteration.
:param input_name: string with name of input variable
:return: variables list
"""
if not output and last_output: # from 1 to 0
if last_variable is None or last_variable.operator == 'and': # starts new interval
variables.append(Comparison(input_name, mean(input_var, last_input), '<='))
else: # completes interval
# TODO: Make intervals the Pythonic may, eg: 2.5 < b < 5.5
comp2 = Comparison(input_name, mean(input_var, last_input), '<=')
variables[-1] = Comparison(variables[-1], comp2, 'and')
elif output and not last_output: # from 0 to 1
on_first_var = len(variables) == 1
# or len(variables) > 0
if last_variable is None or last_variable.operator == 'and' or on_first_var: # starts new interval
variables.append(Comparison(input_name, mean(input_var, last_input), '>='))
else: # completes interval
comp2 = Comparison(input_name, mean(input_var, last_input), '>=')
variables[-1] = Comparison(variables[-1], comp2, 'and')
return variables
def get_variables(df, an_input):
"""
Given a DataFrame and an_input it returns the associated conditions as variables.
:param df: DataFrame
:param an_input: string with an input.
:return: list containing strings. Each string is a condition as well as a variable of the QM problem
"""
variables = []
last_output = None
last_input = None
for idx, row in df.iterrows():
input_var = row[an_input]
output = row[KEYWORDS[OUTPUT]]
last_variable = get(variables, -1, None)
if last_output is not None:
variables = add_variable(variables, last_variable, input_var, output, last_input, last_output, an_input)
last_output = output
last_input = input_var
return variables
```
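A minimal sketch of `get_variables` on a tiny DataFrame (the column name `x` and the data are made up; `KEYWORDS[OUTPUT]` comes from `shatter.constants` and is assumed to resolve to the output column name):
```python
import pandas as pd
from shatter.constants import KEYWORDS, OUTPUT
from shatter.float_input_helper import get_variables

df = pd.DataFrame({
    'x': [0.0, 1.0, 2.0, 3.0, 4.0],
    KEYWORDS[OUTPUT]: [0, 0, 1, 1, 0],
})
# the output flips 0 -> 1 between x=1.0 and x=2.0 and back 1 -> 0 between
# x=3.0 and x=4.0, so a single interval variable is produced
print([str(v) for v in get_variables(df, 'x')])  # ['(x >= 1.5 and x <= 3.5)']
```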
#### File: mastermind/shatter/processed_rules.py
```python
from shatter.code import Code
from shatter.constants import *
from shatter.rules import *
from shatter.util.frozen_dict import FrozenDict
__author__ = '<NAME>'
class ProcessedRules:
"""
Has 2 properties, the 'default' value of the output and the 'tables'.
Tables is a dict() where each (key, value) pair are a truth table. Tables have:
Keys = possible function outputs.
Values = Are the tables represented with lists containing tuples eg:
>>> [(True, False), (False, True)]
These tuples are rows of the truth table where the function should return the output value (the key).
So we have a collection of tables; each one with its own output as key.
Example:
>>> ProcessedRules().tables
is of the form:
>>> {1: [(True, Code('1==3')), (False, False)], 2: [(False, True), (True, False)]}
    In this case 1 and 2 are the outputs, while (True, Code('1==3')), (False, False) are the rows of the truth
    table, ie the cases where 1 should be returned.
"""
def __init__(self, tables=FrozenDict(), default=False):
self.tables = tables
self.default = default
def get_default_output(rules):
"""
Gets the default value by iterating over all rows until a default word is caught.
:param rules: a Rules obj.
    :return: the default value, or False if none is found or if rules is not a Rules obj.
"""
if isinstance(rules, Rules) and rules:
for row in rules:
if KEYWORDS[DEFAULT] in row:
return row[KEYWORDS[DEFAULT]]
return False
else:
return False
def get_processed_rules(rules, function_args):
"""
:param rules:
:param function_args: args
:return: processedRules instance
"""
tables = get_truth_tables(rules, function_args)
return ProcessedRules(tables, get_default_output(rules))
```
#### File: mastermind/shatter/tester.py
```python
import traceback
import unittest
from shatter.code import Code
from shatter.util.helpers import *
__author__ = '<NAME>'
def get_eval_code(args_str, function):
"""
    Builds the code string that invokes the function under test.
:param args_str: eg: 'a=True, b=False'
:param function: to be tested
:return: code to run.
"""
return function.__name__ + '(' + args_str + ')'
def get_all_possible_inputs(inputs):
"""
    Uses a list comprehension to create all possible binary combinations
    of the inputs.
:param inputs: the input list
:return: a set containing all possible combinations.
"""
n = len(inputs)
return set([tuple([bool(int(c)) for c in bin(x)[2:].rjust(n, '0')]) for x in range(2**n)])
def get_used_inputs(tables):
"""
List comprehensions are used to get all tuples.
:param tables: the variable in processed_rules.
:return: a set with all used tuples.
"""
return set([item for k, set_v in tables.items() for item in set_v])
def print_inputs_of_tuple(a_tuple):
"""
    Builds a comma-separated string of the tuple elements that are plain inputs (skipping Code objects).
:param a_tuple: a tuple
:return: str
"""
result = ''.join([str(e) + ', ' if not isinstance(e, Code) else '' for e in a_tuple])
return result[:-2]
def run_single_test(test_class, a_tuple, solution, expected_value):
"""
Test for a single input values.
:param test_class: the unittest instance
:param a_tuple: either dict() or tuple with inputs.
:param solution: obj
:param expected_value: the value that should have the result to pass the test.
:return: passes, not passes, or cannot be tested by lack of context :(
"""
function_call_code = get_eval_code(print_inputs_of_tuple(a_tuple), solution.function)
try:
exec("\n".join(solution.implementation))
returned = eval(function_call_code)
except:
w_str = "Cannot test function, probably lack of context, exception is: "
warnings.warn(w_str, UserWarning)
traceback.print_exc()
else:
test_class.assertEqual(returned, expected_value)
def has_code_args(tables):
"""
Returns True if any argument in the tables is a Code obj.
:param tables: dict with keys as outputs and sets of tuples as dict values.
:return: bool
"""
for k, v_set in tables.items():
for a_tuple in v_set:
for e in a_tuple:
if isinstance(e, Code):
return True
return False
def validate(test_class):
"""
    Makes sure that the class passed can call the assertEqual() method.
:param test_class: any unittest class, or other object(will raise type error)
:return: raise error if wrong class found.
"""
assert_equal = getattr(test_class, "assertEqual", None)
if not callable(assert_equal):
raise TypeError("unittest class of type {0}, has not assertEqual defined.".format(type(test_class)))
def problem_has_boolean_output(tables):
"""
The problem only has True or False outputs.
:param tables: all truth tables.
:return: boolean indicating whether the problem is boolean or not.
"""
return all([isinstance(k, bool) or k in (0, 1) for k in tables.keys()])
class SolvableWithMLImplementation(Exception):
"""Not all cases have been implemented with machine learning. This error type indicates that this case can be
solved by running a machine learning algorithm."""
pass
class NotImplementedWithMLYet(Exception):
"""Not all cases have been implemented with machine learning. This error type indicates that this case
HAS NOT BEEN IMPLEMENTED YET"""
pass
def test_implementation(test_class, solution):
"""
:param test_class: the unittest instance
:param solution: solution obj.
    :return: False if no test was done, True on success; raises an error if a test doesn't pass.
"""
if test_class is None:
# assigns a local class to perform tests.
class MyTest(unittest.TestCase):
pass
test_class = MyTest()
validate(test_class)
if solution.processed_rules is None:
return False
tables = solution.processed_rules.tables
if has_code_args(tables):
warnings.warn("Cannot test function, it has added code", UserWarning)
return False
else:
try:
for expected_value, tuple_set in tables.items():
for a_tuple in tuple_set:
run_single_test(test_class=test_class,
a_tuple=a_tuple,
solution=solution,
expected_value=expected_value)
except AssertionError:
            # catches the exception and re-raises one of 2 different exceptions depending on whether the problem is solvable with
# current machine learning implementation.
if problem_has_boolean_output(tables):
raise SolvableWithMLImplementation()
# TODO: add implementation
raise NotImplementedWithMLYet()
return True
```
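get_all_possible_inputs above enumerates every boolean row of a truth table by binary counting; the same set can be produced with itertools.product, as this small self-contained check (a sketch, not part of shatter) suggests:
```python
import itertools
def all_possible_inputs(n):
    # same idea as get_all_possible_inputs: count 0..2**n - 1 and read each number back as booleans
    return set(tuple(bool(int(c)) for c in bin(x)[2:].rjust(n, '0')) for x in range(2 ** n))
n = 3
assert all_possible_inputs(n) == set(itertools.product([False, True], repeat=n))
print(len(all_possible_inputs(n)))  # 8
```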
#### File: shatter/util/helpers.py
```python
import inspect
import warnings
import os
import re
import shatter.constants as cts
__author__ = '<NAME>'
def read_file(absolute_path):
"""
:param absolute_path: string path.
:return: list with lines of the file.
"""
    with open(absolute_path) as f:
        return [line.rstrip('\n') for line in f]
def delete_file(filename):
"""
:param filename: relative path to file.
"""
if os.path.exists(filename):
os.remove(filename)
return True
return False
def write_file(filename, the_list):
"""
:param filename: relative path to file.
:param the_list: new file information.
:return: void
"""
    with open(filename, 'a') as new_file:
        for item in the_list:
            new_file.write("%s\n" % item)
def rewrite_file(filename, the_list):
"""
Delete and write again
:param filename: relative path to file.
:param the_list: new file information.
:return: void
"""
delete_file(filename)
write_file(filename, the_list)
def bit_in_string(string):
"""
Contains a bit in the string
:param string: arbitrary string
:return: boolean
"""
return ('0' in string) or ('1' in string)
def string_has_bits_for_and(str_bits, index):
"""
Returns true if finds a bit, before and after index.
:param index: int
:param str_bits: string
:return: boolean
"""
str_start = str_bits[:index]
str_end = str_bits[index:]
return index > 0 and bit_in_string(str_start) and bit_in_string(str_end)
def from_bool_to_bit(boolean):
"""
Conversion from boolean to bit
:param boolean: True or False
:return: '1' or '0'
"""
if boolean:
return "1"
else:
return "0"
def get_function_path(f):
"""
    Passes the internal func_code to an attribute called internal_code on the wrapper.
    Then we read that wrapper attribute, which exposes the metadata of the internal function, and get the path.
:param f: function
:return: path
"""
    # if the wrapper defines the new attribute, use it to expose the internal func_code; otherwise use the standard __code__ (no decorator)
code = f.internal_code if hasattr(f, cts.INTERNAL_CODE) else f.__code__
return code.co_filename
def valid_function(f):
"""
    Validates the function. Raises a TypeError if it is not callable and warns if it doesn't have a decorator.
:param f: function
:return: passes, raises warning or raises TypeError
"""
if not hasattr(f, '__call__'):
raise TypeError('{} is not a valid function.'.format(f))
if not hasattr(f, cts.INTERNAL_CODE):
warnings.warn('Function {} has no decorator, reading can be harder!!!'.format(f.__name__), UserWarning)
return True
def get_function_line_number(f, file_code):
"""
Returns first line number for decorated and un-decorated methods. -1 if not found.
:param f: function.
:param file_code: the code as a list where each element is a line.
    :return: the line of the file (starting at zero), -1 if not found.
"""
for index, line in enumerate(file_code):
pattern = re.compile(cts.PARTICULAR_DEFINITION.pattern.format(name=f.__name__))
definition = re.search(pattern, line)
if definition:
return index
return -1
def get_function_inputs(f):
"""
    Given a function, gets the names of its inputs from its signature.
    :param f: a callable function
    :return: input names as a tuple.
"""
if hasattr(f, cts.INTERNAL_PARAMETERS):
# 'internal_parameters' is defined inside the solver() annotation, see solver.py for details.
return f.internal_parameters
else:
return f.__code__.co_varnames
def get_function_code(start, file_code):
"""
    Gets the source code of the function. Opts for not using the
    inspect package because it doesn't work with decorators.
:param start: the starting line number, of the function
:param file_code: the source file lines
:return: code.
"""
def not_space_nor_comment(line):
return len(line.strip()) > 0 and line.strip()[0] != '#'
def inside_function(line_indent, f_indent):
return len(line_indent) > len(f_indent) + 3
base_indent = re.search(cts.INDENT, file_code[start]).group()
end = start
for index, l in enumerate(file_code[start + 1:]):
l_indent = re.search(cts.INDENT, l).group()
        # decides if adding to the function is required: not a blank line or a comment
if not_space_nor_comment(l):
if inside_function(l_indent, base_indent):
end = index + start + 2 # only add code if non-comment or empty spaces are inside function
else:
                # end of function if a lower indent is found that is not a blank line and not a comment
break
return file_code[start:end]
def var_is_true(var):
"""
    Returns True if var is True, else False. Remember here that 1 is an almost-True value
    but in this case should return False.
:param var: any variable.
:return: boolean
"""
return var and isinstance(var, bool)
def var_is_false(var):
"""
    Returns True if var is False, else False. Remember here that 0 is an almost-False value
    but in this case should return False.
:param var: any variable.
:return: boolean
"""
return not var and isinstance(var, bool)
def has_true_key(d):
"""
Returns True only if it has a True value as key.
Has to be done this way because Python confuses '0' and '1' with False and True.
:param d: dict()
:return: boolean
"""
for key in d:
if var_is_true(key):
return True
return False
def has_return(implementation, definition):
"""
Finds if the implementation already has a return.
:param implementation: array with code implementation
:param definition: function definition
:return: Boolean
"""
last_line = implementation[-1]
indent = get_indent_from_definition(definition)
pattern = r"^{indent} return".format(indent=indent)
return re.search(pattern, last_line) is not None
def has_false_key(d):
"""
Returns True only if it has a False value as key.
Has to be done this way because Python confuses '0' and '1' with False and True.
:param d: dict()
:return: boolean
"""
for key in d:
if var_is_false(key):
return True
return False
def var_is_1(var):
"""
Boolean if var is equal to 1 and not True.
:param var: variable
:return: boolean
"""
if var and not isinstance(var, bool):
return True
return False
def var_is_0(var):
"""
Boolean if var is equal to 0 and not False.
:param var: variable
:return: boolean
"""
if not var and not isinstance(var, bool):
return True
return False
def get_indent_from_definition(definition):
"""
Uses regex to get the indent
:param definition: of a function
:return: indent as string
"""
return re.search(cts.INDENT, definition).group()
def is_function(f):
"""
Is it a function?
:param f: function
:return: boolean
"""
return hasattr(f, '__call__')
def remove_list_from_list(all_list, list_to_remove):
"""
:param all_list: original list
:param list_to_remove: elements that will be removed from the original list.
:return: subtracted list
"""
return [value for value in all_list if value not in list_to_remove]
def is_private_call():
"""
Searches in the stack for places where the package is. If there is something then the
function is being called privately from inside the package, otherwise it is called from outside the package.
:return: boolean
"""
p_name = '/{}/'.format(cts.PACKAGE_NAME)
p = re.match(r'^.*' + p_name, inspect.stack()[0].filename).group()
# the number 2 in 'inspect.stack()[2:]' is because we are not looking inside is_private_call() function nor one
    # level above it, where it's supposed to tell us if that function is being called privately or publicly.
return any(re.match(p, frame.filename) is not None for frame in inspect.stack()[2:])
def name_in_frame(var, frame):
"""
Looks at the locals of the frame and searches in it for var.
:param var: variable to get name from.
:param frame: a inspect frame
:return: list with strings.
"""
callers_local_vars = frame.f_locals.items()
return [var_name for var_name, var_val in callers_local_vars if var_val is var]
def retrieve_name(var):
"""
    Gets the name of var. Does it from the outermost frame inwards.
:param var: variable to get name from.
:return: string
"""
for fi in reversed(inspect.stack()):
names = name_in_frame(var, fi.frame)
if len(names) > 0:
return names[0]
```
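retrieve_name above works by scanning inspect.stack() for a local name bound to the same object. A minimal standalone demonstration of the technique (a hypothetical name_of function that mirrors the helper rather than importing it) is:
```python
import inspect
def name_of(var):
    # walk the stack from the outermost frame inwards and return the first local
    # variable name bound to exactly this object (identity check, as in name_in_frame)
    for frame_info in reversed(inspect.stack()):
        names = [name for name, value in frame_info.frame.f_locals.items() if value is var]
        if names:
            return names[0]
my_sentinel = object()
print(name_of(my_sentinel))  # prints 'my_sentinel'
```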
#### File: mastermind/testa/dsl.py
```python
import re
from testa.tokens import TOKENS, get_token_sequence
def sub_str(v, p1, p2):
"""
returns substring if all reasonable conditions met otherwise None.
:param v: MyString object.
:param p1: position index
:param p2: position index
:return: substring
"""
if p1 is not None and p2 is not None and 0 <= p1 <= len(v) and 0 <= p2 <= len(v):
return v[p1, p2]
return None
def sub_str_2(v, r, c):
"""
Return cth match of regex in v.
:param v: MyString
:param r: Regex
:param c: cth match of regex in v.
:return: string
"""
# Where the cth match starts
p1 = v.pos(TOKENS['empty'], r, c)
# Where the cth match ends
p2 = v.pos(r, TOKENS['empty'], c)
return sub_str(v, p1, p2)
def match(v, r, k):
"""
Has at least k matches.
:param v: MyString
:param r: regex
:param k: integer. It is the minimum number of matches.
:return:
"""
return len(re.findall(r, str(v))) >= k
class MyString:
def __init__(self, s):
self.s = s
def __str__(self):
return self.s
def __len__(self):
return len(self.s)
def __eq__(self, other):
if isinstance(other, MyString):
return self.s == other.s
elif isinstance(other, str):
return self.s == other
else:
return False
def __getitem__(self, item):
if isinstance(item, tuple) and len(item) > 1:
return MyString(self.s[item[0]:item[1]])
elif isinstance(item, int):
return self.s[item]
elif isinstance(item, slice):
if item.step is None:
return self.s[item.start: item.stop]
else:
return self.s[item.start: item.step]
else:
            raise NotImplementedError('unsupported index type for MyString')
def c_pos(self, k):
"""
Index, including negative case (from the right side).
:param k:
:return:
"""
if k >= 0:
return k
else:
# TODO: bug here for k=-1. Will be out of range!
return len(self.s) + k + 1
def pos(self, r1, r2, c):
"""
Find the index t for the cth occurrence of: Matching the suffix of s[:t] and the prefix of s[t:]
If c is negative reverses the string and returns 'len(s) - t + 1' instead of 't'
:param r1: regex1
:param r2: regex2
:param c: integer, the occurrence number
:return: index.
"""
# c is a non-zero integer
if isinstance(c, str):
c = eval(c)
# reverse string if c is negative.
if c < 0:
s = ''.join([e for e in reversed(self.s)])
c = abs(c)
is_reversed = True
else:
s = self.s
is_reversed = False
match_number = 0
for t, _ in enumerate(s):
s1 = s[:t]
s2 = s[t:]
# matches suffix
match1 = re.findall(r1 + '$', s1)
# matches prefix
match2 = re.findall('^' + r2, s2)
if len(match1) > 0 and len(match2) > 0:
match_number += 1
if match_number == c:
if is_reversed:
return len(s) - t + 1
else:
return t
return None
@staticmethod
def loop(lambda_function):
"""
Concatenates stuff created by the lambda function until the output of this function is None.
:param lambda_function: a function with a w integer input.
:return: MyString object.
"""
concat = ''
w = 0
# do while loop.
while True:
o = lambda_function(w + 1)
if o is None:
break
else:
concat += o
w += 1
return MyString(concat)
def switch(*args):
"""
Given each arg as a tuple (b, e) returns the first match. This is equivalent to
if b1:
return e1
elif b2:
        return e2
...
:param args: are in the form ((b1, e1), (b2, e2) ...)
:return: one of the e
"""
for b, e in args:
if b:
return e
def concatenate(*args):
"""
If no argument is None, then join all strings.
:param args: Can be strings, MyString objects or None
:return: None or string.
"""
for e in args:
if e is None:
return None
return ''.join([str(e) for e in args])
def example_2(s):
v = MyString(s)
return sub_str(v, v.pos(TOKENS['empty'], TOKENS['digits'], 1), v.c_pos(-1))
def example_3(s):
v = MyString(s)
return sub_str(v, v.c_pos(0), v.pos(TOKENS['forward_slash'], TOKENS['empty'], -1))
def example_4(s):
v = MyString(s)
return str(v.loop(lambda w: concatenate(sub_str_2(v, TOKENS['uppercase'], w))))
def example_5(s):
v = MyString(s)
def lambdasuri(w):
s1 = get_token_sequence(TOKENS['digits'], TOKENS['forward_slash'])
s2 = get_token_sequence(TOKENS['forward_slash'], TOKENS['digits'])
p1 = v.pos(TOKENS['left_parenthesis'], s1, w)
p2 = v.pos(s2, TOKENS['right_parenthesis'], w)
return concatenate(sub_str(v, p1, p2), ' #')
return str(v.loop(lambdasuri))
def example_6(s):
v = MyString(s)
def lambdasuri(w):
        # note: '[^ ]' matches any single non-space character
p1 = v.pos(TOKENS['space'], '[^ ]', w)
p2 = v.pos('[^ ]', TOKENS['space'], w)
return concatenate(sub_str(v, p1, p2), ' ')
res = v.loop(lambdasuri)
return sub_str(res, 0, len(res) - 1)
def example_7(s1, s2):
v1 = MyString(s1)
v2 = MyString(s2)
b1 = match(v1, TOKENS['all_chars'], 1) and match(v2, TOKENS['all_chars'], 1)
e1 = concatenate(v1, '(', v2, ')')
b2 = not match(v1, TOKENS['all_chars'], 1) or not match(v2, TOKENS['all_chars'], 1)
e2 = ''
return switch((b1, e1), (b2, e2))
if __name__ == '__main__':
assert example_2('BTR KRNL WK CORN 15Z') == '15Z'
assert example_2('CAMP DRY DBL NDL 3.6 OZ') == '3.6 OZ'
assert example_2('CHORE BOY HD SC SPNG 1 PK') == '1 PK'
out = example_3('Company/Code/index.html')
assert out == 'Company/Code/'
out = example_3('Company/Docs/Spec/specs.doc')
assert out == 'Company/Docs/Spec/'
out = str(example_4('International Business Machines'))
assert out == 'IBM'
out = example_4('Principles Of Programming Languages')
assert out == 'POPL'
out = example_5('(6/7)(4/5)(14/1)')
assert out == '6/7 #4/5 #14/1 #'
out = example_5('49(28/11)(14/1)')
assert out == '28/11 #14/1 #'
out = example_5('() (28/11)(14/1)')
assert out == '28/11 #14/1 #'
out = example_6(' something with lots of spaces ')
assert out == 'something with lots of spaces'
out = example_7('Alex', 'Asst.')
assert out == 'Alex(Asst.)'
out = example_7('', 'Manager')
assert out == ''
out = example_7('Alex', '')
assert out == ''
```
#### File: mastermind/tests/test_code_equality.py
```python
import unittest
from shatter.code import Code
from tests.generated_code import code_functions as f
from tests.testing_helpers import common_testing_code
from shatter.custom_operator import CustomOperator
__author__ = '<NAME>'
class CodeTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
common_testing_code.reset_functions_file(f.__file__, hard_reset=True)
def test_inequality_different_operator(self):
"""Always false if there is an operator mismatch"""
i = Code()
j = Code()
m = (i == j)
k = (i < j)
self.assertFalse(str(m) == str(k))
self.assertFalse(str(k) == str(m))
def test_equality(self):
i = Code()
j = Code()
for s in CustomOperator.OPERATORS.values():
m = (eval('i {} j'.format(s)))
k = (eval('i {} j'.format(s)))
self.assertTrue(str(m) == str(k))
def test_no_commutation(self):
i = Code()
j = Code()
for s in CustomOperator.OPERATORS.values():
m = (eval('i {} j'.format(s)))
k = (eval('j {} i'.format(s)))
self.assertFalse(str(m) == str(k))
if __name__ == '__main__':
unittest.main()
```
#### File: mastermind/tests/test_code_generator.py
```python
import unittest
from shatter import code_generator as c
from shatter import solver as s
from shatter.rules import Rules
from shatter.util.helpers import get_function_inputs
from tests.generated_code import code_generator_functions as f
from tests.testing_helpers import constants as cts, common_testing_code
from shatter.solver import Code
from shatter import QM_helper
__author__ = '<NAME>'
class GeneratorTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
common_testing_code.reset_functions_file(f.__file__, hard_reset=True)
def test_get_signature_exception(self):
"""a non valid definition is given, should raise a FunctionNotFound exception."""
with self.assertRaises(c.FunctionNotFound):
c.get_signature_from_definition('invalid_function_definition')
def test_code_generation_with_if(self):
"""
Test with outputs different from boolean.
"""
r = Rules(a=True, b=True, output=1)
solution = r.solve(f.non_boolean_and, self)
code = ['def ' + f.non_boolean_and.__name__ + '(a, b):',
'',
' if a and b:',
' return 1',
'',
' return False']
self.assertEqual(solution.implementation, code)
def get_function_code(self, signature, expression_expected, table, fun):
"""
Tests that a right function definition is generated.
:param signature: of the function eg: sum(a,b).
:param table: truth table.
"""
expected_code = ["def " + signature + ":", " return " + expression_expected]
inputs = get_function_inputs(fun)
expression = QM_helper.get_boolean_expression(table, inputs, 2)
definition = 'def ' + signature
code = s.add_code_to_implementation(current_implementation=s.get_initial_implementation(definition),
bool_expression=expression,
definition=definition,
the_output=True)
self.assertListEqual(code, expected_code)
def test_get_function_implementation(self):
"""
Testing for and, or & xor the "get_function_implementation".
"""
self.get_function_code(cts.sig_and, cts.exp_and, cts.and_table, f.and_function)
self.get_function_code(cts.sig_or, cts.exp_or, cts.or_table, f.or_function)
self.get_function_code(cts.sig_xor, cts.exp_xor, cts.xor_table, f.xor_function)
def test_boolean_and_quasi_boolean_mix_true_values(self):
"""
Tests whether changing inputs for True and 1 outputs affect the final result.
BY DEFAULT IF 1 AND TRUE are present will choose 1 as output.
Test with both a boolean and quasi-boolean output.
In python True == 1. Therefore output=1 is the same as output=True.
"""
code = ['def mix_true_values(a, b):',
'',
' if a and not b or not a and b:',
' return 1',
'',
' return False']
r = Rules(a=True, b=False, output=1) # non-boolean output
r.add(a=False, b=True, output=True) # boolean condition
solution = r.solve(f.mix_true_values, self)
self.assertEqual(solution.implementation, code)
def test_boolean_and_quasi_boolean_mix_false_values(self):
"""
Will make an if for the 0 case, while it will ignore the False case.
"""
code = ['def mix_false_values(a, b):',
'',
' if a and not b or not a and b:',
' return 0',
'',
' return False']
r = Rules(a=False, b=True, output=0) # non-boolean output
r.add(a=True, b=False, output=False) # boolean condition
solution = r.solve(f.mix_false_values, self)
self.assertEqual(solution.implementation, code)
r = Rules(a=True, b=False, output=False) # non-boolean output
r.add(a=False, b=True, output=0) # boolean condition
solution = r.solve(f.mix_false_values, self)
self.assertEqual(solution.implementation, code)
def test_rules_input_order_is_respected(self):
"""
First input has to be first on the final boolean expression.
So programmers can use short circuiting to their advantage ;). Very useful when validating data.
Changing input order will change expression order.
"""
code = ['def ordered_expression(a, b):',
' return a or b']
r = Rules(a=True, output=True) # boolean output
r.add(b=True, output=True) # boolean condition
solution = r.solve(f.ordered_expression, self)
self.assertEqual(solution.implementation, code)
code = ['def ordered_expression(a, b):',
' return a or b']
r = Rules(b=True, output=True) # boolean output
r.add(a=True, output=True) # boolean condition
solution = r.solve(f.ordered_expression, self)
self.assertEqual(solution.implementation, code)
def multiple_value_test(self, out1, out2, function):
"""
Testing multiple output types.
:param out1: anything
:param out2: anything
:param function: object
"""
code = ['def ' + function.__name__ + '(a, b):',
'',
' if not a and b:',
' return ' + c.print_object(out1),
'',
' if a and not b:',
' return ' + c.print_object(out2),
'',
' return False']
r = Rules(a=False, b=True, output=out1) # non-boolean output
r.add(a=True, b=False, output=out2) # non-boolean condition
solution = r.solve(function)
self.assertEqual(solution.implementation, code)
def test_multiple_outputs(self):
"""
Test for more than 1 if, outputs are longs.
"""
uniform_pairs = [(2, 3, f.fun2),
(2.12345, 3.12345, f.fun3),
('3', '2', f.fun4),
(3j, 2j, f.fun5),
((3, 3), (2, 2), f.fun6),
(2, '3', f.fun7),
(3.12345, (3, 3), f.fun8)]
# TODO: include lists dictionaries and sets.
#([1, 2, 3], {4, 5, 6}, f.fun10)]
for values in uniform_pairs:
self.multiple_value_test(*values)
def test_function_outputs(self):
"""
When output is a function.
"""
function = f.output_function_obj
out1 = f.fun8
code = ['def ' + function.__name__ + '(a, b):',
'',
' if not a and b:',
' return ' + c.print_object(out1),
'',
' return False']
r = Rules(a=False, b=True, output=out1) # non-boolean output
solution = r.solve(function)
self.assertEqual(solution.implementation, code)
def test_mix_output_boolean(self):
"""
When ifs and pure boolean expression mix.
"""
function = f.mix_output
out = 'a'
code = ['def ' + function.__name__ + '(a, b):',
'',
' if not a and b:',
' return ' + c.print_object(out),
' return a and b']
r = Rules(a=False, b=True, output=out) # non-boolean output
r.add(a=True, b=True) # boolean output
solution = r.solve(function, self)
self.assertEqual(solution.implementation, code)
def test_calling_another_function_no_args(self):
"""
Invoke function with NO arguments.
"""
function = f.another_call
out = f.no_args
code = ['def {}(a, b):'.format(function.__name__),
'',
' if not a and b:',
' return {}()'.format(out.__name__),
'',
' return False']
r = Rules(a=False, b=True, output=out, output_args={}) # non-boolean output
solution = r.solve(function)
self.assertEqual(solution.implementation, code)
def test_calling_another_function_with_args(self):
"""
Invoke function with arguments.
"""
function = f.another_call2
args = {'a': s.Code(code_str='a'), 'b': s.Code(code_str='b')}
out_f = f.another_call
code = ['def {}(a, b):'.format(function.__name__),
'',
' if not a and b:',
' return {}(a, b)'.format(out_f.__name__),
'',
' return False']
r = Rules(a=False, b=True, output=out_f, output_args=args) # non-boolean output
solution = r.solve(function)
self.assertEqual(solution.implementation, code)
def test_default_keyword(self):
"""
default keyword changes the last return from False to determined value.
"""
function = f.with_default_value
out = 3
default = 5
code = ['def ' + function.__name__ + '(a, b):',
'',
' if not a and b:',
' return ' + str(out),
'',
' return ' + str(default)]
r = Rules(a=False, b=True, output=out, default=default)
solution = r.solve(function, self)
self.assertEqual(solution.implementation, code)
r = Rules(a=False, b=True, output=out)
r.add(default=default)
solution = r.solve(function, self)
self.assertEqual(solution.implementation, code)
def test_recursive_function(self):
"""
Will do recursion, extremely cool!!!
"""
function = f.recursive
not_a = 'not a'
args = {'a': s.Code(code_str=not_a)}
out = s.Output(function, args)
code = ['def {}(a):'.format(function.__name__),
'',
' if {}:'.format(not_a),
' return 0',
'',
' return {0}({1})'.format(function.__name__, not_a)]
r = Rules(a=False, output=0, default=out)
solution = r.solve(function)
self.assertEqual(solution.implementation, code)
def test_recursive_iteration(self):
"""
Will do recursive iteration, extremely cool!!!
"""
function = f.recursive_iteration
array_len_0 = 'len(array) == 0'
array_1 = 'array[1:]'
args = {'array': s.Code(code_str=array_1)}
out = s.Output(function, args)
code = ['def {}(array):'.format(function.__name__),
'',
' if {}:'.format(array_len_0),
' return 0',
'',
' return {0}({1})'.format(function.__name__, array_1)]
r = Rules(r1=s.Code(code_str=array_len_0), output=0, default=out)
solution = r.solve(function, self)
self.assertEqual(solution.implementation, code)
def test_calling_nested_functions(self):
"""
call nested functions.
"""
function = f.nested_call
out_obj = s.Output(f.f, {'a': s.Output(f.g, {'a': s.Code(code_str='a')})})
code = ['def ' + function.__name__ + '(a):',
'',
' if not a:',
' return ' + f.f.__name__ + '(' + f.g.__name__ + '(a))',
'',
' return False']
r = Rules(a=False, output=out_obj)
solution = r.solve(function)
self.assertEqual(solution.implementation, code)
def test_internal_code_arguments(self):
"""
Do logic with pieces of code that evaluate to boolean.
"""
function = f.with_internal_code_arg
code = ['def ' + function.__name__ + '(a):',
'',
' if isinstance(a, str):',
' return 2',
'',
' return False']
r = Rules(any_non_input_name=s.Code(code_str='isinstance(a, str)'), output=2)
solution = r.solve(function, self)
self.assertEqual(solution.implementation, code)
def test_right_code_input_order(self):
"""
For programmer convenience and to be able to use short circuiting.
Code pieces on expressions will follow the same order as the input order.
"""
function = f.right_expression_order
right_str = 'right order!!!'
code1_str = 'len(array) > 1'
code2_str = 'array[0]'
code3_str = 'isinstance(array[0], int)'
code = ['def ' + function.__name__ + '(array):',
'',
' if ' + code1_str + ' and ' + code2_str + ' and ' + code3_str + ':',
' return ' + "\"" + right_str + "\"",
'',
' return False']
r = Rules(r1=s.Code(code_str=code1_str),
r2=s.Code(code_str=code2_str),
r3=s.Code(code_str=code3_str),
output=right_str)
solution = r.solve(function, self)
self.assertEqual(solution.implementation, code)
def test_factor_unordered_pieces_of_code(self):
"""
Tests that string output is factored, when inputs are given all at once.
"""
function = f.factor_pieces_of_code
right_str = 'factoring!!!'
code1_str = 'isinstance(array[0], int)'
code2_str = 'isinstance(array[1], int)'
code3_str = 'isinstance(array[2], int)'
code = ['def ' + function.__name__ + '(array):',
'',
' if ' + code1_str + ' and ' + code2_str + ' or ' + code3_str + ':',
' return ' + "\"" + right_str + "\"",
'',
' return False']
r = Rules(rule1=s.Code(code_str=code1_str),
rule2=s.Code(code_str=code2_str),
output=right_str)
r.add(rule3=s.Code(code_str=code3_str), output=right_str)
solution = r.solve(function, self)
self.assertEqual(solution.implementation, code)
def test_factor_ordered_pieces_of_code(self):
"""
Tests that string output is factored, when inputs are given in more than one addition.
"""
function = f.factor_ordered_pieces_of_code
right_str = 'factoring!!!'
code1_str = 'isinstance(array[0], int)'
code2_str = 'isinstance(array[1], int)'
code3_str = 'isinstance(array[2], int)'
code = ['def ' + function.__name__ + '(array):',
'',
' if ' + code1_str + ' and ' + code2_str + ' or ' + code3_str + ':',
' return ' + "\"" + right_str + "\"",
'',
' return False']
r = Rules(r1=s.Code(code_str=code1_str),
r2=s.Code(code_str=code2_str),
output=right_str)
r.add(s.Code(code_str=code3_str), output=right_str)
solution = r.solve(function, self)
self.assertEqual(solution.implementation, code)
def test_factor_code_output(self):
"""
Tests that code output can be factored.
"""
function = f.factor_ordered_pieces_of_code
output_code = '2*2'
code1_str = 'isinstance(array[0], int)'
code2_str = 'isinstance(array[1], int)'
code = ['def ' + function.__name__ + '(array):',
'',
' if ' + code1_str + ' or ' + code2_str + ':',
' return ' + output_code,
'',
' return False']
r = Rules(r1=s.Code(code_str=code1_str), output=s.Code(code_str=output_code))
r.add(s.Code(code_str=code2_str), output=s.Code(code_str=output_code))
solution = r.solve(function, self)
self.assertEqual(solution.implementation, code)
def test_factor_ordered_pieces_with_redundancy(self):
"""Tests that string output is factored, when inputs are given in more than one addition."""
function = f.factor_ordered_pieces_with_redundancy
right_str = 'factoring!!!'
code0_str = 'isinstance(array[0], int)'
code1_str = 'isinstance(array[1], int)'
code = ['def {}(array):'.format(function.__name__),
'',
' if {}:'.format(code1_str),
" return \"{}\"".format(right_str),
'',
' return False']
r = Rules(r1=s.Code(code_str=code0_str),
r2=s.Code(code_str=code1_str),
output=right_str)
r.add(s.Code(code_str=code1_str), output=right_str)
solution = r.solve(function, self)
self.assertEqual(solution.implementation, code)
# TODO: auxiliary test: remove?
def test_basic(self):
function = f.basic
code = ['def {}(a, b):'.format(function.__name__),
' return b']
r = Rules(a=True,
b=True,
output=True)
r.add(b=True, output=True)
solution = r.solve(function, self)
self.assertEqual(solution.implementation, code)
def test_basic_if(self):
"""test basic if statement"""
function = f.basic_if
ouput = 'le'
code = ['def {}(a, b):'.format(function.__name__),
'',
' if b:',
" return \"{}\"".format(ouput),
'',
' return False']
r = Rules(a=True,
b=True,
output=ouput)
r.add(b=True, output=ouput)
solution = r.solve(function, self)
self.assertEqual(solution.implementation, code)
def test_simple_constant_output(self):
"""
When the result output of the QM algorithm is an empty expression, that means that regardless
of the input the output is constant.
"""
function = f.simple_constant_output
code = ['def {}(a):'.format(function.__name__),
' return True']
r = Rules(a=True, output=True)
r.add(a=False, output=True)
solution = r.solve(function, self)
self.assertEqual(solution.implementation, code)
def test_inner_inputs_with_different_outputs(self):
"""
Inner inputs are not function arguments but are for example pieces of code, that act as inputs to the tables.
Inside function 'return_solution', on module solver.py:
On each table iteration the all_inputs variable has to be calculated inside the main function of solver.py
This is because if not this test fails.
"""
function = f.many_outputs
input1 = 'isinstance(a, int)'
input2 = 'isinstance(a, str)'
r = Rules(r1=Code(code_str=input1), output=1)
r.add(r1=Code(code_str='isinstance(a, str)'), output=2)
solution = r.solve(function, self)
print(solution.implementation)
# is taking the correct inputs
self.assertEqual(solution.implementation[2], ' if {}:'.format(input1))
self.assertEqual(solution.implementation[5], ' if {}:'.format(input2))
if __name__ == '__main__':
unittest.main()
```
#### File: mastermind/tests/test_code.py
```python
import unittest
from shatter.code import Code
from tests.generated_code import code_functions as f
from shatter.rules import Rules
from tests.testing_helpers import common_testing_code
__author__ = '<NAME>'
class CodeTest(unittest.TestCase):
# TODO: REMINDER THE ISINSTANCE OF A FUNCTION AND METHOD SHOULD BE LIKE:
# elif not isinstance(self.build_fn, types.FunctionType) and not isinstance(self.build_fn, types.MethodType):
@classmethod
def setUpClass(cls):
common_testing_code.reset_functions_file(f.__file__, hard_reset=True)
def test_comparison_operators(self):
i = Code()
j = Code()
self.assertTrue(isinstance(i == 2, Code))
self.assertTrue(isinstance(2 == i, Code))
self.assertTrue(isinstance(i == j, Code))
self.assertTrue(isinstance(i != 2, Code))
self.assertTrue(isinstance(2 != i, Code))
self.assertTrue(isinstance(i != j, Code))
self.assertTrue(isinstance(i < 2, Code))
self.assertTrue(isinstance(2 < i, Code))
self.assertTrue(isinstance(i < j, Code))
self.assertTrue(isinstance(i > 2, Code))
self.assertTrue(isinstance(2 > i, Code))
self.assertTrue(isinstance(i > j, Code))
self.assertTrue(isinstance(i <= 2, Code))
self.assertTrue(isinstance(2 <= i, Code))
self.assertTrue(isinstance(i <= j, Code))
self.assertTrue(isinstance(i >= 2, Code))
self.assertTrue(isinstance(2 >= i, Code))
self.assertTrue(isinstance(i >= j, Code))
def test_arithmetic_operators(self):
i = Code()
j = Code()
self.assertTrue(isinstance(i + 2, Code))
self.assertTrue(isinstance(2 + i, Code))
self.assertTrue(isinstance(i + j, Code))
self.assertTrue(isinstance(i - 2, Code))
self.assertTrue(isinstance(2 - i, Code))
self.assertTrue(isinstance(i - j, Code))
self.assertTrue(isinstance(i * 2, Code))
self.assertTrue(isinstance(2 * i, Code))
self.assertTrue(isinstance(i * j, Code))
self.assertTrue(isinstance(i / 2, Code))
self.assertTrue(isinstance(2 / i, Code))
self.assertTrue(isinstance(i / j, Code))
self.assertTrue(isinstance(i % 2, Code))
self.assertTrue(isinstance(2 % i, Code))
self.assertTrue(isinstance(i % j, Code))
self.assertTrue(isinstance(i ** 2, Code))
self.assertTrue(isinstance(2 ** i, Code))
self.assertTrue(isinstance(i ** j, Code))
self.assertTrue(isinstance(i // 2, Code))
self.assertTrue(isinstance(2 // i, Code))
self.assertTrue(isinstance(i // j, Code))
# TODO: NOT READY TO IMPLEMENT THESE. NOT SURE ABOUT CONSEQUENCES!
"""
def test_logical_operators(self):
i = Code()
j = Code()
self.assertTrue(isinstance(i and 2, Code))
self.assertTrue(isinstance(2 and i, Code))
self.assertTrue(isinstance(i and j, Code))
self.assertTrue(isinstance(i or 2, Code))
self.assertTrue(isinstance(2 or i, Code))
self.assertTrue(isinstance(i or j, Code))
self.assertTrue(isinstance(i not 2, Code))
self.assertTrue(isinstance(2 not i, Code))
self.assertTrue(isinstance(i not j, Code))
"""
def test_composition(self):
"""
Complex expression is assembled, should print out the same value.
"""
i = Code()
j = Code()
k = Code()
l = Code()
c = j + i ** i // 5 / l < j - k
self.assertEqual(str(c), 'j + i ** i // 5 / l < j - k')
def test_code_with_int(self):
"""
When user declares
>>> v = Code()
and
>>> code_object = v == 2
Then code_object should be of type Code, rather than boolean and
>>>str(code_object)
'v == 2'
"""
v = Code()
code_object = v == 2
self.assertTrue(isinstance(code_object, Code))
self.assertEqual(str(code_object), 'v == 2')
def test_code_with_code(self):
"""
When user declares
>>> v = Code()
>>> w = Code()
and
>>> code_object = v == w
Then code_object should be of type Code, rather than boolean and
>>>str(code_object)
'v == w'
"""
v = Code()
w = Code()
code_object = v == w
self.assertTrue(isinstance(code_object, Code))
self.assertEqual(str(code_object), 'v == w')
def test_factoring_with_code_var(self):
"""This is a hard test from test_code_generator.py, but additionally here it is added Code instances :)"""
function = f.factor_code_with_code
output_code = 'i * 2'
code1_str = 'i == 9'
code2_str = 'i == 7'
code = ['def ' + function.__name__ + '(i):',
'',
' if ' + code1_str + ' or ' + code2_str + ':',
' return ' + output_code,
'',
' return False']
i = Code()
r = Rules(i == 9, output=i * 2)
r.add(i == 7, output=i*2)
solution = r.solve(function, self)
self.assertEqual(solution.implementation, code)
def test_factor_ordered_with_code(self):
"""This is a hard test from test_code_generator.py, but additionally here it is added Code vars :)"""
function = f.factor_ordered_with_code
right_str = 'i * j'
code1_str = 'i != 0'
code2_str = 'i < 1'
code3_str = 'i > j'
code = ['def ' + function.__name__ + '(i, j):',
'',
' if {0} and {1} or {2}:'.format(code1_str, code2_str, code3_str),
' return ' + right_str,
'',
' return False']
i = Code()
j = Code()
r = Rules(i != 0, i < 1, output=i * j)
r.add(i > j, output=i * j)
solution = r.solve(function, self)
self.assertEqual(solution.implementation, code)
def test_get_name_of_variable(self):
"""
Gets the name of a variable instance of Code class.
"""
name = Code()
self.assertEqual(str(name), 'name')
# TODO: STUPID BUG HERE!!! Last representation is NOT working
def test_identity_representation(self):
"""There are 4 equivalent representations of the identity function:
1. Boring:
>>> Rules(a=True, output=True)
2. Implicit True:
>>> Rules(a=True)
3. Using Code() magic:
>>> a = Code()
>>> Rules(a, output=True)
4. Using both Code() magic and implicit True output:
>>> a = Code()
>>> Rules(a)
lets test all representations!!!
"""
a = Code()
solution = ['def {}(a):'.format(f.identity.__name__),
' return a']
r = Rules(a=True, output=True)
s = r.solve(f.identity)
self.assertEqual(s.implementation, solution)
r = Rules(a=True)
s = r.solve(f.identity)
self.assertEqual(s.implementation, solution)
r = Rules(a, output=True)
s = r.solve(f.identity)
self.assertEqual(s.implementation, solution)
# TODO: pass this test:
"""
r = Rules(a)
s = r.solve(f.minimal_code)
self.assertEqual(s.implementation, solution)
"""
# TODO: Do test to be able to represent 'a=False' as the more natural 'not a', with Code() objects
if __name__ == '__main__':
unittest.main()
```
#### File: mastermind/tests/test_rules.py
```python
import unittest
from shatter.rules import *
from tests.testing_helpers import constants as cts
__author__ = '<NAME>'
class RulesTest(unittest.TestCase):
def test_get_truth_table(self):
"""
Input data in different ways.
"""
# case 1: single condition added on constructor
r = Rules(a=True, b=True)
self.assertEqual(r.get_truth_tables(['a', 'b']), {True: [(True, True)]})
# case 2: adding rules on constructor and with add method.
r = Rules(a=True, b=True)
r.add(a=True, b=False)
self.assertEqual(r.get_truth_tables(['a', 'b']), {True: [(True, True), (True, False)]})
# case 3: adding 2 arguments, one of them with an output.
r = Rules()
r.add(a=False, output=3)
r.add(a=True)
self.assertEqual(r.get_truth_tables(['a', 'b']),
{3: [(False, True), (False, False)], True: [(True, True), (True, False)]})
# case 0: empty dict.
def test_empty_dict_max_positional_arg(self):
self.assertEqual(Rules.gets_start_positional_idx({}), 0)
# case 1: no positional args.
def test_no_positional_arg(self):
self.assertEqual(Rules.gets_start_positional_idx({1: 2, 3: 4}), 0)
# case 2: some positional args.
def test_mix_positional_args(self):
r = Rules.gets_start_positional_idx({1: 2, POSITIONAL_ARGS_RULE + str(0): 4})
self.assertEqual(r, 1)
# case 3: all positional args.
def test_all_positional_args(self):
r = Rules.gets_start_positional_idx({POSITIONAL_ARGS_RULE + str(0): 6,
POSITIONAL_ARGS_RULE + str(1): 4})
self.assertEqual(r, 2)
def test_get_truth_tables(self):
"""anomaly case: when rules is not a set or a Rules object. It should raise exception."""
with self.assertRaises(RulesTypeError):
get_truth_tables(rules=1, function_args=None)
def test_correct_variables_order(self):
"""Order should be: function args first (from right to left), then args from condition obj from left to right
and top to bottom"""
out = -1
def f(a, b):
return a + b
r = Rules(c=1, d=2, output=out)
r.add(x=3, y=4, output='other_stuff')
r.add(e=3, f=4, output=out)
self.assertEqual(r.get_input_keys(helpers.get_function_inputs(f), out),
OrderedSet(['a', 'b', 'c', 'd', 'e', 'f']))
# --------- test validation --------- #
def test_non_callable(self):
"""
Checks that the function passed is valid.
"""
non_callable = ''
with self.assertRaises(TypeError):
solve(non_callable, cts.and_table, self)
# --------- test validation --------- #
if __name__ == '__main__':
unittest.main()
```
#### File: mastermind/tests/test_tester.py
```python
import unittest
from tests.generated_code import tester_functions as f
from tests.testing_helpers import common_testing_code
from shatter.rules import Rules
from shatter import tester
from shatter.solution import Solution
__author__ = '<NAME>'
class TesterTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
common_testing_code.reset_functions_file(f.__file__, hard_reset=True)
def test_collision(self):
"""
        Even though the rules are absurd and are a contradiction, the non-deterministic model should choose and solve
the problem at random between the identity ('return a') and its negation ('return not a').
"""
r = Rules(a=True, output=True) # first condition
r.add(a=True, output=False) # contradictory condition.
r.solve(f.collision, self)
def test_non_collision(self):
"""
Testing bigger stuff. Multiple ifs with multiple boolean variables
"""
r = Rules(a=True, b=True, c=True, output=0) # leave d out
r.add(a=False, b=True, d=True, output=1) # leave c out
r.add(a=True, c=False, d=True, output=2) # leave b out
r.add(b=True, c=False, d=False, output=3) # leave a out
r.solve(f.non_collision, self)
def test_unittest_validation(self):
"""
Should raise exception if unittest is not of the correct class.
"""
with self.assertRaises(TypeError):
tester.test_implementation('wrong class', Solution(None, None, None))
def test_no_unittests_performed(self):
"""
Should not perform tests if there are no tables.
"""
self.assertFalse(tester.test_implementation(None, Solution(None, None, None)))
def test_function_solve_with_no_unittest(self):
"""
Same as test_basic_if() test but with no unittest provided.
"""
function = f.basic_if
ouput = 'le'
code = ['def {}(a, b):'.format(function.__name__),
'',
' if b:',
" return \"{}\"".format(ouput),
'',
' return False']
r = Rules(a=True,
b=True,
output=ouput)
r.add(b=True, output=ouput)
solution = r.solve(function)
self.assertEqual(solution.implementation, code)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jisbruzzi/equipo-q-tp1",
"score": 3
} |
#### File: src/gale_shapely/generator.py
```python
import os
import random
PREFS_DIR = os.path.join(os.getcwd(), 'prefs')
def generate(prefs: list, n: int, prefix: str):
for i in range(n):
current_filename = "{}_{}.prf".format(prefix, str(i + 1))
path = os.path.join(PREFS_DIR, current_filename)
random.shuffle(prefs)
        # with open(...) as f: -> RAII-like construct as in C++, it calls close automatically
with open(path, 'w') as f:
for j in prefs:
f.write(str(j) + '\n')
```
#### File: src/gale_shapely/queue.py
```python
class Queue:
class Node:
def __init__(self, value):
self.next = None
self.value = value
class EmptyError(Exception):
pass
def __init__(self, *args):
self.first = None
self.last = None
self.size = 0
if args:
for value in args:
self.enqueue(value)
def enqueue(self, value):
if not self.first:
self.first = self.Node(value)
self.last = self.first
else:
self.last.next = self.Node(value)
self.last = self.last.next
self.size += 1
def pop(self):
if not self.first:
raise self.EmptyError
value = self.first.value
self.first = self.first.next
self.size -= 1
return value
def top(self):
if not self.first:
raise self.EmptyError
return self.first.value
def __len__(self):
return self.size
def __bool__(self):
return bool(self.size)
```
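A short usage sketch for the linked-list Queue above; the `src.gale_shapely.queue` import path is assumed from the file layout shown in this repo:
```python
# assuming the repository root is on sys.path
from src.gale_shapely.queue import Queue
q = Queue(1, 2, 3)   # the varargs constructor enqueues the values in order
q.enqueue(4)
print(len(q))        # 4
print(q.top())       # 1, FIFO: the first value enqueued is served first
print(q.pop())       # 1
print(q.pop())       # 2
while q:             # __bool__ is False once the queue is empty
    q.pop()
try:
    q.pop()
except Queue.EmptyError:
    print("queue is empty")
```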
#### File: src/search/mergesort.py
```python
import math
def merge(la, lb):
l1 = list(la)
l2 = list(lb)
ret = []
while len(l1) > 0 and len(l2) > 0:
if l1[0] < l2[0]:
ret.append(l1[0])
l1.pop(0)
else:
ret.append(l2[0])
l2.pop(0)
ret.extend(l1)
ret.extend(l2)
return ret
def mergesort(lista):
if len(lista) <= 1:
return lista
indice_medio = int(math.floor(len(lista) / 2))
l1 = lista[0:indice_medio]
l2 = lista[indice_medio:]
return merge(mergesort(l1), mergesort(l2))
def ordenar(lista):
return mergesort(lista)
```
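A quick usage sketch for the merge sort above (the import path is assumed from the file layout; ordenar is the module's entry point):
```python
# assuming the repository root is on sys.path
from src.search.mergesort import merge, ordenar
print(merge([1, 4, 9], [2, 3, 10]))  # [1, 2, 3, 4, 9, 10]
print(ordenar([5, 3, 8, 1, 9, 2]))   # [1, 2, 3, 5, 8, 9]
print(ordenar([]))                   # [] since lists of length <= 1 are returned as-is
```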
#### File: src/search/quicksort.py
```python
def partition(lista, inf, sup):
pivot = lista[sup]
index = inf - 1
for i in range(inf, sup):
if lista[i] <= pivot:
index += 1
lista[i], lista[index] = lista[index], lista[i]
lista[sup], lista[index + 1] = lista[index + 1], lista[sup]
return index + 1
def quicksort(lista, inf, sup):
stack = [(inf, sup)]
while stack:
inf, sup = stack.pop()
mid_point = partition(lista, inf, sup)
if mid_point > inf:
stack.append((inf, mid_point - 1))
if mid_point < sup:
stack.append((mid_point + 1, sup))
return lista
def ordenar(lista):
return quicksort(lista, 0, len(lista) - 1)
```
#### File: src/utils/antimergesort.py
```python
import math
import mergesort
def antimerge(ordenada):
if len(ordenada) <= 1:
return ordenada
ordenadaSinUltimo = list(ordenada)
ultimo = ordenadaSinUltimo.pop()
indice_medio = int(math.floor(len(ordenadaSinUltimo) / 2))
l1 = ordenadaSinUltimo[0:indice_medio] + [ultimo]
l2 = ordenadaSinUltimo[indice_medio:]
return l2, l1
def antimergesort(ordenada):
if len(ordenada) <= 1:
return ordenada
l1, l2 = antimerge(ordenada)
return antimergesort(l1) + antimergesort(l2)
def desordenar(ordenada):
return antimergesort(ordenada)
```
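antimergesort builds a permutation of an already-sorted list that is intended as an adversarial input for the merge sort above. A hedged usage sketch (antimergesort does a flat `import mergesort`, so both module directories are assumed to be on sys.path, matching how deshacer.py below loads these modules):
```python
import sys
sys.path.extend(['src/search', 'src/utils'])  # assumed layout for the flat imports
from mergesort import ordenar
from antimergesort import desordenar
sorted_list = list(range(8))
shuffled = desordenar(sorted_list)       # a permutation of 0..7 meant to stress merge()
print(shuffled)
print(ordenar(shuffled) == sorted_list)  # True: merge sort still recovers the sorted order
```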
#### File: src/utils/deshacer.py
```python
from importlib import import_module
import sys
def deshacer(nombreModulo, nombreCsv, nombreCsvSalida):
desordenar = getattr(import_module(nombreModulo), "desordenar")
with open(nombreCsv, "r") as archivoCsv:
lista = archivoCsv.read().split(",")
lista = map(lambda x: float(x), lista)
ordenada = sorted(lista)
desordenada = desordenar(ordenada)
stringGuardar = ",".join(map(str, desordenada))
with open(nombreCsvSalida, "w") as archivoCsvSalida:
archivoCsvSalida.write(stringGuardar)
print("primer argumento: modulo a probar")
print("segundo argumento: archivo entrada")
print("segundo argumento: archivo salida")
if len(sys.argv) >= 4:
nombreModulo = sys.argv[1]
nombreEntrada = sys.argv[2]
nombreSalida = sys.argv[3]
deshacer(nombreModulo, nombreEntrada, nombreSalida)
if len(sys.argv) == 2:
nombreModulo = sys.argv[1]
for i in range(10):
nombreEntrada = "sets/" + str(i) + ".csv"
nombreSalida = "sets/" + str(i) + nombreModulo + ".csv"
deshacer(nombreModulo, nombreEntrada, nombreSalida)
```
#### File: equipo-q-tp1/test/gale_shapely_test.py
```python
import unittest
import random
from src.gale_shapely.gale_shapely import gale_shapely
class GSTest(unittest.TestCase):
def test_random_preferences(self):
teams = 100
players = 1000
players_list = [i for i in range(players)]
t = {}
for i in range(teams):
random.shuffle(players_list)
t[i] = players_list.copy()
teams = list(t.keys())
p = {}
for i in range(players):
random.shuffle(teams)
p[i] = teams.copy()
self.run_gs(p, t)
def test_same_team_prefs(self):
teams = 100
players = 1000
t = {
i: [j for j in range(players)]
for i in range(teams)
}
teams = list(t.keys())
p = {}
for i in range(players):
random.shuffle(teams)
p[i] = teams.copy()
self.run_gs(p, t)
def test_same_player_prefs(self):
teams = 100
players = 1000
players_list = [i for i in range(players)]
t = {}
for i in range(teams):
random.shuffle(players_list)
t[i] = players_list.copy()
p = {
i: [j for j in range(teams)]
for i in range(players)
}
self.run_gs(p, t)
def test_simple_case(self):
p = {0: [0, 1], 1: [0, 1], 2: [0, 1], 3: [1, 0]}
t = {0: [3, 1, 0, 2], 1: [1, 2, 3, 0]}
self.run_gs(p, t)
def run_gs(self, p, t):
matches = gale_shapely(t, p)
for key in p:
p[key] = {item: i for i, item in enumerate(p[key])}
for key in t:
t[key] = {item: i for i, item in enumerate(t[key])}
self.assertNotEqual(len(matches), 0)
for team, player in matches:
for other_team in p[player].keys():
if other_team == team:
continue
                # Compare the match (team, player) with (other_team, other_player)
                # There is instability if team prefers other_player over player,
                # and other_team prefers player over other_player
for _team, other_player in matches:
if _team != other_team:
continue
player_prefers_other_team = p[player][other_team] < p[player][team]
other_team_prefers_player = t[other_team][player] < t[other_team][other_player]
both = player_prefers_other_team and other_team_prefers_player
team_prefers_other_player = t[team][other_player] < t[team][player]
other_player_prefers_team = p[other_player][team] < p[other_player][other_team]
other_both = team_prefers_other_player and other_player_prefers_team
self.assertFalse(both or other_both)
``` |
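The comments in run_gs describe the blocking-pair test for stability. A self-contained sketch of that check for the one-to-one case (a hypothetical is_stable helper, not part of this repo, assuming every player in the preference lists is matched) is:
```python
def is_stable(matches, team_prefs, player_prefs):
    # rank[x][y] = position of y in x's preference list (lower is better)
    team_rank = {t: {p: i for i, p in enumerate(prefs)} for t, prefs in team_prefs.items()}
    player_rank = {p: {t: i for i, t in enumerate(prefs)} for p, prefs in player_prefs.items()}
    team_of = {player: team for team, player in matches}
    for team, player in matches:
        for other_player, rank in team_rank[team].items():
            if other_player == player or other_player not in team_of:
                continue
            other_team = team_of[other_player]
            # blocking pair: team prefers other_player over its player AND other_player prefers team
            if rank < team_rank[team][player] and player_rank[other_player][team] < player_rank[other_player][other_team]:
                return False
    return True
team_prefs = {0: [0, 1], 1: [1, 0]}
player_prefs = {0: [0, 1], 1: [1, 0]}
print(is_stable([(0, 0), (1, 1)], team_prefs, player_prefs))  # True: everyone gets a first choice
print(is_stable([(0, 1), (1, 0)], team_prefs, player_prefs))  # False: team 0 and player 0 block it
```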
{
"source": "JiscDACT/qualkit",
"score": 3
} |
#### File: qualkit/test/test_clean.py
```python
import pandas as pd
import qualkit.clean
def test_replace_dont_knows():
text = qualkit.clean.__replace_dont_knows__('i dont know', 'idk')
assert text == 'idk'
def test_replace_domain_terms():
text = 'I use Blackboard'
domain = ['Blackboard', 'Moodle']
text = qualkit.clean.replace_domain_terms(text, domain, "a VLE")
assert text == 'I use a VLE'
def test_lemmatize():
text = {"text": ['more seminars running helping cooks find']}
df = pd.DataFrame(text, columns=['text'])
df = qualkit.clean.lemmatize(df, 'text')
output = df['text'][0]
assert output == 'more seminar run help cook find'
def test_lemmatize_string():
output = qualkit.clean.lemmatize_string('more seminars running helping cooks find')
assert output == 'more seminar run help cook find'
def test_remove_dont_knows():
text = {"text":
[
'i think it needs more salt',
'i really dont know',
'i dont know',
'i have no idea',
'no comment',
'its too spicy',
'idk'
]
}
df = pd.DataFrame(text, columns=['text'])
df = qualkit.clean.remove_dont_knows(df, 'text')
assert df.size == 2
assert df['text'].iloc[0] == 'i think it needs more salt'
def test_clean():
text = {"text": ["I'm a teapot"]}
df = pd.DataFrame(text, columns=['text'])
df = qualkit.clean.clean(df, 'text')
assert df['cleaned'].iloc[0] == 'im a teapot'
assert df['text'].iloc[0] == 'I\'m a teapot'
def test_clean_inner():
text = {"text": ["-"]}
df = pd.DataFrame(text)
df = qualkit.clean.__clean__(df, 'text', inplace=False)
assert df['text'].iloc[0] == '-'
assert pd.isnull(df['cleaned'].iloc[0])
def test_clean_inplace():
text = {"text": ["I'm a teapot"]}
df = pd.DataFrame(text, columns=['text'])
df = qualkit.clean.clean(df, 'text', inplace=True)
assert df['text'].iloc[0] == 'im a teapot'
def test_clean_multiple_inplace():
text = {
"text1": ["I'm a teapot", "", "row three :)"],
"text2": ['', '', ''],
"text3": ['Short and stout', 'Bananas are the only fruit!', '']
}
df = pd.DataFrame(text)
df = qualkit.clean.clean(df, ['text1', 'text2', 'text3'], inplace=True)
assert df['text1'].iloc[0] == 'im a teapot'
def test_clean_multiple():
text = {
"text1": ["I'm a teapot", "", "row three :)"],
"text2": ['', '', ''],
"text3": ['Short and stout', 'Bananas are the only fruit!', '']
}
df = pd.DataFrame(text)
df = qualkit.clean.clean(df, ['text1', 'text2', 'text3'])
assert df['cleaned'].iloc[0] == 'im a teapot'
``` |
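A brief usage sketch tying these tests together; it assumes the qualkit package is importable and that remove_dont_knows accepts any column name, as its test above suggests:
```python
import pandas as pd
from qualkit.clean import clean, remove_dont_knows, lemmatize_string
df = pd.DataFrame({'text': ["I'm not sure the VLE helps", "i dont know", "More recorded lectures please"]})
df = clean(df, 'text')                 # adds a lower-cased, cleaned-up 'cleaned' column (see test_clean)
df = remove_dont_knows(df, 'cleaned')  # expected to drop the "don't know" style non-answer
print(df[['text', 'cleaned']])
print(lemmatize_string('more seminars running helping cooks find'))  # 'more seminar run help cook find'
```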
{
"source": "JiscPER/jper",
"score": 3
} |
#### File: service/models/notifications.py
```python
from octopus.lib import dataobj
from service import dao
from copy import deepcopy
from octopus.modules.identifiers import postcode
import requests, json
from octopus.core import app
class NotificationMetadata(dataobj.DataObj):
"""
Class to represent the standard bibliographic metadata that a notification may contain
See the core system model documentation for details on the JSON structure used by this model.
It provides the "metadata" portion of all Notification objects that extend from this one.
"""
def __init__(self, raw=None):
"""
Create a new instance of the NotificationMetadata object, optionally around the
raw python dictionary.
In reality, this class provides a base-class for all other notification-like objects
(in this module and in others) so you will never instantiate it directly.
If supplied, the raw dictionary will be validated against the allowed structure of this
object, and an exception will be raised if it does not validate
:param raw: python dict object containing the metadata
"""
struct = {
"objects" : [
"metadata"
],
"structs" : {
"metadata" : {
"fields" : {
"title" : {"coerce" :"unicode"},
"version" : {"coerce" :"unicode"},
"publisher" : {"coerce" :"unicode"},
"type" : {"coerce" :"unicode"},
"language" : {"coerce" :"isolang"},
"publication_date" : {"coerce" :"utcdatetime"},
"date_accepted" : {"coerce" :"utcdatetime"},
"date_submitted" : {"coerce" :"utcdatetime"}
},
"objects" : [
"source", "license_ref"
],
"lists" : {
"identifier" : {"contains" : "object"},
"author" : {"contains" : "object"},
"project" : {"contains" : "object"},
"subject" : {"contains" : "field", "coerce" : "unicode"}
},
"required" : [],
"structs" : {
"source" : {
"fields" : {
"name" : {"coerce" : "unicode"},
},
"lists" : {
"identifier" : {"contains" : "object"}
},
"structs" : {
"identifier" : {
"fields" : {
"type" : {"coerce" : "unicode"},
"id" : {"coerce" : "unicode"}
}
}
}
},
"license_ref" : {
"fields" : {
"title" : {"coerce" : "unicode"},
"type" : {"coerce" : "unicode"},
"url" : {"coerce" : "unicode"},
"version" : {"coerce" : "unicode"}
}
},
"identifier" : {
"fields" : {
"type" : {"coerce" : "unicode"},
"id" : {"coerce" : "unicode"}
}
},
"author" : {
"fields" : {
"name" : {"coerce" : "unicode"},
"affiliation" : {"coerce" : "unicode"},
},
"lists" : {
"identifier" : {"contains" : "object"}
},
"structs" : {
"identifier" : {
"fields" : {
"type" : {"coerce" : "unicode"},
"id" : {"coerce" : "unicode"}
}
}
}
},
"project" : {
"fields" : {
"name" : {"coerce" : "unicode"},
"grant_number" : {"coerce" : "unicode"},
},
"lists" : {
"identifier" : {"contains" : "object"}
},
"structs" : {
"identifier" : {
"fields" : {
"type" : {"coerce" : "unicode"},
"id" : {"coerce" : "unicode"}
}
}
}
}
}
}
}
}
self._add_struct(struct)
super(NotificationMetadata, self).__init__(raw)
@property
def title(self):
"""
The title of the work represented by this metadata
:return: The title
"""
return self._get_single("metadata.title", coerce=dataobj.to_unicode())
@title.setter
def title(self, val):
"""
Set the title of the work represented by this metadata
:param val: the title
"""
self._set_single("metadata.title", val, coerce=dataobj.to_unicode(), allow_none=False, ignore_none=True)
@property
def version(self):
"""
The version of the work represented by this metadata. For example whether it is the publisher's or author's version
:return: The version
"""
return self._get_single("metadata.version", coerce=dataobj.to_unicode())
@version.setter
def version(self, val):
"""
Set the version of the work represented by this metadata
:param val: the version
"""
self._set_single("metadata.version", val, coerce=dataobj.to_unicode())
@property
def type(self):
"""
The publication type of the work represented by this metadata
:return: The publication type
"""
return self._get_single("metadata.type", coerce=dataobj.to_unicode())
@type.setter
def type(self, val):
"""
Set the publication type of the work represented by this metadata
:param val: the publication type
"""
self._set_single("metadata.type", val, coerce=dataobj.to_unicode(), allow_none=False, ignore_none=True)
@property
def publisher(self):
"""
The publisher of the work represented by this metadata
:return: The publisher
"""
return self._get_single("metadata.publisher", coerce=dataobj.to_unicode())
@publisher.setter
def publisher(self, val):
"""
Set the publisher of the work represented by this metadata
:param val: the publisher
"""
self._set_single("metadata.publisher", val, coerce=dataobj.to_unicode(), allow_none=False, ignore_none=True)
@property
def language(self):
"""
The language of the work represented by this metadata. SHOULD be the ISO code for this language, provided it was set
originally via the language setter, but it is not STRICTLY guaranteed.
:return: The language
"""
# Note that in this case we don't coerce to iso language, as it's a slightly costly operation, and all incoming
# data should already be coerced
return self._get_single("metadata.language", coerce=dataobj.to_unicode())
@language.setter
def language(self, val):
"""
        Set the language of the work represented by this metadata. This method will attempt to coerce the language to
the appropriate ISO language code, but if it fails it will accept the value anyway.
:param val: the language, ideally as an ISO code, or something that can be converted to it
"""
self._set_single("metadata.language", val, coerce=dataobj.to_isolang(), allow_coerce_failure=True, allow_none=False, ignore_none=True)
@property
def publication_date(self):
"""
The publication date of the work represented by this metadata, as a string, of the form YYYY-MM-DDTHH:MM:SSZ
:return: The publication date string
"""
return self._get_single("metadata.publication_date", coerce=dataobj.date_str())
@publication_date.setter
def publication_date(self, val):
"""
        Set the publication date of the work represented by this metadata, as a string. It will attempt to coerce to the correct ISO form
(YYYY-MM-DDTHH:MM:SSZ) but will accept the value even if the coerce fails.
:param val: the publication date, ideally in the form YYYY-MM-DDTHH:MM:SSZ, or a similar form that can be read
"""
self._set_single("metadata.publication_date", val, coerce=dataobj.date_str(), allow_coerce_failure=True, allow_none=False, ignore_none=True)
@property
def date_accepted(self):
"""
        The accepted-for-publication date of the work represented by this metadata, as a string, of the form YYYY-MM-DDTHH:MM:SSZ
:return: The accepted date
"""
return self._get_single("metadata.date_accepted", coerce=dataobj.date_str())
@date_accepted.setter
def date_accepted(self, val):
"""
Set the accepted-for-publication date of the work represented by this metadata, as a string. It will attempt to coerce to the correct ISO form
(YYYY-MM-DDTHH:MM:SSZ) but will accept the value even if the coerce fails.
:param val: the accepted date, ideally in the form YYYY-MM-DDTHH:MM:SSZ, or a similar form that can be read
"""
self._set_single("metadata.date_accepted", val, coerce=dataobj.date_str(), allow_coerce_failure=True, allow_none=False, ignore_none=True)
@property
def date_submitted(self):
"""
The date submitted for publication of the work represented by this metadata, as a string, of the form YYYY-MM-DDTHH:MM:SSZ
:return: The submitted date
"""
return self._get_single("metadata.date_submitted", coerce=dataobj.date_str())
@date_submitted.setter
def date_submitted(self, val):
"""
Set the submitted-for-publication date of the work represented by this metadata, as a string. It will attempt to coerce to the correct ISO form
(YYYY-MM-DDTHH:MM:SSZ) but will accept the value even if the coerce fails.
:param val: the submitted date, ideally in the form YYYY-MM-DDTHH:MM:SSZ, or a similar form that can be read
"""
self._set_single("metadata.date_submitted", val, coerce=dataobj.date_str(), allow_coerce_failure=True, allow_none=False, ignore_none=True)
@property
def identifiers(self):
"""
The list of identifier objects for the work represented by this metadata. The returned objects look like:
::
{"type" : "<identifier type>", "id" : "<actual identifier>" }
:return: List of python dict objects containing the identifier information
"""
return self._get_list("metadata.identifier")
def get_identifiers(self, type):
"""
The list of identifiers for the work represented by this metadata, as filtered by type.
Unlike .identifiers, this returns a list of strings of the actual identifiers, rather than the dict representation.
:return: List of identifiers of the requested type
"""
ids = self._get_list("metadata.identifier")
res = []
for i in ids:
if i.get("type") == type:
res.append(i.get("id"))
return res
def add_identifier(self, id, type):
"""
Add an identifier for the work, with the given id and type
:param id: the id for the work (e.g. DOI)
:param type: the type of identifier (e.g "doi")
:return:
"""
if id is None or type is None:
return
uc = dataobj.to_unicode()
obj = {"id" : self._coerce(id, uc), "type" : self._coerce(type, uc)}
self._delete_from_list("metadata.identifier", matchsub=obj, prune=False)
self._add_to_list("metadata.identifier", obj)
@property
def authors(self):
"""
The list of author objects for the work represented by this metadata. The returned objects look like:
::
{
"name" : "<author name>",
"identifier" : [
{"type" : "<identifier type>", "id" : "<actual identifier>"}
],
"affiliation" : "<author affiliation>"
}
:return: List of python dict objects containing the author information
"""
return self._get_list("metadata.author")
@authors.setter
def authors(self, objlist):
"""
Set the supplied list of author objects as the authors for this work.
The structure of each author object will be validated, and the values coerced to unicode where necessary.
Author objects should be of the form:
::
{
"name" : "<author name>",
"identifier" : [
{"type" : "<identifier type>", "id" : "<actual identifier>"}
],
"affiliation" : "<author affiliation>"
}
:param objlist: list of author objects
:return:
"""
# validate the object structure quickly
allowed = ["name", "affiliation", "identifier"]
for obj in objlist:
for k in obj.keys():
if k not in allowed:
raise dataobj.DataSchemaException("Author object must only contain the following keys: {x}".format(x=", ".join(allowed)))
# coerce the values of some of the keys
uc = dataobj.to_unicode()
for k in ["name", "affiliation"]:
if k in obj:
obj[k] = self._coerce(obj[k], uc)
# finally write it
self._set_list("metadata.author", objlist)
def add_author(self, author_object):
"""
Add a single author object to the existing list of author objects.
Additions are not validated or coerced, so use with extreme caution (or not at all)
Author objects should be of the form:
::
{
"name" : "<author name>",
"identifier" : [
{"type" : "<identifier type>", "id" : "<actual identifier>"}
],
"affiliation" : "<author affiliation>"
}
:param author_object: author object to add
:return:
"""
self._delete_from_list("metadata.author", matchsub=author_object)
self._add_to_list("metadata.author", author_object)
@property
def projects(self):
"""
The list of project/funder objects for the work represented by this metadata. The returned objects look like:
Note that this method is "project" rather than "funder" to line up with the RIOXX recommendations
::
{
"name" : "<name of funder>",
"identifier" : [
{"type" : "<identifier type>", "id" : "<funder identifier>"}
],
"grant_number" : "<funder's grant number>"
}
:return: List of python dict objects containing the project/funder information
"""
return self._get_list("metadata.project")
@projects.setter
def projects(self, objlist):
"""
        Set the supplied list of project/funder objects as the projects for this work.
The structure of each project object will be validated, and the values coerced to unicode where necessary.
Project objects should be of the form:
::
{
"name" : "<name of funder>",
"identifier" : [
{"type" : "<identifier type>", "id" : "<funder identifier>"}
],
"grant_number" : "<funder's grant number>"
}
:param objlist: list of project objects
:return:
"""
# validate the object structure quickly
allowed = ["name", "grant_number", "identifier"]
for obj in objlist:
for k in obj.keys():
if k not in allowed:
raise dataobj.DataSchemaException("Project object must only contain the following keys: {x}".format(x=", ".join(allowed)))
# coerce the values of some of the keys
uc = dataobj.to_unicode()
for k in ["name", "grant_number"]:
if k in obj:
obj[k] = self._coerce(obj[k], uc)
# finally write it
self._set_list("metadata.project", objlist)
def add_project(self, project_obj):
"""
Add a single project object to the existing list of project objects.
Additions are not validated or coerced, so use with extreme caution (or not at all)
Project objects should be of the form:
::
{
"name" : "<name of funder>",
"identifier" : [
{"type" : "<identifier type>", "id" : "<funder identifier>"}
],
"grant_number" : "<funder's grant number>"
}
:param project_obj: project object to add
:return:
"""
self._delete_from_list("metadata.project", matchsub=project_obj)
self._add_to_list("metadata.project", project_obj)
@property
def subjects(self):
"""
The list of subject strings of the work represented by this metadata
:return: list of subjects
"""
return self._get_list("metadata.subject")
def add_subject(self, kw):
"""
Add a subject keyword to the list of subject keywords
:param kw: new subject keyword
:return:
"""
self._add_to_list("metadata.subject", kw, coerce=dataobj.to_unicode(), unique=True)
@property
def license(self):
"""
        The license information for the work represented by this metadata
The returned object is as follows:
::
{
"title" : "<name of licence>",
"type" : "<type>",
"url" : "<url>",
"version" : "<version>",
}
:return: The license information as a python dict object
"""
return self._get_single("metadata.license_ref")
@license.setter
def license(self, obj):
"""
Set the licence object
The object will be validated and types coerced as needed.
The supplied object should be structured as follows:
::
{
"title" : "<name of licence>",
"type" : "<type>",
"url" : "<url>",
"version" : "<version>",
}
:param obj: the licence object as a dict
:return:
"""
# validate the object structure quickly
allowed = ["title", "type", "url", "version"]
for k in obj.keys():
if k not in allowed:
raise dataobj.DataSchemaException("License object must only contain the following keys: {x}".format(x=", ".join(allowed)))
# coerce the values of the keys
uc = dataobj.to_unicode()
for k in allowed:
if k in obj:
obj[k] = self._coerce(obj[k], uc)
# finally write it
self._set_single("metadata.license_ref", obj)
def set_license(self, type, url):
"""
        Set the licence with the supplied type and url.
:param type: the name/type of the licence (e.g. CC-BY)
:param url: the url where more information about the licence can be found
:return:
"""
uc = dataobj.to_unicode()
type = self._coerce(type, uc)
url = self._coerce(url, uc)
obj = {"title" : type, "type" : type, "url" : url}
self._set_single("metadata.license_ref", obj)
@property
def source_name(self):
"""
The name of the source (e.g. journal name) of the work represented by this metadata
:return: source name
"""
return self._get_single("metadata.source.name", coerce=dataobj.to_unicode())
@source_name.setter
def source_name(self, val):
"""
Set the name of the source (e.g. journal name) of the work represented by this metadata
:param val: name of the source
:return:
"""
self._set_single("metadata.source.name", val, coerce=dataobj.to_unicode())
@property
def source_identifiers(self):
"""
        The list of identifier objects for the source (e.g. journal) of the work represented by this metadata. The returned objects look like:
::
{"type" : "<identifier type>", "id" : "<actual identifier>" }
:return: List of python dict objects containing the identifier information for the source
"""
return self._get_list("metadata.source.identifier")
def add_source_identifier(self, type, id):
"""
Add an identifier for the source (e.g. an ISSN for a journal)
:param type: the type of identifier
:param id: the identifier itself
"""
if id is None or type is None:
return
uc = dataobj.to_unicode()
obj = {"id" : self._coerce(id, uc), "type" : self._coerce(type, uc)}
self._delete_from_list("metadata.source.identifier", matchsub=obj, prune=False)
self._add_to_list("metadata.source.identifier", obj)
class BaseNotification(NotificationMetadata):
"""
Class to provide a baseline for all stored notifications (both routed and unrouted) in the core of the system
In addition to the properties that it gets from the NotificationMetadata, it also adds meta-information
regarding the notification itself, such as related links, embargo information, provider information, etc
See the core system model documentation for details on the JSON structure used by this model.
It provides the basis for all Notification objects that extend from this one.
"""
def __init__(self, raw=None):
"""
Create a new instance of the BaseNotification object, optionally around the
raw python dictionary.
In reality, this class provides a base-class for all other notification-like objects
in this module, so you will never instantiate it directly. See UnroutedNotification
or RoutedNotification instead.
If supplied, the raw dictionary will be validated against the allowed structure of this
object, and an exception will be raised if it does not validate
:param raw: python dict object containing the base notification data
"""
struct = {
"fields" : {
"id" : {"coerce" :"unicode"},
"created_date" : {"coerce" : "utcdatetime"},
"last_updated" : {"coerce" : "utcdatetime"},
"event" : {"coerce" : "unicode"}
},
"objects" : [
"provider", "content", "embargo"
],
"lists" : {
"links" : {"contains" : "object"}
},
"reqired" : [],
"structs" : {
"provider" : {
"fields" : {
"id" : {"coerce" :"unicode"},
"agent" : {"coerce" : "unicode"},
"ref" : {"coerce" : "unicode"},
"route" :{"coerce" : "unicode"}
},
"required" : []
},
"content" : {
"fields" : {
"packaging_format" : {"coerce" :"unicode"}
},
"required" : []
},
"embargo" : {
"fields" : {
"end" : {"coerce" : "utcdatetime"},
"start" : {"coerce" : "utcdatetime"},
"duration" : {"coerce" : "integer"}
}
},
"links" : {
"fields" : {
"type" : {"coerce" :"unicode"},
"format" : {"coerce" :"unicode"},
"access" : {"coerce" :"unicode", "allowed_values" : ["router", "public"]},
"url" : {"coerce" :"url"},
"packaging" : {"coerce" : "unicode"},
"proxy" : {"coerce" : "unicode"}
}
}
}
}
self._add_struct(struct)
super(BaseNotification, self).__init__(raw)
@property
def packaging_format(self):
"""
Get the packaging format identifier of the associated binary content
:return: the packaging format identifier
"""
return self._get_single("content.packaging_format", coerce=dataobj.to_unicode())
@property
def links(self):
"""
Get the list of link objects associated with this notification
Link objects are of the form
::
{
"type" : "<link type: splash|fulltext>",
"format" : "<text/html|application/pdf|application/xml|application/zip|...>",
"access" : "<type of access control on the resource: 'router' (reuqires router auth) or 'public' (no auth)>",
"url" : "<provider's splash, fulltext or machine readable page>",
"packaging" : "<packaging format identifier>",
"proxy": "<the ID of the proxy link>"
}
For more information about links, see the overall system documentation
:return: list of link objects
"""
return self._get_list("links")
def add_link(self, url, type, format, access, packaging=None):
"""
Add a link object to the current list of links
:param url: The url for the resource the link points to
:param type: The type of resource to be retrieved
        :param format: The format/mimetype of the resource to be retrieved
:param access: The access level of this link: router or public
:param packaging: The packaging format identifier for this resource if required
"""
if access not in ["router", "public"]:
raise dataobj.DataSchemaException("link access must be 'router' or 'public'")
uc = dataobj.to_unicode()
obj = {
"url" : self._coerce(url, uc),
"type" : self._coerce(type, uc),
"format" : self._coerce(format, uc),
"access" : self._coerce(access, uc)
}
if packaging is not None:
obj["packaging"] = self._coerce(packaging, uc)
self._add_to_list("links", obj)
@property
def provider_id(self):
"""
The id of the provider of this notification, which will be their account name
:return: the provider id/account name
"""
return self._get_single("provider.id", coerce=dataobj.to_unicode())
@provider_id.setter
def provider_id(self, val):
"""
Set the id of the provider of this notification, which should be their account name
:param val: the provider id/account name
"""
self._set_single("provider.id", val, coerce=dataobj.to_unicode())
def match_data(self):
"""
Get the match data object which corresponds to the metadata held in this notification
:return: a RoutingMetadata object which contains all the extracted metadata from this notification
"""
md = RoutingMetadata()
# urls - we don't have a specific place to look for these, so we may choose to mine the
# metadata for them later
# authors, and all their various properties
for a in self.authors:
# name
if "name" in a:
md.add_author_id(a.get("name"), "name")
# affiliation (and postcode)
if "affiliation" in a:
aff = a.get("affiliation")
md.add_affiliation(aff)
codes = postcode.extract_all(aff)
for code in codes:
md.add_postcode(code)
# other author ids
for id in a.get("identifier", []):
md.add_author_id(id.get("id"), id.get("type"))
if id.get("type") == "email":
md.add_email(id.get("id"))
# subjects
for s in self.subjects:
md.add_keyword(s)
# grants
for p in self.projects:
if "grant_number" in p:
md.add_grant_id(p.get("grant_number"))
# content type
if self.type is not None:
md.add_content_type(self.type)
return md
class RoutingInformation(dataobj.DataObj):
"""
Class which provides some additional data to any notification regarding the routing status
Any class which extends from this will get the following information added to its datastructure:
::
{
"analysis_date" : "<date the routing analysis was carried out>",
"repositories" : ["<ids of repository user accounts whcih match this notification>"]
}
"""
def __init__(self, raw=None):
"""
Create a new instance of the RoutingInformation object, optionally around the
raw python dictionary.
In reality, this class provides a data extension for other notification-like objects
in this module, so you will never instantiate it directly. See RoutedNotification instead.
If supplied, the raw dictionary will be validated against the allowed structure of this
object, and an exception will be raised if it does not validate
:param raw: python dict object containing the notification data
"""
struct = {
"fields" : {
"analysis_date" : {"coerce" : "utcdatetime"}
},
"lists" : {
"repositories" : {"contains" : "field", "coerce" : "unicode"}
}
}
self._add_struct(struct)
super(RoutingInformation, self).__init__(raw)
@property
def analysis_date(self):
"""
The date this notification was analysed for routing, as a string of the form YYYY-MM-DDTHH:MM:SSZ
:return: the analysis date
"""
return self._get_single("analysis_date", coerce=dataobj.date_str())
@analysis_date.setter
def analysis_date(self, val):
"""
Set the date this notification was analysed for routing, as a string of the form YYYY-MM-DDTHH:MM:SSZ
:param val: the analysis date
"""
self._set_single("analysis_date", val, coerce=dataobj.date_str())
@property
def analysis_datestamp(self):
"""
The date this notification was analysed for routing, as a datetime object
:return: the analysis date
"""
return self._get_single("analysis_date", coerce=dataobj.to_datestamp())
@property
def repositories(self):
"""
List of repository ids to which this notification was routed
:return: the list of repository ids
"""
return self._get_list("repositories", coerce=dataobj.to_unicode())
@repositories.setter
def repositories(self, val):
"""
Set the list of repository ids to which this notification was routed
:param val: the list of repository ids
"""
self._set_list("repositories", val, coerce=dataobj.to_unicode())
class UnroutedNotification(BaseNotification, dao.UnroutedNotificationDAO):
"""
Class which represents a notification that has been received into the system successfully
but has not yet been routed to any repository accounts.
It extends the BaseNotification and does not add any additional information, so see that class's
documentation for details of the data model.
This class also extends a DAO, which means it can be persisted.
"""
def __init__(self, raw=None):
"""
Create a new instance of the UnroutedNotification object, optionally around the
raw python dictionary.
If supplied, the raw dictionary will be validated against the allowed structure of this
object, and an exception will be raised if it does not validate
:param raw: python dict object containing the notification data
"""
super(UnroutedNotification, self).__init__(raw=raw)
@classmethod
def bulk_delete(cls,ids):
"""
Bulk delete all of the unrouted notifications specified by the ID
:param ids: ids of notifications to be deleted
"""
data = ''
for i in ids:
data += json.dumps( {'delete':{'_id':i}} ) + '\n'
r = requests.post(app.config['ELASTIC_SEARCH_HOST'] + '/' + app.config['ELASTIC_SEARCH_INDEX'] + '/unrouted/_bulk', data=data)
return r.json()
def make_routed(self):
"""
        Create an instance of a RoutedNotification from this object
Note that once this is done you'll still need to populate the RoutedNotification with all the appropriate
routing information.
:return: RoutedNotification
"""
d = deepcopy(self.data)
if "targets" in d:
del d["targets"]
routed = RoutedNotification(d)
return routed
def make_failed(self):
"""
Create an instance of a FailedNotification from this object.
This can be done if the object does not route to any repositories on analysis.
:return: FailedNotification
"""
d = deepcopy(self.data)
routed = FailedNotification(d)
return routed
def make_outgoing(self, provider=False):
"""
Create an instance of an OutgoingNotification or ProviderOutgoingNotification (depending on the provider flag supplied)
from this object.
This is suitable for use in exposing data to the API
:return: OutgoingNotification or ProviderOutgoingNotification
"""
d = deepcopy(self.data)
if "last_updated" in d:
del d["last_updated"]
if not provider:
if "provider" in d:
del d["provider"]
if "content" in d and "store_id" in d.get("content", {}):
del d["content"]["store_id"]
# filter out all non-router links if the request is not for the provider copy
if "links" in d:
keep = []
for link in d.get("links", []):
if provider: # if you're the provider keep all the links
if "access" in link:
del link["access"]
keep.append(link)
elif link.get("access") == "router": # otherwise, only share router links
del link["access"]
keep.append(link)
if len(keep) > 0:
d["links"] = keep
else:
if "links" in d:
del d["links"]
# delayed import required because of circular dependencies
from service.models import OutgoingNotification, ProviderOutgoingNotification
if not provider:
return OutgoingNotification(d)
else:
return ProviderOutgoingNotification(d)
class RoutedNotification(BaseNotification, RoutingInformation, dao.RoutedNotificationDAO):
"""
Class which represents a notification that has been received into the system and successfully
routed to one or more repository accounts
It extends the BaseNotification and mixes that with the RoutingInformation, so see both of
those class definitions for the data that is held.
This class also extends a DAO, which means it can be persisted.
"""
def __init__(self, raw=None):
"""
Create a new instance of the RoutedNotification object, optionally around the
raw python dictionary.
If supplied, the raw dictionary will be validated against the allowed structure of this
object, and an exception will be raised if it does not validate
:param raw: python dict object containing the notification data
"""
super(RoutedNotification, self).__init__(raw=raw)
def make_outgoing(self, provider=False):
"""
Create an instance of an OutgoingNotification or ProviderOutgoingNotification (depending on the provider flag supplied)
from this object.
This is suitable for use in exposing data to the API
:return: OutgoingNotification or ProviderOutgoingNotification
"""
d = deepcopy(self.data)
if "last_updated" in d:
del d["last_updated"]
if not provider:
if "provider" in d:
del d["provider"]
if "content" in d and "store_id" in d.get("content", {}):
del d["content"]["store_id"]
if "repositories" in d:
del d["repositories"]
# filter out all non-router links if the request is not for the provider copy
if "links" in d:
keep = []
for link in d.get("links", []):
if provider: # if you're the provider keep all the links
if "access" in link:
del link["access"]
keep.append(link)
elif link.get("access") == "router": # otherwise, only share router links
del link["access"]
keep.append(link)
if len(keep) > 0:
d["links"] = keep
else:
if "links" in d:
del d["links"]
# delayed import required because of circular dependencies
from service.models import OutgoingNotification, ProviderOutgoingNotification
if not provider:
return OutgoingNotification(d)
else:
return ProviderOutgoingNotification(d)
class FailedNotification(BaseNotification, RoutingInformation, dao.FailedNotificationDAO):
"""
Class which represents a notification that has been received into the system but has not
been able to be routed to any repository accounts
It extends the BaseNotification and mixes that with the RoutingInformation, so see both of
those class definitions for the data that is held.
This class is basically the same as the RoutedNotification, but exists to differentiate
itself within the system so that it can be persisted separately.
This class also extends a DAO, which means it can be persisted.
"""
def __init__(self, raw=None):
"""
Create a new instance of the FailedNotification object, optionally around the
raw python dictionary.
If supplied, the raw dictionary will be validated against the allowed structure of this
object, and an exception will be raised if it does not validate
:param raw: python dict object containing the notification data
"""
super(FailedNotification, self).__init__(raw=raw)
class RoutingMetadata(dataobj.DataObj):
"""
Class to represent the metadata that can be extracted from a notification (or associated
binary content) which can be used to determine the routing to repository accounts (by comparison
to a RepositoryConfig object).
See the core system model documentation for details on the JSON structure used by this model.
"""
def __init__(self, raw=None):
"""
Create a new instance of the RoutingMetadata object, optionally around the
raw python dictionary.
If supplied, the raw dictionary will be validated against the allowed structure of this
object, and an exception will be raised if it does not validate
:param raw: python dict object containing the notification data
"""
struct = {
"lists" : {
"urls" : {"contains" : "field", "coerce" : "unicode"},
"emails" : {"contains" : "field", "coerce" : "unicode"},
"affiliations" : {"contains" : "field", "coerce" : "unicode"},
"author_ids" : {"contains" : "object"},
"postcodes" : {"contains" : "field", "coerce" : "unicode"},
"keywords" : {"contains" : "field", "coerce" : "unicode"},
"grants" : {"contains" : "field", "coerce" : "unicode"},
"content_types" : {"contains" : "field", "coerce" : "unicode"}
},
"structs" : {
"author_ids" : {
"fields" : {
"id" : {"coerce" : "unicode"},
"type" : {"coerce" : "unicode"}
}
}
}
}
self._add_struct(struct)
super(RoutingMetadata, self).__init__(raw=raw)
@property
def urls(self):
"""
The URLs in the routing metadata
:return: a list of urls
"""
return self._get_list("urls", coerce=dataobj.to_unicode())
@urls.setter
def urls(self, val):
"""
Set the URLs for this routing metadata
:param val: list of urls
:return:
"""
self._set_list("urls", val, coerce=dataobj.to_unicode())
def add_url(self, val):
"""
Add a url to the existing list of urls for this routing metadata
:param val: a url
:return:
"""
self._add_to_list("urls", val, coerce=dataobj.to_unicode(), unique=True)
@property
def author_ids(self):
"""
Get a list of author id objects in their raw form.
Author ids are represented as follows:
::
{
"id" : "<author id>",
"type" : "<type of author id>"
}
:return: list of author id objects
"""
return self._get_list("author_ids")
def add_author_id(self, id, type):
"""
Add an author id of the specified type
:param id: the author id itself (e.g. an ORCID)
:param type: the type of id (e.g "orcid")
:return:
"""
uc = dataobj.to_unicode()
obj = {"id" : self._coerce(id, uc), "type" : self._coerce(type, uc)}
self._delete_from_list("author_ids", matchsub=obj, prune=False)
self._add_to_list("author_ids", obj)
def get_author_ids(self, type=None):
"""
Get author ids of the specified type, as a list
        If the type is not supplied, this behaves the same as self.author_ids
        :param type: the type of id to return (e.g. "orcid")
        :return: a list of author id objects of the requested type
"""
if type is None:
return self.author_ids
else:
return [aid for aid in self._get_list("author_ids") if aid.get("type") == type]
@property
def affiliations(self):
"""
List of affiliations in this routing metadata
:return: list of affiliations
"""
return self._get_list("affiliations", coerce=dataobj.to_unicode())
def add_affiliation(self, aff):
"""
Add an affiliation to the existing list in this routing metadata
:param aff: affiliation
:return:
"""
self._add_to_list("affiliations", aff, coerce=dataobj.to_unicode(), unique=True)
@property
def grants(self):
"""
List of grants (i.e. numbers) in this routing metadata
:return: list of grants
"""
return self._get_list("grants", coerce=dataobj.to_unicode())
def add_grant_id(self, gid):
"""
add a grant id to the list of existing grants
:param gid: grant id
:return:
"""
self._add_to_list("grants", gid, coerce=dataobj.to_unicode(), unique=True)
@property
def keywords(self):
"""
List of keywords associated with this routing metadata
:return: list of keywords
"""
return self._get_list("keywords", coerce=dataobj.to_unicode())
@keywords.setter
def keywords(self, val):
"""
Set the list of keywords
:param val: list of keywords
:return:
"""
self._set_list("keywords", val, coerce=dataobj.to_unicode())
def add_keyword(self, kw):
"""
Add a keyword to the existing list
:param kw: keyword
:return:
"""
self._add_to_list("keywords", kw, coerce=dataobj.to_unicode(), unique=True)
@property
def emails(self):
"""
Get list of emails
:return: list of emails
"""
return self._get_list("emails", coerce=dataobj.to_unicode())
def add_email(self, email):
"""
Add an email to the existing list
:param email: email
:return:
"""
self._add_to_list("emails", email, coerce=dataobj.to_unicode(), unique=True)
@property
def content_types(self):
"""
Get list of content types
:return: list of content types
"""
return self._get_list("content_types", coerce=dataobj.to_unicode())
def add_content_type(self, val):
"""
Add a content type to the existing list
:param val: content type
:return:
"""
self._add_to_list("content_types", val, coerce=dataobj.to_unicode(), unique=True)
@property
def postcodes(self):
"""
Get a list of postcodes
:return: list of postcodes
"""
return self._get_list("postcodes", coerce=dataobj.to_unicode())
def add_postcode(self, val):
"""
Add a postcode to the existing list
        :param val: postcode
:return:
"""
self._add_to_list("postcodes", val, coerce=dataobj.to_unicode(), unique=True)
def has_data(self):
"""
Does this RoutingMetadata object currently have any metadata elements set?
:return: True/False whether there is data or not
"""
if len(self.data.keys()) == 0:
return False
for k, v in self.data.iteritems():
if v is not None and len(v) > 0:
return True
return False
def merge(self, other):
"""
Merge the supplied other RoutingMetadata object with this one.
The result will be that this object has any data from the other object that was not already present
:param other: another RoutingMetadata object
:return:
"""
for u in other.urls:
self.add_url(u)
for e in other.emails:
self.add_email(e)
for a in other.affiliations:
self.add_affiliation(a)
for aid in other.get_author_ids():
self.add_author_id(aid.get("id"), aid.get("type"))
for p in other.postcodes:
self.add_postcode(p)
for k in other.keywords:
self.add_keyword(k)
for g in other.grants:
self.add_grant_id(g)
for c in other.content_types:
self.add_content_type(c)
```
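
A minimal usage sketch of the notification model defined above, assuming the classes are importable from `service.models` (as the delayed imports in `make_outgoing` suggest). The field values, URL and repository id are invented for illustration, and nothing here calls the DAO persistence layer.

```python
# Illustrative sketch only: build an UnroutedNotification in memory using the
# properties defined in notifications.py, extract its routing metadata, and
# promote it to a RoutedNotification. Values are invented; no save() is called.
from service.models import UnroutedNotification

note = UnroutedNotification()
note.title = "An example article"
note.type = "article"
note.add_identifier("10.1234/example.doi", "doi")           # hypothetical DOI
note.add_author({
    "name": "A. Author",
    "affiliation": "Example University",
    "identifier": [{"type": "orcid", "id": "0000-0000-0000-0000"}]
})
note.add_link(
    url="https://example.org/fulltext.pdf",                 # hypothetical URL
    type="fulltext",
    format="application/pdf",
    access="router"
)

# RoutingMetadata extracted from the notification, as used during matching
md = note.match_data()
print(md.affiliations)

# After analysis, convert to a routed notification and record the routing info
routed = note.make_routed()
routed.analysis_date = "2015-08-01T00:00:00Z"
routed.repositories = ["repo-account-id"]                   # hypothetical account id
```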
#### File: jper/service/reports.py
```python
from service.models import RoutedNotification, Account
import os
from octopus.lib import clcsv
from copy import deepcopy
from datetime import datetime
from octopus.core import app
def delivery_report(from_date, to_date, reportfile):
"""
Generate the monthly report from from_date to to_date. It is assumed that from_date is
the start of a month, and to_date is the end of a month.
Dates must be strings of the form YYYY-MM-DDThh:mm:ssZ
:param from_date: start of month date from which to generate the report
:param to_date: end of month date up to which to generate the report (if this is not specified, it will default to datetime.utcnow())
:param reportfile: file path for existing/new report to be output
:return:
"""
# work out the whole months that we're operating over
frstamp = datetime.strptime(from_date, "%Y-%m-%dT%H:%M:%SZ")
if to_date is None:
tostamp = datetime.utcnow()
else:
tostamp = datetime.strptime(to_date, "%Y-%m-%dT%H:%M:%SZ")
months = range(frstamp.month, tostamp.month + 1)
# prep the data structures where we're going to record the results
result = {}
uniques = {}
for m in months:
uniques[m] = {"md" : 0, "content" : 0}
heis = {}
# go through each routed notification and count against the repository ids whether something is
# a md-only or a with-content notification, and at the same time count the unique md-only vs with-content
# notifications that were routed
q = DeliveryReportQuery(from_date, to_date)
for note in RoutedNotification.scroll(q.query(), page_size=100, keepalive="5m"):
assert isinstance(note, RoutedNotification)
nm = note.analysis_datestamp.month
is_with_content = False
if len(note.links) > 0:
is_with_content = True
uniques[nm]["content"] += 1
else:
uniques[nm]["md"] += 1
for r in note.repositories:
if r not in result:
result[r] = {}
for m in months:
result[r][m] = {"md" : 0, "content" : 0}
if is_with_content:
result[r][nm]["content"] += 1
else:
result[r][nm]["md"] += 1
# now flesh out the report with account names and totals
for k in result.keys():
acc = Account.pull(k)
if acc is None:
heis[k] = k
else:
if acc.repository_name is not None:
heis[k] = acc.repository_name
else:
heis[k] = k
for mon in result[k].keys():
result[k][mon]["total"] = result[k][mon]["md"] + result[k][mon]["content"]
for mon in uniques.keys():
uniques[mon]["total"] = uniques[mon]["md"] + uniques[mon]["content"]
# some constant bits of information we're going to need to convert the results into a table
# suitable for a CSV
month_names = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
headers = ['HEI','ID',
'Jan md-only', "Jan with-content", "Jan Total",
'Feb md-only', "Feb with-content", "Feb Total",
'Mar md-only', "Mar with-content", "Mar Total",
'Apr md-only', "Apr with-content", "Apr Total",
'May md-only', "May with-content", "May Total",
'Jun md-only', "Jun with-content", "Jun Total",
'Jul md-only', "Jul with-content", "Jul Total",
'Aug md-only', "Aug with-content", "Aug Total",
'Sep md-only', "Sep with-content", "Sep Total",
'Oct md-only', "Oct with-content", "Oct Total",
'Nov md-only', "Nov with-content", "Nov Total",
'Dec md-only', "Dec with-content", "Dec Total"]
template = {}
for k in headers:
template[k] = 0
# an interim data-structure that we'll use to store the objects to be written, which we
# can then order by the key (which will be the HEI name)
data = {}
# read any existing data in from the current spreadsheet
if os.path.exists(reportfile):
sofar = clcsv.ClCsv(file_path=reportfile)
for obj in sofar.objects():
# convert all the fields to integers as needed
for k in obj.keys():
if k not in ["HEI", "ID"]:
if obj[k] == "":
obj[k] = 0
else:
try:
obj[k] = int(obj[k])
except:
app.logger.warn(u"Unable to coerce existing report value '{x}' to an integer, so assuming it is 0".format(x=obj[k]))
obj[k] = 0
data[obj.get("HEI")] = obj
# now add any new data from the report
for id, res in result.iteritems():
hei = heis.get(id)
if hei not in data:
data[hei] = deepcopy(template)
data[hei]["HEI"] = hei
data[hei]["ID"] = id
for mon, info in res.iteritems():
mn = month_names[mon - 1]
mdk = mn + " md-only"
ctk = mn + " with-content"
tk = mn + " Total"
data[hei][mdk] = info.get("md")
data[hei][ctk] = info.get("content")
data[hei][tk] = info.get("total")
# remove the "total" and "unique" entries, as we need to re-create them
if "Total" in data:
del data["Total"]
existing_unique = deepcopy(template)
existing_unique["HEI"] = "Unique"
existing_unique["ID"] = ""
if "Unique" in data:
existing_unique = data["Unique"]
del data["Unique"]
# calculate the totals for all columns
totals = {}
for k in headers:
totals[k] = 0
totals["HEI"] = "Total"
totals["ID"] = ""
for hei, obj in data.iteritems():
for k, v in obj.iteritems():
if k in ["HEI", "ID"]:
continue
if isinstance(v, int):
totals[k] += v
data["Total"] = totals
# add the uniques
data["Unique"] = existing_unique
data["Unique"]["HEI"] = "Unique"
for mon, info in uniques.iteritems():
mn = month_names[mon - 1]
mdk = mn + " md-only"
ctk = mn + " with-content"
tk = mn + " Total"
data["Unique"][mdk] = info.get("md")
data["Unique"][ctk] = info.get("content")
data["Unique"][tk] = info.get("total")
orderedkeys = data.keys()
orderedkeys.remove('Unique')
orderedkeys.remove('Total')
orderedkeys.sort()
orderedkeys.append('Total')
orderedkeys.append('Unique')
# remove the old report file, so we can start with a fresh new one
try:
os.remove(reportfile)
except:
pass
out = clcsv.ClCsv(file_path=reportfile)
out.set_headers(headers)
for hk in orderedkeys:
hei = data[hk]
out.add_object(hei)
out.save()
class DeliveryReportQuery(object):
def __init__(self, from_date, to_date):
self.from_date = from_date
self.to_date = to_date
def query(self):
return {
"query" : {
"bool" : {
"must" : [
{
"range" : {
"analysis_date" : {
"gte" : self.from_date,
"lt" : self.to_date
}
}
}
]
}
},
"sort" : [
{"analysis_date" : {"order" : "asc"}}
]
}
```
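
A short, hedged sketch of how the reporting code above might be driven. The `DeliveryReportQuery` part is self-contained; the `delivery_report` call additionally assumes a configured Elasticsearch back-end behind `RoutedNotification` and `Account`. The dates and file name are invented.

```python
# Illustrative sketch: inspect the query built by DeliveryReportQuery and run
# the monthly delivery report into a CSV. Dates and paths are invented examples.
import json
from service.reports import DeliveryReportQuery, delivery_report

q = DeliveryReportQuery("2015-08-01T00:00:00Z", "2015-12-31T23:59:59Z")
print(json.dumps(q.query(), indent=2))  # range filter on analysis_date, sorted ascending

# Requires a working index behind RoutedNotification/Account; shown only to
# illustrate the expected arguments (from_date, to_date, reportfile).
delivery_report("2015-08-01T00:00:00Z", "2015-12-31T23:59:59Z", "monthly_report.csv")
```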
#### File: service/views/account.py
```python
from __future__ import division
import uuid, json, time, requests, re
from flask import Blueprint, request, url_for, flash, redirect, make_response
from flask import render_template, abort
from service.forms.adduser import AdduserForm
from flask.ext.login import login_user, logout_user, current_user
from octopus.core import app
from octopus.lib import webapp, dates
from service.api import JPER, ValidationException, ParameterException, UnauthorisedException
import pprint
import math
from service import models
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
blueprint = Blueprint('account', __name__)
def _list_request(repo_id=None):
"""
Process a list request, either against the full dataset or the specific repo_id supplied
This function will pull the arguments it requires out of the Flask request object. See the API documentation
for the parameters of these kinds of requests.
:param repo_id: the repo id to limit the request to
:return: Flask response containing the list of notifications that are appropriate to the parameters
"""
since = request.values.get("since")
page = request.values.get("page", app.config.get("DEFAULT_LIST_PAGE_START", 1))
page_size = request.values.get("pageSize", app.config.get("DEFAULT_LIST_PAGE_SIZE", 25))
if since is None or since == "":
return _bad_request("Missing required parameter 'since'")
try:
since = dates.reformat(since)
except ValueError as e:
return _bad_request("Unable to understand since date '{x}'".format(x=since))
try:
page = int(page)
except:
return _bad_request("'page' parameter is not an integer")
try:
page_size = int(page_size)
except:
return _bad_request("'pageSize' parameter is not an integer")
try:
nlist = JPER.list_notifications(current_user, since, page=page, page_size=page_size, repository_id=repo_id)
except ParameterException as e:
return _bad_request(e.message)
resp = make_response(nlist.json())
resp.mimetype = "application/json"
resp.status_code = 200
return resp
@blueprint.before_request
def restrict():
if current_user.is_anonymous():
if not request.path.endswith('login'):
return redirect(request.path.rsplit('/',1)[0] + '/login')
@blueprint.route('/')
def index():
if not current_user.is_super:
abort(401)
users = [[i['_source']['id'],i['_source']['email'],i['_source'].get('role',[])] for i in models.Account().query(q='*',size=1000000).get('hits',{}).get('hits',[])]
return render_template('account/users.html', users=users)
@blueprint.route('/details/<repo_id>', methods=["GET", "POST"])
def details(repo_id):
data = _list_request(repo_id)
acc = models.Account.pull(repo_id)
link = '/account/details'
date = request.args.get('since')
if date == '':
date = '01/08/2015'
if current_user.has_role('admin'):
link +='/' + acc.id + '?since='+date+'&api_key='+current_user.data['api_key']
else:
link += '?since=01/08/2015&api_key='+acc.data['api_key']
results = json.loads(data.response[0])
page_num = int(request.values.get("page", app.config.get("DEFAULT_LIST_PAGE_START", 1)))
num_of_pages = int(math.ceil(results['total']/results['pageSize']))
return render_template('account/details.html',repo=data.response, num_of_pages = num_of_pages, page_num = page_num, link = link,date=date)
@blueprint.route("/configview", methods=["GET","POST"])
@blueprint.route("/configview/<repoid>", methods=["GET","POST"])
def configView(repoid=None):
app.logger.debug(current_user.id + " " + request.method + " to config route")
if repoid is None:
if current_user.has_role('repository'):
repoid = current_user.id
elif current_user.has_role('admin'):
return '' # the admin cannot do anything at /config, but gets a 200 so it is clear they are allowed
else:
abort(400)
elif not current_user.has_role('admin'): # only the superuser can set a repo id directly
abort(401)
rec = models.RepositoryConfig().pull_by_repo(repoid)
if rec is None:
rec = models.RepositoryConfig()
rec.repository = repoid
if request.method == 'GET':
# get the config for the current user and return it
# this route may not actually be needed, but is convenient during development
# also it should be more than just the strings data once complex configs are accepted
resp = make_response(json.dumps(rec.data))
resp.mimetype = "application/json"
return render_template('account/configview.html',repo=resp.response)
elif request.method == 'POST':
if request.json:
saved = rec.set_repo_config(jsoncontent=request.json,repository=repoid)
else:
try:
if request.files['file'].filename.endswith('.csv'):
saved = rec.set_repo_config(csvfile=request.files['file'],repository=repoid)
elif request.files['file'].filename.endswith('.txt'):
saved = rec.set_repo_config(textfile=request.files['file'],repository=repoid)
except:
saved = False
if saved:
return ''
else:
abort(400)
@blueprint.route('/<username>', methods=['GET','POST', 'DELETE'])
def username(username):
acc = models.Account.pull(username)
if acc is None:
abort(404)
elif ( request.method == 'DELETE' or
( request.method == 'POST' and
request.values.get('submit','').split(' ')[0].lower() == 'delete' ) ):
if not current_user.is_super:
abort(401)
else:
acc.remove()
time.sleep(1)
flash('Account ' + acc.id + ' deleted')
return redirect(url_for('.index'))
elif request.method == 'POST':
if current_user.id != acc.id and not current_user.is_super:
abort(401)
if request.values.get('email',False):
acc.data['email'] = request.values['email']
if 'password' in request.values and not request.values['password'].startswith('sha1'):
if len(request.values['password']) < 8:
flash("Sorry. Password must be at least eight characters long", "error")
return render_template('account/user.html', account=acc)
else:
acc.set_password(request.values['password'])
acc.save()
time.sleep(2)
flash("Record updated", "success")
return render_template('account/user.html', account=acc)
elif current_user.id == acc.id or current_user.is_super:
if acc.has_role('repository'):
repoconfig = models.RepositoryConfig().pull_by_repo(acc.id)
else:
repoconfig = None
return render_template('account/user.html', account=acc, repoconfig=repoconfig)
else:
abort(404)
@blueprint.route('/<username>/pubinfo', methods=['POST'])
def pubinfo(username):
acc = models.Account.pull(username)
if current_user.id != acc.id and not current_user.is_super:
abort(401)
if 'embargo' not in acc.data: acc.data['embargo'] = {}
if request.values.get('embargo_duration',False):
acc.data['embargo']['duration'] = request.values['embargo_duration']
else:
acc.data['embargo']['duration'] = 0
if 'license' not in acc.data: acc.data['license'] = {}
if request.values.get('license_title',False):
acc.data['license']['title'] = request.values['license_title']
else:
acc.data['license']['title'] = ""
if request.values.get('license_type',False):
acc.data['license']['type'] = request.values['license_type']
else:
acc.data['license']['type'] = ""
if request.values.get('license_url',False):
acc.data['license']['url'] = request.values['license_url']
else:
acc.data['license']['url'] = ""
if request.values.get('license_version',False):
acc.data['license']['version'] = request.values['license_version']
else:
acc.data['license']['version'] = ""
acc.save()
    time.sleep(2)
flash('Thank you. Your publisher details have been updated.', "success")
return redirect(url_for('.username', username=username))
@blueprint.route('/<username>/repoinfo', methods=['POST'])
def repoinfo(username):
acc = models.Account.pull(username)
if current_user.id != acc.id and not current_user.is_super:
abort(401)
if 'repository' not in acc.data: acc.data['repository'] = {}
if request.values.get('repository_software',False):
acc.data['repository']['software'] = request.values['repository_software']
else:
acc.data['repository']['software'] = ''
if request.values.get('repository_url',False):
acc.data['repository']['url'] = request.values['repository_url']
else:
acc.data['repository']['url'] = ''
if request.values.get('repository_name',False):
acc.data['repository']['name'] = request.values['repository_name']
else:
acc.data['repository']['name'] = ''
if 'sword' not in acc.data: acc.data['sword'] = {}
if request.values.get('sword_username',False):
acc.data['sword']['username'] = request.values['sword_username']
else:
acc.data['sword']['username'] = ''
if request.values.get('sword_password',False):
acc.data['sword']['password'] = request.values['sword_password']
else:
acc.data['sword']['password'] = ''
if request.values.get('sword_collection',False):
acc.data['sword']['collection'] = request.values['sword_collection']
else:
acc.data['sword']['collection'] = ''
if request.values.get('packaging',False):
acc.data['packaging'] = request.values['packaging'].split(',')
else:
acc.data['packaging'] = []
acc.save()
    time.sleep(2)
flash('Thank you. Your repository details have been updated.', "success")
return redirect(url_for('.username', username=username))
@blueprint.route('/<username>/api_key', methods=['POST'])
def apikey(username):
if current_user.id != username and not current_user.is_super:
abort(401)
acc = models.Account.pull(username)
acc.data['api_key'] = str(uuid.uuid4())
acc.save()
    time.sleep(2)
flash('Thank you. Your API key has been updated.', "success")
return redirect(url_for('.username', username=username))
@blueprint.route('/<username>/config', methods=['POST'])
def config(username):
if current_user.id != username and not current_user.is_super:
abort(401)
rec = models.RepositoryConfig().pull_by_repo(username)
if rec is None:
rec = models.RepositoryConfig()
rec.repository = username
try:
if len(request.values.get('url','')) > 1:
url = request.values['url']
fn = url.split('?')[0].split('#')[0].split('/')[-1]
r = requests.get(url)
try:
saved = rec.set_repo_config(jsoncontent=r.json(),repository=username)
except:
strm = StringIO(r.content)
if fn.endswith('.csv'):
saved = rec.set_repo_config(csvfile=strm,repository=username)
elif fn.endswith('.txt'):
saved = rec.set_repo_config(textfile=strm,repository=username)
else:
if request.files['file'].filename.endswith('.csv'):
saved = rec.set_repo_config(csvfile=request.files['file'],repository=username)
elif request.files['file'].filename.endswith('.txt'):
saved = rec.set_repo_config(textfile=request.files['file'],repository=username)
if saved:
flash('Thank you. Your match config has been updated.', "success")
else:
flash('Sorry, there was an error with your config upload. Please try again.', "error")
except:
flash('Sorry, there was an error with your config upload. Please try again.', "error")
time.sleep(1)
return redirect(url_for('.username', username=username))
@blueprint.route('/<username>/become/<role>', methods=['POST'])
@blueprint.route('/<username>/cease/<role>', methods=['POST'])
def changerole(username,role):
acc = models.Account.pull(username)
if acc is None:
abort(404)
elif request.method == 'POST' and current_user.is_super:
if 'become' in request.path:
if role == 'publisher':
acc.become_publisher()
else:
acc.add_role(role)
acc.save()
elif 'cease' in request.path:
if role == 'publisher':
acc.cease_publisher()
else:
acc.remove_role(role)
acc.save()
time.sleep(1)
flash("Record updated", "success")
return redirect(url_for('.username', username=username))
else:
abort(401)
@blueprint.route('/<username>/matches')
def matches(username):
return redirect(url_for('.username/match.html', username=username))
@blueprint.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
return render_template('account/login.html')
elif request.method == 'POST':
password = request.values['password']
username = request.values['username']
user = models.Account.pull(username)
if user is None:
user = models.Account.pull_by_email(username)
if user is not None and user.check_password(password):
login_user(user, remember=True)
flash('Welcome back.', 'success')
return redirect(url_for('.username', username=user.id))
else:
flash('Incorrect username/password', 'error')
return render_template('account/login.html')
@blueprint.route('/logout')
def logout():
logout_user()
flash('You are now logged out', 'success')
return redirect('/')
@blueprint.route('/register', methods=['GET', 'POST'])
def register():
if not current_user.is_super:
abort(401)
form = AdduserForm(request.form)
vals = request.json if request.json else request.values
if request.method == 'POST' and form.validate():
#From here!
api_key = str(uuid.uuid4())
account = models.Account()
account.data['email'] = vals['email']
account.data['api_key'] = api_key
account.data['role'] = []
if vals.get('repository_software',False):
account.data['repository'] = {
'software': vals['repository_software']
}
if vals.get('repository_url',False): account.data['repository']['url'] = vals['repository_url']
if vals.get('repository_name',False): account.data['repository']['name'] = vals['repository_name']
if vals.get('sword_username',False):
account.data['sword'] = {
'username': vals['sword_username']
}
if vals.get('sword_password',False): account.data['sword']['password'] = vals['sword_password']
if vals.get('sword_collection',False): account.data['sword']['collection'] = vals['sword_collection']
if vals.get('packaging',False):
account.data['packaging'] = vals['packaging'].split(',')
if vals.get('embargo_duration',False):
account.data['embargo'] = {'duration': vals['embargo_duration']}
if vals.get('license_title',False):
account.data['license'] = {'title': vals['license_title']}
if vals.get('license_type',False):
account.data['license']['type'] = vals['license_type']
if vals.get('license_url',False):
account.data['license']['url'] = vals['license_url']
if vals.get('license_version',False):
account.data['license']['version'] = vals['license_version']
account.set_password(vals['password'])
if vals['radio'] != 'publisher':
account.add_role(vals['radio'])
account.save()
if vals['radio'] == 'publisher':
account.become_publisher()
#To here! it should be a method in model not part of the controller!
time.sleep(1)
flash('Account created for ' + account.id, 'success')
return redirect('/account')
return render_template('account/register.html', vals = vals, form = form)
```
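
A hedged sketch of how the `account` blueprint above could be mounted on a Flask application. The `/account` URL prefix is an assumption consistent with the `redirect('/account')` in `register()`; the real jper application factory (and its login manager setup) may wire this differently.

```python
# Illustrative sketch only: mounting the account blueprint on a bare Flask app.
# The prefix and secret key are assumptions; the real app also configures
# flask-login and the octopus framework, which this sketch omits.
from flask import Flask
from service.views.account import blueprint as account_blueprint

app = Flask(__name__)
app.secret_key = "change-me"  # required for flash() and sessions used in the views

app.register_blueprint(account_blueprint, url_prefix='/account')

if __name__ == "__main__":
    app.run(debug=True)
```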
#### File: service/views/more.py
```python
from flask import Blueprint, request, url_for, flash, redirect, make_response
from flask import render_template, abort
from flask.ext.login import login_user, logout_user, current_user
blueprint = Blueprint('more', __name__)
@blueprint.route('/institutions/', methods=['GET','POST'])
def institutions():
'''
'''
return render_template('more/institutions.html', name='Information about Institutions')
@blueprint.route('/publishers/', methods=['GET','POST'])
def publishers():
'''
'''
return render_template('more/publishers.html', name='Information about Publishers')
@blueprint.route('/resources/', methods=['GET','POST'])
def resources():
'''
'''
return render_template('more/resources.html', name="Resources")
``` |
{
"source": "JiscRDSS/rdss-eprints-adaptor",
"score": 2
} |
#### File: rdss-eprints-adaptor/app/dynamodb_client.py
```python
import boto3
import logging
from datetime import datetime, timedelta
from dateutil import parser
class DynamoDBClient(object):
def __init__(self, watermark_table_name, processed_table_name):
self.watermark_table_name = watermark_table_name
self.processed_table_name = processed_table_name
self.client = self._initialise_client()
def _initialise_client(self):
logging.info('Initialising Boto3 DynamoDB client')
return boto3.client('dynamodb')
def fetch_high_watermark(self):
# Query DynamoDB to fetch the high watermark. There should only be one row in this table...
logging.info('Fetching high watermark from table [%s]', self.watermark_table_name)
response = self.client.get_item(
TableName=self.watermark_table_name,
Key={
'Key': {
'S': 'HighWatermark'
}
}
)
# If this is a "first run", then there won't be any data in the DynamoDB table. So check
# first.
if 'Item' in response:
            # The high watermark value should be an ISO 8601 compliant string.
high_watermark = parser.parse(response['Item']['Value']['S'])
logging.info('Got high watermark [%s]', high_watermark)
return high_watermark
else:
logging.info('No high watermark exists, this is probably a first run')
return None
def update_high_watermark(self, high_watermark):
# Set the high watermark, to be the timestamp given plus 1 second. If we don't add 1
# second, we'll keep fetching the last record over and over.
logging.info(
'Setting high watermark [%s] in table [%s]',
high_watermark,
self.watermark_table_name
)
self.client.put_item(
TableName=self.watermark_table_name,
Item={
'Key': {
'S': 'HighWatermark'
},
'Value': {
'S': (high_watermark + timedelta(seconds=1)).isoformat()
},
'LastUpdated': {
'S': datetime.now().isoformat()
}
}
)
def fetch_processed_status(self, oai_pmh_identifier):
# Query the DynamoDB table to fetch the status of a record with the given identifier.
logging.info(
'Fetching processed record with identifier [%s] from table [%s]',
oai_pmh_identifier,
self.processed_table_name
)
response = self.client.get_item(
TableName=self.processed_table_name,
Key={
'Identifier': {
'S': oai_pmh_identifier
}
}
)
        # If this identifier has never been seen before, it won't have a row in the DynamoDB table.
if 'Item' in response:
status = response['Item']['Status']['S']
logging.info(
'Got processed record status [%s] for identifier [%s]',
status,
oai_pmh_identifier
)
return status
else:
logging.info(
'No processed record exists for identifier [%s]',
oai_pmh_identifier
)
return None
def update_processed_record(self, oai_pmh_identifier, message, status, reason):
        # Add or update the row in the DynamoDB table with the given identifier.
logging.info(
'Updating processed record [%s] with a status of [%s] (reason: [%s]) in table [%s]',
oai_pmh_identifier,
status,
reason,
self.processed_table_name
)
self.client.put_item(
TableName=self.processed_table_name,
Item={
'Identifier': {
'S': oai_pmh_identifier
},
'Message': {
'S': message
},
'Status': {
'S': status
},
'Reason': {
'S': reason
},
'LastUpdated': {
'S': datetime.now().isoformat()
}
}
)
```
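
A hedged usage sketch of `DynamoDBClient`. The table names, the OAI-PMH identifier and the 'SUCCESS' status string are invented for illustration; boto3 is assumed to pick up AWS credentials and region from the environment.

```python
# Illustrative sketch: an adaptor-style flow around DynamoDBClient.
# Table names, the identifier and the status value are invented examples.
from datetime import datetime
from app.dynamodb_client import DynamoDBClient

client = DynamoDBClient(
    watermark_table_name='adaptor-high-watermark',
    processed_table_name='adaptor-processed-records'
)

# Work out where to resume harvesting from
high_watermark = client.fetch_high_watermark()
if high_watermark is None:
    print('No high watermark yet - treat this as a first run')

# After handling an OAI-PMH record, record its status and advance the watermark
identifier = 'oai:example.org:1234'
if client.fetch_processed_status(identifier) is None:
    client.update_processed_record(identifier, '{"message": "..."}', 'SUCCESS', 'Processed OK')
client.update_high_watermark(datetime.now())
```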
#### File: rdss-eprints-adaptor/app/message_generator.py
```python
import logging
import uuid
from ec2_metadata import ec2_metadata
from jinja2 import select_autoescape, Environment, PackageLoader
from datetime import datetime, timezone
from dateutil import parser
class MessageGenerator(object):
def __init__(self, jisc_id, organisation_name, oai_pmh_provider):
self.jisc_id = jisc_id
self.organisation_name = organisation_name
self.oai_pmh_provider = oai_pmh_provider
self.env = self._initialise_environment()
self.now = datetime.now(timezone.utc).isoformat()
def _initialise_environment(self):
logging.info('Loading templates in directory [templates] from package [app]')
# We use Jinja2 to template the messages, this block prepares the Jinja2 environment.
return Environment(
loader=PackageLoader('app', 'templates'),
autoescape=select_autoescape(
                enabled_extensions=('jsontemplate',),
default_for_string=True,
)
)
def _parse_datetime_with_tz(self, datetime_string):
parsed_dt = parser.parse(datetime_string)
if not parsed_dt.tzinfo:
parsed_dt = parsed_dt.replace(tzinfo=timezone.utc)
return parsed_dt.isoformat()
def generate_metadata_create(self, record, s3_objects):
# Generate the message by building up a dict of values and passing this into Jinja2. The
# .jsontemplate file will be parsed and decorated with these values.
logging.info('Fetching template [metadata_create.jsontemplate]')
template = self.env.get_template('metadata_create.jsontemplate')
logging.info('Rendering template using record [%s]', record)
dc_metadata = record['oai_dc']
return template.render({
'messageHeader': {
'messageId': uuid.uuid4(),
'messageTimings': {
'publishedTimestamp': self.now
},
'messageSequence': {
'sequence': uuid.uuid4()
},
'messageHistory': {
'machineId': 'rdss-oai-pmh-adaptor-{}'.format(self.oai_pmh_provider),
'machineAddress': self._get_machine_address(),
'timestamp': self.now
},
'generator': self.oai_pmh_provider
},
'messageBody': {
'objectUuid': uuid.uuid4(),
'objectTitle': self._extract_object_title(dc_metadata),
'objectPersonRole': self._extract_object_person_roles(dc_metadata),
'objectDescription': self._extract_object_description(dc_metadata),
'objectRights': {
'rightsStatement': self._extract_object_rights(dc_metadata)
},
'objectDate': {
'dateValue': self._extract_object_date(dc_metadata),
'dateType': 6
},
'objectKeywords': self._extract_object_keywords(dc_metadata),
'objectCategory': self._extract_object_category(dc_metadata),
'objectIdentifier': self._extract_object_identifier_value(dc_metadata),
'objectRelatedIdentifier': self._extract_object_related_identifier(dc_metadata),
'objectOrganisationRole': self._extract_object_organisation_role(dc_metadata),
'objectFile': self._extract_object_files(s3_objects)
}
})
def _get_machine_address(self):
try:
return ec2_metadata.private_ipv4
except Exception:
logging.exception('An error occurred retrieving EC2 metadata private ipv4 address')
return '0.0.0.0'
def _single_value_from_dc_metadata(self, dc_metadata, key):
values = dc_metadata.get(key)
if not values:
logging.warning('DC metadata [%s] does not contain [\'%s\'] field', dc_metadata, key)
return None
if len(values) > 1:
logging.warning('DC metadata [\'%s\'] has more than 1 value', key)
return values[0]
def _unique_value_list_from_dc_metadata(self, dc_metadata, key):
values = dc_metadata.get(key)
if not values:
logging.warning('DC metadata [%s] does not contain [\'%s\'] field', dc_metadata, key)
return []
return list(set(values))
def _extract_object_title(self, dc_metadata):
return self._single_value_from_dc_metadata(dc_metadata, 'title')
def _extract_object_description(self, dc_metadata):
description = self._single_value_from_dc_metadata(dc_metadata, 'description')
if not description:
description = 'NOT FOUND'
return description
def _extract_object_rights(self, dc_metadata):
rights_statement = self._single_value_from_dc_metadata(dc_metadata, 'rights')
if not rights_statement:
rights_statement = 'NOT FOUND'
return rights_statement
def _extract_object_date(self, dc_metadata):
date_string = self._single_value_from_dc_metadata(dc_metadata, 'date')
if not date_string:
return None
else:
return self._parse_datetime_with_tz(date_string)
def _extract_object_person_roles(self, dc_metadata):
def _object_person_role(name, role_enum):
return {
'person': {
'personUuid': uuid.uuid4(),
'personGivenName': name,
'personOrganisationUnit': {
'organisationUnitUuid': uuid.uuid4(),
'organisation': {
'organisationJiscId': self.jisc_id,
'organisationName': self.organisation_name
}
}
},
'role': role_enum
}
people = dc_metadata.get('creator', []) + dc_metadata.get('contributor', [])
return [_object_person_role(person, 21) for person in set(people)]
def _extract_object_keywords(self, dc_metadata):
return self._unique_value_list_from_dc_metadata(dc_metadata, 'subject')
def _extract_object_category(self, dc_metadata):
return self._unique_value_list_from_dc_metadata(dc_metadata, 'subject')
def _doi_identifier(self, value):
return {
'identifierValue': value,
'identifierType': 4
}
def _extract_object_identifier_value(self, dc_metadata):
return [self._doi_identifier(_id) for _id in
self._unique_value_list_from_dc_metadata(dc_metadata, 'identifier')]
def _extract_object_related_identifier(self, dc_metadata):
return [{
'identifier': self._doi_identifier(rel),
'relationType': 13
} for rel in self._unique_value_list_from_dc_metadata(dc_metadata, 'relation')]
def _extract_object_organisation_role(self, dc_metadata):
publishers = self._unique_value_list_from_dc_metadata(dc_metadata, 'publisher')
if not publishers:
publishers = [self.organisation_name]
return [{
'organisation': {
'organisationJiscId': self.jisc_id,
'organisationName': publisher
},
'role': 5
} for publisher in publishers]
def _extract_object_files(self, s3_objects):
return [{
'fileUuid': uuid.uuid4(),
'fileIdentifier': s3_object['file_path'],
'fileName': s3_object['file_name'],
'fileSize': s3_object['file_size'],
'fileChecksum': {
'checksumUuid': uuid.uuid4(),
'checksumValue': s3_object['file_checksum']
},
'fileStorageLocation': s3_object['download_url'],
'fileStoragePlatform': {
'storagePlatformUuid': uuid.uuid4()
}
} for s3_object in s3_objects]
```
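The sketch below is an assumption, not repository code: it shows how the generator might be driven. The jisc_id, record and S3 object values are invented, and it presumes the `app` package with its `templates` directory is importable.
```python
# Hypothetical usage of MessageGenerator; all values are placeholders.
from app.message_generator import MessageGenerator

generator = MessageGenerator(
    jisc_id=999,
    organisation_name='Example University',
    oai_pmh_provider='eprints-example',
)
record = {
    'oai_dc': {
        'title': ['A sample dataset'],
        'creator': ['Doe, Jane'],
        'date': ['2018-01-01'],
        'identifier': ['https://doi.org/10.1234/example'],
    }
}
s3_objects = [{
    'file_path': 'adaptor-bucket/dataset.zip',
    'file_name': 'dataset.zip',
    'file_size': 1024,
    'file_checksum': 'abc123',
    'download_url': 'https://adaptor-bucket.s3.amazonaws.com/dataset.zip',
}]
message_json = generator.generate_metadata_create(record, s3_objects)
```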
#### File: app/oaiore/reader.py
```python
from lxml import etree
from oaipmh.metadata import MetadataReader, Error, text_type
from oaipmh import common
class OREMetadataReader(MetadataReader):
""" Adds additional field_types to the MetadataReader found in the
pyoai library to translate elements with attributes to dicts for
the OAI-ORE output.
"""
def _element_to_dict(self, element):
""" Converts a childless etree.Element to a dict.
"""
d = {}
if element.attrib:
d.update((k, v) for k, v in element.attrib.items())
if element.text:
text = element.text.strip()
d['text'] = text
return d
def __call__(self, element):
map = {}
# create XPathEvaluator for this element
xpath_evaluator = etree.XPathEvaluator(element,
namespaces=self._namespaces)
e = xpath_evaluator.evaluate
        # now extract field info according to xpath expr
for field_name, (field_type, expr) in list(self._fields.items()):
if field_type == 'bytes':
value = str(e(expr))
elif field_type == 'bytesList':
value = [str(item) for item in e(expr)]
elif field_type == 'text':
# make sure we get back unicode strings instead
# of lxml.etree._ElementUnicodeResult objects.
value = text_type(e(expr))
elif field_type == 'textList':
# make sure we get back unicode strings instead
# of lxml.etree._ElementUnicodeResult objects.
value = [text_type(v) for v in e(expr)]
elif field_type == 'dict':
value = [self._element_to_dict(v) for v in e(expr)]
else:
raise Error('Unknown field type: %s' % field_type)
map[field_name] = value
return common.Metadata(element, map)
oai_ore_reader = OREMetadataReader(
fields={
# 'id': ('textList', 'atom:entry/atom:id/text()'),
'link': ('dict', 'atom:entry/atom:link'), # needs more complex query
# 'published': ('textList', 'atom:entry/atom:published/text()'),
# 'updated': ('textList', 'atom:entry/atom:updated/text()'),
# 'title': ('textList', 'atom:entry/atom:title/text()'),
# 'author_name': ('textList', 'atom:entry/atom:author/atom:name/text()'),
},
namespaces={
# v. http://www.openarchives.org/ore/1.0/atom#namespaces
'atom': 'http://www.w3.org/2005/Atom', # Atom namespace
'dc': 'http://purl.org/dc/elements/1.1/', # Dublin Core elements
'dcterms': 'http://purl.org/dc/terms/', # Dublin Core terms
'foaf': 'http://xmlns.com/foaf/0.1/', # FOAF vocabulary terms
'ore': 'http://www.openarchives.org/ore/terms/', # ORE vocabulary terms
'oreatom': 'http://www.openarchives.org/ore/atom/', # ORE Atom elements
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#', # RDF vocabulary terms
'rdfs': 'http://www.w3.org/2000/01/rdf-schema#', # RDF vocabulary terms
}
)
```
#### File: rdss-eprints-adaptor/app/oai_pmh_client.py
```python
import logging
from oaipmh.client import Client
from oaipmh.metadata import MetadataRegistry, oai_dc_reader
from oaipmh.error import NoRecordsMatchError
from .oaiore.reader import oai_ore_reader
class OAIPMHClient(object):
def __init__(self, url, use_ore=False):
self.client = self._initialise_client(url)
self.use_ore = use_ore
def _initialise_client(self, url):
registry = MetadataRegistry()
registry.registerReader('oai_dc', oai_dc_reader)
registry.registerReader('ore', oai_ore_reader)
logging.info('Initialising OAI client with URL [%s]', url)
return Client(url, registry)
def fetch_records_from(self, from_datetime, until_datetime=None):
records = self._fetch_records_by_prefix_from('oai_dc', from_datetime, until_datetime)
if not records:
# If we don't get DC records, we won't get anything.
return []
if self.use_ore:
oai_ore_records = self._fetch_records_by_prefix_from(
'ore', from_datetime, until_datetime)
records = self._merge_records(records, oai_ore_records)
records = self._filter_empty_records(records)
for r in records.values():
r['file_locations'] = self._extract_file_locations(r)
return sorted(records.values(), key=lambda k: k['datestamp'])
def _fetch_records_by_prefix_from(self, metadata_prefix, from_datetime, until_datetime=None):
try:
if not until_datetime:
logging.info('Querying for %s records from [%s]', metadata_prefix, from_datetime)
# Fetch all records since the given from_datetime parameter.
records = self.client.listRecords(
metadataPrefix=metadata_prefix, from_=from_datetime)
logging.info('Got %s records since [%s]', metadata_prefix, from_datetime)
else:
logging.info(
'Querying for %s records from [%s] to [%s]', metadata_prefix,
from_datetime, until_datetime)
# Fetch all records between the given from_datetime and the given until_datetime
records = self.client.listRecords(
metadataPrefix=metadata_prefix, from_=from_datetime, until=until_datetime)
logging.info('Got %s records between [%s] and [%s]',
metadata_prefix, from_datetime, until_datetime)
if not records:
return []
else:
return dict(self._structured_record(metadata_prefix, r) for r in records)
except NoRecordsMatchError:
# Annoyingly, the client throws an exception if no records are found...
logging.info('No %s records since [%s]', metadata_prefix, from_datetime)
return []
def _merge_records(self, records_a, records_b):
merged_records = {}
for k, v in records_a.items():
merged_records[k] = {**v, **records_b[k]}
return merged_records
def _filter_empty_records(self, records):
""" Records that have been deleted will exist in the oai-pmh output, but will not have
an `oai_dc` response. This filters them out. """
return {k: v for k, v in records.items() if v.get('oai_dc')}
def _structured_record(self, metadata_prefix, record):
logging.info('Converting record [%s]', record[0].identifier())
record_dict = {
'identifier': record[0].identifier(),
'datestamp': record[0].datestamp(),
metadata_prefix: self._record_metadata_to_dict(record[1])
}
return record[0].identifier(), record_dict
def _record_metadata_to_dict(self, record_metadata):
if record_metadata is not None:
return record_metadata.getMap()
else:
return None
def _extract_file_locations(self, record):
file_locations = []
if self.use_ore:
for l in record['ore'].get('link', []):
relation = l.get('rel', '')
if relation == 'http://www.openarchives.org/ore/terms/aggregates':
file_locations.append(l.get('href', ''))
else:
for identifier in record['oai_dc'].get('identifier', []):
if identifier.startswith(('http://', 'https://')):
file_locations.append(identifier)
return list(filter(None, file_locations))
```
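A short usage sketch (illustrative only, not repository code); the endpoint URL is a placeholder.
```python
# Hypothetical usage of OAIPMHClient against a placeholder EPrints endpoint.
from datetime import datetime
from app.oai_pmh_client import OAIPMHClient

client = OAIPMHClient('https://eprints.example.ac.uk/cgi/oai2', use_ore=False)

# Records changed since the high watermark, sorted oldest first by datestamp.
records = client.fetch_records_from(datetime(2018, 1, 1))
for record in records:
    print(record['identifier'], record['datestamp'], record['file_locations'])
```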
#### File: tests/app/test_kinesis_client.py
```python
import boto3
import json
import time
from app import KinesisClient
from app import PoisonPill
from moto import mock_kinesis
@mock_kinesis
def test_put_message_on_queue():
# Create the Kinesis client we'll be testing against
kinesis_client = KinesisClient(
'rdss-eprints-adaptor-test-stream',
'rdss-eprints-adaptor-invalid-stream'
)
    # Create a Boto3 Kinesis client we'll use to create the stream
client = boto3.client('kinesis')
client.create_stream(
StreamName='rdss-eprints-adaptor-test-stream',
ShardCount=1
)
# Get a handle on the test JSON message
test_message = _get_test_message()
# Put the test JSON message onto the queue for processing
kinesis_client.put_message_on_queue(json.dumps(test_message))
# Now kill the worker and shut down the client down
kinesis_client.put_message_on_queue(PoisonPill)
# Just a noddy little loop while we wait for the worker to die...
    while kinesis_client.queue_worker_thread.is_alive():
time.sleep(0.1)
# Fetch the message from the stream, to ensure it was added
shard_id = client.describe_stream(
StreamName='rdss-eprints-adaptor-test-stream'
)['StreamDescription']['Shards'][0]['ShardId']
shard_iterator = client.get_shard_iterator(
StreamName='rdss-eprints-adaptor-test-stream',
ShardId=shard_id,
ShardIteratorType='TRIM_HORIZON'
)['ShardIterator']
response = client.get_records(
ShardIterator=shard_iterator
)
# Extract the JSON payload and validate it matches the input message
assert len(response['Records']) == 1
json_data = json.loads(response['Records'][0]['Data'])
assert test_message == json_data
def _get_test_message():
return json.load(open('tests/app/data/rdss-message.json'))
```
#### File: tests/app/test_message_validator.py
```python
import pytest
from app import MessageValidator
from jsonschema import ValidationError
def test_validate_message_valid():
# Create the message validator we'll be testing against
message_validator = MessageValidator('3.0.1')
# Get a handle on the test JSON message
test_message = _get_test_message('tests/app/data/rdss-message.json')
# Validate the message
message_validator.validate_message(test_message)
def test_validate_message_invalid():
    # Validate that this call raises a ValidationError
with pytest.raises(ValidationError):
# Create the message validator we'll be testing against
message_validator = MessageValidator('3.0.1')
# Get a handle on the test JSON message
test_message = _get_test_message('tests/app/data/rdss-message-invalid.json')
# Validate the message
message_validator.validate_message(test_message)
def _get_test_message(file_path):
with open(file_path, 'rb') as file:
return file.read()
``` |
{
"source": "JiscRDSS/taxonomyschema",
"score": 2
} |
#### File: taxonomyschema/tests/test_request.py
```python
import pytest
from taxonomyschema.request import Requestor
from requests.exceptions import HTTPError
import responses
@pytest.mark.parametrize(('url', 'data', 'status', 'body', 'retries'), [
('http://example.com/', {'some': 'data'}, 201, 'OK', 3),
('http://example.com/', {'bad': 'data'}, 403,
'{"error": {"code": 123, "message": "fail"}}',
3)
])
@responses.activate
def test_post(url, data, status, body, retries):
responses.add(responses.POST, url,
body=body, status=status,
content_type='application/json')
r = Requestor(url)
if status == 201:
resp = r.update_service(data)
# successful request
assert len(responses.calls) == 1
assert r.retries == 0
assert resp.status_code == status
assert responses.calls[0].request.url == url
assert responses.calls[0].response.text == body
# test retries
r = Requestor(url + 'wont-work', max_retries=retries, sleep=0)
resp = r.update_service(data)
assert r.retries == retries
# test 4xx and 5xx HTTP responses
else:
with pytest.raises(HTTPError):
resp = r.update_service(data)
``` |
{
"source": "JiscSD/rdss-preservica-adaptor",
"score": 2
} |
#### File: rdss-preservica-adaptor/preservicaservice/errors.py
```python
import base64
import binascii
import json
from rdsslib.kinesis.decorators import (
RouterHistoryDecorator,
)
CodeMalformedBody = 'GENERR001'
CodeUnsupportedMessageType = 'GENERR002'
CodeExpiredMessage = 'GENERR003'
CodeMalformedHeader = 'GENERR004'
CodeMaxConnectionTries = 'GENERR005'
CodeUnderlyingSystemError = 'GENERR006'
CodeMalformedJsonBody = 'GENERR007'
CodeFailedTransactionRollback = 'GENERR008'
CodeUnknownError = 'GENERR009'
CodeMaxMessageSendTries = 'GENERR010'
CodeResourceNotFound = 'GENERR011'
CodeResourceAlreadyExists = 'GENERR012'
CodeSDKLibraryError = 'GENERR013'
CodeInvalidChecksum = 'APPERRMET004'
_errors = {
CodeMalformedBody: 'The Message Body is not in the expected format, '
'for example mandatory fields are missing.',
CodeUnsupportedMessageType: 'The provided messageType is not supported.',
CodeExpiredMessage: 'The expiration date of the Message had passed at'
' the point at which delivery was attempted.',
CodeMalformedHeader: 'Invalid, missing or corrupt headers were detected '
'on the Message.',
CodeMaxConnectionTries: 'Maximum number of connection retries exceeded '
'when attempting to send the Message.',
CodeUnderlyingSystemError: 'An error occurred interacting with the '
'underlying system.',
CodeMalformedJsonBody: 'Malformed JSON was detected in the Message Body.',
CodeFailedTransactionRollback: 'An attempt to roll back a '
'transaction failed.',
CodeUnknownError: 'An unexpected or unknown error occurred.',
CodeMaxMessageSendTries: 'Maximum number of Message resend'
'retries exceeded.',
CodeResourceNotFound: 'Resource not found',
CodeResourceAlreadyExists: 'Resource already exists',
CodeSDKLibraryError: 'SDK level error',
CodeInvalidChecksum: 'A file did not match its checksum.',
}
class BaseError(Exception):
code = CodeUnknownError
def __init__(self, details=None):
self.details = details
def export(self, original_record):
try:
rdss_message = base64.b64decode(
original_record.data,
).decode('utf-8')
except (AttributeError, binascii.Error):
rdss_message = '{}'
decorated = json.loads(RouterHistoryDecorator().process(rdss_message))
decorated_header_with_error = dict(
decorated['messageHeader'], **{
'messageType': 'Error',
'errorCode': self.code,
'errorDescription': _errors[self.code] + ' ' + (self.details or ''),
}
)
return dict(
decorated, **{
'messageHeader': decorated_header_with_error,
}
)
class MalformedBodyError(BaseError):
code = CodeMalformedBody
class UnsupportedMessageTypeError(BaseError):
code = CodeUnsupportedMessageType
class ExpiredMessageError(BaseError):
code = CodeExpiredMessage
class MalformedHeaderError(BaseError):
code = CodeMalformedHeader
class MaxConnectionTriesError(BaseError):
code = CodeMaxConnectionTries
class UnderlyingSystemError(BaseError):
code = CodeUnderlyingSystemError
class MalformedJsonBodyError(BaseError):
code = CodeMalformedJsonBody
class FailedTransactionRollbackError(BaseError):
code = CodeFailedTransactionRollback
class UnknownErrorError(BaseError):
code = CodeUnknownError
class MaxMessageSendTriesError(BaseError):
code = CodeMaxMessageSendTries
class ResourceNotFoundError(BaseError):
code = CodeResourceNotFound
class ResourceAlreadyExistsError(BaseError):
code = CodeResourceAlreadyExists
class SDKLibraryError(BaseError):
code = CodeSDKLibraryError
class InvalidChecksumError(BaseError):
code = CodeInvalidChecksum
```
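A small sketch of how these error classes are typically used (illustrative only, and it assumes `rdsslib` is installed since the module imports it): pick the class matching the failure, then read its code and canonical description before reporting it.
```python
# Hypothetical sketch: inspecting an error's code and description, as the
# processor does before writing a decorated copy to the error stream.
from preservicaservice import errors

err = errors.MalformedBodyError('missing objectFile')
assert err.code == errors.CodeMalformedBody
print(errors._errors[err.code], err.details)
```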
#### File: rdss-preservica-adaptor/preservicaservice/processor.py
```python
import logging
from amazon_kclpy import kcl
from .errors import (
BaseError,
ExpiredMessageError,
MalformedBodyError,
MalformedHeaderError,
UnknownErrorError,
UnsupportedMessageTypeError,
InvalidChecksumError,
)
from .put_stream import PutStream
from .tasks_parser import record_to_task
logger = logging.getLogger(__name__)
class RecordProcessor(kcl.RecordProcessorBase):
""" Records processor which can report failures to specific
kinesis stream.
"""
def __init__(self, config):
"""
:param config: job config object
:type config: preservicaservice.config.Config
"""
self.config = config
self.invalid_stream = PutStream(
config.invalid_stream_name,
config.adaptor_aws_region,
)
self.error_stream = PutStream(
config.error_stream_name,
config.adaptor_aws_region,
)
def initialize(self, shard_id):
pass
def process_records(self, records, checkpointer):
""" Handle list of records
:param records: input records
:param checkpointer: checkpoint object
:return:
"""
logger.debug('received %d records', len(records))
for i, record in enumerate(records):
self.process_record(i, record)
logger.debug('complete')
def shutdown_requested(self, checkpointer):
pass
def shutdown(self, checkpointer, reason):
pass
def process_record(self, index, record):
""" Handle single record.
Make sure it never fails.
:param int index: which item in given batch it is
:param Record record: data to handle
"""
try:
logger.debug('processing record %d', index)
task = record_to_task(record, self.config)
if task:
task.run()
else:
logger.warning('no task out of message')
except (
MalformedBodyError, UnsupportedMessageTypeError,
ExpiredMessageError, MalformedHeaderError, InvalidChecksumError,
) as e:
logger.exception('invalid message')
self.invalid_stream.put(e.export(record))
except BaseError as e:
logger.exception('error handling record')
self.error_stream.put(e.export(record))
except Exception as e:
            logger.exception('unexpected error handling record')
self.error_stream.put(UnknownErrorError(str(e)).export(record))
```
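The processor is designed to run under the Amazon KCL daemon; the wiring below is a hedged sketch, assuming a `Config` object that exposes the stream names and region used in `__init__` above.
```python
# Hypothetical wiring of RecordProcessor into the Amazon KCL MultiLangDaemon.
from amazon_kclpy import kcl
from preservicaservice.processor import RecordProcessor


def run(config):
    # config is assumed to provide invalid_stream_name, error_stream_name
    # and adaptor_aws_region, as used by RecordProcessor.__init__.
    kcl.KCLProcess(RecordProcessor(config)).run()
```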
#### File: rdss-preservica-adaptor/preservicaservice/tasks_parser.py
```python
import base64
import binascii
import json
import logging
from .errors import (
MalformedJsonBodyError,
MalformedHeaderError,
UnsupportedMessageTypeError
)
from .tasks import SUPPORTED_TASKS
logger = logging.getLogger(__name__)
TYPE_TO_TASKS = {x.TYPE: x for x in SUPPORTED_TASKS}
def decode_record(record):
""" Decode record to json
:param record: input message
:type record: amazon_kclpy.messages.Record
:rtype: dict
:raise: preservicaservice.errors.MalformedJsonBodyError
"""
try:
value = base64.b64decode(record.data)
message = json.loads(value.decode('utf-8'))
except (TypeError, ValueError, binascii.Error):
raise MalformedJsonBodyError()
if not isinstance(message, dict):
raise MalformedJsonBodyError()
return message
def create_supported_tasks(message, config):
""" Build task out of raw json message
:param dict message: raw data
:param preservicaservice.Config config: job config
:rtype: list of preservicaservice.tasks.BaseTask
:raise: preservicaservice.errors.UnsupportedMessageTypeError
:raise: preservicaservice.errors.MalformedHeaderError
"""
try:
message_type = message['messageHeader']['messageType']
except (TypeError, KeyError):
raise MalformedHeaderError()
if not isinstance(message_type, str):
raise MalformedHeaderError()
message_type = message_type.strip()
if message_type not in TYPE_TO_TASKS:
raise UnsupportedMessageTypeError(
'{} is not supported'.format(message_type),
)
return TYPE_TO_TASKS[message_type].build(message, config)
def record_to_task(record, config):
""" Facade to decode record to task instance if possible
:param record: input message
:type record: amazon_kclpy.messages.Record
:param preservicaservice.Config config: job config
:raise: preservicaservice.errors.MalformedJsonBodyError
:raise: preservicaservice.errors.UnsupportedMessageTypeError
:raise: preservicaservice.errors.MalformedHeaderError
:rtype: preservicaservice.tasks.BaseTask
"""
message = decode_record(record)
logger.debug('received message %s', message)
try:
return create_supported_tasks(message, config)
except ValueError as e:
raise MalformedJsonBodyError(str(e))
```
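A sketch of the decoding step in isolation (illustrative; `FakeRecord` stands in for `amazon_kclpy.messages.Record`, which exposes the base64-encoded payload on a `data` attribute).
```python
# Hypothetical sketch of decoding a raw Kinesis record into an RDSS message.
import base64
import json

from preservicaservice.tasks_parser import decode_record


class FakeRecord(object):
    def __init__(self, message):
        self.data = base64.b64encode(json.dumps(message).encode('utf-8'))


message = decode_record(
    FakeRecord({'messageHeader': {'messageType': 'MetadataCreate'}}),
)
assert message['messageHeader']['messageType'] == 'MetadataCreate'
```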
#### File: rdss-preservica-adaptor/preservicaservice/tasks.py
```python
import abc
import datetime
import hashlib
import base64
import logging
import os
import tempfile
import zipfile
import boto3
from .errors import (
MalformedBodyError,
ResourceAlreadyExistsError,
UnderlyingSystemError,
InvalidChecksumError,
)
from .meta import write_object_meta, write_message_meta
from .remote_urls import S3RemoteUrl, HTTPRemoteUrl
from .preservica_s3_bucket import PreservicaS3BucketBuilder
logger = logging.getLogger(__name__)
def get_tmp_file():
return tempfile.NamedTemporaryFile(delete=False).name
class BaseTask(abc.ABC):
"""
Task to run for given input message.
"""
@classmethod
@abc.abstractmethod
def build(cls, message, config):
""" Factory method to produce task from json message.
All validation should happen here
:param dict message: raw message
:raise: preservicaservice.errors.MalformedBodyError if any error
:param config: job environment config
:type config: preservicaservice.config.Config
:raise: ValueError if any error
:return: instance
:rtype: BaseTask
"""
@abc.abstractmethod
def run(self, config):
""" Run task
:param config: job environment config
:type config: preservicaservice.config.Config
:return: True if message handled
:rtype: bool
"""
def require_non_empty_key(message, key1, key2):
""" Require message to have given non empty key
:param dict message: source to look at
:param str key1: 1st key to search
:param str key2: 2nd key in chain
:return: value
:raise: MalformedBodyError in case key is missing
"""
try:
value = message[key1][key2]
if not value:
raise MalformedBodyError('empty {}'.format(key2))
return value
except (KeyError, ValueError, TypeError, AttributeError):
raise MalformedBodyError('missing {}'.format(key2))
def env_prefix_message_key(message, key1, key2, environment):
""" Extracts a value from a message and prefixes it with the environment if the
environment is not "prod".
    :param dict message: source message
    :param str environment: deployment environment name
    :return: str value
"""
value = require_non_empty_key(message, key1, key2)
if environment != 'prod':
value = '{}-{}'.format(environment, value)
return value
def first_org_id_from_org_roles(org_roles):
""" Return first Jisc ID found in an objectOrganisationRole."""
for role in org_roles:
if not isinstance(role, dict):
continue
org = role.get('organisation')
if not isinstance(org, dict):
continue
org_id = org.get('organisationJiscId')
if not org_id:
continue
return str(org_id).strip()
def first_org_id_from_person_roles(person_roles):
""" Return first Jisc ID found in an objectPersonRole."""
for role in person_roles:
if not isinstance(role, dict):
continue
person = role.get('person')
if not isinstance(person, dict):
continue
org_unit = person.get('personOrganisationUnit', {})
if not isinstance(org_unit, dict):
continue
org = org_unit.get('organisation', {})
org_id = org.get('organisationJiscId')
if not org_id:
continue
return str(org_id).strip()
def require_organisation_id(message):
""" Retrieve Jisc ID from message payload or raise MalformedBodyError."""
message_body = message.get('messageBody')
if not isinstance(message_body, dict):
raise MalformedBodyError('messageBody is not a dict.')
org_roles = message_body.get('objectOrganisationRole', [])
value = first_org_id_from_org_roles(org_roles)
if value:
return value
person_roles = message_body.get('objectPersonRole', [])
value = first_org_id_from_person_roles(person_roles)
if value:
return value
raise MalformedBodyError(
        'Unable to determine organisationJiscId. '
'Missing {0} or {1} fields?'.format(
'objectOrganisationRole',
'objectPersonRole',
),
)
def first_role_id_in_roles(roles):
""" Return the first role ID found in list of roles."""
for role in roles:
if not isinstance(role, dict):
continue
role_id = role.get('role')
if not role_id:
continue
return str(role_id).strip()
def require_organisation_role(message):
""" Retrieve role ID from message payload or raise exception."""
message_body = message.get('messageBody')
if not isinstance(message_body, dict):
raise MalformedBodyError('messageBody is not a dict.')
org_roles = message_body.get('objectOrganisationRole', [])
value = first_role_id_in_roles(org_roles)
if value:
return value
person_roles = message_body.get('objectPersonRole', [])
value = first_role_id_in_roles(person_roles)
if value:
return value
raise MalformedBodyError(
'Unable to determine role ID. '
'Missing {0} or {1} fields?'.format(
'objectOrganisationRole',
'objectPersonRole',
),
)
class FileMetadata(object):
""" File object Metadata, not related to AWS metadata. """
_required_attrs = ['fileName']
def __init__(self, **kwargs):
"""
:param str file_name: file name from message
"""
for v in FileMetadata._required_attrs:
if v not in kwargs.keys() or not kwargs.get(v):
raise MalformedBodyError(
'missing {} property from kwargs'.format(v),
)
self.fileName = kwargs.get('fileName')
def generate(self, meta_path):
""" Generate meta for given target file on given path
:param str meta_path: file to write
"""
write_object_meta(meta_path, self.values())
def values(self):
""" Get values for tags """
return self.__dict__.items()
class FileTask(object):
DEFAULT_FILE_SIZE_LIMIT = 5 * 1024 * 1024 * 1024 + 1
def __init__(
self, remote_file, metadata, message_id, object_id, file_checksum,
file_size_limit=DEFAULT_FILE_SIZE_LIMIT,
):
"""
        :param remote_file: remote_file.BaseRemoteFile
        :param FileMetadata metadata: file related metadata
        :param str message_id: message header id
        :param str object_id: object uuid used as the archive base path
        :param list file_checksum: RDSS checksum dicts for the file
        :param int file_size_limit: max file size limit
"""
self.remote_file = remote_file
self.metadata = metadata
self.file_size_limit = file_size_limit
self.message_id = message_id
self.archive_base_path = object_id
self.file_checksum = file_checksum
def download(self, download_path):
""" Download given path from s3 to temp destination.
:param str download_path: what to download
"""
self.remote_file.download(download_path)
def verify_file_size(self, path):
""" Check given path file size limit and raise if not valid.
:param str path: file to check
:raise: UnderlyingSystemError if file too big
"""
size = os.path.getsize(path)
if size >= self.file_size_limit:
            raise UnderlyingSystemError('file too large: {} bytes'.format(size))
def verify_checksums(self, path):
""" Check given path checksums and raise if not valid.
:param str path: file to check
        :raise: InvalidChecksumError if checksums do not match
"""
# Map from RDSS checksumType, which is an integer designed to not change, to a
# string type that is used internally here. The string values here do need to
        # match the names in hashlib, but are independent of anything in the message
# API spec
CHECKSUM_TYPES = {
1: 'md5',
2: 'sha256',
}
checksums = [
{
'type': CHECKSUM_TYPES[checksum_rdss['checksumType']],
'expected': checksum_rdss['checksumValue'],
'calculated': getattr(hashlib, CHECKSUM_TYPES[checksum_rdss['checksumType']])(),
} for checksum_rdss in self.file_checksum
]
if not checksums:
logger.debug('No checksums received. Skipping verification')
return
# We avoid reading the file into memory at once, and we only
        # iterate through the file contents once, even if multiple checksums are
# received
def read_chunks(file):
while True:
chunk = file.read(2048)
if not chunk:
break
yield chunk
logger.debug('Opening %s to find its checksums', path)
with open(path, 'rb') as file:
for chunk in read_chunks(file):
for checksum in checksums:
checksum['calculated'].update(chunk)
logger.debug('Calculated checksums %s', checksums)
non_matching_checksums = [
checksum for checksum in checksums
if checksum['expected'] != checksum['calculated'].hexdigest()
]
if non_matching_checksums:
logger.debug(
'Found non matching checksums %s',
non_matching_checksums,
)
raise InvalidChecksumError(
'Found non matching checksums: {}'.format(
non_matching_checksums,
),
)
def zip_bundle(self, zip_path, download_path, meta_path):
""" Zip bundle of file and meta to given file
:param str zip_path: target zip file
:param str download_path: original file
:param str meta_path: meta file
"""
contents = (
(
download_path,
os.path.join(
self.archive_base_path,
os.path.basename(self.remote_file.name),
),
),
(
meta_path,
os.path.join(
self.archive_base_path,
'{}.metadata'.format(os.path.basename(
self.remote_file.name,
)),
),
),
)
with zipfile.ZipFile(
zip_path, 'a', compression=zipfile.ZIP_DEFLATED,
) as f:
for src, dst in contents:
f.write(src, dst)
def run(self, zip_path):
""" Prepare and append files to zip bundle
:param str zip_path: which archive to append data to
"""
download_path = get_tmp_file()
meta_path = get_tmp_file()
try:
self.metadata.generate(meta_path)
self.download(download_path)
# TODO Remove to re-enable checksum and fsize validation
# self.verify_file_size(download_path)
# self.verify_checksums(download_path)
self.zip_bundle(zip_path, download_path, meta_path)
finally:
for path in (download_path, meta_path):
if os.path.exists(path):
os.unlink(path)
class BaseMetadataCreateTask(BaseTask):
"""
Creates a package ready for ingest in preservica
by uploading to the appropriate S3 bucket
"""
UPLOAD_OVERRIDE = False
def __init__(
self, message, file_tasks, destination_bucket, message_id, role, object_id,
):
"""
:param dict message: source message
:param file_tasks: files to include in bundle wrapped in tasks
:type file_tasks: list of FileTask
        :param boto3.S3.Bucket destination_bucket: target s3 bucket
        :param str message_id: message header id
        :param str object_id: object uuid used as the archive base path
        :param str role: tag role
"""
self.message = message
self.file_tasks = file_tasks
self.destination_bucket = destination_bucket
self.message_id = message_id
self.object_id = object_id
self.role = role
@classmethod
def build(cls, message, config):
"""
:param dict message: raw message
:param config: job environment config
:type config: preservicaservice.config.Config
:return:
"""
organisation_id = require_organisation_id(message)
role = require_organisation_role(message)
upload_url = config.organisation_buckets.get(organisation_id)
if upload_url:
session = boto3.Session()
s3 = session.resource('s3')
destination_bucket = s3.Bucket(upload_url.host)
else:
if config.environment != 'prod':
bucket_jisc_id = 'jisc'
else:
bucket_jisc_id = organisation_id
bucket_builder = PreservicaS3BucketBuilder(
config.preservica_base_url,
config.environment,
config.adaptor_aws_region,
)
destination_bucket = bucket_builder.get_bucket(bucket_jisc_id)
if not destination_bucket:
logger.warning(
'No Preservica S3 bucket available for %s', bucket_jisc_id,
)
return None
try:
objects = message['messageBody']['objectFile']
except KeyError:
raise MalformedBodyError('missing objectFile')
message_id = env_prefix_message_key(
message, 'messageHeader', 'messageId', config.environment,
)
object_id = env_prefix_message_key(
message, 'messageBody', 'objectUuid', config.environment,
)
if not isinstance(objects, list):
raise MalformedBodyError('expected objectFile as list')
file_tasks = []
for obj in objects:
file_tasks.append(cls.build_file_task(obj, message_id, object_id))
return cls(
message,
file_tasks,
destination_bucket,
message_id,
role,
object_id,
)
@classmethod
def build_file_task(cls, object_file, message_id, object_id):
try:
url = object_file['fileStorageLocation']
file_name = object_file['fileName']
# This is missing in prod samvera and figshare messages, defaulting
# to 2 as they'll both have http storage locations.
storage_platform = object_file.get('fileStoragePlatform', {})
storage_type = storage_platform.get('storagePlatformType', 2)
file_checksum = object_file['fileChecksum']
except (TypeError, KeyError) as exception:
raise MalformedBodyError(
'Unable to parse file: {}'.format(str(exception)),
)
try:
storage_types = {
1: S3RemoteUrl,
2: HTTPRemoteUrl,
}
remote_file_class = storage_types[storage_type]
except KeyError:
raise MalformedBodyError(
'Unsupported storagePlatformType ({})'.format(storage_type),
)
try:
remote_file = remote_file_class.parse(url, file_name)
except ValueError:
raise MalformedBodyError('invalid value in fileStorageLocation')
return FileTask(
remote_file,
FileMetadata(**object_file),
message_id,
object_id,
file_checksum,
)
def run(self):
zip_path = get_tmp_file()
try:
# message level meta
self.bundle_meta(zip_path)
# per file data
for task in self.file_tasks:
task.run(zip_path)
# target s3 upload
self.upload_bundle(
self.destination_bucket,
zip_path,
self.collect_meta(zip_path),
self.UPLOAD_OVERRIDE,
)
finally:
if os.path.exists(zip_path):
os.unlink(zip_path)
def bundle_meta(self, zip_path):
""" Generate root metadata file for given message
:param str zip_path: target zip file
"""
with tempfile.NamedTemporaryFile() as tmp_file:
meta_path = tmp_file.name
write_message_meta(meta_path, self.message)
with zipfile.ZipFile(
zip_path, 'a', compression=zipfile.ZIP_DEFLATED,
) as f:
f.write(
meta_path,
'{0}/{0}.metadata'.format(self.object_id),
)
def _generate_md5_checksum(self, file_path, buf_size=4096):
""" Generates a MD5 checksum for inclusion in the upload to s3.
Uses an iterator over file contents for consistent memory usage.
"""
md5_checksum = hashlib.md5()
with open(file_path, 'rb') as f_in:
for file_chunk in iter(lambda: f_in.read(buf_size), b''):
md5_checksum.update(file_chunk)
return base64.b64encode(md5_checksum.digest()).decode('utf-8')
def upload_bundle(self, destination_bucket, zip_path, metadata, override):
""" Upload given zip to target
:param destination_bucket: target s3 bucket
:type destination_bucket: boto3.S3.Bucket
:param str zip_path: source file
:param dict metadata: metadata to set on s3 object
:param bool override: don't fail if file exists
:return:
"""
md5_checksum = self._generate_md5_checksum(zip_path)
metadata['md5chksum'] = md5_checksum
if not override:
if list(destination_bucket.objects.filter(Prefix=self.bundle_name)):
# TODO: clarify exception
                raise ResourceAlreadyExistsError('object already exists in s3')
with open(zip_path, 'rb') as data:
destination_bucket.put_object(
Body=data,
Key=self.bundle_name,
ContentMD5=md5_checksum,
Metadata=metadata,
)
@property
def bundle_name(self):
return self.message_id
def collect_meta(self, zip_file_path):
""" S3 object metadata
:param zip_file_path:
:rtype: dict of (str, str)
"""
size_uncompressed = 0
with zipfile.ZipFile(zip_file_path) as f:
for info in f.infolist():
size_uncompressed += info.file_size
# make sure all values are strings
return {
'key': self.message_id,
'bucket': self.destination_bucket.name,
'status': 'ready',
'name': '{}.zip'.format(self.bundle_name),
'size': str(os.stat(zip_file_path).st_size),
'size_uncompressed': str(size_uncompressed),
'createddate': datetime.datetime.now().isoformat(),
'createdby': self.role,
}
class MetadataCreateTask(BaseMetadataCreateTask):
TYPE = 'MetadataCreate'
SUPPORTED_TASKS = (
MetadataCreateTask,
)
```
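For reference, a hedged sketch of the minimal message shape that `MetadataCreateTask.build` parses; every value below is invented, and `checksumType` 1 means MD5 per the mapping in `FileTask.verify_checksums`.
```python
# Hypothetical minimal MetadataCreate message body; all values are placeholders.
message = {
    'messageHeader': {'messageId': '90cbdf86-6892-4bf9-845f-dbd61eb80065'},
    'messageBody': {
        'objectUuid': 'e8e8e8e8-1111-4222-8333-444444444444',
        'objectOrganisationRole': [
            {'organisation': {'organisationJiscId': 999}, 'role': 5},
        ],
        'objectFile': [{
            'fileName': 'dataset.zip',
            'fileStorageLocation': 'https://repo.example.ac.uk/files/dataset.zip',
            'fileStoragePlatform': {'storagePlatformType': 2},
            'fileChecksum': [{'checksumType': 1, 'checksumValue': 'abc123'}],
        }],
    },
}
```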
#### File: rdss-preservica-adaptor/tests/test_meta.py
```python
import pytest
from preservicaservice import meta
@pytest.mark.parametrize(
'data, contents', [
(
(('foo', 'bar',),),
(
'<oai_dc:dc xmlns:dc="http://purl.org/dc/elements/1.1/"'
' xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"'
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
' xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/'
' http://www.openarchives.org/OAI/2.0/oai_dc.xsd">\n'
' <dc:foo>bar</dc:foo>\n'
'</oai_dc:dc>\n'
),
),
],
)
def test_write_object_meta(temp_file, data, contents):
meta.write_object_meta(temp_file, data)
with open(temp_file) as f:
assert f.read() == contents
@pytest.mark.parametrize(
'data, contents', [
(
{'foo': {'bar': 'baz'}},
(
'<?xml version="1.0" encoding="UTF-8" ?>'
'<root xmlns="http://jisc.ac.uk/#rdss/schema">'
'<foo type="dict">'
'<bar type="str">baz</bar>'
'</foo>'
'</root>'
),
),
],
)
def test_write_message_meta(temp_file, data, contents):
meta.write_message_meta(temp_file, data)
with open(temp_file) as f:
assert f.read() == contents
```
#### File: rdss-preservica-adaptor/tests/test_put_stream.py
```python
import boto3
import moto
import pytest
from preservicaservice import put_stream
from preservicaservice.errors import (
MaxMessageSendTriesError,
ResourceNotFoundError
)
def test_exponential_generator():
expected = [200, 400, 800, 1600, 3200, 6400, 12800, 25600, 51200, 102400]
for i, actual in enumerate(
put_stream.exponential_generator('x', ValueError, multiplier=100),
):
assert actual == expected.pop(0)
if not expected:
break
def test_exponential_generator_raises():
with pytest.raises(ValueError, match='x'):
list(put_stream.exponential_generator('x', ValueError))
@moto.mock_kinesis
def test_put():
client = boto3.client('kinesis', 'us-west-1')
client.create_stream(StreamName='in', ShardCount=1)
put_stream.PutStream('in', 'us-west-1').put('{abc}')
desc = client.describe_stream(StreamName='in')
shard_id = desc['StreamDescription']['Shards'][0]['ShardId']
shard_iterator = client.get_shard_iterator(
StreamName='in',
ShardId=shard_id,
ShardIteratorType='TRIM_HORIZON',
)
resp = client.get_records(
ShardIterator=shard_iterator['ShardIterator'],
Limit=10,
)
records = resp['Records']
actual = list(map(lambda x: x['Data'], records))
assert actual == [b'{abc}']
@moto.mock_kinesis
def test_init_missing_stream():
with pytest.raises(ResourceNotFoundError):
put_stream.PutStream('in', 'us-west-1', number_of_tries=1)
@moto.mock_kinesis
def test_put_or_fail_missing_stream():
client = boto3.client('kinesis', 'us-west-1')
client.create_stream(StreamName='in', ShardCount=1)
stream = put_stream.PutStream('in', 'us-west-1', number_of_tries=1)
client.delete_stream(StreamName='in')
with pytest.raises(
MaxMessageSendTriesError,
match='gave up writing data to stream in after 1 tries',
):
stream.put_or_fail('{abc}')
@moto.mock_kinesis
def test_put_missing_stream():
client = boto3.client('kinesis', 'us-west-1')
client.create_stream(StreamName='in', ShardCount=1)
stream = put_stream.PutStream('in', 'us-west-1', number_of_tries=1)
client.delete_stream(StreamName='in')
stream.put('{abc}')
``` |
{
"source": "JiscSD/rdss-pure-adaptor",
"score": 2
} |
#### File: adaptor/messages/message_header.py
```python
import re
import uuid
import datetime
import subprocess
RDSS_ERROR_CODES = {
    'GENERR001': 'The Message Body is not in the expected format, for example'
                 ' mandatory fields are missing.',
    'GENERR002': 'The provided messageType is not supported.',
    'GENERR003': 'The expiration date of the Message had passed at the point'
                 ' at which delivery was attempted.',
    'GENERR004': 'Invalid, missing or corrupt headers were detected on the'
                 ' Message.',
    'GENERR005': 'Maximum number of connection retries exceeded when'
                 ' attempting to send the Message.',
    'GENERR006': 'An error occurred interacting with the underlying system.',
    'GENERR007': 'Malformed JSON was detected in the Message Body.',
    'GENERR008': 'An attempt to roll back a transaction failed.',
    'GENERR009': 'An unexpected or unknown error occurred.',
    'GENERR010': 'Received an invalid / malformed UUID.',
    'APPERRMET001': 'Received a Metadata UPDATE with a datasetUuid that does'
                    ' not exist.',
    'APPERRMET002': 'Received a Metadata DELETE with a datasetUuid that does'
                    ' not exist.',
    'APPERRMET003': 'Received a Metadata READ with a datasetUuid that does'
                    ' not exist.',
    'APPERRVOC002': 'Received a Vocabulary READ with a vocabularyId that does'
                    ' not exist.',
}
class RDSSMessageHeader(object):
MESSAGE_CLASSES = (
'Command',
'Event',
'Document'
)
MESSAGE_TYPES = (
'VocabularyRead',
'VocabularyPatch',
'MetadataRead',
'MetadataCreate',
'MetadataUpdate',
'MetadataDelete'
)
MESSAGE_API_VERSION = '1.2.1'
UUID_REGEX = re.compile(r'''
^[0-9a-f]{8}
-
[0-9a-f]{4}
-
[1-5][0-9a-f]{3}
-
[89ab][0-9a-f]{3}
-
[0-9a-f]{12}
$
''', re.VERBOSE | re.IGNORECASE)
def __init__(self, instance_id):
self._machine_id = instance_id
self._machine_address = self._get_machine_ip()
def _get_machine_ip(self):
return subprocess.check_output(
['sh', '-c', "/sbin/ip route|awk '/default/ { print $3 }'"]
).decode('utf-8').strip()
def _message_id(self):
return str(uuid.uuid4())
def _correlation_id(self, correlation_id):
if not self.UUID_REGEX.match(correlation_id):
error_str = '{} is not a valid UUID.'.format(correlation_id)
raise ValueError(error_str)
return correlation_id
def _message_class(self, message_class):
if message_class not in self.MESSAGE_CLASSES:
error_str = '{} is not among supported message classes:{}'.format(
message_class, self.MESSAGE_CLASSES)
raise ValueError(error_str)
return message_class
def _message_type(self, message_type):
if message_type not in self.MESSAGE_TYPES:
error_str = '{} is not among supported message types:{}'.format(
message_type, self.MESSAGE_TYPES)
raise ValueError(error_str)
return message_type
def _message_timings(self, now, expiration=None):
timings = {
'publishedTimestamp': now.isoformat(),
}
if expiration:
timings['expirationTimestamp'] = expiration
return timings
def _message_sequence(self, sequence_identifier, position, total):
return {
'sequence': sequence_identifier,
'position': position,
'total': total
}
def _message_history(self, now):
return [{
'machineId': self._machine_id,
'machineAddress': self._machine_address,
'timestamp': now.isoformat()
}]
def _error_code(self, error_code):
if error_code not in RDSS_ERROR_CODES.keys():
error_str = '{} is not a valid RDSS error code.'.format(
error_code)
raise ValueError(error_str)
return error_code
def generate(self,
message_class,
message_type,
correlation_id=None,
return_address=None,
message_sequence=None,
error_code=None,
error_description=None):
now = datetime.datetime.now(datetime.timezone.utc)
fields = {
'messageId': self._message_id(),
'messageClass': self._message_class(message_class),
'messageType': self._message_type(message_type),
'messageTimings': self._message_timings(now),
'messageHistory': self._message_history(now),
'version': self.MESSAGE_API_VERSION,
}
if correlation_id:
fields['correlationId'] = self._correlation_id(correlation_id)
if return_address:
fields['returnAddress'] = return_address
if message_sequence:
fields['messageSequence'] = self._message_sequence(
*message_sequence)
if error_code:
fields['errorCode'] = self._error_code(error_code)
if error_description:
fields['errorDescription'] = error_description
return fields
```
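A usage sketch (not from the repository); it assumes a Linux host, since the constructor shells out to `/sbin/ip` to discover the machine address, and the import path simply mirrors the file layout above.
```python
# Hypothetical usage of RDSSMessageHeader; the instance id is a placeholder.
from adaptor.messages.message_header import RDSSMessageHeader

header = RDSSMessageHeader('i-0123456789abcdef0')
fields = header.generate('Event', 'MetadataCreate')
print(fields['messageId'], fields['version'])
```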
#### File: adaptor/messages/message.py
```python
import json
import logging
from .validator import RDSSMessageValidator
from .message_header import RDSSMessageHeader
logger = logging.getLogger(__name__)
message_validator = RDSSMessageValidator()
class BaseRDSSMessageCreator:
message_class = ''
message_type = ''
def __init__(self, instance_id):
self._header = RDSSMessageHeader(instance_id)
def generate(self, message_body):
message_header = self._header.generate(
self.message_class,
self.message_type,
)
logger.info('Generating message %s', message_header['messageId'])
return RDSSMessage(message_header, message_body)
class MetadataCreate(BaseRDSSMessageCreator):
message_class = 'Event'
message_type = 'MetadataCreate'
class MetadataUpdate(BaseRDSSMessageCreator):
message_class = 'Event'
message_type = 'MetadataUpdate'
class RDSSMessage:
def __init__(self, message_header, message_body):
self._message = {
'messageHeader': message_header,
'messageBody': message_body
}
self.validation_errors = []
self.validate_body()
def _set_error(self, error_code, error_description):
logger.info('Setting the following error on message: %s - %s',
error_code, error_description)
self._message['messageHeader']['errorCode'] = error_code
self._message['messageHeader']['errorDescription'] = error_description
def validate_body(self):
body_errors = message_validator.message_body_errors(
self._message['messageBody']
)
if body_errors:
self._set_error('GENERR001', ' | '.join(body_errors))
self.validation_errors.extend(body_errors)
@property
def is_valid(self):
return not self.validation_errors
@property
def as_json(self):
return json.dumps(self._message)
```
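A hedged usage sketch of the creator classes; the instance id and body are placeholders, a Linux host is assumed (the header shells out for the machine address), and the validator's schema resources are assumed to be available.
```python
# Hypothetical usage of MetadataCreate; validation errors surface via is_valid.
from adaptor.messages.message import MetadataCreate

creator = MetadataCreate('i-0123456789abcdef0')
message = creator.generate({'objectUuid': 'not-a-real-body'})
if message.is_valid:
    print(message.as_json)
else:
    print(message.validation_errors)
```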
#### File: adaptor/tests/test_state_store.py
```python
import boto3
import moto
import pytest
import dateutil
from collections import namedtuple
from ..state_storage import AdaptorStateStore, DatasetState
@pytest.fixture
def modified_date():
return dateutil.parser.parse('2016-07-05T15:53:57.883+0000')
@pytest.fixture
def mock_dataset(modified_date):
Dataset = namedtuple('Dataset',
['uuid',
'modified_date',
'query_dataset_json',
'local_file_checksums'
]
)
return Dataset(
'a_mock_uuid',
modified_date,
lambda x: 'A dataset title',
{'a_file_name.zip', 'a_file_checksum'}
)
@pytest.fixture
def dataset_state(mock_dataset):
return DatasetState.create_from_dataset(mock_dataset)
class TestStateStore:
def setup(self):
self.mock = moto.mock_dynamodb2()
self.mock.start()
self.table_name = 'state_storage_test'
self.ddb = boto3.resource('dynamodb')
self.ddb.create_table(
TableName=self.table_name,
KeySchema=[
{
'AttributeName': 'uuid',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'uuid',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
def test_adaptor_state_store(self, dataset_state):
state_store = AdaptorStateStore(self.table_name)
state_store.put_dataset_state(dataset_state)
store_dataset_state = state_store.get_dataset_state(dataset_state.uuid)
assert dataset_state.json == store_dataset_state.json
assert dataset_state == store_dataset_state
def test_adaptor_state_store_latest(self, dataset_state, modified_date):
state_store = AdaptorStateStore(self.table_name)
state_store.update_latest_modified(dataset_state)
latest_datetime = state_store.latest_modified_datetime()
assert latest_datetime == modified_date
def teardown(self):
self.mock.stop()
```
#### File: rdss-pure-adaptor/pure_adaptor/pure_adaptor.py
```python
import logging
import os
import sys
from processor import PureAdaptor
logger = logging.getLogger(__name__)
log_formatter = logging.Formatter('%(asctime)s %(name)s:'
' [%(levelname)s] %(message)s')
std_out_handler = logging.StreamHandler(sys.stdout)
std_out_handler.setFormatter(log_formatter)
std_out_handler.setLevel(logging.INFO)
logger.addHandler(std_out_handler)
def all_env_vars_exist(var_names):
""" Ensure all environment variables exist and return them.
"""
env_vars = {name: os.environ.get(name) for name in var_names}
if not all(env_vars.values()):
missing = (name for name, exists in env_vars.items() if not exists)
logger.error('The following env variables have not been set: %s',
', '.join(missing))
sys.exit(2)
return env_vars
def main():
required_env_variables = (
'PURE_API_VERSION',
'PURE_API_URL',
'PURE_API_KEY',
'INSTANCE_ID',
'RDSS_INTERNAL_INPUT_STREAM',
'RDSS_MESSAGE_INVALID_STREAM',
'RDSS_MESSAGE_ERROR_STREAM',
)
env_vars = all_env_vars_exist(required_env_variables)
try:
adaptor = PureAdaptor(
api_version=env_vars['PURE_API_VERSION'],
api_url=env_vars['PURE_API_URL'],
api_key=env_vars['PURE_API_KEY'],
instance_id=env_vars['INSTANCE_ID'],
input_stream=env_vars['RDSS_INTERNAL_INPUT_STREAM'],
invalid_stream=env_vars['RDSS_MESSAGE_INVALID_STREAM'],
error_stream=env_vars['RDSS_MESSAGE_ERROR_STREAM'],
)
except Exception:
logging.exception('Cannot run the Pure Adaptor.')
sys.exit(1)
adaptor.run()
if __name__ == '__main__':
main()
```
#### File: pure/base/api.py
```python
import abc
class BasePureAPI(abc.ABC):
""" An Abstract Base Class for interactions with PURE APIs """
@abc.abstractmethod
def changed_datasets(self, since_datetime=None):
pass
@abc.abstractmethod
def list_all_datasets(self):
pass
@abc.abstractmethod
def get_dataset(self, uuid):
pass
@abc.abstractmethod
def download_file(self, url, dest):
pass
```
#### File: pure/base/download_manager.py
```python
import abc
class BasePureDownloadManager(abc.ABC):
@abc.abstractproperty
def temp_dir(self):
pass
@abc.abstractmethod
def download_file(self, url, file_name):
pass
```
#### File: pure/base/models.py
```python
import abc
class BasePureDataset(abc.ABC):
@abc.abstractproperty
def doi_upload_key(self):
""" The DOI for this dataset if available, or another key in the
format "no_doi/<header_identifier>"
:returns: string
"""
pass
@abc.abstractproperty
def original_metadata(self):
""" The original metadata that this class is instantiated from
for upload as original_pure_metadata.json.
:returns: string
"""
pass
@abc.abstractproperty
def rdss_canonical_metadata(self):
""" The metadata for this dataset mapped to the schema from the
canonical data model.
:returns: string
"""
pass
@abc.abstractproperty
def files(self):
""" A list of urls and file names for all files in this dataset.
:returns: [(string,string),]
"""
pass
@abc.abstractproperty
def modified_date(self):
""" The last updated date for the dataset as a python datetime object.
        :returns: datetime.datetime
"""
pass
```
#### File: v59/tests/test_models.py
```python
import datetime
from ..models import PureDataset, ws_url_remap
class TestPureDataset():
def setup(self):
self.now = datetime.datetime.now(datetime.timezone.utc)
self.uuid = 'a_test_uuid'
self.doi = 'a/test/doi'
self.url_name_pairs = [
('https://pure_endpoint_url.ac.uk/ws/files/an_id/'
'test_dataset_file_one.txt',
'test_dataset_file_one.txt'),
('https://pure_endpoint_url.ac.uk/ws/files/an_id/'
'test_dataset_file_two.txt',
'test_dataset_file_two.txt')]
self.mock_dataset = {
'uuid': self.uuid,
'doi': self.doi,
'info': {
'modifiedDate': self.now.isoformat()
},
'documents': [{'title': n, 'url': u}
for u, n in self.url_name_pairs]
}
self.pure_dataset = PureDataset(self.mock_dataset)
def test_uuid(self):
assert self.pure_dataset.uuid == self.uuid
def test_modified_date(self):
assert self.pure_dataset.modified_date == self.now
def test_files(self):
assert self.pure_dataset.files == [
(ws_url_remap(u), n) for u, n in self.url_name_pairs]
def test_original_metadata(self):
assert self.pure_dataset.original_metadata == self.mock_dataset
def test_doi_key(self):
assert self.pure_dataset.doi_upload_key == self.doi
def test_no_doi_key(self):
no_doi_ds = self.mock_dataset
no_doi_ds['doi'] = ''
no_doi_pds = PureDataset(no_doi_ds)
assert no_doi_pds.doi_upload_key == 'no_doi/{}'.format(self.uuid)
def teardown(self):
pass
def test_ws_url_remap():
url = 'https://pure_endpoint_url.ac.uk/ws/files/an_id/test_file.pdf'
url_map = 'http://pure_endpoint_url.ac.uk/portal/files/an_id/test_file.pdf'
assert ws_url_remap(url) == url_map
``` |
{
"source": "JiscSD/rdss-shared-libraries",
"score": 3
} |
#### File: rdsslib/kinesis/client.py
```python
import json
import logging
from .errors import MaxRetriesExceededException, DecoratorApplyException
MAX_ATTEMPTS = 6
class KinesisClient(object):
def __init__(self, writer, reader):
"""
Writes and reads messages to and from Kinesis streams
:param writer: handles writing of payloads to Kinesis stream
:param reader: handles reading of payloads from Kinesis stream
:type writer: writer.StreamWriter
:type reader: reader.StreamReader
"""
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.INFO)
self.writer = writer
self.reader = reader
def write_message(self, stream_names, payload, max_attempts=MAX_ATTEMPTS):
"""Write a payload into each stream in stream_names
:param stream_names: Kinesis streams to write to
:param payload: JSON payload
:param max_attempts: maximum number of times to attempt writing
:type stream_names: list of str
:type payload: str
"""
for stream_name in stream_names:
self.writer.put_stream(stream_name, payload, max_attempts)
def read_messages(self, stream_name, seq_number=None):
"""Continuous loop that reads messages from stream_name
:param stream_name: Name of Kinesis stream to read from
:param seq_number: Optional seq number
:type stream_name: str
:return message_gen: Yields messages read from Kinesis stream
:rtype message_gen: generator
"""
message_gen = self.reader.read_stream(
stream_name, seq_number=seq_number)
return message_gen
class EnhancedKinesisClient(KinesisClient):
def __init__(self, writer, reader, error_handler, decorators=None):
"""
Writes and reads messages to and from Kinesis streams with
error handling and message decoration
:param writer: Writes messages to Kinesis stream
:param reader: Reads messages from Kinesis stream
:param error_handler: Handles messages with errors
:param decorators: Enhance messages with extra fields
:type writer: writer.StreamWriter
:type reader: reader.StreamReader
:type error_handler: handlers.MessageErrorHandler
:type decorators: list
"""
super().__init__(writer, reader)
if decorators:
self.decorators = decorators
else:
self.decorators = []
self.error_handler = error_handler
def _apply_decorators(self, payload):
"""
Applies a sequence of decorators that
enhance and modify the contents of a payload
:param payload: Undecorated JSON payload
:type payload: str
:return payload: Decorated JSON payload
:rtype payload: str
"""
decorated_payload = payload
for decorator in self.decorators:
try:
                decorated_payload = decorator.process(decorated_payload)
except Exception:
self.logger.warning(
'Failed to apply decorator {}'.format(decorator.name))
raise DecoratorApplyException()
return decorated_payload
def write_message(self, stream_names, payload, max_attempts=MAX_ATTEMPTS):
"""Write a payload into each stream in stream_names
:param stream_names: Kinesis streams to write to
:param payload: JSON payload
:param max_attempts: Max number of times to attempt writing
:type stream_names: list of str
:type payload: str
:type max_attempts: int
"""
try:
json.loads(payload)
except json.decoder.JSONDecodeError:
self.error_handler.handle_invalid_json(payload)
return
decorated_payload = self._apply_decorators(payload)
for stream_name in stream_names:
try:
super().write_message([stream_name],
decorated_payload,
max_attempts)
except MaxRetriesExceededException as e:
stream_name = e.args[0]
error_code = 'GENERR005'
                error_description = 'Maximum retry attempts {0} exceeded '\
                                    'for stream {1}'.format(max_attempts,
stream_name)
self.error_handler.handle_error(decorated_payload,
error_code,
error_description)
def handle_error(self, payload, error_code, error_description):
""" Allows errors to be posted to the stream occurring from
activities like payload validation
:param payload: JSON payload
:param error_code: Error Code
:param error_description: Description Of Error
"""
self.error_handler.handle_error(payload, error_code, error_description)
```
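
`_apply_decorators` only assumes that each entry in `decorators` exposes a `name` attribute and a `process(payload)` method returning the (possibly modified) JSON string. The concrete `RouterHistoryDecorator` is not shown in this file, so the sketch below is an illustrative stand-in, not the library's implementation:
```python
import json
from datetime import datetime, timezone

class TimestampDecorator:
    """Illustrative decorator: records when the payload passed through."""
    name = 'TimestampDecorator'

    def process(self, payload):
        message = json.loads(payload)
        message.setdefault('messageHeader', {})['decoratedAt'] = \
            datetime.now(timezone.utc).isoformat()
        return json.dumps(message)
```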
#### File: rdsslib/kinesis/factory.py
```python
import boto3
from .client import KinesisClient, EnhancedKinesisClient
from .decorators import RouterHistoryDecorator
from .handlers import MessageErrorHandler
from .reader import StreamReader
from .writer import StreamWriter
def kinesis_client_factory(
client_type,
invalid_stream_name='invalid_stream',
error_stream_name='error_stream',
read_interval=0.2):
""" Create customised instances of KinesisClient or its subclasses
:param client_type: Specifies the type of client that the factory
should construct
:return: An instance of Kinesis client
:rtype: client.KinesisClient or client.EnhancedKinesisClient
"""
boto_client = boto3.client('kinesis')
writer = StreamWriter(client=boto_client)
reader = StreamReader(client=boto_client, read_interval=read_interval)
if client_type == 'basic':
return KinesisClient(writer=writer,
reader=reader)
elif client_type == 'enhanced':
decorators = [RouterHistoryDecorator()]
handler = MessageErrorHandler(invalid_stream_name=invalid_stream_name,
error_stream_name=error_stream_name,
writer=writer)
return EnhancedKinesisClient(writer=writer,
reader=reader,
error_handler=handler,
decorators=decorators)
```
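
A minimal usage sketch of the factory above: the stream names and payload are placeholders, and AWS credentials plus a default region are assumed to be configured in the environment so that `boto3.client('kinesis')` can connect.
```python
import json
from rdsslib.kinesis.factory import kinesis_client_factory

client = kinesis_client_factory('enhanced',
                                invalid_stream_name='invalid_stream',
                                error_stream_name='error_stream')
payload = json.dumps({'messageHeader': {'id': 'abc-123'},
                      'messageBody': {'some': 'message'}})
client.write_message(['main_stream'], payload)       # fan out to one or more streams
for record in client.read_messages('main_stream'):   # generator; loops until stopped
    print(record)
    break
```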
#### File: tests/kinesis/test_handlers.py
```python
import json
import pytest
from rdsslib.kinesis.client import EnhancedKinesisClient
from rdsslib.kinesis import handlers
from .kinesis_helpers import MockStreamWriter
class TestMessageErrorHandler(object):
def setup(self):
self.mock_writer = MockStreamWriter()
self.handler = handlers.MessageErrorHandler(
invalid_stream_name='invalid_stream',
error_stream_name='error_stream',
writer=self.mock_writer
)
@pytest.fixture
def serialised_payload(self):
return json.dumps({
'messageHeader': {
'id': '90cbdf86-6892-4bf9-845f-dbd61eb80065'
},
'messageBody': {
'some': 'message'
}
})
def test_invalid_json_handling(self, serialised_payload):
self.handler.handle_invalid_json(serialised_payload)
payload = self.mock_writer.streams['invalid_stream'][0]
assert json.loads(payload)['messageBody'] == {'some': 'message'}
def test_error_handling_with_valid_json(self, serialised_payload):
self.handler.handle_error(
serialised_payload, 'ERROR', 'Error occurred')
payload = self.mock_writer.streams['error_stream'][0]
assert json.loads(payload)['messageBody'] == {'some': 'message'}
def test_error_handling_from_client_with_valid_json(self,
serialised_payload):
mock_client = EnhancedKinesisClient(None,
None,
self.handler,
None)
error = 'Generated Test Message to Error'
mock_client.handle_error(payload=serialised_payload,
error_code='TESTERR001',
error_description=error)
payload = self.mock_writer.streams['error_stream'][0]
assert json.loads(payload)['messageBody'] == {'some': 'message'}
```
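
These tests import `MockStreamWriter` from `kinesis_helpers`, which is not included in this file. Judging from how it is used here, it only needs a `streams` mapping and a `put_stream` method that records payloads instead of calling Kinesis; a plausible sketch (an assumption, not the repository's actual helper) is:
```python
from collections import defaultdict

class MockStreamWriter:
    """Test double that collects payloads per stream name in memory."""

    def __init__(self):
        self.streams = defaultdict(list)

    def put_stream(self, stream_name, payload, max_attempts=6):
        self.streams[stream_name].append(payload)
```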
#### File: tests/kinesis/test_reader.py
```python
import boto3
import json
from .kinesis_helpers import KinesisMixin
from moto import (
mock_kinesis,
)
import pytest
from rdsslib.kinesis import reader
from unittest.mock import (
patch,
)
class TestStreamReader(KinesisMixin):
"""Test Kinesis Stream Reader."""
@pytest.fixture
def client(self):
return boto3.client('kinesis')
def test_read_stream_returns_message(self, serialised_payload, client):
s_reader = reader.StreamReader(client=client, read_interval=0.2)
s_reader.client.create_stream(StreamName='test_stream', ShardCount=1)
s_reader.client.put_record(StreamName='test_stream',
Data=serialised_payload,
PartitionKey='testkey')
record_gen = s_reader.read_stream('test_stream')
msg = next(record_gen)
decoded = json.loads(msg['Data'].decode('utf-8'))
assert decoded['messageBody'] == {'some': 'message'}
@mock_kinesis
def test_read_interval_respected(self):
stream_name = 'test-stream'
kinesis_client = boto3.client('kinesis', region_name='us-east-1')
kinesis_client.create_stream(StreamName=stream_name, ShardCount=1)
kinesis_client.put_record(
StreamName=stream_name,
PartitionKey=stream_name,
Data=json.dumps({'some': 'data'})
)
s_reader = reader.StreamReader(client=kinesis_client, read_interval=20)
records = s_reader.read_stream(stream_name)
seconds_slept = None
# The production code is in an infinite loop of sleeps, so we patch
# sleep to throw an exception to get out of it in the test
def mock_sleep(seconds):
nonlocal seconds_slept
seconds_slept = seconds
raise Exception()
next(records)
with patch('time.sleep', side_effect=mock_sleep):
try:
next(records)
except Exception:
pass
assert seconds_slept == 20
``` |
{
"source": "jiseongg/ParaDySE",
"score": 3
} |
#### File: ParaDySE/bin/calc_dominator.py
```python
import argparse
import sys
import pdb
import logging
TARGET_F_NAME = 'HM_TARGET'
class FunctionInfo:
def __init__(self, fid, fname, entry_node):
self.fid = fid # Function ID which is a number increasing from 1.
self.fname = fname
self.entry_node = entry_node # ID of the entry node in CFG.
self.branches = {}
self.covered_branches = set([])
def set_branches(self, branches):
self.branches = branches
class ProgramInfo:
def __init__(self):
self.flist = []
self.branches = {}
self.bpair = {}
self.btofid = {}
def build(self, path_to_branches, path_to_cfg_func_map):
# Fill in basic function information such as fid, fname, entry_node.
fid = 1
with open(path_to_cfg_func_map) as fin:
for line in fin:
cols = line.strip().split()
fname = cols[0]
entry_node = int(cols[1])
finfo = FunctionInfo(fid, fname, entry_node)
self.flist.append(finfo)
fid += 1
# Fill in branch information.
with open(path_to_branches) as fin:
while 1:
line = fin.readline()
if not line:
break
cols = line.strip().split()
fid = int(cols[0])
numbranch = int(cols[1])
fun_branches = {}
while numbranch > 0:
line = fin.readline()
cols = line.strip().split()
b1 = int(cols[0])
b2 = int(cols[1])
assert b1 not in self.branches
assert b2 not in self.branches
self.branches[b1] = True
self.branches[b2] = True
fun_branches[b1] = True
fun_branches[b2] = True
self.btofid[b1] = fid
self.btofid[b2] = fid
assert b1 not in self.bpair
assert b2 not in self.bpair
self.bpair[b1] = b2
self.bpair[b2] = b1
numbranch -= 1
self.flist[fid-1].set_branches(fun_branches)
def print_info(self):
for finfo in self.flist:
print finfo.fid, finfo.fname, finfo.entry_node, len(finfo.branches)
def get_entry_nodes(self):
# Return a list of entry_node of functions.
entry_nodes = []
for finfo in self.flist:
entry_nodes.append(finfo.entry_node)
return entry_nodes
def get_fname(self, entry_node):
for finfo in self.flist:
if finfo.entry_node == entry_node:
return finfo.fname
def get_num_branches_f(self, entry_node):
for finfo in self.flist:
if finfo.entry_node == entry_node:
return len(finfo.branches)
def compact_cfg(G, RG, branches):
# Edge based compaction.
# If a node has one outgoing edge and the successor node has one incoming edge
# then we can merge the two nodes into one.
logging.info('Number of nodes before compaction:%d', len(G))
CHANGED = True
while CHANGED:
CHANGED = False
for (node, succs) in G.items():
if len(succs) == 1:
succ = succs[0]
if len(RG[succ]) == 1 and succ not in branches:
G[node] = G[succ]
del G[succ]
CHANGED = True
break
logging.info('Number of nodes after compaction:%d', len(G))
def print_graph_info(G):
print "Number of nodes: %d" %(len(G.keys()),)
def num_func(path_to_cfg, path_to_branches, path_to_cfg_func_map):
# Print the number of functions having branches.
pinfo = ProgramInfo()
pinfo.build(path_to_branches, path_to_cfg_func_map)
print len([function for function in pinfo.flist if len(function.branches) > 0])
def num_br_func(path_to_cfg, path_to_branches, path_to_cfg_func_map):
# For each function, print the number of branches.
pinfo = ProgramInfo()
pinfo.build(path_to_branches, path_to_cfg_func_map)
for func in sorted(pinfo.flist, key=lambda f: f.fname):
print func.fname, len(func.branches)
def print_branch(path_to_cfg, path_to_branches, path_to_cfg_func_map):
# Print all the branch ids.
pinfo = ProgramInfo()
pinfo.build(path_to_branches, path_to_cfg_func_map)
for bid in sorted(pinfo.branches.keys()):
print bid,
def cov_br_func(path_to_cfg, path_to_branches, path_to_cfg_func_map, path_to_covered_branches):
# For each function, print the number of branches.
pinfo = ProgramInfo()
pinfo.build(path_to_branches, path_to_cfg_func_map)
fin = open(path_to_covered_branches)
for br in fin:
# get funid of br
funid = pinfo.btofid[int(br.strip())]
pinfo.flist[funid-1].covered_branches.add(br)
for func in sorted(pinfo.flist, key=lambda f: f.fname):
print func.fname, "%d/%d" %(len(func.covered_branches), len(func.branches))
def calc_dominator(path_to_cfg, path_to_branches, path_to_cfg_func_map):
# Calculate dominator and write dominator and dominator tree.
pinfo = ProgramInfo()
pinfo.build(path_to_branches, path_to_cfg_func_map)
logging.info("Number of branches: %d", len(pinfo.branches))
# pinfo.print_info()
# Graph; key:node, value:list of successors.
G = build_graph(path_to_cfg)
logging.info("Number of nodes in ICFG: %d", len(G.keys()))
# Due to cil transformation, unreachable nodes could be introduced.
# Remove them.
unreachable_nodes = find_unreachable_nodes(G, pinfo.get_entry_nodes())
for node in unreachable_nodes:
del G[node]
logging.info("Number of unreachable nodes in ICFG: %d", len(unreachable_nodes))
# Reversed graph; key:node, value:list of predecessors.
RG = build_reverse_graph(G)
compact_cfg(G, RG, pinfo.branches)
sanity_check_compaction(G, set(pinfo.branches.keys()) - set(unreachable_nodes))
RG = build_reverse_graph(G) # Rebuild reverse graph after compaction.
# Find root nodes of disconnected graphs in G.
root_nodes = find_root_nodes(RG, pinfo.get_entry_nodes())
logging.info("Number of root nodes: %d", len(root_nodes))
for root_node in root_nodes:
logging.info("Root node: %s (%d)", pinfo.get_fname(root_node),
pinfo.get_num_branches_f(root_node))
dominator = {}
# DFS order dominator
for root in sorted(root_nodes):
dfs_nodes = []
dfs_visited = {}
# get reachable nodes from a root
dfs(root, G, dfs_nodes, dfs_visited)
# make a sub graph
sub_g = {}
for node in dfs_nodes:
sub_g[node] = G[node]
sub_r = build_reverse_graph(sub_g)
dom = dominator2(root, sub_g, sub_r, dfs_nodes)
for k in dom.keys():
if k not in dominator:
dominator[k] = dom[k]
else:
dominator[k] = dominator[k] | dom[k]
print_dom(dominator, pinfo.branches)
print_dom_tree(RG, dominator, pinfo.branches)
#make_target_file(RG, pinfo)
def make_target_file(G, pinfo):
#Find entry_node of target function.
for fun in pinfo.flist:
if fun.fname == TARGET_F_NAME:
entry_node = fun.entry_node
target_branches = []
for node in G[entry_node]:
t_branch = first_reachable_branches(G, pinfo, node)
target_branches.append(t_branch)
logging.info('Number of targets in CFG: %d', len(G[entry_node]))
with open('target', 'w') as fout:
for t_branch in target_branches:
for t in t_branch:
fout.write('%d ' %(t,))
fout.write('\n')
fout.close()
def first_reachable_branches(G, pinfo, node):
#Find all first reachable branch in G from node.
# If node itself is a branch then return it.
if node in pinfo.branches:
return [node]
visited = set([node])
q = [node]
branches = []
while q:
node = q.pop(0)
for succ in G[node]:
if succ not in visited:
visited.add(succ)
if succ in pinfo.branches:
branches.append(succ)
else:
q.append(succ)
return branches
def sanity_check_compaction(G, branches):
# Branch nodes must survive after compaction.
for br in branches:
if br not in G:
print "Branch missing after compaction:", br
def print_dom(dom, branches):
# dom includes dominator information for all nodes in ICFG.
    # Print dom information for branch nodes only.
fout = open('dominator', 'w')
for node in sorted(dom.keys()):
if node not in branches: continue
l = [str(x) for x in dom[node] if x in branches]
fout.write("%s %s\n" %(node, ' '.join(l)) )
fout.close()
def print_dom_tree(RG, dom, branches):
# dom includes dominator information for all nodes in ICFG.
# Print dominator tree information for branch nodes only.
# For each branch, find immediate dominator along a DFS path in RG.
fout = open('dominator_tree', 'w')
for node in sorted(dom.keys()):
if node not in branches: continue # This is not branch node.
if len(dom[node]) == 1: continue # This branch has no dominator.
DFSStack = [node]
DFSVisited = {}
while DFSStack:
current_node = DFSStack.pop(-1)
DFSVisited[current_node] = True
if (current_node != node and current_node in branches
and current_node in dom[node]):
# Found the immediate dominator.
fout.write("%s %s\n" %(node, current_node) )
break
for pred in RG[current_node]:
if pred not in DFSVisited:
DFSStack.append(pred)
fout.close()
# functions for logging
def _print_branches(branches):
for node in sorted(branches.keys()):
print node, branches[node]
def _print_dominator(nodes, domin):
for n in nodes:
print n,":", sorted(domin[n])
def find_root_nodes(r, fids):
    # If the CFG contains function-call edges, the per-function CFGs are not
    # separated; several function CFGs can be connected into one large graph.
    # Taking this into account, find only the root nodes of the whole graph:
    # a function's entry node is a root node if it has no predecessors.
roots = list()
for fid in fids:
if fid in r and len(r[fid]) == 0:
roots.append(fid)
#print 'root node:', fid, fun_id[fid]
return roots
def find_unreachable_nodes(G, entry_nodes):
# Do DFS search and find all visited nodes.
dfs_visited = dict()
for entry_node in entry_nodes:
if entry_node not in dfs_visited:
_dfs(entry_node, G, dfs_visited)
all_nodes = G.keys()
return set(all_nodes) - set(dfs_visited.keys())
def _dfs(node, G, dfs_visited):
assert node not in dfs_visited
dfs_visited[node] = True
for child in G[node]:
if child not in dfs_visited:
_dfs(child, G, dfs_visited)
def dominator2(root, graph, rgraph, dfs_order):
change = True
dom = {}
for node in dfs_order:
if node == root:
dom[node] = set([node])
else:
dom[node] = set(dfs_order)
while change:
change = False
for node in dfs_order:
if node == root: continue
t = set(dfs_order)
for p in rgraph[node]:
t = t & dom[p]
t.add(node)
if len(t ^ dom[node]) > 0:
change = True
dom[node] = t
return dom
def dfs(node, G, dfs_nodes, dfs_visited):
#print "dfs_visited:", node
dfs_visited[node] = True
dfs_nodes.append(node)
for child in G[node]:
if child not in dfs_visited:
dfs(child, G, dfs_nodes, dfs_visited)
def build_graph(path_to_cfg):
fin = open(path_to_cfg)
G = dict()
for line in fin:
cols = line.strip().split()
cols = [int(x) for x in cols]
G[cols[0]] = cols[1:]
fin.close()
sanity_check(G)
return G
def build_reverse_graph(G):
r = dict()
sanity_check(G)
for k in G.keys():
r[k] = list()
for node in G.keys():
for child in G[node]:
r[child].append(node)
sanity_check(r)
return r
def sanity_check(G):
allchildnodes = set()
for k in G.keys():
allchildnodes = allchildnodes | set(G[k])
if len(allchildnodes - set(G.keys())) > 0:
print "SANITY problem: Some nodes are not key of Graph."
print set(G.keys()) ^ allchildnodes
sys.exit(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Calculate dominators.')
parser.add_argument("--cfg", default="./cfg", help="path to cfg file")
parser.add_argument("--branches", default="./branches", help="path to branch file")
parser.add_argument("--cfg_func_map", default="./cfg_func_map", help="path to cfg_func_map file")
parser.add_argument("--covered_branches", default="./covered_branches", help="path to covered branches file")
parser.add_argument("--log", default='INFO', choices=['INFO', 'DEBUG'], help="logging level")
# Operational mode. "dominator" mode calculates dominators which is the default.
# "numfunc" mode calculates the number of functions having branches.
parser.add_argument("--mode", default='dominator',
choices=[
# Calculate dominator and write dominator and dominator tree.
'dominator',
# Print the number of functions having branches.
'numfunc',
# For each function, print the number of branches.
'num_br_func',
# Print the number of covered branches for each function.
'cov_br_func',
# Print branch id.
'print_branch',
],
help="Operational mode")
args = parser.parse_args()
logger = logging.getLogger('root')
FORMAT = "[%(funcName)-20s] %(message)s"
if args.log == 'INFO':
logging.basicConfig(level=logging.INFO, format=FORMAT)
elif args.log == 'DEBUG':
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
if args.mode == 'dominator':
calc_dominator(args.cfg, args.branches, args.cfg_func_map)
elif args.mode == 'numfunc':
num_func(args.cfg, args.branches, args.cfg_func_map)
elif args.mode == 'num_br_func':
num_br_func(args.cfg, args.branches, args.cfg_func_map)
elif args.mode == 'cov_br_func':
cov_br_func(args.cfg, args.branches, args.cfg_func_map, args.covered_branches)
elif args.mode == 'print_branch':
print_branch(args.cfg, args.branches, args.cfg_func_map)
else:
        print 'Please select a valid mode.'
```
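
A minimal sketch of how `dominator2` behaves on a diamond-shaped graph, assuming `build_reverse_graph` and `dominator2` are available in the current namespace (the script itself targets Python 2, so either run this under Python 2 or copy the two pure functions out):
```python
# Diamond CFG: 1 -> {2, 3}, 2 -> 4, 3 -> 4.
G = {1: [2, 3], 2: [4], 3: [4], 4: []}
RG = build_reverse_graph(G)     # predecessors: {1: [], 2: [1], 3: [1], 4: [2, 3]}
dfs_order = [1, 2, 4, 3]        # DFS order from the root, as calc_dominator computes it
dom = dominator2(1, G, RG, dfs_order)
# Node 4 is reachable through either branch, so only the root and the node
# itself dominate it: dom[4] == {1, 4}, dom[2] == {1, 2}, dom[3] == {1, 3}.
```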
#### File: ParaDySE/scripts/fullauto.py
```python
from multiprocessing import Process
import signal
import subprocess
import os
import sys
import random
import json
import argparse
import datetime
import shutil
import re
start_time = datetime.datetime.now()
date = start_time.strftime('%m')
day = start_time.strftime('%m%d')
configs = {
'script_path': os.path.abspath(os.getcwd()),
'date': date,
'day': day,
'top_dir': os.path.abspath('../experiments/')
}
def load_pgm_config(config_file):
with open(config_file, 'r') as f:
parsed = json.load(f)
return parsed
def run_all(pgm_config, n_iter, n_parallel):
#make final_log folder
for k in range(1, 2):
final_dir = "/".join([configs['top_dir'], str(k)+ pgm_config['pgm_name']+"__all__logs"])
os.makedirs(final_dir)
print final_dir
#make timelog and w_folder
all_dir = "/".join([final_dir, "all_logs"])
os.makedirs(all_dir)
time_dir = "/".join([final_dir, "time_logs"])
os.makedirs(time_dir)
print time_dir
w_dir = "/".join([final_dir, "w_"+ pgm_config['pgm_name']])
os.makedirs(w_dir)
print w_dir
scr_dir = configs['script_path']
os.chdir(scr_dir)
print scr_dir
genw_cmd = " ".join(["python", "subscripts/gen_weights.py", args.n_iter])
print genw_cmd
os.system(genw_cmd)
cpw_final_cmd = " ".join(["cp -r", "1_weights", w_dir])
print cpw_final_cmd
os.system(cpw_final_cmd)
f = open(pgm_config['pgm_name']+"_topcheck_log", 'a')
f.writelines(["0\n"])
f.close()
for i in range(1, 20):
#find - check - refine
find_cmd = " ".join(["python", "subscripts/1find.py", args.pgm_config, args.n_iter, args.n_parallel, str(i)])
print find_cmd
os.system(find_cmd)
check_cmd = " ".join(["python", "subscripts/2check.py", args.pgm_config, str(i)])
print check_cmd
os.system(check_cmd)
refine_cmd = " ".join(["python", "subscripts/3refine.py", args.pgm_config, args.n_iter, str(i)])
print refine_cmd
os.system(refine_cmd)
#clean folder
find_dir = "/".join([configs['top_dir'], configs['date']+"__find"+str(i), pgm_config['pgm_name']])
os.chdir(find_dir)
print find_dir
for j in range(1, n_parallel+1):
rm_cmd = " ".join(["rm -rf", str(j)])
print rm_cmd
os.system(rm_cmd)
#copy_find_logs
findtotal_cmd = "__".join([pgm_config['pgm_name'],"find"+str(i), "total.log"])
cp_findtotal_cmd = " ".join(["cp -r", findtotal_cmd, time_dir])
print cp_findtotal_cmd
os.system(cp_findtotal_cmd)
findlogs_cmd = "__".join([pgm_config['pgm_name'],"find"+str(i), "logs/*.log"])
cp_findlogs_cmd = " ".join(["cp -r", findlogs_cmd, all_dir])
print cp_findlogs_cmd
os.system(cp_findlogs_cmd)
#copy_check_logs
check_dir = "/".join([configs['top_dir'], configs['date']+"__check"+str(i), pgm_config['pgm_name']])
os.chdir(check_dir)
print check_dir
checktotal_cmd = "__".join([pgm_config['pgm_name'],"check"+str(i), "total.log"])
cp_checktotal_cmd = " ".join(["cp -r", checktotal_cmd, time_dir])
print cp_checktotal_cmd
os.system(cp_checktotal_cmd)
checklogs_cmd = "__".join([pgm_config['pgm_name'],"check"+str(i), "logs/*.log"])
cp_checklogs_cmd = " ".join(["cp -r", checklogs_cmd, all_dir])
print cp_checklogs_cmd
os.system(cp_checklogs_cmd)
#check saturation
os.chdir(scr_dir)
print scr_dir
f = open(pgm_config['pgm_name']+"_topcheck_log", 'r')
lines = f.readlines()
if int(float(lines[len(lines)-1]))<=int(float(lines[len(lines)-2])):
print "Saturation !!\n"
f.close()
break
f.close()
#copy_w
cpw2_final_cmd= " ".join(["cp -r", str(i+1)+"_weights", w_dir])
print cpw2_final_cmd
os.system(cpw2_final_cmd)
os.chdir(scr_dir)
#the best w's average coverage
f = open("top2w_"+ pgm_config['pgm_name']+"_log", 'r')
lines = f.readlines()
topw = []
n = len(lines)
topw = lines[len(lines)-2].split()
cp_topw = " ".join(["cp", str(n-1)+"_weights/" + topw[2], pgm_config['pgm_dir']+pgm_config['exec_dir']+"/best.w"])
mv_topw = " ".join(["cp", str(n-1)+"_weights/" + topw[2], final_dir+"/best.w"])
os.system(cp_topw)
os.system(mv_topw)
# average = " ".join(["python", "100.py",args.pgm_config, "20", "10", "1", "ours"])
# os.system(average)
f.close()
    # Clean up intermediate weight folders and move log files to the final directory.
for i in range(1, 10):
rm_cmd = " ".join(["rm -rf", str(i)+"_weights"])
print rm_cmd
os.system(rm_cmd)
cp_topcheck = " ".join(["mv", pgm_config['pgm_name']+"_topcheck_log", final_dir])
os.system(cp_topcheck)
cp_topw = " ".join(["mv", "top2w_"+ pgm_config['pgm_name']+"_log", final_dir])
os.system(cp_topw)
    # Remove intermediate experiment directories.
ex_dir = configs['top_dir']
os.chdir(ex_dir)
log_100 = "/".join([configs['day']+"__ours1", pgm_config['pgm_name'], "logs"])
mv_100 = " ".join(["mv", log_100, final_dir])
print mv_100
#os.system(mv_100)
rm_folder = " ".join(["rm -rf", configs['date']+"*"])
os.system(rm_folder)
print "#############################################"
print "Successfully Generate a Search Heuristic!!!!!"
print "#############################################"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("pgm_config")
parser.add_argument("n_iter")
parser.add_argument("n_parallel")
args = parser.parse_args()
pgm_config = load_pgm_config(args.pgm_config)
n_iter = int(args.n_iter)
n_parallel = int(args.n_parallel)
run_all(pgm_config, n_iter, n_parallel)
```
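
The driver scripts read only a handful of keys from the JSON file passed as `pgm_config`: `pgm_name`, `pgm_dir`, `exec_dir` and `exec_cmd`. A plausible minimal config, with placeholder paths (the exact path conventions should be checked against the ParaDySE benchmark setup), would deserialize to something like:
```python
# What load_pgm_config("grep.json") is expected to return -- all values are
# illustrative placeholders, not the repository's shipped configuration.
pgm_config = {
    "pgm_name": "grep-2.2",
    "pgm_dir": "/path/to/benchmarks/grep-2.2/",
    "exec_dir": "obj/src",
    "exec_cmd": "./grep",
}
```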
#### File: scripts/subscripts/check_w.py
```python
import os
import sys
import random
import json
import argparse
import datetime
import shutil
import re
import subprocess
from multiprocessing import Process, Value
start_time = datetime.datetime.now()
__run_count = Value('i', 0)
configs = {
'script_path': os.path.abspath(os.getcwd()),
'crest_path': os.path.abspath('../bin/run_crest'),
'n_exec': 4000,
'date': datetime.datetime.now().strftime('%m'),
'top_dir': os.path.abspath('../experiments/')
}
def load_pgm_config(config_file):
with open(config_file, 'r') as f:
parsed = json.load(f)
return parsed
def run_all(pgm_config, n_iter, weights, trial):
top_dir = "/".join([configs['top_dir'], configs['date']+"__check"+str(trial), pgm_config['pgm_name']])
log_dir = top_dir + "/" + "__".join([pgm_config['pgm_name'],"check"+str(trial), "logs"])
os.makedirs(log_dir)
procs = []
for w_idx in weights:
procs.append(Process(target=running_function,
args=(pgm_config, top_dir, log_dir, w_idx, n_iter, trial)))
for p in procs:
p.start()
def running_function(pgm_config, top_dir, log_dir, weight_idx, n_iter, trial):
# Prepare directory copies for each weights
instance_dir = "/".join([top_dir, str(weight_idx)])
dir_cp_cmd = " ".join(["cp -r", pgm_config['pgm_dir'], instance_dir])
os.system(dir_cp_cmd)
os.chdir(instance_dir)
os.chdir(pgm_config['exec_dir'])
os.mkdir("logs")
check_log = open(top_dir + "/" + "./" + "__".join([pgm_config['pgm_name'],"check"+str(trial), "total.log"]), 'a')
for iter in range(1, n_iter+1):
(run_cmd, log) = gen_run_cmd(pgm_config, weight_idx, iter)
os.system(run_cmd)
current_time = datetime.datetime.now()
elapsed_time = str((current_time - start_time).total_seconds())
grep_command = " ".join(["grep", '"It: 4000"', log])
grep_line = (subprocess.Popen(grep_command, stdout=subprocess.PIPE,shell=True).communicate())[0]
with __run_count.get_lock():
__run_count.value = __run_count.value + 1
log_to_write = ", ".join([elapsed_time.ljust(10), str(__run_count.value).ljust(10), grep_line]).strip() + '\n'
if log_to_write != "":
check_log.write(log_to_write)
check_log.flush()
shutil.move(log, log_dir)
def gen_run_cmd(pgm_config, weight_idx, iter):
crest = configs['crest_path']
pgm_name = pgm_config['pgm_name']
exec_cmd = pgm_config['exec_cmd']
n_exec = str(configs['n_exec'])
if (pgm_config['pgm_name']).find('expat-') >= 0:
input = "expat.input"
if (pgm_config['pgm_name'] == 'grep-2.2'):
input = "grep.input"
if pgm_config['pgm_name'] == 'gawk-3.0.3':
input = "gawk.input"
if (pgm_config['pgm_name']).find('sed-') >= 0:
input = "sed.input"
if pgm_config['pgm_name'] == 'vim-5.7':
input = "vim.input"
if pgm_config['pgm_name'] == 'tree-1.6.0':
input = "tree.input"
if pgm_config['pgm_name'] == 'replace':
input = "replace.input"
if pgm_config['pgm_name'] == 'floppy':
input = "floppy.input"
if pgm_config['pgm_name'] == 'cdaudio':
input = "cdaudio.input"
if pgm_config['pgm_name'] == 'kbfiltr':
input = "kbfiltr.input"
log = "logs/" + "__".join([pgm_name+"check"+args.trial, str(weight_idx), "ours", str(iter)]) + ".log"
weight = configs['script_path'] +"/"+ str(trial)+ "_weights/" + str(weight_idx) + ".weight"
run_cmd = " ".join([crest, exec_cmd, input, log, n_exec, "-param", weight])
print run_cmd
return (run_cmd, log)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("pgm_config")
parser.add_argument("n_iter")
parser.add_argument("trial")
parser.add_argument("-l", type=lambda s:[int(item) for item in s.split(',')])
args = parser.parse_args()
pgm_config = load_pgm_config(args.pgm_config)
n_iter = int(args.n_iter)
trial = int(args.trial)
weights = args.l
run_all(pgm_config, n_iter, weights, trial)
```
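
For reference, `gen_run_cmd` assembles a `run_crest` invocation of the form `<crest> <exec_cmd> <input> <log> <n_exec> -param <weight>`; with illustrative placeholder paths the result looks roughly like:
```python
# Shape of the command string returned by gen_run_cmd (paths are placeholders).
run_cmd = " ".join([
    "/path/to/ParaDySE/bin/run_crest",      # configs['crest_path']
    "./grep",                               # pgm_config['exec_cmd']
    "grep.input",                           # picked by the pgm_name if-chain
    "logs/grep-2.2check1__3__ours__1.log",  # per-weight, per-iteration log
    "4000",                                 # configs['n_exec']
    "-param",
    "1_weights/3.weight",                   # weight file for this trial
])
```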
#### File: scripts/subscripts/refine_w.py
```python
import random
import os
import sys
weight_file1 = sys.argv[1]
weight_file2 = sys.argv[2]
n_iter = int(sys.argv[3])
trial = int(sys.argv[4])
def read_weights(w_file):
with open(w_file, 'r') as f:
lines = f.readlines()
w = [float(l) for l in lines]
#print w
return w
def refine(weight1, weight2):
weight = []
n=1
for (w1, w2) in zip(weight1, weight2):
if (w1 > 0) and (w2 > 0):
# print(str(n) + ':[ ' + str(min(w1,w2)) + ', 10 ]');
w = random.uniform(min(w1, w2), 10)
elif (w1 < 0) and (w2 < 0):
# print(str(n) + ':[ -10, ' + str(max(w1,w2)) + ' ]');
w = random.uniform(-10, max(w1, w2))
else:
# print(str(n) + '[ -10, 10 ]')
w = random.uniform(-10, 10)
weight.append(w)
n=n+1
return weight
def gen_weight_file(n_weights):
w1 = read_weights(weight_file1)
w2 = read_weights(weight_file2)
for idx in range(1, n_weights+1):
weights = refine(w1, w2)
fname = str(trial+1) + "_weights/" + str(idx) + ".weight"
with open(fname, 'w') as f:
for w in weights:
f.write(str(w) + "\n")
os.mkdir(str(trial+1)+"_weights")
gen_weight_file(n_iter)
``` |
{
"source": "jishadav/fundmaster",
"score": 4
} |
#### File: jishadav/fundmaster/database_helpers.py
```python
import sqlite3
import datetime
def string_to_date(date_string):
"""Convert the date string to datetime object"""
return datetime.datetime.strptime(date_string, '%Y-%m-%d').date()
class Db():
'''
Data base helper functions
'''
def __init__(self):
self.database = "fundmaster_db.db"
        self.database_connect()
        self.database_init()
    def database_connect(self):
        """Connect to the SQLite3 database."""
        self.connection = sqlite3.connect(self.database)
        self.cursor = self.connection.cursor()
    def database_init(self):
"""Initialise the SQLite3 database."""
self.cursor.execute("CREATE TABLE IF NOT EXISTS company (\
symbol TEXT UNIQUE NOT NULL, security_code TEXT, \
name TEXT, industry TEXT)")
self.cursor.execute("CREATE TABLE IF NOT EXISTS stock_prices (\
symbol TEXT NOT NULL references company(symbol) \
ON DELETE CASCADE, \
date TEXT, \
open REAL, high REAL, low REAL, \
close REAL, volume REAL, \
CONSTRAINT unq UNIQUE (symbol, date))")
self.cursor.execute("CREATE TABLE IF NOT EXISTS fundamentals (\
symbol TEXT NOT NULL references company(symbol) \
ON DELETE CASCADE, \
date TEXT NOT NULL, \
sales REAL, net_profit REAL, eps REAL, url TEXT, \
CONSTRAINT unq UNIQUE (symbol, date))")
def close(self):
self.connection.close()
def insert_company_data(self, symbol, security_code, name, industry):
"""Insert the company data to db"""
try:
self.cursor.execute("INSERT INTO company (symbol, security_code, name, industry) \
VALUES (:symbol, :security_code, \
:name, :industry)",
{"symbol": symbol, "security_code":
security_code, "name": name,
"industry": industry})
self.connection.commit()
except sqlite3.IntegrityError as e:
pass
def get_company_details_from_sec_code(self, security_code):
'''Return the Company details for the symbol'''
self.cursor.execute(
"SELECT security_code, symbol, name FROM company where security_code = :security_code",
{"security_code": security_code})
details = self.cursor.fetchone()
if details:
company = {
'security_code': details[0], 'symbol': details[1], 'name': details[2]}
return company
else:
return False
def check_fundamental_url_already_inserted(self, url):
'''Check whether the fundamental url is already scraped and updated the details into db'''
self.cursor.execute(
"SELECT * FROM fundamentals WHERE url = :url", {"url": url})
res = self.cursor.fetchall()
if len(res) == 0:
return False
else:
return True
def insert_fundamentals_data(self, fundamental_dict):
"""Insert the index data to db"""
try:
print(fundamental_dict)
self.cursor.execute("INSERT INTO fundamentals (symbol, date, sales, net_profit, eps, url) \
VALUES (:symbol, :date, :sales, :net_profit, :eps, :url)",
{"symbol": fundamental_dict['symbol'], "date": fundamental_dict['date'],
"sales": fundamental_dict['sales'],
"net_profit": fundamental_dict['net_profit'], "eps": fundamental_dict['eps'],
"url": fundamental_dict['url']})
self.connection.commit()
except sqlite3.IntegrityError as e:
pass
def get_companies_with_fundamentals(self):
self.cursor.execute(
"SELECT DISTINCT symbol FROM fundamentals")
res = self.cursor.fetchall()
if len(res) == 0:
return False
else:
return [r[0] for r in res]
def check_bhav_copy_already_inserted(self, date):
'''Check whether the fundamental url is already scraped and updated the details into db'''
self.cursor.execute(
"SELECT * FROM stock_prices WHERE date = :date", {"date": date})
res = self.cursor.fetchall()
if len(res) == 0:
return False
else:
return True
def insert_stock_price_data(self, symbol, date, open_, high, low, close, volume):
"""Insert the stock data to db"""
try:
self.cursor.execute("INSERT INTO stock_prices (symbol, date, open, high, low, close, volume) \
VALUES (:symbol, :date, :open, :high, :low, :close, :volume)",
{"symbol": symbol, "date": date, "open": open_, "high": high, "low": low, "close": close, "volume": volume})
self.connection.commit()
except sqlite3.IntegrityError as e:
pass
def get_available_dates(self, symbol):
""" Return the period end date of quarters from the fundamentals tabls for the symbol provided"""
self.cursor.execute(
"SELECT date FROM fundamentals WHERE symbol = :symbol ORDER BY date", {"symbol": symbol})
res = self.cursor.fetchall()
if len(res) == 0:
return False
dates = [string_to_date(date[0]) for date in res]
return dates
def get_over_the_period_fundamental_data(self, symbol, cur_date, over_period_date):
"""
Return the fundamental data for the current date and an year before that for the symbol provided
"""
self.cursor.execute("SELECT s.sales, l.sales, s.net_profit, l.net_profit, s.eps, l.eps \
FROM fundamentals s JOIN fundamentals l \
on s.symbol = l.symbol \
WHERE s.symbol = :symbol \
AND s.date = :over_period_date and l.date = :cur_date ",
{"symbol": symbol, "cur_date": cur_date,
"over_period_date": over_period_date})
        # (cur_quarter_sales, over_the_year_quarter_sales, cur_quarter_net_profit,
        #  over_the_year_net_profit, cur_quarter_eps, over_the_year_quarter_eps)
        result = self.cursor.fetchone()
return result
def get_stock_price_for_period(self, symbol, start_date, end_date):
"""
Return the stock prices over a period provided
"""
self.cursor.execute(
"SELECT avg(close) FROM stock_prices WHERE symbol = :symbol AND date >= :start_date AND date <= :end_date ORDER BY date", {"symbol": symbol, "start_date": start_date, "end_date": end_date})
res = self.cursor.fetchone()
return res[0]
``` |
{
"source": "jishanshaikh4/lib-math",
"score": 3
} |
#### File: Ceil-and-floor/Python3/ceil.py
```python
def ceil(n):
    """Return the smallest integer greater than or equal to n."""
    i = int(n)
    return i if i >= n else i + 1

if __name__ == "__main__":
    # Quick sanity checks.
    print(ceil(2.3), ceil(-2.3), ceil(5))  # expected: 3 -2 5
```
#### File: Ceil-and-floor/Python3/floor.py
```python
def floor(n):
    """Return the largest integer less than or equal to n."""
    i = int(n)
    return i if i <= n else i - 1

if __name__ == "__main__":
    # Quick sanity checks.
    print(floor(2.7), floor(-2.7), floor(5))  # expected: 2 -3 5
```
#### File: set/Python3/set.py
```python
class Set:
    """Minimal list-backed set that keeps only unique elements."""
    def __init__(self):
        self.arr = []
    def push_back(self, a):
        if a not in self.arr:
            self.arr.append(a)
    def delete_element(self, b):
        if b in self.arr:
            self.arr.remove(b)
    def size_of_set(self):
        return len(self.arr)
    def max_element_of_set(self):
        return max(self.arr)
    def min_element_of_set(self):
        return min(self.arr)

if __name__ == "__main__":
    s = Set()
    s.push_back(3)
    s.push_back(1)
    print(s.size_of_set(), s.min_element_of_set(), s.max_element_of_set())  # 2 1 3
``` |
{
"source": "JishantSingh/Ro-Create-Table-From-Sheet",
"score": 2
} |
#### File: JishantSingh/Ro-Create-Table-From-Sheet/create_table_from_sheet.py
```python
from __future__ import print_function, unicode_literals
import os
import io
import re
import sys
import json
import dateutil.parser
import snowflake.connector
import pygsheets
DEFAULT_DB_CONFIG_FILENAME = os.path.abspath('db.json')
DEFAULT_SERVICE_ACCOUNT_FILE = os.path.abspath('service-account.json')
DEFAULT_SCOPES = [
'https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/spreadsheets'
]
def read_db_config(filename=None):
"""Read and return a JSON object from `filename`."""
filename = filename or DEFAULT_DB_CONFIG_FILENAME
with open(filename, 'r') as infile:
return json.load(infile)
def chop_at_blank(row):
"""Chop `row` off at its first empty element."""
result = []
for item in row:
if item == '':
break
result.append(item)
return result
def drop_empty_rows(rows):
"""Return `rows` with all empty rows removed."""
return [row for row in rows if any(val.strip() for val in row)]
def _read_worksheet(sheet_id, worksheet_id=None, service_account_file=None,
scopes=None):
service_account_file = service_account_file or DEFAULT_SERVICE_ACCOUNT_FILE
scopes = scopes or DEFAULT_SCOPES
api = pygsheets.authorize(service_account_file=service_account_file,
scopes=scopes)
sheet = api.open_by_key(sheet_id)
worksheet_id = worksheet_id or 0
if isinstance(worksheet_id, int):
worksheet = sheet[worksheet_id]
elif isinstance(worksheet_id, str):
worksheet = sheet.worksheet_by_title(worksheet_id)
else:
raise Exception('Invalid ID for worksheet: {!r}'.format(worksheet_id))
title = worksheet.title
rows = list(worksheet)
headers = chop_at_blank(rows[0])
data = drop_empty_rows(rows[1:])
return {'title': title, 'headers': headers, 'data': data}
def headers_to_keys(headers):
"""Convert row headers to object keys."""
regex = re.compile(r'[^a-z0-9_]+')
return [regex.sub('_', header.lower()) for header in headers]
def apply_coercions_1(obj, coercions):
"""Return `obj` with `coercions` applied."""
result = {}
for key, val in obj.items():
target = coercions.get(key)
if target in ('int', 'integer'):
val = re.sub(r'[,$]', '', val)
val = int(val) if val else None
elif target == 'float':
val = re.sub(r'[,$]', '', val)
val = float(val) if val else None
elif target == 'date':
val = dateutil.parser.parse(val) if val.strip() else None
val = val.strftime('%Y-%m-%d')
elif target in ('datetime', 'timestamp'):
val = dateutil.parser.parse(val) if val.strip() else None
val = val.strftime('%Y-%m-%d %H:%M:%S')
elif target is not None:
print('Unknown coercion target {!r}'.format(target),
file=sys.stderr)
result[key] = val
return result
def apply_coercions(data, coercions):
"""Return `data` with `coercions` applied to each object."""
return [apply_coercions_1(obj, coercions) for obj in data]
def read_worksheet(sheet_id, worksheet_id=None, coercions=None,
service_account_file=None, scopes=None):
"""Read a worksheet and return a dict.
The dict will have two keys: `title` (the title of the worksheet) and
`data` (a list of dicts, one for each row, mapping column names to values).
The `sheet_id` should be the ID as used by Google Sheets, not the title.
The `worksheet_id` can be either an integer (the ordinal position of the
worksheet) or a string (its title).
"""
objects = []
payload = _read_worksheet(sheet_id, worksheet_id=worksheet_id,
service_account_file=service_account_file,
scopes=scopes)
headers = payload['headers']
keys = headers_to_keys(headers)
for row in payload['data']:
objects.append(dict(zip(keys, row)))
if coercions:
objects = apply_coercions(objects, coercions)
return {'title': payload['title'], 'data': objects}
def build_create_table(schema, table):
"""Return the CREATE TABLE statement as a string."""
return """CREATE OR REPLACE TABLE {}.{} (
source string,
imported_at timestamp_tz,
data variant
);
""".format(schema, table)
def build_insert_rows(schema, table, payload):
"""Return the INSERT INTO statement as a string."""
out = io.StringIO()
out.write('INSERT INTO {}.{}\n'.format(schema, table))
out.write('SELECT column1, column2, parse_json(column3)\n')
out.write('FROM VALUES\n')
title = payload['title']
data = payload['data']
count = len(data)
for i, obj in enumerate(data):
out.write("('{}', current_timestamp, '{}')".format(
title, json.dumps(obj)
))
if i != count - 1:
out.write(',')
out.write('\n')
return out.getvalue()
def load_sheet(schema, table, sheet_id, worksheet=None, coercions=None,
service_account_file=None, config_file=None,
verbose=False, dry_run=False):
"""Load ``schema.table`` from `sheet_id`."""
if isinstance(coercions, str):
coercions = json.loads(coercions)
config_file = config_file or DEFAULT_DB_CONFIG_FILENAME
config = read_db_config(config_file)
payload = read_worksheet(sheet_id, worksheet_id=worksheet,
service_account_file=service_account_file,
coercions=coercions)
create_table = build_create_table(schema, table)
insert_rows = build_insert_rows(schema, table, payload)
with snowflake.connector.connect(**config) as connection:
cursor = connection.cursor()
for statement in create_table, insert_rows:
if verbose:
print(statement)
if not dry_run:
cursor.execute(statement)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--schema', required=True)
parser.add_argument('--table', required=True)
parser.add_argument('--sheet', required=True)
parser.add_argument('--worksheet')
parser.add_argument('--coercions')
parser.add_argument('--db-config')
parser.add_argument('--service-account-file')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--dry-run', action='store_true')
args = parser.parse_args()
load_sheet(args.schema, args.table, args.sheet,
worksheet=args.worksheet,
coercions=args.coercions,
service_account_file=args.service_account_file,
config_file=args.db_config,
verbose=args.verbose,
dry_run=args.dry_run)
``` |
{
"source": "Jishin4477/Djangobot",
"score": 2
} |
#### File: Djangobot/chatbot_tutorial/views.py
```python
from django.views import generic
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from rest_framework import permissions
from .serializers import UserSerializer, GroupSerializer, GetUserSerialiser
from rest_framework.response import Response
import json
import requests
import random
from django.http.response import HttpResponse
from django.shortcuts import render
from django.template import loader
from chatbot_tutorial.models import AllCalls, GetUser
from django.template.loader import render_to_string
from .serializers import AllCallsSerialiser
from datetime import datetime
def chat(request):
print("entered first")
print(request.user)
get_user, created = GetUser.objects.get_or_create(user=request.user)
user_data = GetUser.objects.get(user=request.user)
user_data.date = datetime.now()
user_data.save()
context = {}
return render(request, 'chatbot_tutorial/chatbot.html', context)
def respond_to_websockets(message):
print("entered here")
jokes = {
'stupid': ["""Yo' Mama is so stupid, she needs a recipe to make ice cubes.""",
"""Yo' Mama is so stupid, she thinks DNA is the National Dyslexics Association."""],
'fat': ["""Yo' Mama is so fat, when she goes to a restaurant, instead of a menu, she gets an estimate.""",
""" Yo' Mama is so fat, when the cops see her on a street corner, they yell, "Hey you guys, break it up!" """],
'dumb': ["""Yo' Mama is so dumb, when God was giving out brains, she thought they were milkshakes and asked for extra thick.""",
"""Yo' Mama is so dumb, she locked her keys inside her motorcycle."""]
}
result_message = {
'type': 'text'
}
if 'fat' in message['text']:
result_message['text'] = random.choice(jokes['fat'])
user = GetUser.objects.all().order_by('-date').first()
fat_click = AllCalls.objects.create(entered_val=message['text'], user=user.user)
elif 'stupid' in message['text']:
result_message['text'] = random.choice(jokes['stupid'])
user = GetUser.objects.all().order_by('-date').first()
stupid_data = AllCalls.objects.create(entered_val=message['text'], user=user.user)
elif 'dumb' in message['text']:
result_message['text'] = random.choice(jokes['dumb'])
user = GetUser.objects.all().order_by('-date').first()
dumb_data = AllCalls.objects.create(entered_val=message['text'], user=user.user)
elif message['text'] in ['hi', 'hey', 'hello']:
result_message['text'] = "Hello to you too! If you're interested in yo mama jokes, just tell me fat, stupid or dumb and i'll tell you an appropriate joke."
else:
result_message['text'] = "I don't know any responses for that. If you're interested in yo mama jokes tell me fat, stupid or dumb."
return result_message
def show(request):
get_user = GetUser.objects.all()
context = []
for user in get_user:
fat_data = AllCalls.objects.filter(entered_val__icontains='fat', user=user.user).count()
stupid_data = AllCalls.objects.filter(entered_val__icontains='stupid', user=user.user).count()
dumb_data = AllCalls.objects.filter(entered_val__icontains='dumb', user=user.user).count()
context.append({
"user": user, 'fat': fat_data, 'stupid': stupid_data, 'dumb': dumb_data
})
print(context)
template = loader.get_template('chatbot_tutorial/all_data.html')
response_body = template.render({"obj":context})
return HttpResponse(response_body)
def clear(request):
fat_data = AllCalls.objects.filter(entered_val__icontains='fat').delete()
stupid_data = AllCalls.objects.filter(entered_val__icontains='stupid').delete()
dumb_data = AllCalls.objects.filter(entered_val__icontains='dumb').delete()
template = loader.get_template('chatbot_tutorial/clear_data.html')
response_body = template.render()
return HttpResponse(response_body)
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
permission_classes = [permissions.IsAuthenticated]
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
permission_classes = [permissions.IsAuthenticated]
``` |
{
"source": "JishinMaster/CLBlast",
"score": 2
} |
#### File: generator/generator/pyclblast.py
```python
import os
NL = os.linesep
SEPARATOR = "####################################################################################################"
def to_np_dtype(flavour):
return {
"S": "float32",
"D": "float64",
"C": "complex64",
"Z": "complex128",
"H": "float16",
}[flavour.precision_name]
def cl_type(flavour):
return {
"S": "cl_float",
"D": "cl_double",
"C": "cl_float2",
"Z": "cl_double2",
"H": "cl_half",
}[flavour.precision_name]
def scalar_cython_conversion(scalar, flavour):
scalar_type = flavour.alpha_cl if scalar == "alpha" else flavour.beta_cl
if scalar_type == "float":
return "<cl_float>" + scalar
if scalar_type == "double":
return "<cl_double>" + scalar
if scalar_type in ["cl_float2", "float2"]:
return "<cl_float2>cl_float2(x=" + scalar + ".real,y=" + scalar + ".imag)"
if scalar_type in ["cl_double2", "double2"]:
return "<cl_double2>cl_double2(x=" + scalar + ".real,y=" + scalar + ".imag)"
if scalar_type in ["cl_half", "half"]:
return "<cl_half>" + scalar
raise RuntimeError("Could not convert flavour '%s:%s'" % (flavour.precision_name, scalar_type))
def generate_pyx(routine):
result = ""
if routine.implemented and routine.plain_name() and routine.level in ["1", "2a", "2b", "3", "x"]:
if routine.level == "x" and routine.batched == 0:
return result # level-X routines that are non-batched are not supported at the moment
indent = " "
result += SEPARATOR + NL
result += "# " + routine.description + ": " + routine.short_names() + NL
result += SEPARATOR + NL
result += NL
# Reference C definition
result += "cdef extern from \"clblast_c.h\":" + NL
np_dtypes = []
for flavour in routine.flavours:
if flavour.precision_name in ["S", "D", "C", "Z", "H"]:
result += indent + "CLBlastStatusCode CLBlast" + flavour.name + routine.plain_name() + "("
result += ", ".join(routine.arguments_def_c(flavour)) + ","
result += "cl_command_queue* queue, cl_event* event)" + NL
np_dtypes.append(to_np_dtype(flavour))
result += "" + NL
# Function definition
buffers = routine.inputs[:] + routine.outputs[:]
result += "def " + routine.plain_name() + "(queue, "
result += ", ".join(routine.arguments_python()) + "):" + NL
# Documentation
result += indent + "\"\"\"" + NL
result += indent + "x" + routine.upper_name() + ": " + routine.description + NL
result += indent + "\"\"\"" + NL
result += NL
# Data types and checks
result += indent + "dtype = check_dtype([" + ", ".join(buffers) + "], "
result += "[" + ", ".join(['"%s"' % d for d in np_dtypes]) + "])" + NL
for buf in buffers:
if buf in routine.buffers_vector():
result += indent + "check_vector("
else:
result += indent + "check_matrix("
result += buf + ", \"" + buf + "\")" + NL
result += NL
# Batched checks
if routine.batched == 1: # batched but not strided-batched
lists = [b + "_offsets" for b in buffers] + [s + "s" for s in routine.scalars]
result += indent + "if " + " != ".join(["len(" + l + ")" for l in lists]) + ":" + NL
result += indent + indent + "raise RuntimeError(\"PyCLBlast: 'CLBlastX" + routine.plain_name() + "' failed: length of batch-sized arguments " + ", ".join(lists) + " should be equal\")" + NL
result += indent + "batch_count = len(" + lists[0] + ")" + NL
result += NL
# Batched list to pointer conversions
for buf in buffers:
result += indent + "cdef size_t *" + buf + "_offsets_c = <size_t *> PyMem_Malloc(batch_count * sizeof(size_t))" + NL
result += indent + "for i in range(batch_count):" + NL
result += indent + indent + "" + buf + "_offsets_c[i] = " + buf + "_offsets[i]" + NL
for scalar in routine.scalars:
result += indent + "cdef void *" + scalar + "s_c = <void *> PyMem_Malloc(batch_count * sizeof(dtype_size[dtype]))" + NL
result += indent + "for i in range(batch_count):" + NL
if_prefix = ""
for flavour in routine.flavours:
if flavour.precision_name in ["S", "D", "C", "Z", "H"]:
np_dtype = to_np_dtype(flavour)
result += indent + indent + if_prefix + "if dtype == np.dtype(\"" + np_dtype + "\"):" + NL
scalar_converted = scalar_cython_conversion(scalar + "s[i]", flavour)
result += indent + indent + indent + "(<" + cl_type(flavour) + "*>" + scalar + "s_c)[i] = " + scalar_converted + NL
if_prefix = "el"
result += NL
# Buffer transformation
for buf in buffers:
result += indent + "cdef cl_mem " + buf + "_buffer = <cl_mem><size_t>" + buf + ".base_data.int_ptr" + NL
result += NL
result += indent + "cdef cl_command_queue command_queue = <cl_command_queue><size_t>queue.int_ptr" + NL
result += indent + "cdef cl_event event = NULL" + NL
for option in routine.options:
if option == "a_transpose":
result += indent + "a_transpose = CLBlastTransposeYes if a_transp else CLBlastTransposeNo" + NL
if option == "b_transpose":
result += indent + "b_transpose = CLBlastTransposeYes if b_transp else CLBlastTransposeNo" + NL
if option == "ab_transpose":
result += indent + "ab_transpose = CLBlastTransposeYes if ab_transp else CLBlastTransposeNo" + NL
if option == "side":
result += indent + "side = CLBlastSideRight if right_side else CLBlastSideLeft" + NL
if option == "triangle":
result += indent + "triangle = CLBlastTriangleLower if lower_triangle else CLBlastTriangleUpper" + NL
if option == "diagonal":
result += indent + "diagonal = CLBlastDiagonalUnit if unit_diagonal else CLBlastDiagonalNonUnit" + NL
result += "" + NL
result += indent + "cdef CLBlastStatusCode err" + NL
if_prefix = ""
for flavour in routine.flavours:
if flavour.precision_name in ["S", "D", "C", "Z", "H"]:
np_dtype = to_np_dtype(flavour)
if routine.batched != 1: # regular or strided-batched
argument_names = [x.
replace("layout", "CLBlastLayoutRowMajor").
replace("alpha", scalar_cython_conversion("alpha", flavour)).
replace("beta", scalar_cython_conversion("beta", flavour))
for x in routine.arguments()]
else: # batched but not strided-batched
argument_names = [x.
replace("layout", "CLBlastLayoutRowMajor").
replace("_cpp", "_c").
replace("_offsets", "_offsets_c").
replace("alphas_c", "<" + cl_type(flavour) + "*>alphas_c").
replace("betas_c", "<" + cl_type(flavour) + "*>betas_c")
for x in routine.arguments()]
if routine.batched > 0:
argument_names.append("batch_count")
result += indent + if_prefix + "if dtype == np.dtype(\"" + np_dtype + "\"):" + NL
result += indent + indent + "err = CLBlast" + flavour.name + routine.plain_name()
result += "(" + ", ".join(argument_names) + ", &command_queue, &event)" + NL
if_prefix = "el"
result += indent + "else:" + NL
result += indent + indent + "raise ValueError(\"PyCLBlast: Unrecognized data-type '%s'\" % dtype)" + NL
result += NL
# Cleaning up
if routine.batched == 1: # batched but not strided-batched
for array in [b + "_offset" for b in buffers] + routine.scalars:
result += indent + "PyMem_Free(" + array + "s_c)" + NL
result += NL
result += indent + "if err != CLBlastSuccess:" + NL
result += indent + indent + "raise RuntimeError(\"PyCLBlast: 'CLBlastX" + routine.plain_name() + "' failed: %s\" % get_status_message(err))" + NL
result += indent + "return cl.Event.from_int_ptr(<size_t>event)" + NL
result += NL
return result
``` |
{
"source": "jishminor/model_analyzer",
"score": 2
} |
#### File: monitor/dcgm/dcgm_monitor.py
```python
from model_analyzer.record.types.gpu_free_memory import GPUFreeMemory
from model_analyzer.record.types.gpu_used_memory import GPUUsedMemory
from model_analyzer.record.types.gpu_utilization import GPUUtilization
from model_analyzer.monitor.gpu_monitor import GPUMonitor
from model_analyzer.model_analyzer_exceptions import \
TritonModelAnalyzerException
import model_analyzer.monitor.dcgm.dcgm_agent as dcgm_agent
import model_analyzer.monitor.dcgm.dcgm_fields as dcgm_fields
import model_analyzer.monitor.dcgm.dcgm_field_helpers as dcgm_field_helpers
import model_analyzer.monitor.dcgm.dcgm_structs as structs
from multiprocessing.pool import ThreadPool
import time
class DCGMMonitor(GPUMonitor):
"""
Use DCGM to monitor GPU metrics
"""
# Mapping between the DCGM Fields and Model Analyzer Records
model_analyzer_to_dcgm_field = {
GPUUsedMemory: dcgm_fields.DCGM_FI_DEV_FB_USED,
GPUFreeMemory: dcgm_fields.DCGM_FI_DEV_FB_FREE,
GPUUtilization: dcgm_fields.DCGM_FI_DEV_GPU_UTIL
}
def __init__(self, gpus, frequency, metrics, dcgmPath=None):
"""
Parameters
----------
frequency : int
Sampling frequency for the metric
metrics : list
List of Record types to monitor
dcgmPath : str (optional)
DCGM installation path
"""
super().__init__(gpus, frequency, metrics)
structs._dcgmInit(dcgmPath)
dcgm_agent.dcgmInit()
# Start DCGM in the embedded mode to use the shared library
self.dcgm_handle = dcgm_handle = dcgm_agent.dcgmStartEmbedded(
structs.DCGM_OPERATION_MODE_MANUAL)
# Create DCGM monitor group
self.group_id = dcgm_agent.dcgmGroupCreate(dcgm_handle,
structs.DCGM_GROUP_EMPTY,
"triton-monitor")
# Add the GPUs to the group
for gpu in self._gpus:
dcgm_agent.dcgmGroupAddDevice(dcgm_handle, self.group_id,
gpu.device_id())
frequency = int(self._frequency * 1000)
fields = []
try:
for metric in metrics:
fields.append(self.model_analyzer_to_dcgm_field[metric])
except KeyError:
dcgm_agent.dcgmShutdown()
raise TritonModelAnalyzerException(
f'{metric} is not supported by Model Analyzer DCGM Monitor')
self.dcgm_field_group_id = dcgm_agent.dcgmFieldGroupCreate(
dcgm_handle, fields, 'triton-monitor')
self.group_watcher = dcgm_field_helpers.DcgmFieldGroupWatcher(
dcgm_handle, self.group_id, self.dcgm_field_group_id.value,
structs.DCGM_OPERATION_MODE_MANUAL, frequency, 3600, 0, 0)
def _monitoring_iteration(self):
self.group_watcher.GetMore()
def _collect_records(self):
records = []
for gpu in self._gpus:
device_id = gpu.device_id()
metrics = self.group_watcher.values[device_id]
# Find the first key in the metrics dictionary to find the
# dictionary length
if len(list(metrics)) > 0:
for metric_type in self._metrics:
dcgm_field = self.model_analyzer_to_dcgm_field[metric_type]
for measurement in metrics[dcgm_field].values:
# DCGM timestamp is in nanoseconds
records.append(
metric_type(value=float(measurement.value),
device=gpu,
timestamp=measurement.ts))
return records
def destroy(self):
"""
Destroy the DCGMMonitor. This function must be called
in order to appropriately deallocate the resources.
"""
dcgm_agent.dcgmShutdown()
super().destroy()
```
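
A usage sketch of the monitor. The `GPUMonitor` base class is not shown here, so the `start_recording_metrics`/`stop_recording_metrics` calls below are assumptions about its interface, and `gpus` is a placeholder for whatever device objects model analyzer's GPU discovery returns (each must expose `device_id()`, as used above):
```python
from model_analyzer.monitor.dcgm.dcgm_monitor import DCGMMonitor
from model_analyzer.record.types.gpu_used_memory import GPUUsedMemory
from model_analyzer.record.types.gpu_utilization import GPUUtilization

# gpus: placeholder list of device objects exposing device_id().
monitor = DCGMMonitor(gpus=gpus, frequency=0.01,
                      metrics=[GPUUsedMemory, GPUUtilization])
monitor.start_recording_metrics()            # assumed base-class API
# ... run the inference workload being profiled ...
records = monitor.stop_recording_metrics()   # assumed to return the collected records
monitor.destroy()                            # must be called to shut DCGM down
```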
#### File: model_analyzer/output/output_writer.py
```python
from abc import ABC, abstractmethod
class OutputWriter(ABC):
"""
Interface that receives a table
and writes the table to a file or stream.
"""
@abstractmethod
def write(self, out):
"""
Writes the output to a file
(stdout, .txt, .csv etc.)
Parameters
----------
out : str
The string to be written out
Raises
------
TritonModelAnalyzerException
If there is an error or exception while writing
the output.
"""
```
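
A minimal concrete writer that satisfies this interface might look like the sketch below (`FileWriter` here is illustrative, not necessarily the analyzer's own implementation):
```python
class FileWriter(OutputWriter):
    """Writes output to a file, or to stdout when no filename is given."""

    def __init__(self, filename=None):
        self._filename = filename

    def write(self, out):
        if self._filename:
            with open(self._filename, 'a+') as f:
                f.write(out)
        else:
            print(out, end='')
```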
#### File: model_analyzer/reports/report_manager.py
```python
from model_analyzer.result.constraint_manager import ConstraintManager
from model_analyzer.record.metrics_manager import MetricsManager
from model_analyzer.result.result_table import ResultTable
from .pdf_report import PDFReport
import os
import logging
from numba import cuda
from collections import defaultdict
class ReportManager:
"""
Manages the building and export of
various types of reports
"""
def __init__(self, config):
"""
Parameters
----------
config : AnalyzerConfig
The model analyzer's config containing information
about the kind of reports to generate
"""
self._measurements = defaultdict(list)
self._config = config
self._constraint_strs = self._build_constraint_strings()
self._reports_export_directory = os.path.join(config.export_path,
'reports')
os.makedirs(self._reports_export_directory, exist_ok=True)
def add_result(self, result):
"""
Adds measurements on which the report manager
can do complex analyses or with which it can
build tables and add to reports
Parameters
----------
result: ModelResult
result to be added to report
"""
for measurement in result.top_n_measurements(n=1):
model_name = result.model_name()
model_config = result.model_config()
self._measurements[model_name].append((model_config, measurement))
def export_summary(self, statistics):
"""
Write a PDF summary to disk
Parameters
----------
statistics: AnalyzerStatistics
Object containing all necessary
information about this analyzer run
"""
for model_name in self._measurements:
model_report_dir = os.path.join(self._reports_export_directory,
model_name)
os.makedirs(model_report_dir, exist_ok=True)
output_filename = os.path.join(model_report_dir,
'result_summary.pdf')
summary = self._build_summary_report(model_name=model_name,
statistics=statistics)
logging.info(f"Exporting Summary Report to {output_filename}...")
summary.write_report(filename=output_filename)
def _build_summary_report(self, model_name, statistics):
"""
Builder method for a summary
report.
"""
summary = PDFReport()
total_measurements = statistics.total_measurements(model_name)
total_configurations = statistics.total_configurations(model_name)
num_best_configs = min(self._config.top_n_configs,
total_configurations)
gpu_names, max_memories = self._get_gpu_stats(model_name=model_name)
        static_batch_size = \
            self._measurements[model_name][0][1].perf_config()['batch-size']
constraint_str = self._constraint_strs[
model_name] if self._constraint_strs else "None"
table, summary_sentence = self._build_summary_table(
model_name=model_name,
num_measurements=total_measurements,
gpu_name=gpu_names)
# Add summary sections
summary.add_title(title="Result Summary")
summary.add_subheading(f"Model: {model_name}")
summary.add_paragraph(f"GPUS: {gpu_names}")
summary.add_paragraph(f"Total Available GPU Memory: {max_memories}")
summary.add_paragraph(
f"Client Request Batch Size: {static_batch_size}")
summary.add_paragraph(
f"Request Protocol: {self._config.client_protocol.upper()}")
summary.add_paragraph(f"Constraint targets: {constraint_str}")
summary.add_paragraph(summary_sentence)
summary.add_paragraph(
f"Curves corresponding to the {num_best_configs} best model "
f"configurations out of a total of {total_configurations} are "
"shown in the plots.")
throughput_latency_plot = os.path.join(self._config.export_path,
'plots', model_name,
'throughput_v_latency.png')
caption1 = f"Throughput vs. Latency curves for {num_best_configs} configurations of {model_name}"
memory_latency_plot = os.path.join(self._config.export_path, 'plots',
model_name, 'gpu_mem_v_latency.png')
caption2 = f"GPU Memory vs. Latency curves for {num_best_configs} configurations of {model_name}"
summary.add_images([throughput_latency_plot, memory_latency_plot],
[caption1, caption2])
summary.add_paragraph(
"The maximum GPU memory consumption for each of the above points is"
f" shown in the second plot. The GPUs {gpu_names} have"
f" a total available memory of {max_memories} respectively.")
summary.add_paragraph(
"The following table summarizes each configuration at the measurement"
" that optimizes the desired metrics under the given constraints in"
" decreasing order of throughput.")
summary.add_table(table=table)
return summary
def _build_summary_table(self, model_name, num_measurements, gpu_name):
"""
Creates a result table corresponding
to the best measurements for a particular
model
"""
summary_table = ResultTable(headers=[
'Model Config Name', 'Max Dynamic Batch Size', 'Instance Count',
'p99 Latency (ms)', 'Throughput (infer/sec)',
'Max GPU Memory Usage (MB)', 'Average GPU Utilization (%)'
],
title="Report Table")
best = True
summary_sentence = ""
for model_config, measurement in self._measurements[model_name]:
dynamic_batching_str = model_config.dynamic_batching_string()
dynamic_batch_phrase = "dynamic batching disabled" \
if dynamic_batching_str == "Disabled" \
else f"max dynamic batch size of {dynamic_batching_str}"
instance_group_str = model_config.instance_group_string()
if best:
model_config_dict = model_config.get_config()
platform = model_config_dict[
'backend'] if 'backend' in model_config_dict else model_config_dict[
'platform']
summary_sentence = (
f"In {num_measurements} measurements, "
f"{instance_group_str} model instances "
f"with {dynamic_batch_phrase} "
f"on platform {platform} delivers "
f"maximum throughput under the given constraints on GPU {gpu_name}."
)
best = False
row = [
model_config.get_field('name'), dynamic_batching_str,
instance_group_str,
measurement.get_value_of_metric('perf_latency').value(),
measurement.get_value_of_metric('perf_throughput').value(),
measurement.get_value_of_metric('gpu_used_memory').value(),
round(
measurement.get_value_of_metric('gpu_utilization').value(),
1)
]
summary_table.insert_row_by_index(row)
return summary_table, summary_sentence
def _get_gpu_stats(self, model_name):
"""
Gets names and memory infos
of GPUs used in best measurements
"""
gpu_names = []
max_memories = []
seen_gpus = set()
for _, measurement in self._measurements[model_name]:
for gpu in cuda.gpus:
if gpu.id in measurement.gpus_used(
) and gpu.id not in seen_gpus:
seen_gpus.add(gpu.id)
gpu_names.append((gpu.name).decode('ascii'))
with gpu:
mems = cuda.current_context().get_memory_info()
# convert bytes to GB
max_memories.append(round(mems.total / (2**30), 1))
return ','.join(gpu_names), ','.join(
[str(x) + ' GB' for x in max_memories])
def _build_constraint_strings(self):
"""
Constructs constraint strings to show the constraints under which
each model is being run.
"""
constraint_strs = {}
for model_name, model_constraints in ConstraintManager.get_constraints_for_all_models(
self._config).items():
strs = []
if model_constraints:
for metric, constraint in model_constraints.items():
metric_header = MetricsManager.get_metric_types(
[metric])[0].header(aggregation_tag='')
for constraint_type, constraint_val in constraint.items():
# String looks like 'Max p99 Latency : 99 ms'
metric_header_name = metric_header.rsplit(' ', 1)[0]
metric_unit = metric_header.rsplit(' ', 1)[1][1:-1]
strs.append(
f"{constraint_type.capitalize()} {metric_header_name} : {constraint_val} {metric_unit}"
)
constraint_strs[model_name] = ', '.join(strs)
return constraint_strs
```
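A hedged sketch of how `ReportManager` might be driven by the surrounding analyzer; the `config`, `results`, and `statistics` objects are assumed to be produced elsewhere and are not defined in this file:
```python
# Illustrative only -- `config`, `results`, and `statistics` are assumed
# to come from the analyzer run, not from this file.
report_manager = ReportManager(config)

for result in results:  # ModelResult objects, one per profiled model config
    report_manager.add_result(result)

# Writes <export_path>/reports/<model_name>/result_summary.pdf per model
report_manager.export_summary(statistics)
```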
#### File: model_analyzer/reports/report.py
```python
from abc import ABC, abstractmethod
class Report(ABC):
"""
    Defines the functions that need to
    be implemented by all report
    types. Rendered reports are
    HTML-based.
"""
@abstractmethod
def add_title(self, title):
"""
Parameters
----------
title: str
The title of the report
"""
@abstractmethod
def add_subheading(self, subheading):
"""
Parameters
----------
subheading: str
The subheading of the given section
"""
@abstractmethod
    def add_images(self, images, image_captions):
        """
        Parameters
        ----------
        images: list of str
            The full paths to the images to
            be added to this image row
image_captions : list of str
List of image captions
"""
@abstractmethod
def add_paragraph(self, paragraph):
"""
Parameters
----------
        paragraph: str
            The text to add to
            the report as a paragraph
"""
@abstractmethod
def write_report(self, filename):
"""
Write the report to disk with
filename
Parameters
----------
filename : str
The name of the report
"""
```
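The interface is format-agnostic; a toy sketch of a subclass that satisfies it by accumulating HTML fragments is shown below. This `SimpleHTMLReport` class is an illustrative assumption, not the repository's `PDFReport`:
```python
from .report import Report


class SimpleHTMLReport(Report):
    """Toy Report implementation that accumulates HTML fragments."""

    def __init__(self):
        self._body = []

    def add_title(self, title):
        self._body.append(f"<h1>{title}</h1>")

    def add_subheading(self, subheading):
        self._body.append(f"<h2>{subheading}</h2>")

    def add_images(self, images, image_captions):
        for src, caption in zip(images, image_captions):
            self._body.append(f'<figure><img src="{src}"/>'
                              f"<figcaption>{caption}</figcaption></figure>")

    def add_paragraph(self, paragraph):
        self._body.append(f"<p>{paragraph}</p>")

    def write_report(self, filename):
        with open(filename, 'w') as f:
            f.write("<html><body>" + "\n".join(self._body) + "</body></html>")
```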
#### File: qa/L0_unit_tests/count_tests.py
```python
import argparse
import os
import importlib
import inspect
import sys
sys.path.insert(0, '../../')
def args():
parser = argparse.ArgumentParser('test_counter')
parser.add_argument('--path',
help='Path to use for counting the tests',
type=str)
opt = parser.parse_args()
return opt
if __name__ == "__main__":
number_of_tests = 0
opt = args()
path = opt.path
for file_path in os.listdir(path):
        # All the test files start with "test_"
if file_path.startswith('test_'):
module_name = 'tests.' + file_path.split('.')[0]
module = importlib.import_module(module_name)
classes = inspect.getmembers(module, inspect.isclass)
for class_tuple in classes:
class_name = class_tuple[0]
class_object = class_tuple[1]
# All the test classes start with "Test"
if class_name.startswith('Test'):
methods = inspect.getmembers(class_object,
inspect.isroutine)
for method_tuple in methods:
method_name = method_tuple[0]
if method_name.startswith('test_'):
number_of_tests += 1
# Print the number of tests
print(number_of_tests)
```
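The script prints a single integer, so a QA harness can capture it with a subprocess call; the relative paths in this sketch are assumptions:
```python
import subprocess
import sys

# Assumed invocation from qa/L0_unit_tests; adjust --path to the tests dir.
completed = subprocess.run(
    [sys.executable, "count_tests.py", "--path", "../../tests"],
    capture_output=True, text=True, check=True)
print("number of tests discovered:", completed.stdout.strip())
```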
#### File: model_analyzer/tests/test_run_config_generator.py
```python
from .common import test_result_collector as trc
from .mocks.mock_config import MockConfig
from .mocks.mock_model_config import MockModelConfig
from .mocks.mock_client import MockTritonClientMethods
from model_analyzer.config.input.config import AnalyzerConfig
from model_analyzer.cli.cli import CLI
from model_analyzer.triton.client.grpc_client import TritonGRPCClient
from model_analyzer.config.run.run_search import RunSearch
from model_analyzer.config.run.run_config_generator \
import RunConfigGenerator
from unittest.mock import mock_open, patch
import yaml
class TestRunConfigGenerator(trc.TestResultCollector):
def _evaluate_config(self, args, yaml_content):
mock_config = MockConfig(args, yaml_content)
mock_config.start()
config = AnalyzerConfig()
cli = CLI(config)
cli.parse()
mock_config.stop()
return config
def test_parameter_sweep(self):
args = [
'model-analyzer', '--model-repository', 'cli_repository', '-f',
'path-to-config-file', '--model-names', 'vgg11',
'--run-config-search-disable'
]
yaml_content = ''
config = self._evaluate_config(args, yaml_content)
mock_model_config = MockModelConfig()
mock_model_config.start()
mock_client = MockTritonClientMethods()
mock_client.start()
client = TritonGRPCClient('localhost:8000')
run_search = RunSearch(16, 1, 16)
        # When there are no sweep parameters, the number of
        # run_configs should equal the number of distinct
        # sweep configurations per model
with patch('model_analyzer.triton.model.model_config.open',
mock_open()):
for model in config.model_names:
run_config_generator = RunConfigGenerator(model,
config,
client,
None,
None,
None,
run_search,
generate_only=True)
run_configs = run_config_generator.get_run_configs()
self.assertEqual(len(run_configs), 1)
mock_model_config.stop()
mock_client.stop()
yaml_content = yaml.dump({
'concurrency': [2, 3, 4],
'batch_sizes': [4, 5, 6]
})
config = self._evaluate_config(args, yaml_content)
mock_model_config = MockModelConfig()
mock_model_config.start()
mock_client.start()
with patch('model_analyzer.triton.model.model_config.open',
mock_open()):
for model in config.model_names:
run_config_generator = RunConfigGenerator(model,
config,
client,
None,
None,
None,
run_search,
generate_only=True)
run_configs = run_config_generator.get_run_configs()
self.assertEqual(len(run_configs), 9)
mock_model_config.stop()
mock_client.stop()
yaml_content = """
model_names:
-
vgg_16_graphdef:
model_config_parameters:
instance_group:
-
kind: KIND_GPU
count: 1
-
kind: KIND_CPU
count: 1
"""
config = self._evaluate_config(args, yaml_content)
mock_model_config = MockModelConfig()
mock_model_config.start()
mock_client.start()
with patch('model_analyzer.triton.model.model_config.open',
mock_open()):
for model in config.model_names:
run_config_generator = RunConfigGenerator(model,
config,
client,
None,
None,
None,
run_search,
generate_only=True)
run_configs = run_config_generator.get_run_configs()
self.assertEqual(len(run_configs), 1)
mock_model_config.stop()
mock_client.stop()
args = [
'model-analyzer', '--model-repository', 'cli_repository', '-f',
'path-to-config-file', '--run-config-search-disable'
]
yaml_content = """
model_names:
-
vgg_16_graphdef:
model_config_parameters:
instance_group:
-
-
kind: KIND_GPU
count: 1
-
-
kind: KIND_CPU
count: 1
"""
config = self._evaluate_config(args, yaml_content)
mock_model_config = MockModelConfig()
mock_client.start()
mock_model_config.start()
with patch('model_analyzer.triton.model.model_config.open',
mock_open()):
for model in config.model_names:
run_config_generator = RunConfigGenerator(model,
config,
client,
None,
None,
None,
run_search,
generate_only=True)
run_configs = run_config_generator.get_run_configs()
self.assertEqual(len(run_configs), 2)
mock_model_config.stop()
mock_client.stop()
yaml_content = """
model_names:
-
vgg_16_graphdef:
model_config_parameters:
instance_group:
-
-
kind: KIND_GPU
count: 1
-
kind: KIND_CPU
count: 1
"""
config = self._evaluate_config(args, yaml_content)
mock_model_config = MockModelConfig()
mock_model_config.start()
mock_client.start()
with patch('model_analyzer.triton.model.model_config.open',
mock_open()):
for model in config.model_names:
run_config_generator = RunConfigGenerator(model,
config,
client,
None,
None,
None,
run_search,
generate_only=True)
run_configs = run_config_generator.get_run_configs()
self.assertEqual(len(run_configs), 1)
mock_model_config.stop()
mock_client.stop()
yaml_content = """
model_names:
-
vgg_16_graphdef:
model_config_parameters:
instance_group:
-
-
kind: [KIND_GPU, KIND_CPU]
count: [1, 2, 3]
"""
config = self._evaluate_config(args, yaml_content)
mock_model_config = MockModelConfig()
mock_model_config.start()
mock_client.start()
with patch('model_analyzer.triton.model.model_config.open',
mock_open()):
for model in config.model_names:
run_config_generator = RunConfigGenerator(model,
config,
client,
None,
None,
None,
run_search,
generate_only=True)
run_configs = run_config_generator.get_run_configs()
self.assertEqual(len(run_configs), 6)
mock_model_config.stop()
mock_client.stop()
yaml_content = """
concurrency: [1, 2, 3]
batch_sizes: [2, 3, 4]
model_names:
-
vgg_16_graphdef:
model_config_parameters:
instance_group:
-
-
kind: [KIND_GPU, KIND_CPU]
count: [1, 2, 3]
"""
config = self._evaluate_config(args, yaml_content)
mock_model_config = MockModelConfig()
mock_model_config.start()
mock_client.start()
with patch('model_analyzer.triton.model.model_config.open',
mock_open()):
for model in config.model_names:
run_config_generator = RunConfigGenerator(model,
config,
client,
None,
None,
None,
run_search,
generate_only=True)
run_configs = run_config_generator.get_run_configs()
self.assertEqual(len(run_configs), 54)
instance_groups = []
for run_config in run_configs:
instance_group = run_config.model_config().get_config(
)['instance_group']
instance_groups.append(instance_group)
expected_instance_groups = [[{
'count': 1,
'kind': 'KIND_GPU'
}], [{
'count': 2,
'kind': 'KIND_GPU'
}], [{
'count': 3,
'kind': 'KIND_GPU'
}], [{
'count': 1,
'kind': 'KIND_CPU'
}], [{
'count': 2,
'kind': 'KIND_CPU'
}], [{
'count': 3,
'kind': 'KIND_CPU'
}]]
                self.assertEqual(len(instance_groups),
                                 9 * len(expected_instance_groups))
for instance_group in instance_groups:
self.assertIn(instance_group, expected_instance_groups)
mock_model_config.stop()
mock_client.stop()
yaml_content = """
concurrency: [1, 2, 3]
batch_sizes: [2, 3, 4]
model_names:
-
vgg_16_graphdef:
model_config_parameters:
instance_group:
-
kind: [KIND_GPU, KIND_CPU]
count: [1, 2, 3]
"""
config = self._evaluate_config(args, yaml_content)
mock_model_config = MockModelConfig()
mock_model_config.start()
mock_client.start()
with patch('model_analyzer.triton.model.model_config.open',
mock_open()):
for model in config.model_names:
run_config_generator = RunConfigGenerator(model,
config,
client,
None,
None,
None,
run_search,
generate_only=True)
run_configs = run_config_generator.get_run_configs()
self.assertEqual(len(run_configs), 54)
instance_groups = []
for run_config in run_configs:
instance_group = run_config.model_config().get_config(
)['instance_group']
instance_groups.append(instance_group)
expected_instance_groups = [[{
'count': 1,
'kind': 'KIND_GPU'
}], [{
'count': 2,
'kind': 'KIND_GPU'
}], [{
'count': 3,
'kind': 'KIND_GPU'
}], [{
'count': 1,
'kind': 'KIND_CPU'
}], [{
'count': 2,
'kind': 'KIND_CPU'
}], [{
'count': 3,
'kind': 'KIND_CPU'
}]]
                self.assertEqual(len(instance_groups),
                                 9 * len(expected_instance_groups))
for instance_group in instance_groups:
self.assertIn(instance_group, expected_instance_groups)
mock_model_config.stop()
mock_client.stop()
yaml_content = """
concurrency: [1, 2, 3]
batch_sizes: [2, 3, 4]
model_names:
-
vgg_16_graphdef:
model_config_parameters:
dynamic_batching:
preferred_batch_size: [ 4, 8 ]
max_queue_delay_microseconds: 100
instance_group:
-
kind: [KIND_GPU, KIND_CPU]
count: [1, 2, 3]
"""
config = self._evaluate_config(args, yaml_content)
mock_model_config = MockModelConfig()
mock_model_config.start()
mock_client.start()
with patch('model_analyzer.triton.model.model_config.open',
mock_open()):
for model in config.model_names:
run_config_generator = RunConfigGenerator(model,
config,
client,
None,
None,
None,
run_search,
generate_only=True)
run_configs = run_config_generator.get_run_configs()
self.assertEqual(len(run_configs), 54)
instance_groups = []
for run_config in run_configs:
instance_group = run_config.model_config().get_config(
)['instance_group']
instance_groups.append(instance_group)
expected_instance_groups = 9 * [[{
'count': 1,
'kind': 'KIND_GPU'
}], [{
'count': 2,
'kind': 'KIND_GPU'
}], [{
'count': 3,
'kind': 'KIND_GPU'
}], [{
'count': 1,
'kind': 'KIND_CPU'
}], [{
'count': 2,
'kind': 'KIND_CPU'
}], [{
'count': 3,
'kind': 'KIND_CPU'
}]]
self.assertEqual(len(expected_instance_groups),
len(instance_groups))
for instance_group in instance_groups:
self.assertIn(instance_group, expected_instance_groups)
mock_model_config.stop()
mock_client.stop()
yaml_content = """
concurrency: [1, 2, 3]
batch_sizes: [2, 3, 4]
model_names:
-
vgg_16_graphdef:
model_config_parameters:
dynamic_batching:
preferred_batch_size: [[ 4, 8 ], [ 5, 6 ]]
max_queue_delay_microseconds: [100, 200]
instance_group:
-
kind: [KIND_GPU, KIND_CPU]
count: [1, 2, 3]
"""
config = self._evaluate_config(args, yaml_content)
mock_model_config = MockModelConfig()
mock_model_config.start()
mock_client.start()
with patch('model_analyzer.triton.model.model_config.open',
mock_open()):
for model in config.model_names:
run_config_generator = RunConfigGenerator(model,
config,
client,
None,
None,
None,
run_search,
generate_only=True)
run_configs = run_config_generator.get_run_configs()
self.assertEqual(len(run_configs), 216)
instance_groups = []
dynamic_batchings = []
for run_config in run_configs:
instance_group = run_config.model_config().get_config(
)['instance_group']
dynamic_batching = run_config.model_config().get_config(
)['dynamic_batching']
dynamic_batchings.append(dynamic_batching)
instance_groups.append(instance_group)
expected_instance_groups = [[{
'count': 1,
'kind': 'KIND_GPU'
}], [{
'count': 2,
'kind': 'KIND_GPU'
}], [{
'count': 3,
'kind': 'KIND_GPU'
}], [{
'count': 1,
'kind': 'KIND_CPU'
}], [{
'count': 2,
'kind': 'KIND_CPU'
}], [{
'count': 3,
'kind': 'KIND_CPU'
}]]
expected_dynamic_batchings = [{
'preferred_batch_size': [4, 8],
'max_queue_delay_microseconds':
'100'
}, {
'preferred_batch_size': [4, 8],
'max_queue_delay_microseconds':
'200'
}, {
'preferred_batch_size': [5, 6],
'max_queue_delay_microseconds':
'100'
}, {
'preferred_batch_size': [5, 6],
'max_queue_delay_microseconds':
'200'
}]
self.assertEqual(
len(instance_groups), 9 * len(expected_instance_groups) *
len(expected_dynamic_batchings))
for instance_group in instance_groups:
self.assertIn(instance_group, expected_instance_groups)
for dynamic_batching in dynamic_batchings:
self.assertIn(dynamic_batching, expected_dynamic_batchings)
mock_model_config.stop()
mock_client.stop()
yaml_content = """
model_names:
-
vgg_16_graphdef:
model_config_parameters:
dynamic_batching:
-
preferred_batch_size: [ 4, 8 ]
max_queue_delay_microseconds: 100
-
preferred_batch_size: [ 5, 6 ]
max_queue_delay_microseconds: 200
instance_group:
-
kind: [KIND_GPU, KIND_CPU]
count: [1, 2, 3]
"""
config = self._evaluate_config(args, yaml_content)
mock_model_config = MockModelConfig()
mock_model_config.start()
mock_client.start()
with patch('model_analyzer.triton.model.model_config.open',
mock_open()):
for model in config.model_names:
run_config_generator = RunConfigGenerator(model,
config,
client,
None,
None,
None,
run_search,
generate_only=True)
run_configs = run_config_generator.get_run_configs()
self.assertEqual(len(run_configs), 12)
instance_groups = []
dynamic_batchings = []
for run_config in run_configs:
instance_group = run_config.model_config().get_config(
)['instance_group']
dynamic_batching = run_config.model_config().get_config(
)['dynamic_batching']
dynamic_batchings.append(dynamic_batching)
instance_groups.append(instance_group)
expected_instance_groups = [[{
'count': 1,
'kind': 'KIND_GPU'
}], [{
'count': 2,
'kind': 'KIND_GPU'
}], [{
'count': 3,
'kind': 'KIND_GPU'
}], [{
'count': 1,
'kind': 'KIND_CPU'
}], [{
'count': 2,
'kind': 'KIND_CPU'
}], [{
'count': 3,
'kind': 'KIND_CPU'
}]]
expected_dynamic_batchings = [{
'preferred_batch_size': [4, 8],
'max_queue_delay_microseconds':
'100'
}, {
'preferred_batch_size': [5, 6],
'max_queue_delay_microseconds':
'200'
}]
self.assertEqual(
len(instance_groups),
len(expected_instance_groups) *
len(expected_dynamic_batchings))
for instance_group in instance_groups:
self.assertIn(instance_group, expected_instance_groups)
for dynamic_batching in dynamic_batchings:
self.assertIn(dynamic_batching, expected_dynamic_batchings)
mock_model_config.stop()
mock_client.stop()
```
#### File: model_analyzer/tests/test_triton_server.py
```python
import unittest
from .mocks.mock_server_docker import MockServerDockerMethods
from .mocks.mock_server_local import MockServerLocalMethods
from .common import test_result_collector as trc
from model_analyzer.triton.server.server_factory import TritonServerFactory
from model_analyzer.triton.server.server_config import TritonServerConfig
from model_analyzer.model_analyzer_exceptions \
import TritonModelAnalyzerException
# Test parameters
MODEL_REPOSITORY_PATH = 'test_repo'
TRITON_LOCAL_BIN_PATH = 'test_bin_path/tritonserver'
TRITON_DOCKER_BIN_PATH = 'tritonserver'
TRITON_IMAGE = 'test_image'
CONFIG_TEST_ARG = 'exit-on-error'
CLI_TO_STRING_TEST_ARGS = {
'allow-grpc': True,
'min-supported-compute-capability': 7.5,
'metrics-port': 8000,
'model-repository': MODEL_REPOSITORY_PATH
}
class TestTritonServerMethods(trc.TestResultCollector):
def setUp(self):
# Mock
self.server_docker_mock = MockServerDockerMethods()
self.server_local_mock = MockServerLocalMethods()
self.server_docker_mock.start()
self.server_local_mock.start()
# server setup
self.server = None
def test_server_config(self):
# Create a TritonServerConfig
server_config = TritonServerConfig()
server_config['model-repository'] = MODEL_REPOSITORY_PATH
# Check config initializations
self.assertIsNone(server_config[CONFIG_TEST_ARG],
msg="Server config had unexpected initial"
f"value for {CONFIG_TEST_ARG}")
# Set value
server_config[CONFIG_TEST_ARG] = True
# Test get again
self.assertTrue(server_config[CONFIG_TEST_ARG],
msg=f"{CONFIG_TEST_ARG} was not set")
# Try to set an unsupported config argument, expect failure
with self.assertRaises(TritonModelAnalyzerException,
msg="Expected exception on trying to set"
"unsupported argument in Triton server"
"config"):
server_config['dummy'] = 1
# Reset test arg
server_config[CONFIG_TEST_ARG] = None
# Finally set a couple of args and then check the cli string
for arg, value in CLI_TO_STRING_TEST_ARGS.items():
server_config[arg] = value
cli_string = server_config.to_cli_string()
for argstring in cli_string.split():
# Parse the created string
arg, value = argstring.split('=')
arg = arg[2:]
# Make sure each parsed arg was in test dict
self.assertIn(arg,
CLI_TO_STRING_TEST_ARGS,
msg=f"CLI string contained unknown argument: {arg}")
# Make sure parsed value is the one from dict, check type too
test_value = CLI_TO_STRING_TEST_ARGS[arg]
self.assertEqual(
test_value,
type(test_value)(value),
msg=f"CLI string contained unknown value: {value}")
def test_create_server(self):
# Create a TritonServerConfig
server_config = TritonServerConfig()
server_config['model-repository'] = MODEL_REPOSITORY_PATH
gpus = ['all']
# Run for both types of environments
self.server = TritonServerFactory.create_server_docker(
image=TRITON_IMAGE, config=server_config, gpus=gpus)
self.server = TritonServerFactory.create_server_local(
path=TRITON_LOCAL_BIN_PATH, config=server_config)
# Try to create a server without specifying model repository and expect
# error
server_config['model-repository'] = None
with self.assertRaises(
AssertionError,
msg="Expected AssertionError for trying to create"
"server without specifying model repository."):
self.server = TritonServerFactory.create_server_docker(
image=TRITON_IMAGE, config=server_config, gpus=gpus)
with self.assertRaises(
AssertionError,
msg="Expected AssertionError for trying to create"
"server without specifying model repository."):
self.server = TritonServerFactory.create_server_local(
path=TRITON_LOCAL_BIN_PATH, config=server_config)
def test_start_stop_gpus(self):
# Create a TritonServerConfig
server_config = TritonServerConfig()
server_config['model-repository'] = MODEL_REPOSITORY_PATH
gpus = ['all']
# Create server in docker, start , wait, and stop
self.server = TritonServerFactory.create_server_docker(
image=TRITON_IMAGE, config=server_config, gpus=gpus)
# Start server check that mocked api is called
self.server.start()
self.server_docker_mock.assert_server_process_start_called_with(
TRITON_DOCKER_BIN_PATH + ' ' + server_config.to_cli_string(),
MODEL_REPOSITORY_PATH, TRITON_IMAGE, 8000, 8001, 8002)
self.server_docker_mock.raise_exception_on_container_run()
with self.assertRaises(TritonModelAnalyzerException):
self.server.start()
self.server_docker_mock.stop_raise_exception_on_container_run()
# Stop container and check api calls
self.server.stop()
self.server_docker_mock.assert_server_process_terminate_called()
# Create local server which runs triton as a subprocess
self.server = TritonServerFactory.create_server_local(
path=TRITON_LOCAL_BIN_PATH, config=server_config)
# Check that API functions are called
self.server.start()
self.server_local_mock.assert_server_process_start_called_with(cmd=[
TRITON_LOCAL_BIN_PATH, '--model-repository', MODEL_REPOSITORY_PATH
])
self.server.stop()
self.server_local_mock.assert_server_process_terminate_called()
def test_get_logs(self):
server_config = TritonServerConfig()
server_config['model-repository'] = MODEL_REPOSITORY_PATH
gpus = ['all']
# Check docker server logs
self.server = TritonServerFactory.create_server_docker(
image=TRITON_IMAGE, config=server_config, gpus=gpus)
self.server.start()
self.server.stop()
self.server_docker_mock.assert_server_process_terminate_called()
self.assertEqual(self.server.logs(), "Triton Server Test Log")
# Create local server logs
self.server = TritonServerFactory.create_server_local(
path=TRITON_LOCAL_BIN_PATH, config=server_config)
self.server.start()
self.server.stop()
self.server_local_mock.assert_server_process_terminate_called()
self.assertEqual(self.server.logs(), "Triton Server Test Log")
def test_cpu_stats(self):
server_config = TritonServerConfig()
server_config['model-repository'] = MODEL_REPOSITORY_PATH
gpus = ['all']
# Test local server cpu_stats
self.server = TritonServerFactory.create_server_local(
path=TRITON_LOCAL_BIN_PATH, config=server_config)
self.server.start()
_, _ = self.server.cpu_stats()
self.server_local_mock.assert_cpu_stats_called()
self.server.stop()
# Test docker server cpu stats
self.server = TritonServerFactory.create_server_docker(
image=TRITON_IMAGE, config=server_config, gpus=gpus)
self.server.start()
# The following needs to be called as it resets exec_run return value
self.server_docker_mock.assert_server_process_start_called_with(
TRITON_DOCKER_BIN_PATH + ' ' + server_config.to_cli_string(),
MODEL_REPOSITORY_PATH, TRITON_IMAGE, 8000, 8001, 8002)
_, _ = self.server.cpu_stats()
self.server_docker_mock.assert_cpu_stats_called()
self.server.stop()
def tearDown(self):
# In case test raises exception
if self.server is not None:
self.server.stop()
# Stop mocking
self.server_docker_mock.stop()
self.server_local_mock.stop()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "Jishnu70055/ev_gn",
"score": 2
} |
#### File: doctype/trip_sheet/trip_sheet.py
```python
from os import name
import frappe
from frappe.model.document import Document
def create_sales_invoice(self, data, gst_template):
if data.customer_rate_type == 'Rent':
rent = float(data.customer_rate/data.customer_quantity)
if gst_template == 'GST 5% - ET':
charge_type = "On Net Total"
account_head = "CGST - ET"
description = "CGST"
rate = 2.5
charge_type_2 = "On Net Total"
account_head_2 = "SGST - ET"
description_2 = "SGST"
rate_2 = 2.5
sales_invoice = frappe.get_doc({
"doctype":"Sales Invoice",
"naming_series" : "SRET-.YY.-",
"customer":data.customer,
"site":data.customer_site,
"trip_entry_date" : self.date ,
"customer_group":'All Customer Groups',
"no_of_trips": data.trip,
"vehicle_number": self.vehicle,
"vehicle": self.vehicle,
"cost_center": "Vehicle - ET",
"trip_id": self.name,
"taxes_and_charges": gst_template,
"bill_of_lading":data.bill_of_lading,
"invoice_no":data.invoice_no,
"dispatch_doc_no":data.dispatch_doc_no,
"taxes": [{
"charge_type": charge_type,
"account_head": account_head,
"description": description,
"rate": rate
},
{
"charge_type": charge_type_2,
"account_head": account_head_2,
"description": description_2,
"rate": rate_2
}]
})
sales_invoice.append("items",{
"item_code":data.item,
"qty": data.trip * data.customer_quantity,
"uom": data.uom,
"rate": rent,
"amount":data.trip * data.customer_amount,
})
sales_invoice.save()
for tax_data in sales_invoice.taxes:
if tax_data.account_head == "CGST - ET":
cgst_amount = tax_data.tax_amount
cgst_rate = tax_data.rate
cgst_total = tax_data.total
if tax_data.account_head == "SGST - ET":
sgst_amount = tax_data.tax_amount
sgst_rate = tax_data.rate
sgst_total = tax_data.total
delivery_challan = frappe.get_doc({
"doctype":"Delivery Challan",
"challan_date":sales_invoice.trip_entry_date,
"customer":data.customer,
"tax_invoice_number":data.invoice_no,
"vehicle_number":self.vehicle,
"site":data.customer_site,
"total_amount":sales_invoice.base_total,
"total_tax":sales_invoice.base_total_taxes_and_charges,
"total_value":sales_invoice.base_rounded_total,
"sales_taxes_and_charges": [{
"charge_type": charge_type,
"account_head": account_head,
"description": description,
"rate": rate,
"tax_amount":cgst_amount,
"total":cgst_total
},
{
"charge_type": charge_type_2,
"account_head": account_head_2,
"description": description_2,
"rate": rate_2,
"tax_amount":sgst_amount,
"total":sgst_total
}],
"delivery_challan_item":[{
"item":data.item,
"quantity":data.trip * data.customer_quantity,
"rate":rent,
"taxable_value":data.trip * data.customer_amount,
}]
})
delivery_challan.save()
# delivery_challan.append("items_1",{
# "item_code":data.item,
# "qty": data.trip * data.customer_quantity,
# "uom": data.uom,
# "rate": data.customer_rate,
# "amount":data.trip * data.customer_amount,
# })
# delivery_challan.append("sales_taxes_and_charges": {
# "charge_type": charge_type,
# "account_head": account_head,
# "description": description,
# "rate": rate
# },
# {
# "charge_type": charge_type_2,
# "account_head": account_head_2,
# "description": description_2,
# "rate": rate_2
# })
delivery_challan.save()
delivery_challan.challan_no = delivery_challan.name
delivery_challan.sales_invoice_id = sales_invoice.name
delivery_challan.save()
sales_invoice.challan_no = delivery_challan.name
sales_invoice.submit()
delivery_challan.submit()
return sales_invoice.name
else:
sales_invoice = frappe.get_doc({
"doctype":"Sales Invoice",
"customer":data.customer,
"site":data.customer_site,
"trip_entry_date" : self.date ,
"customer_group":'All Customer Groups',
"no_of_trips": data.trip,
"vehicle_number": self.vehicle,
"vehicle": self.vehicle,
"cost_center": "Vehicle - ET",
"trip_id": self.name,
})
sales_invoice.append("items",{
"item_code":data.item,
"qty": data.trip * data.customer_quantity,
"uom": data.uom,
"rate": rent,
"amount":data.trip * data.customer_amount,
})
sales_invoice.save()
sales_invoice.submit()
return sales_invoice.name
else :
if gst_template == 'GST 5% - ET':
charge_type = "On Net Total"
account_head = "CGST - ET"
description = "CGST"
rate = 2.5
charge_type_2 = "On Net Total"
account_head_2 = "SGST - ET"
description_2 = "SGST"
rate_2 = 2.5
sales_invoice = frappe.get_doc({
"doctype":"Sales Invoice",
"naming_series" : "SRET-.YY.-",
"customer":data.customer,
"site":data.customer_site,
"trip_entry_date" : self.date ,
"customer_group":'All Customer Groups',
"no_of_trips": data.trip,
"vehicle_number": self.vehicle,
"vehicle": self.vehicle,
"cost_center": "Vehicle - ET",
"trip_id": self.name,
"taxes_and_charges": gst_template,
"bill_of_lading":data.bill_of_lading,
"invoice_no":data.invoice_no,
"dispatch_doc_no":data.dispatch_doc_no,
"taxes": [{
"charge_type": charge_type,
"account_head": account_head,
"description": description,
"rate": rate
},
{
"charge_type": charge_type_2,
"account_head": account_head_2,
"description": description_2,
"rate": rate_2
}]
})
sales_invoice.append("items",{
"item_code":data.item,
"qty": data.trip * data.customer_quantity,
"uom": data.uom,
"rate": data.customer_rate,
"amount":data.trip * data.customer_amount,
})
sales_invoice.save()
for tax_data in sales_invoice.taxes:
if tax_data.account_head == "CGST - ET":
cgst_amount = tax_data.tax_amount
cgst_rate = tax_data.rate
cgst_total = tax_data.total
if tax_data.account_head == "SGST - ET":
sgst_amount = tax_data.tax_amount
sgst_rate = tax_data.rate
sgst_total = tax_data.total
delivery_challan = frappe.get_doc({
"doctype":"Delivery Challan",
"challan_date":sales_invoice.trip_entry_date,
"customer":data.customer,
"tax_invoice_number":data.invoice_no,
"vehicle_number":self.vehicle,
"site":data.customer_site,
"total_amount":sales_invoice.base_total,
"total_tax":sales_invoice.base_total_taxes_and_charges,
"total_value":sales_invoice.base_rounded_total,
"sales_taxes_and_charges": [{
"charge_type": charge_type,
"account_head": account_head,
"description": description,
"rate": rate,
"tax_amount":cgst_amount,
"total":cgst_total
},
{
"charge_type": charge_type_2,
"account_head": account_head_2,
"description": description_2,
"rate": rate_2,
"tax_amount":sgst_amount,
"total":sgst_total
}],
"delivery_challan_item":[{
"item":data.item,
"quantity":data.trip * data.customer_quantity,
"rate":data.customer_rate,
"taxable_value":data.trip * data.customer_amount,
}]
})
delivery_challan.save()
# delivery_challan.append("items_1",{
# "item_code":data.item,
# "qty": data.trip * data.customer_quantity,
# "uom": data.uom,
# "rate": data.customer_rate,
# "amount":data.trip * data.customer_amount,
# })
# delivery_challan.append("sales_taxes_and_charges": {
# "charge_type": charge_type,
# "account_head": account_head,
# "description": description,
# "rate": rate
# },
# {
# "charge_type": charge_type_2,
# "account_head": account_head_2,
# "description": description_2,
# "rate": rate_2
# })
delivery_challan.save()
delivery_challan.challan_no = delivery_challan.name
delivery_challan.sales_invoice_id = sales_invoice.name
delivery_challan.save()
sales_invoice.challan_no = delivery_challan.name
sales_invoice.submit()
delivery_challan.submit()
return sales_invoice.name
else:
sales_invoice = frappe.get_doc({
"doctype":"Sales Invoice",
"customer":data.customer,
"site":data.customer_site,
"trip_entry_date" : self.date ,
"customer_group":'All Customer Groups',
"no_of_trips": data.trip,
"vehicle_number": self.vehicle,
"vehicle": self.vehicle,
"cost_center": "Vehicle - ET",
"trip_id": self.name,
})
sales_invoice.append("items",{
"item_code":data.item,
"qty": data.trip * data.customer_quantity,
"uom": data.uom,
"rate": data.customer_rate,
"amount":data.trip * data.customer_amount,
})
sales_invoice.save()
sales_invoice.submit()
return sales_invoice.name
def create_purchase_invoice(supplier, site, rate, quantity, amount, trip, date, item, uom, vehicle, name):
purchase_invoice = frappe.get_doc({
"doctype":"Purchase Invoice",
"supplier": supplier,
"supplier_site": site,
"date": date,
"total": trip * amount,
"no_of_trips": trip,
"trip_entry_date" : date,
"vehicle": vehicle,
"cost_center": "Vehicle - ET",
"paid_amount": trip * amount,
"trip_id": name
})
purchase_invoice.append("items",{
"item_code": item,
"qty":trip * quantity,
"rate":rate,
"amount":trip * amount,
"uom": uom
})
purchase_invoice.submit()
return purchase_invoice.name
def create_payment_entry(self, data, sales_invoice, amount, mode):
payment_entry = frappe.get_doc({
"doctype": "Payment Entry",
"mode_of_payment": mode,
"party_type": "Customer",
"party": data.customer,
"paid_to": "Cash - ET",
"paid_amount": amount,
"received_amount": amount,
"vehicle": self.vehicle,
"cost_center": "Vehicle - ET"
})
payment_entry.append("references",{
"reference_doctype": "Sales Invoice",
"reference_name": sales_invoice,
"allocated_amount": amount
})
payment_entry.insert()
payment_entry.submit()
return payment_entry
def create_expense(data, self):
bata_amount = data.bata_amount * data.trip
driver = frappe.get_doc('Driver',data.driver)
driver_employee = driver.employee
if bata_amount != 0 :
expense = frappe.get_doc({
'doctype': 'Journal Entry',
'posting_date': self.date,
"accounts":[
{
"account": "<NAME> - ET",
"party_type" : "Employee",
"party" : driver_employee,
"credit_in_account_currency": bata_amount,
"vehicle": self.vehicle,
"cost_center": "Vehicle - ET"
},
{
"account": "Transit Charge - ET",
"debit_in_account_currency": bata_amount,
"vehicle": self.vehicle,
"cost_center": "Vehicle - ET"
}
]
})
expense.insert()
expense.submit()
def calculate_net_balance(self, data):
vehicle = frappe.get_doc('Vehicle', self.vehicle)
for row in vehicle.vehicle_owner:
share_amount_trips = data.net_total * data.trip
share_amount = share_amount_trips * row.share_percentage / 100
journal_entry = frappe.get_doc({
'doctype': 'Journal Entry',
'posting_date': self.date,
"accounts":[
{
"account": "Cost of Vehicle Rent - ET",
"debit_in_account_currency": share_amount,
"vehicle": self.vehicle,
"cost_center": "Vehicle - ET"
},
{
"account": "Vehicle Owners - ET",
"party_type": "Supplier",
"party": row.share_holder,
"credit_in_account_currency": share_amount,
"vehicle": self.vehicle,
"cost_center": "Vehicle - ET"
}
]
})
journal_entry.insert()
journal_entry.submit()
class TripSheet(Document):
# calculating total balance of vehicle
def validate(self):
self.total = 0
for row in self.trip_details:
self.total = self.total + row.net_total
if row.supplier_partner_amount:
row.total_supplier_amount = row.supplier_partner_amount + row.supplier_amount
def before_submit(self):
for data in self.trip_details:
if data.gst_percentage == 5:
gst_template = "GST 5% - ET"
sales_invoice = create_sales_invoice(self, data, gst_template)
else:
gst_template = None
sales_invoice = create_sales_invoice(self, data, gst_template)
data.sales_invoice_id = sales_invoice
purchase_invoice = create_purchase_invoice(data.supplier, data.supplier_site, data.supplier_rate, data.supplier_quantity, data.supplier_amount, data.trip, self.date, data.item, data.uom, self.vehicle, self.name)
data.purchase_invoice_id = purchase_invoice
if data.supplier_partner:
purchase_invoice_partner = create_purchase_invoice(data.supplier_partner, data.supplier_site, data.supplier_partner_rate, data.supplier_partner_quantity, data.supplier_partner_amount, data.trip, self.date, data.item, data.uom, self.vehicle, self.name)
data.partner_purchase_invoice_id = purchase_invoice_partner
if data.paid_amount:
amount_paid = data.paid_amount
payment_mode = data.payment_method
payment_entry = create_payment_entry(self, data, sales_invoice, amount_paid, payment_mode)
expense = create_expense(data, self)
balance = calculate_net_balance(self, data)
``` |
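`create_sales_invoice` repeats the same CGST/SGST rows in both the Rent and non-Rent branches. A hedged refactoring sketch follows; the helper name is an assumption, while the account heads and the 2.5% + 2.5% split are taken from the code above. Both branches could then pass `build_gst_taxes(gst_template)` into the Sales Invoice and Delivery Challan dictionaries instead of repeating the literals:
```python
def build_gst_taxes(gst_template):
    """Return the CGST/SGST tax rows for a template, or an empty list.

    Helper name and structure are illustrative assumptions; the account
    heads and 2.5% rates come from create_sales_invoice above.
    """
    if gst_template == 'GST 5% - ET':
        return [
            {"charge_type": "On Net Total", "account_head": "CGST - ET",
             "description": "CGST", "rate": 2.5},
            {"charge_type": "On Net Total", "account_head": "SGST - ET",
             "description": "SGST", "rate": 2.5},
        ]
    return []
```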
{
"source": "jishnu7/js.io",
"score": 2
} |
#### File: tests/functional/test_compile_hookbox.py
```python
import os
from subprocess import PIPE
from subprocess import Popen
from pyjsiocompile import compile
class TestGetSource(object):
def setup(self):
self.compile_args = ['tests/data/hookbox.pkg', '--vv',
'-e', 'node',
'-o', 'tests/data/hookbox.compiled.js', '-d']
self.smoke_test_script_path = 'tests/data/exercise_hookbox.js'
def teardown(self):
if os.path.exists(self.smoke_test_script_path):
os.remove(self.smoke_test_script_path)
def test_hookbox_no_smoke_for_remote_jsio(self):
compile.main(self.compile_args)
hookbox_exercising_js = """
var sys = require('sys');
require.paths.push('tests/data');
require('hookbox.compiled');
"""
smoke_test_script_file = file(self.smoke_test_script_path, 'w')
smoke_test_script_file.write(hookbox_exercising_js)
smoke_test_script_file.close()
run_command = ['node', self.smoke_test_script_path]
expected_result = ""
output, error = \
Popen(run_command, stdout=PIPE, stderr=PIPE).communicate()
assert not error, error
assert expected_result == output, repr(output)
def test_hookbox_no_smoke_for_local_jsio(self):
compile.main(self.compile_args + ['-j', 'tests/data/jsio'])
hookbox_exercising_js = """
var sys = require('sys');
require.paths.push('tests/data');
require('hookbox.compiled');
"""
smoke_test_script_file = file(self.smoke_test_script_path, 'w')
smoke_test_script_file.write(hookbox_exercising_js)
smoke_test_script_file.close()
run_command = ['node', self.smoke_test_script_path]
expected_result = ""
output, error = \
Popen(run_command, stdout=PIPE, stderr=PIPE).communicate()
print output
assert not error, error
assert expected_result == output, repr(output)
def test_hookbox_bad_import_raises_error(self):
compile.main(self.compile_args)
hookbox_exercising_js = """
require('nonexistent.js')
"""
smoke_test_script_file = file(self.smoke_test_script_path, 'w')
smoke_test_script_file.write(hookbox_exercising_js)
smoke_test_script_file.close()
run_command = ['node', self.smoke_test_script_path]
expected_result = ""
output, error = \
Popen(run_command, stdout=PIPE, stderr=PIPE).communicate()
assert error
```
#### File: pyjsiocompile/tests/test_main.py
```python
import os
import mock
from pyjsiocompile import compile
class TestMain(object):
def setup(self):
self.jsio_path = \
os.path.join(os.path.dirname(__file__), 'data', 'hookbox.pkg')
self.old_compile_source = compile.compile_source
compile.compile_source = mock.Mock()
def teardown(self):
compile.compile_source = self.old_compile_source
def test_invalid_position_arguments(self):
try:
compile.main([])
raise Exception("an exception should have been raised")
except SystemExit, exc:
assert "1" == str(exc), str(exc)
def test_valid_position_arguments(self):
compile.compile_source.return_value = "xxxx"
compile.main([self.jsio_path])
assert compile.compile_source.called
def test_path_for_jsio_js(self):
class Options(object):
jsio = 'jsio'
assert 'jsio/jsio.js' == compile.path_for_module('jsio',
prefix='jsio'), \
compile.path_for_module('jsio', prefix='jsio')
def test_path_for_jsio_csp_client_js(self):
class Options(object):
jsio = 'jsio'
assert 'jsio/csp/client.js' == \
compile.path_for_module('jsio.csp.client', prefix='jsio'), \
compile.path_for_module('jsio', prefix='jsio')
def test_join_module_path_jsio_csp_client_jsio(self):
module_path = compile.joinModulePath('jsio.csp.client', 'jsio')
assert 'jsio' == module_path, module_path
def test_join_module_path_jsio_csp_client_full_path(self):
module_path = compile.joinModulePath('jsio', 'jsio.csp.client')
assert 'jsio.csp.client' == module_path, module_path
def test_join_module_path_dots_and_external_pkg(self):
module_path = compile.joinModulePath('jsio.csp.client', '..utf8')
assert 'jsio.utf8' == module_path, module_path
``` |