{
    "source": "JDESLOIRES/eo-flow",
    "score": 2
}
#### File: eoflow/base/custom_training.py
```python
import tensorflow as tf
from sklearn.utils import shuffle
import numpy as np
import pickle
import os
import time
from . import Configurable
# class BaseCustomModel(tf.keras.Model, Configurable):
@tf.function
def train_step(model,
optimizer,
loss,
metric,
train_ds):
# pb_i = Progbar(len(list(train_ds)), stateful_metrics='acc')
for x_batch_train, y_batch_train in train_ds: # tqdm
with tf.GradientTape() as tape:
y_preds = model(x_batch_train,
training=True)
cost = loss(y_batch_train, y_preds)
grads = tape.gradient(cost, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
metric.update_state(y_batch_train, y_preds)
# pb_i.add(1, values=[metric.result().numpy()])
# Function to run the validation step.
@tf.function
def val_step(model, val_ds, val_acc_metric):
for x, y in val_ds:
y_preds = model(x, training=False)
val_acc_metric.update_state(y, y_preds)
def training_loop(model,
x_train, y_train,
x_val, y_val,
batch_size,
num_epochs, optimizer,
path_directory,
saving_step=10,
loss=tf.keras.losses.Huber(),
metric=tf.keras.metrics.MeanSquaredError(),
function=np.min):
train_acc_results, val_acc_results = ([np.inf] for i in range(2))
val_ds = tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(8)
for epoch in range(num_epochs + 1):
x_train, y_train = shuffle(x_train, y_train)
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)
train_step(model,
optimizer,
loss,
metric,
train_ds)
# End epoch
metric_result = metric.result().numpy()
train_acc_results.append(metric_result)
metric.reset_states()
if epoch % saving_step == 0:
val_step(model, val_ds, metric)
val_metric_result = metric.result().numpy()
print(
"Epoch {0}: Train metric {1}, Val metric {2}".format(
str(epoch),
str(metric_result),
str(round(val_metric_result, 4)),
))
if val_metric_result < function(val_acc_results):
model.save_weights(os.path.join(path_directory, 'model'))
val_acc_results.append(val_metric_result)
metric.reset_states()
# History of the training
losses = dict(train_acc_results=train_acc_results,
val_acc_results=val_acc_results
)
with open(os.path.join(path_directory, 'history.pickle'), 'wb') as d:
pickle.dump(losses, d, protocol=pickle.HIGHEST_PROTOCOL)
```
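Below is a minimal usage sketch of `training_loop`, assuming the module above is importable as `eoflow.base.custom_training`; the toy model, array shapes and output directory are illustrative placeholders, not part of the repository.
```python
# Hypothetical usage sketch: toy model, random data, placeholder output directory.
import os
import numpy as np
import tensorflow as tf

from eoflow.base.custom_training import training_loop  # assuming the file above is importable

# Toy regressor: flatten a (T, D) series and predict a single value.
model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(30, 10)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(1, activation='linear'),
])

x_train = np.random.rand(256, 30, 10).astype('float32')
y_train = np.random.rand(256, 1).astype('float32')
x_val = np.random.rand(64, 30, 10).astype('float32')
y_val = np.random.rand(64, 1).astype('float32')

out_dir = '/tmp/toy_model'  # placeholder path
os.makedirs(out_dir, exist_ok=True)

training_loop(model,
              x_train, y_train,
              x_val, y_val,
              batch_size=32,
              num_epochs=20,
              optimizer=tf.keras.optimizers.Adam(1e-3),
              path_directory=out_dir,
              saving_step=5,
              loss=tf.keras.losses.Huber(),
              metric=tf.keras.metrics.MeanSquaredError(),
              function=np.min)
```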
#### File: eoflow/models/layers.py
```python
import tensorflow as tf
from tensorflow.keras.layers import Activation, SpatialDropout1D, Lambda, UpSampling2D, AveragePooling2D
from tensorflow.keras.layers import Conv1D, BatchNormalization, LayerNormalization
class ResidualBlock(tf.keras.layers.Layer):
""" Code taken from keras-tcn implementation on available on
https://github.com/philipperemy/keras-tcn/blob/master/tcn/tcn.py#L140 """
def __init__(self,
dilation_rate,
nb_filters,
kernel_size,
padding,
activation='relu',
dropout_rate=0,
kernel_initializer='he_normal',
kernel_regularizer = 0,
use_batch_norm=False,
use_layer_norm=False,
last_block=True,
**kwargs):
""" Defines the residual block for the WaveNet TCN
:param dilation_rate: The dilation power of 2 we are using for this residual block
:param nb_filters: The number of convolutional filters to use in this block
:param kernel_size: The size of the convolutional kernel
:param padding: The padding used in the convolutional layers, 'same' or 'causal'.
:param activation: The final activation used in o = Activation(x + F(x))
:param dropout_rate: Float between 0 and 1. Fraction of the input units to drop.
:param kernel_initializer: Initializer for the kernel weights matrix (Conv1D).
:param use_batch_norm: Whether to use batch normalization in the residual layers or not.
:param use_layer_norm: Whether to use layer normalization in the residual layers or not.
:param last_block: Whether to add a residual connection to the convolution layer or not.
:param kwargs: Any initializers for Layer class.
"""
self.dilation_rate = dilation_rate
self.nb_filters = nb_filters
self.kernel_size = kernel_size
self.padding = padding
self.activation = activation
self.dropout_rate = dropout_rate
self.use_batch_norm = use_batch_norm
self.use_layer_norm = use_layer_norm
self.kernel_initializer = kernel_initializer
self.kernel_regularizer = kernel_regularizer
self.last_block = last_block
self.residual_layers = []
self.shape_match_conv = None
self.res_output_shape = None
self.final_activation = None
super(ResidualBlock, self).__init__(**kwargs)
def _add_and_activate_layer(self, layer):
"""Helper function for building layer
Args:
layer: Appends layer to internal layer list and builds it based on the current output
shape of ResidualBlocK. Updates current output shape.
"""
self.residual_layers.append(layer)
self.residual_layers[-1].build(self.res_output_shape)
self.res_output_shape = self.residual_layers[-1].compute_output_shape(self.res_output_shape)
def build(self, input_shape):
with tf.keras.backend.name_scope(self.name): # name scope used to make sure weights get unique names
self.res_output_shape = input_shape
for k in range(2):
name = f'conv1D_{k}'
with tf.keras.backend.name_scope(name): # name scope used to make sure weights get unique names
self._add_and_activate_layer(Conv1D(filters=self.nb_filters,
kernel_size=self.kernel_size,
dilation_rate=self.dilation_rate,
padding=self.padding,
name=name,
kernel_regularizer = tf.keras.regularizers.l2(self.kernel_regularizer),
kernel_initializer=self.kernel_initializer))
if self.use_batch_norm:
self._add_and_activate_layer(BatchNormalization())
elif self.use_layer_norm:
self._add_and_activate_layer(LayerNormalization())
self._add_and_activate_layer(SpatialDropout1D(rate=self.dropout_rate))
self._add_and_activate_layer(Activation('relu'))
if not self.last_block:
# 1x1 conv to match the shapes (channel dimension).
name = f'conv1D_{k+1}'
with tf.keras.backend.name_scope(name):
# make and build this layer separately because it directly uses input_shape
self.shape_match_conv = Conv1D(filters=self.nb_filters,
kernel_size=1,
padding='same',
name=name,
kernel_regularizer = tf.keras.regularizers.l2(self.kernel_regularizer),
kernel_initializer=self.kernel_initializer)
else:
self.shape_match_conv = Lambda(lambda x: x, name='identity')
self.shape_match_conv.build(input_shape)
self.res_output_shape = self.shape_match_conv.compute_output_shape(input_shape)
self.final_activation = Activation(self.activation)
self.final_activation.build(self.res_output_shape) # probably isn't necessary
# this is done to force keras to add the layers in the list to self._layers
for layer in self.residual_layers:
self.__setattr__(layer.name, layer)
super(ResidualBlock, self).build(input_shape) # done to make sure self.built is set True
def call(self, inputs, training=None):
"""
Returns: A tuple where the first element is the residual model tensor, and the second
is the skip connection tensor.
"""
x = inputs
for layer in self.residual_layers:
if isinstance(layer, SpatialDropout1D):
x = layer(x, training=training)
else:
x = layer(x)
x2 = self.shape_match_conv(inputs)
res_x = tf.keras.layers.add([x2, x])
return [self.final_activation(res_x), x]
def compute_output_shape(self, input_shape):
return [self.res_output_shape, self.res_output_shape]
class Conv2D(tf.keras.layers.Layer):
""" Multiple repetitions of 2d convolution, batch normalization and dropout layers. """
def __init__(self, filters, kernel_size=3,
strides=1, dilation=1, padding='VALID',
add_dropout=True,
dropout_rate=0.2, activation='relu', batch_normalization=False, use_bias=True, num_repetitions=1):
super().__init__()
repetitions = []
for _ in range(num_repetitions):
layer = [
tf.keras.layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
dilation_rate=dilation,
padding=padding,
use_bias=use_bias,
activation=activation,
)
]
if batch_normalization:
layer.append(tf.keras.layers.BatchNormalization())
if add_dropout:
layer.append(tf.keras.layers.Dropout(rate=dropout_rate))
layer = tf.keras.Sequential(layer)
repetitions.append(layer)
self.combined_layer = tf.keras.Sequential(repetitions)
def call(self, inputs, training=False):
return self.combined_layer(inputs, training=training)
class ResConv2D(tf.keras.layers.Layer):
"""
Layer of N residual convolutional blocks stacked in parallel
This layer stacks in parallel a sequence of 2 2D convolutional layers and returns the addition of their output
feature tensors with the input tensor. N number of convolutional blocks can be added together with different kernel
size and dilation rate, which are specified as a list. If the inputs are not a list, the same parameters are used
for all convolutional blocks.
"""
def __init__(self, filters, kernel_size=3, strides=1, dilation=1, padding='VALID', add_dropout=True,
dropout_rate=0.2, activation='relu', use_bias=True, batch_normalization=False, num_parallel=1):
super().__init__()
if isinstance(kernel_size, list) and len(kernel_size) != num_parallel:
raise ValueError('Number of specified kernel sizes needs to match num_parallel')
if isinstance(dilation, list) and len(dilation) != num_parallel:
raise ValueError('Number of specified dilation rate sizes needs to match num_parallel')
kernel_list = kernel_size if isinstance(kernel_size, list) else [kernel_size]*num_parallel
dilation_list = dilation if isinstance(dilation, list) else [dilation]*num_parallel
self.convs = [Conv2D(filters,
kernel_size=k,
strides=strides,
dilation=d,
padding=padding,
activation=activation,
add_dropout=add_dropout,
dropout_rate=dropout_rate,
use_bias=use_bias,
batch_normalization=batch_normalization,
num_repetitions=2) for k, d in zip(kernel_list, dilation_list)]
self.add = tf.keras.layers.Add()
def call(self, inputs, training=False):
outputs = [conv_layer(inputs, training=training) for conv_layer in self.convs]
return self.add(outputs + [inputs])
class Conv3D(tf.keras.layers.Layer):
""" Multiple repetitions of 3d convolution, batch normalization and dropout layers. """
def __init__(self, filters, kernel_size=3, strides=1, padding='VALID', add_dropout=True, dropout_rate=0.2,
batch_normalization=False, use_bias=True, num_repetitions=1, convolve_time=True):
super().__init__()
repetitions = []
t_size = kernel_size if convolve_time else 1
kernel_shape = (t_size, kernel_size, kernel_size)
for _ in range(num_repetitions):
layer = [
tf.keras.layers.Conv3D(
filters=filters,
kernel_size=kernel_shape,
strides=strides,
padding=padding,
use_bias=use_bias,
activation='relu',
)
]
if batch_normalization:
layer.append(tf.keras.layers.BatchNormalization())
if add_dropout:
layer.append(tf.keras.layers.Dropout(rate=dropout_rate))
layer = tf.keras.Sequential(layer)
repetitions.append(layer)
self.combined_layer = tf.keras.Sequential(repetitions)
def call(self, inputs, training=False):
return self.combined_layer(inputs, training=training)
class Deconv2D(tf.keras.layers.Layer):
""" 2d transpose convolution with optional batch normalization. """
def __init__(self, filters, kernel_size=2, batch_normalization=False):
super().__init__()
layer = [tf.keras.layers.Conv2DTranspose(
filters=filters,
kernel_size=kernel_size,
strides=kernel_size,
padding='SAME',
activation='relu'
)]
if batch_normalization:
layer.append(tf.keras.layers.BatchNormalization())
self.layer = tf.keras.Sequential(layer)
def call(self, inputs, training=None):
return self.layer(inputs, training=training)
class CropAndConcat(tf.keras.layers.Layer):
""" Layer that crops the first tensor and concatenates it with the second. Used for skip connections. """
@staticmethod
def call(x1, x2):
# Crop x1 to shape of x2
x2_shape = tf.shape(x2)
x1_crop = tf.image.resize_with_crop_or_pad(x1, x2_shape[1], x2_shape[2])
# Concatenate along last dimension and return
return tf.concat([x1_crop, x2], axis=-1)
class MaxPool3D(tf.keras.layers.Layer):
def __init__(self, kernel_size=2, strides=2, pool_time=False):
super().__init__()
tsize = kernel_size if pool_time else 1
tstride = strides if pool_time else 1
kernel_shape = (tsize, kernel_size, kernel_size)
strides = (tstride, strides, strides)
self.layer = tf.keras.layers.MaxPool3D(
pool_size=kernel_shape,
strides=strides,
padding='SAME'
)
def call(self, inputs, training=None):
return self.layer(inputs, training=training)
class Reduce3DTo2D(tf.keras.layers.Layer):
""" Reduces 3d representations into 2d using 3d convolution over the whole time dimension. """
def __init__(self, filters, kernel_size=3, stride=1, add_dropout=False, dropout_rate=0.2):
super().__init__()
self.filters = filters
self.kernel_size = kernel_size
self.stride = stride
self.add_dropout = add_dropout
self.dropout_rate = dropout_rate
self.layer = None
def build(self, input_size):
t_size = input_size[1]
layer = [tf.keras.layers.Conv3D(
self.filters,
kernel_size=(t_size, self.kernel_size, self.kernel_size),
strides=(1, self.stride, self.stride),
padding='VALID',
activation='relu'
)]
if self.add_dropout:
layer.append(tf.keras.layers.Dropout(rate=self.dropout_rate))
self.layer = tf.keras.Sequential(layer)
def call(self, inputs, training=None):
r = self.layer(inputs, training=training)
# Squeeze along temporal dimension
return tf.squeeze(r, axis=[1])
class PyramidPoolingModule(tf.keras.layers.Layer):
"""
Implementation of the Pyramid Pooling Module
Implementation taken from the following paper
Zhao et al. - Pyramid Scene Parsing Network - https://arxiv.org/pdf/1612.01105.pdf
PyTorch implementation https://github.com/hszhao/semseg/blob/master/model/pspnet.py
"""
def __init__(self, filters, bins=(1, 2, 4, 8), interpolation='bilinear', batch_normalization=False):
super().__init__()
self.filters = filters
self.bins = bins
self.batch_normalization = batch_normalization
self.interpolation = interpolation
self.layers = None
def build(self, input_size):
_, height, width, n_features = input_size
layers = []
for bin_size in self.bins:
size_factors = height // bin_size, width // bin_size
layer = tf.keras.Sequential()
layer.add(AveragePooling2D(pool_size=size_factors,
padding='same'))
layer.add(tf.keras.layers.Conv2D(filters=self.filters//len(self.bins),
kernel_size=1,
padding='same',
use_bias=False))
if self.batch_normalization:
layer.add(BatchNormalization())
layer.add(Activation('relu'))
layer.add(UpSampling2D(size=size_factors, interpolation=self.interpolation))
layers.append(layer)
self.layers = layers
def call(self, inputs, training=None):
""" Concatenate the output of the pooling layers, resampled to original size """
_, height, width, _ = inputs.shape
outputs = [inputs]
outputs += [layer(inputs, training=training) for layer in self.layers]
return tf.concat(outputs, axis=-1)
```
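A quick shape-check sketch for a few of the layers above on random tensors; the shapes and parameter values are arbitrary toy choices.
```python
# Shape-check sketch for a few custom layers (arbitrary toy shapes).
import tensorflow as tf
from eoflow.models.layers import ResidualBlock, Conv2D, PyramidPoolingModule

# ResidualBlock operates on (batch, time, channels) series and returns
# (residual output, skip connection).
series = tf.random.normal((4, 30, 16))
res_out, skip_out = ResidualBlock(dilation_rate=2, nb_filters=16,
                                  kernel_size=3, padding='causal')(series)
print(res_out.shape, skip_out.shape)   # (4, 30, 16) (4, 30, 16)

# Conv2D and PyramidPoolingModule operate on (batch, height, width, channels) tensors.
image = tf.random.normal((2, 64, 64, 8))
print(Conv2D(filters=16, padding='SAME', num_repetitions=2)(image).shape)   # (2, 64, 64, 16)
print(PyramidPoolingModule(filters=32)(image).shape)   # input channels + one pooled branch per bin
```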
#### File: models/tempnets_task/cnn_tempnets.py
```python
import logging
import tensorflow as tf
from marshmallow import fields
from marshmallow.validate import OneOf
from tensorflow.keras.layers import Dense
from tensorflow.python.keras.utils.layer_utils import print_summary
from eoflow.models.layers import ResidualBlock
from eoflow.models.tempnets_task.tempnets_base import BaseTempnetsModel, BaseCustomTempnetsModel, BaseModelAdapt
from eoflow.models import pse_tae_layers  # used by CNNTaeSchema.init_model
import tensorflow_probability as tfp
import numpy as np
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
class TCNModel(BaseCustomTempnetsModel):
""" Implementation of the TCN network taken form the keras-TCN implementation
https://github.com/philipperemy/keras-tcn
"""
class TCNModelSchema(BaseTempnetsModel._Schema):
keep_prob = fields.Float(required=True, description='Keep probability used in dropout layers.', example=0.5)
kernel_size = fields.Int(missing=2, description='Size of the convolution kernels.')
nb_filters = fields.Int(missing=64, description='Number of convolutional filters.')
nb_conv_stacks = fields.Int(missing=1)
dilations = fields.List(fields.Int, missing=[1, 2, 4, 8, 16, 32], description='Size of dilations used in the '
'convolutional layers.')
padding = fields.String(missing='CAUSAL', validate=OneOf(['CAUSAL', 'SAME']),
description='Padding type used in convolutions.')
use_skip_connections = fields.Bool(missing=True, description='Whether to use skip connections.')
return_sequences = fields.Bool(missing=False, description='Whether to return sequences or not.')
activation = fields.Str(missing='linear', description='Activation function used in final filters.')
kernel_initializer = fields.Str(missing='he_normal', description='method to initialise kernel parameters.')
kernel_regularizer = fields.Float(missing=0, description='L2 regularization parameter.')
batch_norm = fields.Bool(missing=False, description='Whether to use batch normalisation.')
layer_norm = fields.Bool(missing=False, description='Whether to use layer normalisation.')
def _cnn_layer(self, net):
dropout_rate = 1 - self.config.keep_prob
layer = tf.keras.layers.Conv1D(filters= self.config.nb_filters,
kernel_size=self.config.kernel_size,
padding=self.config.padding,
kernel_initializer=self.config.kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l2(self.config.kernel_regularizer))(net)
if self.config.batch_norm:
layer = tf.keras.layers.BatchNormalization(axis=-1)(layer)
layer = tf.keras.layers.Dropout(dropout_rate)(layer)
layer = tf.keras.layers.Activation(self.config.activation)(layer)
return layer
def build(self, inputs_shape):
""" Build TCN architecture
The `inputs_shape` argument is a `(N, T, D)` tuple where `N` denotes the number of samples, `T` the number of
time-frames, and `D` the number of channels
"""
x = tf.keras.layers.Input(inputs_shape[1:])
dropout_rate = 1 - self.config.keep_prob
net = x
net = self._cnn_layer(net)
# list to hold all the member ResidualBlocks
residual_blocks = []
skip_connections = []
total_num_blocks = self.config.nb_conv_stacks * len(self.config.dilations)
if not self.config.use_skip_connections:
total_num_blocks += 1 # cheap way to do a false case for below
for _ in range(self.config.nb_conv_stacks):
for d in self.config.dilations:
net, skip_out = ResidualBlock(dilation_rate=d,
nb_filters=self.config.nb_filters,
kernel_size=self.config.kernel_size,
padding=self.config.padding,
activation=self.config.activation,
dropout_rate=dropout_rate,
use_batch_norm=self.config.batch_norm,
use_layer_norm=self.config.layer_norm,
kernel_initializer=self.config.kernel_initializer,
last_block=len(residual_blocks) + 1 == total_num_blocks,
name=f'residual_block_{len(residual_blocks)}')(net)
residual_blocks.append(net)
skip_connections.append(skip_out)
# Author: @karolbadowski.
output_slice_index = int(net.shape.as_list()[1] / 2) \
if self.config.padding.lower() == 'same' else -1
lambda_layer = tf.keras.layers.Lambda(lambda tt: tt[:, output_slice_index, :])
if self.config.use_skip_connections:
net = tf.keras.layers.add(skip_connections)
if not self.config.return_sequences:
net = lambda_layer(net)
net = tf.keras.layers.Dense(1, activation='linear')(net)
self.net = tf.keras.Model(inputs=x, outputs=net)
#print_summary(self.net)
def call(self, inputs, training=None):
return self.net(inputs, training)
class TempCNNModel(BaseCustomTempnetsModel,BaseModelAdapt):
""" Implementation of the TempCNN network taken from the temporalCNN implementation
https://github.com/charlotte-pel/temporalCNN
"""
class TempCNNModelSchema(BaseCustomTempnetsModel._Schema):
keep_prob = fields.Float(required=True, description='Keep probability used in dropout layers.',
example=0.5)
kernel_size = fields.Int(missing=5, description='Size of the convolution kernels.')
nb_conv_filters = fields.Int(missing=16, description='Number of convolutional filters.')
nb_conv_stacks = fields.Int(missing=3, description='Number of convolutional blocks.')
n_strides = fields.Int(missing=1, description='Value of convolutional strides.')
nb_fc_neurons = fields.Int(missing=256, description='Number of Fully Connect neurons.')
nb_fc_stacks = fields.Int(missing=1, description='Number of fully connected layers.')
fc_activation = fields.Str(missing='relu', description='Activation function used in final FC layers.')
emb_layer = fields.String(missing='Flatten', validate=OneOf(['Flatten', 'GlobalAveragePooling1D', 'GlobalMaxPooling1D']),
description='Final layer after the convolutions.')
padding = fields.String(missing='SAME', validate=OneOf(['SAME','VALID', 'CAUSAL']),
description='Padding type used in convolutions.')
activation = fields.Str(missing='relu', description='Activation function used in final filters.')
n_classes = fields.Int(missing=1, description='Number of classes')
output_activation = fields.String(missing='linear', description='Output activation')
residual_block = fields.Bool(missing=False, description= 'Add residual block')
kernel_initializer = fields.Str(missing='he_normal', description='Method to initialise kernel parameters.')
kernel_regularizer = fields.Float(missing=1e-6, description='L2 regularization parameter.')
enumerate = fields.Bool(missing=False, description='Increase number of filters across convolution')
str_inc = fields.Bool(missing=False, description='Increase strides')
ker_inc = fields.Bool(missing=False, description='Increase kernels')
ker_dec = fields.Bool(missing=False, description='Decrease kernels')
fc_dec = fields.Bool(missing=False, description='Decrease dense neurons')
multioutput = fields.Bool(missing=False, description='Whether to add a second (e.g. uncertainty) output head')
batch_norm = fields.Bool(missing=True, description='Whether to use batch normalisation.')
def _cnn_layer(self, net, i = 0, first = False):
dropout_rate = 1 - self.config.keep_prob
filters = self.config.nb_conv_filters
kernel_size = self.config.kernel_size
n_strides = self.config.n_strides
if self.config.enumerate:
filters = filters * (2**i)
if self.config.ker_inc:
kernel_size = kernel_size * (i+1)
if self.config.ker_dec:
kernel_size = self.config.kernel_size // (i+1)
if kernel_size ==0: kernel_size += 1
if self.config.str_inc:
n_strides = 1 if first else 2
layer = tf.keras.layers.Conv1D(filters=filters,
kernel_size=kernel_size,
strides=n_strides,
padding=self.config.padding,
kernel_initializer=self.config.kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l2(self.config.kernel_regularizer))(net)
if self.config.batch_norm:
layer = tf.keras.layers.BatchNormalization(axis=-1)(layer)
layer = tf.keras.layers.Dropout(dropout_rate)(layer)
layer = tf.keras.layers.Activation(self.config.activation)(layer)
return layer
def _embeddings(self,net):
name = "embedding"
if self.config.emb_layer == 'Flatten':
net = tf.keras.layers.Flatten(name=name)(net)
elif self.config.emb_layer == 'GlobalAveragePooling1D':
net = tf.keras.layers.GlobalAveragePooling1D(name=name)(net)
elif self.config.emb_layer == 'GlobalMaxPooling1D':
net = tf.keras.layers.GlobalMaxPooling1D(name=name)(net)
return net
def _fcn_layer(self, net, i=0):
dropout_rate = 1 - self.config.keep_prob
nb_neurons = self.config.nb_fc_neurons
if self.config.fc_dec:
nb_neurons /= 2**i
layer_fcn = Dense(units=nb_neurons,
kernel_initializer=self.config.kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l2(self.config.kernel_regularizer))(net)
if self.config.batch_norm:
layer_fcn = tf.keras.layers.BatchNormalization(axis=-1)(layer_fcn)
layer_fcn = tf.keras.layers.Dropout(dropout_rate)(layer_fcn)
if self.config.fc_activation:
layer_fcn = tf.keras.layers.Activation(self.config.fc_activation)(layer_fcn)
return layer_fcn
def build(self, inputs_shape):
""" Build TCN architecture
The `inputs_shape` argument is a `(N, T, D)` tuple where `N` denotes the number of samples, `T` the number of
time-frames, and `D` the number of channels
"""
x = tf.keras.layers.Input(inputs_shape[1:])
net = x
net = self._cnn_layer(net, 0, first = True)
for i, _ in enumerate(range(self.config.nb_conv_stacks-1)):
net = self._cnn_layer(net, i+1)
embedding = self._embeddings(net)
net_mean = self._fcn_layer(embedding)
for i in range(1, self.config.nb_fc_stacks):
net_mean = self._fcn_layer(net_mean, i)
output = Dense(units = self.config.n_classes,
activation = self.config.output_activation,
kernel_initializer=self.config.kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l2(self.config.kernel_regularizer))(net_mean)
if self.config.multioutput or self.config.loss in ['gaussian', 'laplacian']:
net_std = self._fcn_layer(embedding)
for i in range(1, self.config.nb_fc_stacks):
net_std = self._fcn_layer(net_std, i)
output_sigma = Dense(units=self.config.n_classes,
activation=self.config.output_activation,
kernel_initializer=self.config.kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l2(self.config.kernel_regularizer))(net_std)
self.net = tf.keras.Model(inputs=x, outputs=[output, output_sigma])
else:
self.net = tf.keras.Model(inputs=x, outputs=output)
print_summary(self.net)
def call(self, inputs, training=None):
return self.net(inputs, training)
def get_feature_map(self):
output_layer = self.net.layers[-(self.config.nb_fc_stacks * 4 + 2)]
return tf.keras.Model(
inputs=self.net.layers[0].input, outputs=output_layer.output
)
class BayesTempCNNModel(BaseCustomTempnetsModel):
""" Implementation of the TempCNN network taken from the temporalCNN implementation
https://github.com/charlotte-pel/temporalCNN
"""
class BayesTempCNNModelSchema(BaseCustomTempnetsModel._Schema):
keep_prob = fields.Float(required=True, description='Keep probability used in dropout layers.', example=0.5)
kernel_size = fields.Int(missing=5, description='Size of the convolution kernels.')
nb_conv_filters = fields.Int(missing=16, description='Number of convolutional filters.')
nb_conv_stacks = fields.Int(missing=3, description='Number of convolutional blocks.')
n_strides = fields.Int(missing=1, description='Value of convolutional strides.')
nb_fc_neurons = fields.Int(missing=256, description='Number of Fully Connect neurons.')
nb_fc_stacks = fields.Int(missing=1, description='Number of fully connected layers.')
fc_activation = fields.Str(missing='relu', description='Activation function used in final FC layers.')
emb_layer = fields.String(missing='Flatten', validate=OneOf(['Flatten', 'GlobalAveragePooling1D', 'GlobalMaxPooling1D']),
description='Final layer after the convolutions.')
padding = fields.String(missing='SAME', validate=OneOf(['SAME','VALID', 'CAUSAL']),
description='Padding type used in convolutions.')
activation = fields.Str(missing='relu', description='Activation function used in final filters.')
n_classes = fields.Int(missing=1, description='Number of classes')
output_activation = fields.String(missing='linear', description='Output activation')
residual_block = fields.Bool(missing=False, description= 'Add residual block')
activity_regularizer = fields.Float(missing=1e-6, description='L2 regularization parameter.')
enumerate = fields.Bool(missing=False, description='Increase number of filters across convolution')
str_inc = fields.Bool(missing=False, description='Increase strides')
batch_norm = fields.Bool(missing=False, description='Whether to use batch normalisation.')
fc_dec = fields.Bool(missing=False, description='Decrease dense neurons')
ker_inc = fields.Bool(missing=False, description='Increase kernels')
ker_dec = fields.Bool(missing=False, description='Decrease kernels')
def _cnn_layer(self, net, i = 0, first = False):
dropout_rate = 1 - self.config.keep_prob
filters = self.config.nb_conv_filters
kernel_size = self.config.kernel_size
n_strides = self.config.n_strides
if self.config.enumerate:
filters = filters * (2**i)
if self.config.ker_inc:
kernel_size = kernel_size * (i+1)
if self.config.ker_dec:
kernel_size = self.config.kernel_size // (i+1)
if kernel_size ==0: kernel_size += 1
if self.config.str_inc:
n_strides = 1 if first else 2
layer = tfp.layers.Convolution1DReparameterization(filters=filters,
kernel_size=kernel_size,
strides=n_strides,
activation = self.config.activation,
activity_regularizer=tf.keras.regularizers.l2(self.config.activity_regularizer),
padding=self.config.padding)(net)
if self.config.batch_norm:
layer = tfp.bijectors.BatchNormalization()(layer)
layer = tf.keras.layers.Dropout(dropout_rate)(layer)
return layer
def _embeddings(self,net):
name = "embedding"
if self.config.emb_layer == 'Flatten':
net = tf.keras.layers.Flatten(name=name)(net)
elif self.config.emb_layer == 'GlobalAveragePooling1D':
net = tf.keras.layers.GlobalAveragePooling1D(name=name)(net)
elif self.config.emb_layer == 'GlobalMaxPooling1D':
net = tf.keras.layers.GlobalMaxPooling1D(name=name)(net)
return net
def _fcn_layer(self, net, i=0):
dropout_rate = 1 - self.config.keep_prob
nb_neurons = self.config.nb_fc_neurons
if self.config.fc_dec:
nb_neurons //= 2**i
layer_fcn = tfp.layers.DenseReparameterization(units=nb_neurons,
activation=self.config.activation,
activity_regularizer=tf.keras.regularizers.l2(self.config.activity_regularizer))(net)
if self.config.batch_norm:
layer_fcn = tfp.bijectors.BatchNormalization()(layer_fcn)
layer_fcn = tf.keras.layers.Dropout(dropout_rate)(layer_fcn)
#if self.config.fc_activation: layer_fcn = tf.keras.layers.Activation(self.config.fc_activation)(layer_fcn)
return layer_fcn
def build(self, inputs_shape):
""" Build TCN architecture
The `inputs_shape` argument is a `(N, T, D)` tuple where `N` denotes the number of samples, `T` the number of
time-frames, and `D` the number of channels
"""
x = tf.keras.layers.Input(inputs_shape[1:])
net = x
net = self._cnn_layer(net, 0, first = True)
for i, _ in enumerate(range(self.config.nb_conv_stacks-1)):
net = self._cnn_layer(net, i+1)
embedding = self._embeddings(net)
net_mean = self._fcn_layer(embedding)
net_std = self._fcn_layer(embedding)
for i in range(1, self.config.nb_fc_stacks):
net_mean = self._fcn_layer(net_mean, i)
output = tfp.layers.DenseReparameterization(units = self.config.n_classes,
activity_regularizer=tf.keras.regularizers.l2(
self.config.activity_regularizer),
activation = self.config.output_activation)(net_mean)
if self.config.loss in ['gaussian', 'laplacian']:
for i in range(1, self.config.nb_fc_stacks):
net_std = self._fcn_layer(net_std, i)
output_sigma = tfp.layers.DenseReparameterization(units=self.config.n_classes,
activity_regularizer=tf.keras.regularizers.l2(
self.config.activity_regularizer),
activation=self.config.output_activation)(net_std)
self.net = tf.keras.Model(inputs=x, outputs=[output, output_sigma])
else:
self.net = tf.keras.Model(inputs=x, outputs=output)
print_summary(self.net)
def call(self, inputs, training=None):
return self.net(inputs, training)
class HistogramCNNModel(BaseCustomTempnetsModel):
""" Implementation of the CNN2D with histogram time series
https://cs.stanford.edu/~ermon/papers/cropyield_AAAI17.pdf
https://github.com/JiaxuanYou/crop_yield_prediction/blob/master/3%20model/nnet_for_hist_dropout_stride.py
"""
class HistogramCNNModelSchema(BaseCustomTempnetsModel._Schema):
keep_prob = fields.Float(required=True, description='Keep probability used in dropout layers.', example=0.5)
kernel_size = fields.List(fields.Int, missing=[3,3], description='Size of the convolution kernels.')
nb_conv_filters = fields.Int(missing=16, description='Number of convolutional filters.')
nb_conv_stacks = fields.Int(missing=3, description='Number of convolutional blocks.')
n_strides = fields.List(fields.Int, missing=[1, 1], description='Value of convolutional strides.')
nb_fc_neurons = fields.Int(missing=256, description='Number of Fully Connect neurons.')
nb_fc_stacks = fields.Int(missing=1, description='Number of fully connected layers.')
emb_layer = fields.String(missing='Flatten',
validate=OneOf(['Flatten', 'GlobalAveragePooling2D', 'GlobalMaxPooling2D']),
description='Final layer after the convolutions.')
padding = fields.String(missing='SAME', validate=OneOf(['SAME','VALID', 'CAUSAL']),
description='Padding type used in convolutions.')
activation = fields.Str(missing='relu', description='Activation function used in final filters.')
fc_activation = fields.Str(missing='relu', description='Activation function used in final FC layers.')
kernel_initializer = fields.Str(missing='he_normal', description='Method to initialise kernel parameters.')
kernel_regularizer = fields.Float(missing=1e-6, description='L2 regularization parameter.')
enumerate = fields.Bool(missing=False, description='Increase number of filters across convolution')
batch_norm = fields.Bool(missing=False, description='Whether to use batch normalisation.')
fc_dec = fields.Bool(missing=False, description='Decrease dense neurons')
ker_inc = fields.Bool(missing=False, description='Increase kernels')
ker_dec = fields.Bool(missing=False, description='Decrease kernels')
def _cnn_layer(self, net, i = 1, last = False):
dropout_rate = 1 - self.config.keep_prob
filters = self.config.nb_conv_filters
kernel_size = np.array(self.config.kernel_size)
n_strides = self.config.n_strides
if self.config.enumerate:
filters = filters * (2**i)
n_strides = 1 if not last else 2
if self.config.ker_inc:
kernel_size = kernel_size * (i+1)
if self.config.ker_dec:
kernel_size = kernel_size // (i+1)
if kernel_size[0] == 0:
kernel_size += 1
layer = tf.keras.layers.Conv2D(filters=filters,
kernel_size=list(kernel_size),
strides=n_strides,
padding=self.config.padding,
kernel_initializer=self.config.kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l2(self.config.kernel_regularizer))(net)
if self.config.batch_norm:
layer = tf.keras.layers.BatchNormalization(axis=-1)(layer)
layer = tf.keras.layers.Dropout(dropout_rate)(layer)
layer = tf.keras.layers.Activation(self.config.activation)(layer)
return layer
def _fcn_layer(self, net):
dropout_rate = 1 - self.config.keep_prob
layer_fcn = Dense(units=self.config.nb_fc_neurons,
kernel_initializer=self.config.kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l2(self.config.kernel_regularizer))(net)
if self.config.batch_norm:
layer_fcn = tf.keras.layers.BatchNormalization(axis=-1)(layer_fcn)
layer_fcn = tf.keras.layers.Dropout(dropout_rate)(layer_fcn)
if self.config.fc_activation:
layer_fcn = tf.keras.layers.Activation(self.config.fc_activation)(layer_fcn)
return layer_fcn
def _embeddings(self,net):
name = "embedding"
if self.config.emb_layer == 'Flatten':
net = tf.keras.layers.Flatten(name=name)(net)
elif self.config.emb_layer == 'GlobalAveragePooling2D':
net = tf.keras.layers.GlobalAveragePooling2D(name=name)(net)
elif self.config.emb_layer == 'GlobalMaxPooling2D':
net = tf.keras.layers.GlobalMaxPooling2D(name=name)(net)
return net
def build(self, inputs_shape):
""" Build TCN architecture
The `inputs_shape` argument is a `(N, T, D)` tuple where `N` denotes the number of samples, `T` the number of
time-frames, and `D` the number of channels
"""
x = tf.keras.layers.Input(inputs_shape[1:])
net = x
for i, _ in enumerate(range(self.config.nb_conv_stacks-1)):
net = self._cnn_layer(net, i)
net = self._cnn_layer(net, i, True)
#net = self._cnn_layer(net, self.config.nb_conv_stacks-1)
#net = self._cnn_layer(net, self.config.nb_conv_stacks-1)
net = self._cnn_layer(net, self.config.nb_conv_stacks-1, True)
net = self._embeddings(net)
for _ in range(self.config.nb_fc_stacks):
net = self._fcn_layer(net)
net = Dense(units = 1,
activation = 'linear',
kernel_initializer=self.config.kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l2(self.config.kernel_regularizer))(net)
self.net = tf.keras.Model(inputs=x, outputs=net)
print_summary(self.net)
def call(self, inputs, training=None):
return self.net(inputs, training)
def get_feature_map(self, inputs, training=None):
return self.backbone(inputs, training)
class InceptionCNN(BaseCustomTempnetsModel):
'''
https://github.com/hfawaz/InceptionTime
'''
class InceptionCNNSchema(BaseCustomTempnetsModel._Schema):
keep_prob = fields.Float(required=True, description='Keep probability used in dropout layers.', example=0.5)
kernel_size = fields.Int(missing=5, description='Size of the convolution kernels.')
nb_conv_filters = fields.Int(missing=32, description='Number of convolutional filters.')
nb_conv_stacks = fields.Int(missing=3, description='Number of convolutional blocks.')
n_strides = fields.Int(missing=1, description='Value of convolutional strides.')
bottleneck_size = fields.Int(missing=32, description='Bottleneck size.')
use_residual = fields.Bool(missing=False,description='Use residuals.')
nb_fc_neurons = fields.Int(missing=256, description='Number of Fully Connect neurons.')
nb_fc_stacks = fields.Int(missing=1, description='Number of fully connected layers.')
padding = fields.String(missing='SAME', validate=OneOf(['SAME','VALID', 'CAUSAL']),
description='Padding type used in convolutions.')
fc_activation = fields.Str(missing='relu', description='Activation function used in final FC layers.')
kernel_initializer = fields.Str(missing='he_normal', description='Method to initialise kernel parameters.')
kernel_regularizer = fields.Float(missing=1e-6, description='L2 regularization parameter.')
use_bottleneck = fields.Bool(missing=True, description='use_bottleneck')
batch_norm = fields.Bool(missing=False, description='Whether to use batch normalisation.')
nb_class = fields.Int(missing=1, description='Number of class.')
output_activation = fields.Str(missing='linear', description='Output activation.')
def _inception_module(self, input_tensor, stride=1, activation='linear'):
if self.config.use_bottleneck and int(input_tensor.shape[-1]) > 1:
input_inception = tf.keras.layers.Conv1D(filters=self.config.bottleneck_size,
kernel_size=1,
padding='SAME',
activation=activation,
use_bias=False)(input_tensor)
else:
input_inception = input_tensor
kernel_size_s = [1,2,3]
#kernel_size_s = [5 // (2 ** i) for i in range(3)]
conv_list = [
tf.keras.layers.Conv1D(
filters=self.config.nb_conv_filters,
kernel_size=kernel_size_,
strides=stride,
padding='SAME',
activation=activation,
use_bias=False,
)(input_inception)
for kernel_size_ in kernel_size_s
]
max_pool_1 = tf.keras.layers.MaxPool1D(pool_size=3, strides=self.config.n_strides, padding='SAME')(input_tensor)
conv_6 = tf.keras.layers.Conv1D(filters=self.config.nb_conv_filters,
kernel_size=1,
padding='SAME',
activation=activation, use_bias=False)(max_pool_1)
conv_list.append(conv_6)
x = tf.keras.layers.Concatenate(axis=2)(conv_list)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Dropout(1-self.config.keep_prob)(x)
x = tf.keras.layers.Activation(activation='relu')(x)
return x
def _shortcut_layer(self, input_tensor, out_tensor):
shortcut_y = tf.keras.layers.Conv1D(filters=int(out_tensor.shape[-1]),
kernel_size=1,
padding='SAME', use_bias=False)(input_tensor)
shortcut_y = tf.keras.layers.BatchNormalization()(shortcut_y)
shortcut_y = tf.keras.layers.Dropout(1 - self.config.keep_prob)(shortcut_y)
x = tf.keras.layers.Add()([shortcut_y, out_tensor])
x = tf.keras.layers.Activation('relu')(x)
return x
def build(self, input_shape):
input_layer = tf.keras.layers.Input(input_shape[1:])
input_res = input_layer
x = input_layer
for d in range(self.config.nb_conv_stacks):
x = self._inception_module(x)
if self.config.use_residual and d % 3 == 2:
x = self._shortcut_layer(input_res, x)
input_res = x
gap_layer = tf.keras.layers.GlobalAveragePooling1D()(x)
output_layer = tf.keras.layers.Dense(self.config.nb_class,
activation=self.config.output_activation)(gap_layer)
self.net = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
print_summary(self.net)
def call(self, inputs, training=None):
return self.net(inputs, training)
class TransformerCNN(BaseCustomTempnetsModel):
""" Implementation of the Pixel-Set encoder + Temporal Attention Encoder sequence classifier
Code is based on the Pytorch implementation of <NAME> et al. https://github.com/VSainteuf/pytorch-psetae
"""
class TransformerCNNSchema(BaseCustomTempnetsModel._Schema):
keep_prob = fields.Float(required=True, description='Keep probability used in dropout layers.',
example=0.5)
num_heads = fields.Int(missing=4, description='Number of Attention heads.')
head_size = fields.Int(missing=64, description='Size Attention heads.')
kernel_size = fields.Int(missing=5, description='Size of the convolution kernels.')
num_transformer_blocks = fields.Int(missing=4, description='Number of transformer blocks.')
ff_dim = fields.Int(missing=4, description='Number of feed-forward neurons in point-wise CNN.')
batch_norm = fields.Bool(missing=False,description='Use batch normalisation.')
n_conv = fields.Int(missing=3, description='Number of convolutional layers in the feed-forward part.')
d_model = fields.Int(missing=None, description='Depth of model.')
mlp_units = fields.List(fields.Int, missing=[64], description='Number of units for each layer in mlp.')
mlp_dropout = fields.Float(required=True, description='Dropout rate used in the MLP layers.',
example=0.5)
emb_layer = fields.Str(missing='Flatten', description='Embedding layer.')
output_activation = fields.Str(missing='linear', description='Output activation.')
n_classes = fields.Int(missing=1, description='# Classes.')
n_strides = fields.Int(missing=1, description='# strides.')
def transformer_encoder(self, inputs):
# Normalization and Attention
x = tf.keras.layers.LayerNormalization(epsilon=1e-6)(inputs)
x = tf.keras.layers.MultiHeadAttention(
key_dim=self.config.head_size,
num_heads=self.config.num_heads,
dropout=1-self.config.keep_prob)(x, x)
x = tf.keras.layers.Dropout(1-self.config.keep_prob)(x)
res = x + inputs
# Feed Forward Part
x = tf.keras.layers.LayerNormalization(epsilon=1e-6)(res)
for _ in range(self.config.n_conv):
x = tf.keras.layers.Conv1D(filters=self.config.ff_dim,
padding='SAME',
strides=self.config.n_strides,
kernel_size=self.config.kernel_size)(x)
#if self.config.batch_norm: x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Dropout(1 - self.config.keep_prob)(x)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv1D(padding='SAME',
filters=inputs.shape[-1],
kernel_size=1)(x)
return x, res
def _embeddings(self,net):
name = "embedding"
if self.config.emb_layer == 'Flatten':
net = tf.keras.layers.Flatten(name=name)(net)
elif self.config.emb_layer == 'GlobalAveragePooling1D':
net = tf.keras.layers.GlobalAveragePooling1D(name=name,data_format="channels_first")(net)
elif self.config.emb_layer == 'GlobalMaxPooling1D':
net = tf.keras.layers.GlobalMaxPooling1D(name=name,data_format="channels_first")(net)
return net
def build(self, inputs_shape):
input_layer = tf.keras.layers.Input(inputs_shape[1:])
x = input_layer
for _ in range(self.config.num_transformer_blocks):
lay, res = self.transformer_encoder(x)
x = tf.keras.layers.Add()([lay, res])
x = tf.keras.layers.Activation('relu')(x)
x = self._embeddings(x)
for dim in self.config.mlp_units:
x = tf.keras.layers.Dense(dim, activation="relu")(x)
if self.config.batch_norm:
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Dropout(self.config.mlp_dropout)(x)
x = tf.keras.layers.Activation('relu')(x)
outputs = tf.keras.layers.Dense(self.config.n_classes, activation=self.config.output_activation)(x)
self.net = tf.keras.models.Model(inputs=input_layer, outputs=outputs)
print_summary(self.net)
def call(self, inputs, training=None):
return self.net(inputs, training)
class CNNTaeSchema(BaseCustomTempnetsModel):
""" Implementation of the Pixel-Set encoder + Temporal Attention Encoder sequence classifier
Code is based on the Pytorch implementation of <NAME> et al. https://github.com/VSainteuf/pytorch-psetae
"""
class CNNTaeSchema(BaseCustomTempnetsModel._Schema):
mlp1 = fields.List(fields.Int, missing=[10, 32, 64], description='Number of units for each layer in mlp1.')
pooling = fields.Str(missing='mean_std', description='Methods used for pooling. Separated by underscore. (mean, std, max, min)')
mlp2 = fields.List(fields.Int, missing=[132, 128], description='Number of units for each layer in mlp2.')
num_heads = fields.Int(missing=4, description='Number of Attention heads.')
num_dff = fields.Int(missing=32, description='Number of feed-forward neurons in point-wise MLP.')
d_model = fields.Int(missing=None, description='Depth of model.')
mlp3 = fields.List(fields.Int, missing=[512, 128, 128], description='Number of units for each layer in mlp3.')
dropout = fields.Float(missing=0.2, description='Dropout rate for attention encoder.')
T = fields.Float(missing=1000, description='Number of features for attention.')
len_max_seq = fields.Int(missing=24, description='Number of features for attention.')
mlp4 = fields.List(fields.Int, missing=[128, 64, 32], description='Number of units for each layer in mlp4. ')
def init_model(self):
# TODO: missing features from original PseTae:
# * spatial encoder extra features (hand-made)
# * spatial encoder masking
self.spatial_encoder = pse_tae_layers.PixelSetEncoder(
mlp1=self.config.mlp1,
mlp2=self.config.mlp2,
pooling=self.config.pooling)
self.temporal_encoder = pse_tae_layers.TemporalAttentionEncoder(
n_head=self.config.num_heads,
d_k=self.config.num_dff,
d_model=self.config.d_model,
n_neurons=self.config.mlp3,
dropout=self.config.dropout,
T=self.config.T,
len_max_seq=self.config.len_max_seq)
mlp4_layers = [pse_tae_layers.LinearLayer(out_dim) for out_dim in self.config.mlp4]
# Final layer (logits)
mlp4_layers.append(pse_tae_layers.LinearLayer(1, batch_norm=False, activation='linear'))
self.mlp4 = tf.keras.Sequential(mlp4_layers)
def call(self, inputs, training=None, mask=None):
out = self.spatial_encoder(inputs, training=training, mask=mask)
out = self.temporal_encoder(out, training=training, mask=mask)
out = self.mlp4(out, training=training, mask=mask)
return out
```
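For orientation, a hypothetical configuration sketch for `TempCNNModel`; the schema-specific keys mirror `TempCNNModelSchema` above, while the base keys (`learning_rate`, `loss`, `metrics`) and the `prepare()` call are assumed from the example script further below.
```python
# Hypothetical configuration sketch for TempCNNModel (illustrative values only).
import numpy as np
from eoflow.models.tempnets_task.cnn_tempnets import TempCNNModel

model_cfg = {
    "learning_rate": 1e-3,     # base-schema keys assumed from the example script
    "keep_prob": 0.8,          # dropout keep probability
    "nb_conv_filters": 16,
    "nb_conv_stacks": 3,
    "kernel_size": 5,
    "emb_layer": "GlobalAveragePooling1D",
    "nb_fc_neurons": 256,
    "nb_fc_stacks": 1,
    "loss": "mse",
    "metrics": "r_square",
}

model = TempCNNModel(model_cfg)
model.prepare()                                            # compiles optimizer/loss/metrics
out = model(np.random.rand(4, 30, 10).astype("float32"))   # first call builds the inner net on (N, T, D) input
print(out.shape)                                           # (4, 1) with the default n_classes=1
```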
#### File: models/tempnets_task/mlp_tempnets.py
```python
import logging
import tensorflow as tf
from marshmallow import fields
from marshmallow.validate import OneOf
from tensorflow.keras.layers import TimeDistributed
from tensorflow.keras.layers import SimpleRNN, LSTM, GRU, Dense
from tensorflow.python.keras.utils.layer_utils import print_summary
from eoflow.models.layers import ResidualBlock
from eoflow.models.tempnets_task.tempnets_base import BaseTempnetsModel, BaseCustomTempnetsModel
from eoflow.models import transformer_encoder_layers
from eoflow.models import pse_tae_layers
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
class MLP(BaseCustomTempnetsModel):
"""
Implementation of the mlp network
"""
class MLPSchema(BaseCustomTempnetsModel._Schema):
keep_prob = fields.Float(required=True, description='Keep probability used in dropout layers.', example=0.5)
nb_fc_neurons = fields.Int(missing=256, description='Number of Fully Connect neurons.')
nb_fc_stacks = fields.Int(missing=1, description='Number of fully connected layers.')
activation = fields.Str(missing='relu', description='Activation function used in final filters.')
kernel_initializer = fields.Str(missing='he_normal', description='Method to initialise kernel parameters.')
kernel_regularizer = fields.Float(missing=1e-6, description='L2 regularization parameter.')
batch_norm = fields.Bool(missing=False, description='Whether to use batch normalisation.')
def _fcn_layer(self, net):
dropout_rate = 1 - self.config.keep_prob
layer_fcn = Dense(units=self.config.nb_fc_neurons,
kernel_initializer=self.config.kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l2(self.config.kernel_regularizer))(net)
if self.config.batch_norm:
layer_fcn = tf.keras.layers.BatchNormalization(axis=-1)(layer_fcn)
layer_fcn = tf.keras.layers.Dropout(dropout_rate)(layer_fcn)
layer_fcn = tf.keras.layers.Activation(self.config.activation)(layer_fcn)
return layer_fcn
def build(self, inputs_shape):
""" Build TCN architecture
The `inputs_shape` argument is a `(N, T*D)` tuple where `N` denotes the number of samples, `T` the number of
time-frames, and `D` the number of channels
"""
x = tf.keras.layers.Input(inputs_shape[1:])
net = x
for _ in range(self.config.nb_fc_stacks):
net = self._fcn_layer(net)
net = tf.keras.layers.Dense(units = 1,
activation = 'linear',
kernel_initializer=self.config.kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l2(self.config.kernel_regularizer))(net)
self.net = tf.keras.Model(inputs=x, outputs=net)
print_summary(self.net)
def call(self, inputs, training=None):
return self.net(inputs, training)
class TransformerEncoder(BaseTempnetsModel):
""" Implementation of a self-attention classifier
Code is based on the Pytorch implementation of <NAME> https://github.com/MarcCoru/crop-type-mapping
"""
class TransformerEncoderSchema(BaseTempnetsModel._Schema):
keep_prob = fields.Float(required=True, description='Keep probability used in dropout layers.', example=0.5)
num_heads = fields.Int(missing=8, description='Number of Attention heads.')
num_layers = fields.Int(missing=4, description='Number of encoder layers.')
num_dff = fields.Int(missing=512, description='Number of feed-forward neurons in point-wise MLP.')
d_model = fields.Int(missing=128, description='Depth of model.')
max_pos_enc = fields.Int(missing=24, description='Maximum length of positional encoding.')
layer_norm = fields.Bool(missing=True, description='Whether to apply layer normalization in the encoder.')
activation = fields.Str(missing='linear', description='Activation function used in final dense filters.')
def init_model(self):
self.encoder = transformer_encoder_layers.Encoder(
num_layers=self.config.num_layers,
d_model=self.config.d_model,
num_heads=self.config.num_heads,
dff=self.config.num_dff,
maximum_position_encoding=self.config.max_pos_enc,
layer_norm=self.config.layer_norm)
self.dense = tf.keras.layers.Dense(units=self.config.n_classes,
activation=self.config.activation)
def build(self, inputs_shape):
""" Build Transformer encoder architecture
The `inputs_shape` argument is a `(N, T, D)` tuple where `N` denotes the number of samples, `T` the number of
time-frames, and `D` the number of channels
"""
seq_len = inputs_shape[1]
self.net = tf.keras.Sequential([
self.encoder,
self.dense,
tf.keras.layers.MaxPool1D(pool_size=seq_len),
tf.keras.layers.Lambda(lambda x: tf.keras.backend.squeeze(x, axis=-2), name='squeeze'),
tf.keras.layers.Softmax()
])
# Build the model, so we can print the summary
self.net.build(inputs_shape)
print_summary(self.net)
def call(self, inputs, training=None, mask=None):
return self.net(inputs, training, mask)
########################################################################################################################################################
class PseTae(BaseTempnetsModel):
""" Implementation of the Pixel-Set encoder + Temporal Attention Encoder sequence classifier
Code is based on the Pytorch implementation of <NAME> et al. https://github.com/VSainteuf/pytorch-psetae
"""
class PseTaeSchema(BaseTempnetsModel._Schema):
mlp1 = fields.List(fields.Int, missing=[10, 32, 64], description='Number of units for each layer in mlp1.')
pooling = fields.Str(missing='mean_std', description='Methods used for pooling. Separated by underscore. (mean, std, max, min)')
mlp2 = fields.List(fields.Int, missing=[132, 128], description='Number of units for each layer in mlp2.')
num_heads = fields.Int(missing=4, description='Number of Attention heads.')
num_dff = fields.Int(missing=32, description='Number of feed-forward neurons in point-wise MLP.')
d_model = fields.Int(missing=None, description='Depth of model.')
mlp3 = fields.List(fields.Int, missing=[512, 128, 128], description='Number of units for each layer in mlp3.')
dropout = fields.Float(missing=0.2, description='Dropout rate for attention encoder.')
T = fields.Float(missing=1000, description='Number of features for attention.')
len_max_seq = fields.Int(missing=24, description='Number of features for attention.')
mlp4 = fields.List(fields.Int, missing=[128, 64, 32], description='Number of units for each layer in mlp4. ')
def init_model(self):
# TODO: missing features from original PseTae:
# * spatial encoder extra features (hand-made)
# * spatial encoder masking
self.spatial_encoder = pse_tae_layers.PixelSetEncoder(
mlp1=self.config.mlp1,
mlp2=self.config.mlp2,
pooling=self.config.pooling)
self.temporal_encoder = pse_tae_layers.TemporalAttentionEncoder(
n_head=self.config.num_heads,
d_k=self.config.num_dff,
d_model=self.config.d_model,
n_neurons=self.config.mlp3,
dropout=self.config.dropout,
T=self.config.T,
len_max_seq=self.config.len_max_seq)
mlp4_layers = [pse_tae_layers.LinearLayer(out_dim) for out_dim in self.config.mlp4]
# Final layer (logits)
mlp4_layers.append(pse_tae_layers.LinearLayer(1, batch_norm=False, activation='linear'))
self.mlp4 = tf.keras.Sequential(mlp4_layers)
def call(self, inputs, training=None, mask=None):
out = self.spatial_encoder(inputs, training=training, mask=mask)
out = self.temporal_encoder(out, training=training, mask=mask)
out = self.mlp4(out, training=training, mask=mask)
return out
```
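A similar hypothetical sketch for the `MLP` model, mainly to make the expected flattened `(N, T*D)` input shape explicit; values and base-schema keys are assumed as in the previous sketch.
```python
# Hypothetical sketch for the MLP model; base-schema keys assumed as above.
import numpy as np
from eoflow.models.tempnets_task.mlp_tempnets import MLP

mlp = MLP({
    "learning_rate": 1e-3,
    "keep_prob": 0.8,
    "nb_fc_neurons": 128,
    "nb_fc_stacks": 2,
    "loss": "mse",
    "metrics": "r_square",
})
mlp.prepare()

# The MLP expects flattened inputs: reshape (N, T, D) series to (N, T*D).
x_series = np.random.rand(4, 30, 10).astype("float32")
out = mlp(x_series.reshape(len(x_series), -1))
print(out.shape)   # (4, 1)
```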
#### File: eoflow/tasks/evaluate.py
```python
from marshmallow import Schema, fields
from ..base import BaseTask
from ..base.configuration import ObjectConfiguration
class EvaluateTask(BaseTask):
class EvaluateTaskConfig(Schema):
model_directory = fields.String(required=True, description='Directory of the model', example='/tmp/model/')
input_config = fields.Nested(nested=ObjectConfiguration, required=True,
description="Input type and configuration.")
def run(self):
dataset = self.parse_input(self.config.input_config)
self.model.prepare()
self.model.load_latest(self.config.model_directory)
values = self.model.evaluate(dataset)
names = self.model.metrics_names
metrics = {name:value for name,value in zip(names, values)}
# Display metrics
print("Evaluation results:")
for metric_name in metrics:
print("{}: {}".format(metric_name, metrics[metric_name]))
```
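For reference, a hypothetical configuration dictionary for `EvaluateTask`; the nested `classname`/`config` layout is an assumption based on eo-flow's `ObjectConfiguration` convention, and the paths and input class name are placeholders.
```python
# Hypothetical EvaluateTask configuration. The nested classname/config layout is an
# assumption based on eo-flow's ObjectConfiguration; paths and class names are placeholders.
evaluate_config = {
    "model_directory": "/tmp/model/",             # where the trained model was saved
    "input_config": {
        "classname": "eoflow.input.SomeDataset",  # hypothetical input reader class
        "config": {},                             # its reader-specific settings go here
    },
}
```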
#### File: eo-flow/examples/test_inception.py
```python
import eoflow.models.tempnets_task.cnn_tempnets as cnn_tempnets
import tensorflow as tf
# Model configuration CNNLSTM
import numpy as np
import os
import tensorflow_addons as tfa
import matplotlib.pyplot as plt
from eoflow.models.data_augmentation import feature_noise, timeshift, noisy_label
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
########################################################################################################################
########################################################################################################################
def reshape_array(x, T=30):
x = x.reshape(x.shape[0], x.shape[1] // T, T)
x = np.moveaxis(x, 2, 1)
return x
def npy_concatenate(path, prefix='training_x', T=30):
path_npy = os.path.join(path, prefix)
'''
x_bands = np.load(path_npy + '_bands.npy')
x_bands = reshape_array(x_bands, T)
x_vis = np.load(path_npy + '_vis.npy')
x_vis = reshape_array(x_vis, T)
np.concatenate([x_bands, x_vis], axis = -1)
'''
x = np.load(path_npy + '_S2.npy')
x = reshape_array(x, T)
return x
path = '/home/johann/Documents/Syngenta/cleaned_V2/2021'
x_train = npy_concatenate(path, 'training_x')
y_train = np.load(os.path.join(path, 'training_y.npy'))
x_val = npy_concatenate(path, 'val_x')
y_val = np.load(os.path.join(path, 'val_y.npy'))
x_test = npy_concatenate(path, 'test_x')
y_test = np.load(os.path.join(path, 'test_y.npy'))
# x_train = np.concatenate([x_train, x_val], axis = 0)
# y_train = np.concatenate([y_train, y_val], axis = 0)
model_cfg_cnn_stride = {
"learning_rate": 10e-3,
"keep_prob": 0.5, # should keep 0.8
"nb_conv_filters": 32, # wiorks great with 32
"nb_conv_stacks": 3, # Nb Conv layers
"kernel_size": 3,
"batch_norm": True,
'use_residual' : True,
"kernel_regularizer": 1e-6,
"loss": "mse", # huber was working great for 2020 and 2021
"metrics": "r_square",
}
#MODEL 64 128 with drop out 0.5 works great on 2019
model_cnn = cnn_tempnets.InceptionCNN(model_cfg_cnn_stride)
# Prepare the model (must be run before training)
model_cnn.prepare()
# Leftover interactive-debugging assignments (not used by the call below):
ts = 3
self = model_cnn
x = x_train
batch_size = 8
print(path)
model_cnn.train_and_evaluate(
train_dataset=(x_train, y_train),
val_dataset=(x_val, y_val),
test_dataset=(x_test, y_test),
num_epochs=500,
save_steps=5,
batch_size = 32,
function = np.min,
shift_step = 1, #3
sdev_label =0.05, #0.1
feat_noise = 0, #0.2
patience = 100,
forget = 1,
reduce_lr = True,
#finetuning = True,
#pretraining_path ='/home/johann/Documents/model_64_Causal_Stride_shift_0',
model_directory='/home/johann/Documents/model_16',
)
t = model_cnn.predict(x_test)
plt.scatter(t, y_test)
plt.xlim((0.2,1))
plt.show()
```
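A small sanity check of the axis order produced by `reshape_array` from the script above (toy values; assumes the function is in scope):
```python
# Toy check of the axis order produced by reshape_array (function defined in the script above).
import numpy as np

# 2 samples, 3 bands, T=4 time steps, flattened band-major:
# [b0_t0 .. b0_t3, b1_t0 .. b1_t3, b2_t0 .. b2_t3]
N, D, T = 2, 3, 4
flat = np.arange(N * D * T).reshape(N, D * T)

series = reshape_array(flat, T=T)
print(series.shape)      # (2, 4, 3) -> (samples, time, bands)
print(series[0, :, 0])   # [0 1 2 3] -> band-0 time series of the first sample
```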
#### File: eo-flow/tests/test_metrics.py
```python
import unittest
import numpy as np
from eoflow.models.metrics import MeanIoU, MCCMetric
from eoflow.models.metrics import GeometricMetrics
from scipy import ndimage
class TestMeanIoU(unittest.TestCase):
def test_not_initialized(self):
metric = MeanIoU()
y_true = np.zeros((1, 32, 32, 3))
y_pred = np.zeros((1, 32, 32, 3))
# Errors should be raised (because not initialized)
self.assertRaises(ValueError, metric.update_state, y_true, y_pred)
self.assertRaises(ValueError, metric.result)
self.assertRaises(ValueError, metric.reset_states)
self.assertRaises(ValueError, metric.get_config)
metric.init_from_config()
# Test that errors are not raised
metric.update_state(y_true, y_pred)
metric.result()
metric.reset_states()
metric.get_config()
def test_iou_results(self):
metric = MeanIoU()
metric.init_from_config({'n_classes': 3})
ones = np.ones((32, 32))
zeros = np.zeros((32, 32))
mixed = np.concatenate([ones[:16], zeros[:16]])
# Predict everything as class 1
y_pred = np.stack([zeros, ones], axis=-1)
y_true1 = np.stack([ones, zeros], axis=-1) # All class 0
y_true2 = np.stack([zeros, ones], axis=-1) # All class 1
y_true3 = np.stack([mixed, 1 - mixed], axis=-1) # Half class 1, half class 0
# Check each one separately
metric.update_state(y_true1, y_pred)
self.assertAlmostEqual(metric.result().numpy(), 0.0, 10)
metric.reset_states()
metric.update_state(y_true2, y_pred)
self.assertAlmostEqual(metric.result().numpy(), 1.0, 10)
metric.reset_states()
metric.update_state(y_true3, y_pred)
self.assertAlmostEqual(metric.result().numpy(), 0.25, 10) # Class 1 IoU: 0.5, Class 2 IoU: 0.0
# Check aggregation
metric.reset_states()
metric.update_state(y_true1, y_pred)
metric.update_state(y_true2, y_pred)
metric.update_state(y_true3, y_pred)
self.assertAlmostEqual(metric.result().numpy(), 0.25, 10) # Class 1 IoU: 0.5, Class 2 IoU: 0.0
class TestMCC(unittest.TestCase):
def test_not_initialized(self):
metric = MCCMetric()
y_true = np.zeros((1, 32, 32, 3))
y_pred = np.zeros((1, 32, 32, 3))
# Errors should be raised (because not initialized)
self.assertRaises(ValueError, metric.update_state, y_true, y_pred)
self.assertRaises(ValueError, metric.result)
self.assertRaises(ValueError, metric.reset_states)
self.assertRaises(ValueError, metric.get_config)
metric.init_from_config({'n_classes': 3})
# Test that errors are not raised
metric.update_state(y_true, y_pred)
metric.result()
metric.reset_states()
metric.get_config()
def test_wrong_n_classes(self):
metric = MCCMetric()
n_classes = 3
y_true = np.zeros((1, 32, 32, n_classes))
y_pred = np.zeros((1, 32, 32, n_classes))
metric.init_from_config({'n_classes': 1})
# Test that errors are raised
with self.assertRaises(Exception) as context:
metric.update_state(y_true, y_pred)
self.assertTrue((f'Input to reshape is a tensor with {np.prod(y_true.shape)} values, '
f'but the requested shape has {np.prod(y_true.shape[:-1])}') in str(context.exception))
def test_mcc_results_binary_symmetry(self):
metric = MCCMetric()
metric.init_from_config({'n_classes': 2})
y_pred = np.random.randint(0, 2, (32, 32, 1))
y_pred = np.concatenate((y_pred, 1-y_pred), axis=-1)
y_true = np.random.randint(0, 2, (32, 32, 1))
y_true = np.concatenate((y_true, 1 - y_true), axis=-1)
metric.update_state(y_true, y_pred)
results = metric.result().numpy()
self.assertAlmostEqual(results[0], results[1], 7)
def test_mcc_single_vs_binary(self):
metric_single = MCCMetric()
metric_single.init_from_config({'n_classes': 1})
y_pred = np.random.randint(0, 2, (32, 32, 1))
y_true = np.random.randint(0, 2, (32, 32, 1))
metric_single.update_state(y_true, y_pred)
result_single = metric_single.result().numpy()[0]
metric_binary = MCCMetric()
metric_binary.init_from_config({'n_classes': 2})
y_pred = np.concatenate((y_pred, 1-y_pred), axis=-1)
y_true = np.concatenate((y_true, 1 - y_true), axis=-1)
metric_binary.update_state(y_true, y_pred)
result_binary = metric_binary.result().numpy()[0]
self.assertAlmostEqual(result_single, result_binary, 7)
def test_mcc_results(self):
# test is from an example of MCC in sklearn.metrics matthews_corrcoef
y_true = np.array([1, 1, 1, 0])[..., np.newaxis]
y_pred = np.array([1, 0, 1, 1])[..., np.newaxis]
metric = MCCMetric()
metric.init_from_config({'n_classes': 1})
metric.update_state(y_true, y_pred)
self.assertAlmostEqual(metric.result().numpy()[0], -0.3333333, 7)
def test_mcc_threshold(self):
y_true = np.array([1, 1, 1, 0])[..., np.newaxis]
y_pred = np.array([0.9, 0.6, 0.61, 0.7])[..., np.newaxis]
metric = MCCMetric()
metric.init_from_config({'n_classes': 1, 'mcc_threshold': 0.6})
metric.update_state(y_true, y_pred)
self.assertAlmostEqual(metric.result().numpy()[0], -0.3333333, 7)
class TestGeometricMetric(unittest.TestCase):
def detect_edges(self, im, thr=0):
sx = ndimage.sobel(im, axis=0, mode='constant')
sy = ndimage.sobel(im, axis=1, mode='constant')
sob = np.hypot(sx, sy)
return sob > thr
def test_equal_geometries(self):
metric = GeometricMetrics(edge_func=self.detect_edges)
y_true = np.zeros((2, 32, 32))
y_pred = np.zeros((2, 32, 32))
y_true[0, 10:20, 10:20] = 1
y_pred[0, 10:20, 10:20] = 1
y_true[1, 0:10, 0:10] = 1
y_pred[1, 0:10, 0:10] = 1
y_true[1, 15:20, 15:20] = 1
y_pred[1, 15:20, 15:20] = 1
metric.update_state(y_true, y_pred)
overseg_err, underseg_err, border_err, fragmentation_err = metric.result()
self.assertEqual(overseg_err, 0., "For equal geometries oversegmentation should be 0!")
self.assertEqual(underseg_err, 0., "For equal geometries undersegmentation should be 0!")
self.assertEqual(fragmentation_err, 0., "For equal geometries fragmentation error should be 0!")
self.assertEqual(border_err, 0., "For equal geometries border error should be 0!")
def test_empty_geometries(self):
metric = GeometricMetrics(edge_func=self.detect_edges)
y_true = np.ones((1, 32, 32))
y_pred = np.zeros((1, 32, 32))
metric.update_state(y_true, y_pred)
overseg_err, underseg_err, border_err, fragmentation_err = metric.result()
self.assertEqual(overseg_err, 1., "For empty geometries oversegmentation should be 1!")
self.assertEqual(underseg_err, 1., "For empty geometries undersegmentation should be 1!")
self.assertEqual(fragmentation_err, 0., "For empty geometries fragmentation error should be 0!")
self.assertEqual(border_err, 1., "For empty geometries border error should be 1!")
def test_quarter(self):
metric = GeometricMetrics(edge_func=self.detect_edges)
# A quarter of measurement covers a quarter of reference
y_true = np.zeros((1, 200, 200))
y_pred = np.zeros((1, 200, 200))
y_true[0, :100, :100] = 1
y_pred[0, 50:150, 50:150] = 1
metric.update_state(y_true, y_pred)
overseg_err, underseg_err, border_err, fragmentation_err = metric.result()
self.assertEqual(overseg_err, 0.75)
self.assertEqual(underseg_err, 0.75)
self.assertEqual(fragmentation_err, 0.)
self.assertAlmostEqual(border_err, 0.9949494949494949)
def test_multiple(self):
metric = GeometricMetrics(edge_func=self.detect_edges)
# A quarter of measurement covers a quarter of reference
y_true = np.zeros((1, 200, 200))
y_pred = np.zeros((1, 200, 200))
y_true[0, 10:20, 20:120] = 1
y_true[0, 30:40, 20:120] = 1
y_true[0, 50:60, 20:120] = 1
y_pred[0, 15:33, 20:120] = 1
y_pred[0, 36:65, 20:120] = 1
metric.update_state(y_true, y_pred)
overseg_err, underseg_err, border_err, fragmentation_err = metric.result()
self.assertEqual(overseg_err, 0.3666666666666667)
self.assertEqual(underseg_err, 0.7464878671775222)
self.assertEqual(fragmentation_err, 0.000333667000333667)
self.assertAlmostEqual(border_err, 0.9413580246913581)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "j-desloires/eo-learn-examples",
"score": 2
}
|
#### File: eocrops/tasks/cmd_otb.py
```python
import numpy as np
import eolearn
from eolearn.core import FeatureType, EOTask
from pathlib import Path
from osgeo import gdal
import os
import shutil
import subprocess
import rasterio
from eolearn.io.local_io import ExportToTiffTask, ImportFromTiffTask
class MultitempSpeckleFiltering(EOTask):
def __init__(self, otb_path, feature_name = "BANDS-S1-IW", path_in = './', window = 3):
'''
Multitemporal filtering ONLY for Sentinel-1 data using OTB
Parameters:
otb_path (str) : Path where bin from Orfeo Toolbox package is installed
path_in (str) : Path to write the temporary files (removed at the end of the process)
window (int) : window to apply for Quegan filter for SAR data
'''
self.feature_name = feature_name
self.otb_path = otb_path
self.path_in = path_in
self.window = window
@staticmethod
def _refactor_dates(t):
# Add dates as suffix
year, d, m = str(t.year), str(t.day), str(t.month)
if len(d)==1 :
d = '0'+d
if len(m)==1 :
m = '0'+m
return '{0}{1}{2}'.format(year, m, d)
def _apply_OTB_cmd(self, pol, ram = 8):
path_in = os.path.join(self.path_in, 'S1_'+pol)
s1_images = os.listdir(path_in)
infiles = [os.path.join(path_in, k) for k in s1_images]
infiles.sort()
cmd = [os.path.join(self.otb_path, "otbcli_MultitempFilteringOutcore"), "-inl"]
cmd += infiles
cmd += ['-wr', str(self.window), '-oc', os.path.join(path_in, 'outcore.tif'), '-ram', str(ram)]
outdir = Path(path_in+'_filtered')
if not outdir.exists() :
os.mkdir(outdir)
subprocess.call(cmd, shell=False)
cmd = [os.path.join(self.otb_path, "otbcli_MultitempFilteringFilter"), "-inl"]
cmd += infiles
cmd += ['-enl', os.path.join(outdir, 'enl.tif'),
'-wr', str(self.window),
'-filtpath', outdir,
'-oc', os.path.join(path_in, 'outcore.tif'),
'-ram', str(ram)]
subprocess.call(cmd, shell=False)
outfiles = [os.path.join(outdir, k.split('.')[0]+'_filtered.tif') for k in s1_images]
outfiles.sort()
return infiles, outdir, outfiles
def _save_temporary_geotiff(self, i, date, eopatch):
## TODO : Find a way to write temporary file without writing on disk using ExportToTiffTask to make the process faster
export = ExportToTiffTask(feature=self.feature_name,
folder=os.path.join(self.path_in, 'S1_VV/S1_VV_' + date),
band_indices=[0],
date_indices=[i])
export.execute(eopatch)
export = ExportToTiffTask(feature=self.feature_name,
folder=os.path.join(self.path_in, 'S1_VH/S1_VH_' + date),
band_indices=[1],
date_indices=[i])
export.execute(eopatch)
def execute(self, eopatch, ram = 8):
if os.path.exists(os.path.join(self.path_in, 'S1_VV')):
shutil.rmtree(os.path.join(self.path_in, 'S1_VV'))
shutil.rmtree(os.path.join(self.path_in, 'S1_VH'))
os.mkdir(os.path.join(self.path_in, 'S1_VV'))
os.mkdir(os.path.join(self.path_in, 'S1_VH'))
times = list(eopatch.timestamp)
for i, t in enumerate(times):
date = self._refactor_dates(t)
self._save_temporary_geotiff(i,date,eopatch)
########################################################################################################
for pol in ['VV', 'VH']:
infiles, outdir, outfiles = self._apply_OTB_cmd(pol,ram)
##########################################################################
reference_file = infiles[0]
with rasterio.open(reference_file) as src0:
meta = src0.meta
meta['nodata'] = 0.0
meta['dtype'] = 'float32'
meta['count'] = len(times)
path_tif = outfiles[0].split('_2017')[0] + '.tif'
if 'outcore_filtered.tif' in os.listdir(outdir):
outfiles.remove(os.path.join(outdir, 'outcore_filtered.tif'))
outfiles.sort()
with rasterio.open(path_tif, 'w', **meta) as dst:
for i in range(1, len(times) + 1):
img = gdal.Open(outfiles[i - 1]).ReadAsArray()
dst.write_band(i, img)
import_tif = ImportFromTiffTask((FeatureType.DATA, pol + '_filtered'), path_tif)
eopatch = import_tif.execute(eopatch)
shutil.rmtree(os.path.join(self.path_in, 'S1_VV_filtered'))
shutil.rmtree(os.path.join(self.path_in, 'S1_VH_filtered'))
shutil.rmtree(os.path.join(self.path_in, 'S1_VV'))
shutil.rmtree(os.path.join(self.path_in, 'S1_VH'))
return eopatch
class PanSharpening(EOTask):
def __init__(self, fname = 'BANDS',
otb_path = '/home/s999379/git-repo/OTB-7.4.0-Linux64/bin',
path_temporary_files = './tempo'):
'''
Pan-sharpening of the multispectral bands with the panchromatic band using OTB (otbcli_Pansharpening)
Parameters:
fname (str) : Name of the feature stored in data that gathers the bands
otb_path (str) : Path where bin from Orfeo Toolbox package is installed
path_temporary_files (str) : path to save the temporary geotiff file to call OTB
'''
self.fname = fname
self.otb_path = otb_path
self.path_temporary_files = path_temporary_files
@staticmethod
def _refactor_dates(t):
# Add dates as suffix
year, d, m = str(t.year), str(t.day), str(t.month)
if len(d)==1 :
d = '0'+d
if len(m)==1 :
m = '0'+m
return '{0}{1}{2}'.format(year, m, d)
def _extracted_from__save_temporary_geotiff(self, date, i, eopatch, band_indices=None):
if band_indices is None :
band_indices = list(range(4))
export = ExportToTiffTask(feature=self.fname,
folder=os.path.join(self.path_temporary_files, 'PAN_' + date),
band_indices=[-1],
date_indices=[i])
export.execute(eopatch)
export = ExportToTiffTask(feature=self.fname,
folder=os.path.join(self.path_temporary_files, 'BANDS_' + date),
band_indices=band_indices,
date_indices=[i])
export.execute(eopatch)
def _apply_OTB_cmd(self, date):
cm = [os.path.join(self.otb_path, 'otbcli_Pansharpening'),
'-inp',os.path.join(self.path_temporary_files,'PAN_' + date +'.tif'),
'-inxs', os.path.join(self.path_temporary_files,'BANDS_' + date +'.tif'),
'-method', 'lmvm',
'-out', os.path.join(self.path_temporary_files, 'Pansharpened_' + date +'.tif'),
'float']
subprocess.call(cm, shell=False)
def _clean_temporary_files(self):
shutil.rmtree(self.path_temporary_files)
def execute(self, eopatch, band_indices=None):
times = list(eopatch.timestamp)
pan_bands = []
for i, t in enumerate(times):
date = self._refactor_dates(t)
self._extracted_from__save_temporary_geotiff(date, i, eopatch, band_indices)
self._apply_OTB_cmd(date)
img = gdal.Open(os.path.join(self.path_temporary_files, 'Pansharpened_' + date +'.tif')).ReadAsArray()
img = np.moveaxis(img, 0, -1)
pan_bands.append(img)
pan_bands = np.stack(pan_bands, axis =0)
self._clean_temporary_files()
eopatch.add_feature(eolearn.core.FeatureType.DATA, 'BANDS-PAN', pan_bands)
return eopatch
```
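A minimal usage sketch for the two EOTasks above, assuming an existing EOPatch with a `BANDS-S1-IW` feature, a local Orfeo Toolbox installation and existing writable working directories (all paths below are placeholders):
```python
# Hypothetical wiring of the OTB-based tasks defined above; every path is a placeholder.
from eolearn.core import EOPatch
from eocrops.tasks.cmd_otb import MultitempSpeckleFiltering, PanSharpening
eopatch = EOPatch.load('/path/to/eopatch')  # assumed to contain a 'BANDS-S1-IW' data feature
speckle_task = MultitempSpeckleFiltering(
    otb_path='/opt/OTB/bin',      # folder holding the otbcli_* executables
    feature_name='BANDS-S1-IW',
    path_in='/tmp/otb_speckle',   # temporary working directory (must exist)
    window=3,
)
eopatch = speckle_task.execute(eopatch, ram=8)   # adds 'VV_filtered' and 'VH_filtered'
pansharp_task = PanSharpening(
    fname='BANDS',
    otb_path='/opt/OTB/bin',
    path_temporary_files='/tmp/otb_pansharp',
)
eopatch = pansharp_task.execute(eopatch)         # adds a 'BANDS-PAN' feature
```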
|
{
"source": "JD-ETH/CompilerGym",
"score": 3
}
|
#### File: tests/pytest_plugins/random_util.py
```python
import random
from typing import List, Tuple
from compiler_gym.envs import CompilerEnv
from compiler_gym.service import observation_t
def apply_random_trajectory(
env: CompilerEnv, random_trajectory_length_range=(1, 50)
) -> List[Tuple[int, observation_t, float, bool]]:
"""Evaluate and return a random trajectory."""
num_actions = random.randint(*random_trajectory_length_range)
trajectory = []
for _ in range(num_actions):
action = env.action_space.sample()
observation, reward, done, _ = env.step(action)
if done:
break # Broken trajectory.
trajectory.append((action, observation, reward, done))
return trajectory
```
#### File: CompilerGym/tests/test_main.py
```python
import os
import sys
from typing import List, Optional
import gym
import pytest
import compiler_gym # noqa Register environments.
def main(
extra_pytest_args: Optional[List[str]] = None, verbose_service_logging: bool = True
):
"""The main entry point for the pytest runner.
An example file which uses this:
from compiler_gym.util.test_main import main
def test_foo():
assert 1 + 1 == 2
if __name__ == "__main__":
main()
In the above, the single test_foo test will be executed.
:param extra_pytest_args: A list of additional command line options to pass
to pytest.
:param verbose_service_logging: Whether to enable verbose CompilerGym
service logging, useful for debugging failing tests.
"""
# Use isolated data directories for running tests.
os.environ["COMPILER_GYM_SITE_DATA"] = "/tmp/compiler_gym/tests/site_data"
os.environ["COMPILER_GYM_CACHE"] = "/tmp/compiler_gym/tests/cache"
# Install some benchmarks for the LLVM environment as otherwise
# reset() will fail.
env = gym.make("llvm-v0")
try:
env.require_dataset("cBench-v0")
finally:
env.close()
# Use verbose backend debugging when running tests. If a test fails, the debugging
# output will be included in the captured stderr.
if verbose_service_logging:
os.environ["COMPILER_GYM_SERVICE_DEBUG"] = "1"
pytest_args = sys.argv + ["-vv"]
# Support for sharding. If a py_test target has the shard_count attribute
# set (in the range [1,50]), then the pytest-shard module is used to divide
# the tests among the shards. See https://pypi.org/project/pytest-shard/
sharded_test = os.environ.get("TEST_TOTAL_SHARDS")
if sharded_test:
num_shards = int(os.environ["TEST_TOTAL_SHARDS"])
shard_index = int(os.environ["TEST_SHARD_INDEX"])
pytest_args += [f"--shard-id={shard_index}", f"--num-shards={num_shards}"]
else:
pytest_args += ["-p", "no:pytest-shard"]
pytest_args += extra_pytest_args or []
sys.exit(pytest.main(pytest_args))
```
|
{
"source": "JD-ETH/rlmeta",
"score": 2
}
|
#### File: examples/tutorials/remote_example.py
```python
import asyncio
import torch
import torch.multiprocessing as mp
import rlmeta.core.remote as remote
import rlmeta.utils.remote_utils as remote_utils
from rlmeta.core.server import Server
class Adder(remote.Remotable):
@remote.remote_method()
def add(self, a, b):
print(f"[Adder.add] a = {a}")
print(f"[Adder.add] b = {b}")
return a + b
@remote.remote_method(batch_size=10)
def batch_add(self, a, b):
print(f"[Adder.batch_add] a = {a}")
print(f"[Adder.batch_add] b = {b}")
if not isinstance(a, tuple) and not isinstance(b, tuple):
return a + b
else:
return tuple(sum(x) for x in zip(a, b))
async def run_batch(adder_client, send_tensor=False):
futs = []
for i in range(20):
if send_tensor:
a = torch.tensor([i])
b = torch.tensor([i + 1])
else:
a = i
b = i + 1
fut = adder_client.async_batch_add(a, b)
futs.append(fut)
await asyncio.sleep(1.0)
for i, fut in enumerate(futs):
if send_tensor:
a = torch.tensor([i])
b = torch.tensor([i + 1])
else:
a = i
b = i + 1
c = await fut
print(f"{a} + {b} = {c}")
def main():
adder = Adder()
adder_server = Server(name="adder_server", addr="127.0.0.1:4411")
adder_server.add_service(adder)
adder_client = remote_utils.make_remote(adder, adder_server)
adder_server.start()
adder_client.connect()
a = 1
b = 2
c = adder_client.add(a, b)
print(f"{a} + {b} = {c}")
print("")
asyncio.run(run_batch(adder_client, send_tensor=False))
print("")
asyncio.run(run_batch(adder_client, send_tensor=True))
adder_server.terminate()
if __name__ == "__main__":
main()
```
|
{
"source": "jdetle/builderhub",
"score": 2
}
|
#### File: builderhub/builderhub/redirect.py
```python
from tornado import web, gen
class RedirectHandler(web.RequestHandler):
def get(self):
image = self.get_argument('image')
default_url = self.get_argument('default_url')
url = self.settings['hub_redirect_url_template'].format(
image=image,
default_url=default_url
)
self.redirect(url)
```
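A minimal sketch of mounting this handler in a Tornado application; the route and the `hub_redirect_url_template` format string are illustrative assumptions, not part of the original module:
```python
# Hypothetical wiring; the route and template string are placeholders.
from tornado import ioloop, web
from builderhub.redirect import RedirectHandler
app = web.Application(
    [(r"/redirect", RedirectHandler)],
    hub_redirect_url_template="https://hub.example.org/hub/tmplogin?image={image}&next={default_url}",
)
app.listen(8585)
# A request like /redirect?image=myimage:latest&default_url=/lab is redirected
# to the hub URL built from the template above.
ioloop.IOLoop.current().start()
```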
|
{
"source": "jdetrey/python-musicbrainzngs",
"score": 3
}
|
#### File: python-musicbrainzngs/examples/collection.py
```python
from __future__ import print_function
from __future__ import unicode_literals
import musicbrainzngs
import getpass
from optparse import OptionParser
import sys
try:
user_input = raw_input
except NameError:
user_input = input
musicbrainzngs.set_useragent(
"python-musicbrainzngs-example",
"0.1",
"https://github.com/alastair/python-musicbrainzngs/",
)
def show_collections():
"""Fetch and display the current user's collections.
"""
result = musicbrainzngs.get_collections()
print('All collections for this user:')
for collection in result['collection-list']:
# entity-type only available starting with musicbrainzngs 0.6
if "entity-type" in collection:
print('"{name}" by {editor} ({cat}, {count} {entity}s)\n\t{mbid}'
.format(
name=collection['name'], editor=collection['editor'],
cat=collection['type'], entity=collection['entity-type'],
count=collection[collection['entity-type']+'-count'],
mbid=collection['id']
))
else:
print('"{name}" by {editor}\n\t{mbid}'.format(
name=collection['name'], editor=collection['editor'],
mbid=collection['id']
))
def show_collection(collection_id, ctype):
"""Show a given collection.
"""
if ctype == "release":
result = musicbrainzngs.get_releases_in_collection(
collection_id, limit=0)
elif ctype == "artist":
result = musicbrainzngs.get_artists_in_collection(
collection_id, limit=0)
elif ctype == "event":
result = musicbrainzngs.get_events_in_collection(
collection_id, limit=0)
elif ctype == "place":
result = musicbrainzngs.get_places_in_collection(
collection_id, limit=0)
elif ctype == "recording":
result = musicbrainzngs.get_recordings_in_collection(
collection_id, limit=0)
elif ctype == "work":
result = musicbrainzngs.get_works_in_collection(
collection_id, limit=0)
collection = result['collection']
# entity-type only available starting with musicbrainzngs 0.6
if "entity-type" in collection:
print('{mbid}\n"{name}" by {editor} ({cat}, {entity})'.format(
name=collection['name'], editor=collection['editor'],
cat=collection['type'], entity=collection['entity-type'],
mbid=collection['id']
))
else:
print('{mbid}\n"{name}" by {editor}'.format(
name=collection['name'], editor=collection['editor'],
mbid=collection['id']
))
print('')
# release count is only available starting with musicbrainzngs 0.5
if "release-count" in collection:
print('{} releases'.format(collection['release-count']))
if "artist-count" in collection:
print('{} artists'.format(collection['artist-count']))
if "event-count" in collection:
print('{} events'.format(collection['event-count']))
if "place-count" in collection:
print('{} places'.format(collection['place-count']))
if "recording-count" in collection:
print('{} recordings'.format(collection['recording-count']))
if "work-count" in collection:
print('{} works'.format(collection['work-count']))
print('')
if "release-list" in collection:
show_releases(collection)
else:
pass # TODO
def show_releases(collection):
result = musicbrainzngs.get_releases_in_collection(collection_id, limit=25)
release_list = result['collection']['release-list']
print('Releases:')
releases_fetched = 0
while len(release_list) > 0:
print("")
releases_fetched += len(release_list)
for release in release_list:
print('{title} ({mbid})'.format(
title=release['title'], mbid=release['id']
))
if user_input("Would you like to display more releases? [y/N] ") != "y":
break
# fetch next batch of releases
result = musicbrainzngs.get_releases_in_collection(collection_id,
limit=25, offset=releases_fetched)
collection = result['collection']
release_list = collection['release-list']
print("")
print("Number of fetched releases: %d" % releases_fetched)
if __name__ == '__main__':
parser = OptionParser(usage="%prog [options] USERNAME [COLLECTION-ID]")
parser.add_option('-a', '--add', metavar="RELEASE-ID",
help="add a release to the collection")
parser.add_option('-r', '--remove', metavar="RELEASE-ID",
help="remove a release from the collection")
parser.add_option('-t', '--type', metavar="TYPE", default="release",
help="type of the collection (default: release)")
options, args = parser.parse_args()
if not args:
parser.error('no username specified')
username = args.pop(0)
# Input the password.
password = getpass.getpass('Password for {}: '.format(username))
# Call musicbrainzngs.auth() before making any API calls that
# require authentication.
musicbrainzngs.auth(username, password)
if args:
# Actions for a specific collction.
collection_id = args[0]
if options.add:
if options.type == "release":
musicbrainzngs.add_releases_to_collection(
collection_id, [options.add]
)
else:
sys.exit("only release collections can be modified ATM")
elif options.remove:
if options.type == "release":
musicbrainzngs.remove_releases_from_collection(
collection_id, [options.remove]
)
else:
sys.exit("only release collections can be modified ATM")
else:
# Print out the collection's contents.
print("")
show_collection(collection_id, options.type)
else:
# Show all collections.
print("")
show_collections()
```
|
{
"source": "JDeuce/puck-strats",
"score": 3
}
|
#### File: JDeuce/puck-strats/graph.py
```python
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import struct
import argparse
import os
from sys import exit
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
def make_graph(source_path, dest_path):
x_data = []
y_data = []
z_data = []
stat = os.stat(source_path)
filesize = stat.st_size
sample_count = filesize // 4
seconds = (sample_count / 3.0) / 100.0
with open(source_path, 'rb') as f:
data = struct.unpack('f' * sample_count, f.read())
for chunk in chunks(data, 3):
x_data.append(chunk[0])
y_data.append(chunk[1])
z_data.append(chunk[2])
t = np.arange(0.0, seconds, 0.01)
plt.xlim(0, seconds)
plt.plot(t, x_data, t, y_data, t, z_data)
plt.xlabel("time (s)")
plt.ylabel("stuff")
plt.title("Getting Ripped")
plt.grid(True)
plt.savefig(dest_path)
plt.clf()
plt.close('all')
def main():
args = parse_args()
filepath = args.filepath
make_graph(filepath, "test.png")
exit(0)
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='graph accelerometer data'
)
# Positional Arguments
parser.add_argument(
'filepath', type=str.lower,
help='filepath for binary data file'
)
return parser.parse_args()
if __name__ == '__main__':
main()
```
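To exercise `make_graph` without a device, a small binary file in the expected layout (interleaved x/y/z float32 samples at 100 Hz) can be written first; this is only a sketch and the file name is arbitrary:
```python
# Hypothetical generator of a test input file for graph.py; the file name is arbitrary.
import math
import struct
def write_test_file(path="test_data.bin", seconds=2.0, rate=100):
    samples = []
    for i in range(int(seconds * rate)):
        t = i / float(rate)
        # one interleaved (x, y, z) triple per time step
        samples.extend([math.sin(t), math.cos(t), 0.5 * math.sin(2 * t)])
    with open(path, "wb") as f:
        f.write(struct.pack("f" * len(samples), *samples))
write_test_file()
# The script can then be run as: python graph.py test_data.bin
```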
|
{
"source": "JDeuce/pyramid_jinja2_webpack",
"score": 2
}
|
#### File: src/pyramid_jinja2_webpack/__init__.py
```python
from types import FunctionType
from jinja2_webpack import DEFAULT_SETTINGS, Environment
from jinja2_webpack.filter import WebpackFilter
from pyramid.exceptions import ConfigurationError
from pyramid.settings import asbool
from zope.dottedname.resolve import resolve as resolve_dotted
from zope.interface import Interface
class IWebpackEnvironment(Interface):
pass
class Jinja2EnvironmentMissingException(Exception):
""" Thrown when configuration fails because it can't find
the jinja2 environment """
pass
def get_webpack_environment(context):
return context.registry.queryUtility(IWebpackEnvironment)
def parse_multiline(val):
result = {}
for e in val.splitlines():
if not e.strip():
continue
key, value = e.strip().split('=', 1)
result[key] = value
return result
def parse_renderByExt(val):
# resolves dotted names
result = parse_multiline(val)
resolved_result = {}
for k, v in result.items():
resolved_result[k] = resolve_dotted(v)
return resolved_result
def webpack_settings_from_settings(registry_settings, prefixes=None):
prefixes = prefixes or ['webpack.']
settings = {}
for k, v in registry_settings.items():
for prefix in prefixes:
slicelen = len(prefix)
if k.startswith(prefix):
setting_name = k[slicelen:]
try:
default = DEFAULT_SETTINGS[setting_name]
except KeyError:
toggle = prefix + 'errorOnInvalidSetting'
if toggle in registry_settings:
default = ''
else:
raise ConfigurationError(
'Invalid webpack setting %s' % setting_name)
# jinja2_webpack exposes a DEFAULT_SETTINGS dict which
# contains the default value for all the settings.
# Use the type of the default to process the setting from ini.
if type(default) == bool:
v = asbool(v)
elif type(default) == FunctionType:
v = resolve_dotted(v)
elif setting_name == 'renderByExt':
v = parse_renderByExt(v)
settings[setting_name] = v
return settings
def includeme(config):
registry_settings = config.registry.settings
settings = webpack_settings_from_settings(registry_settings)
registry_settings['webpack'] = settings
# Load the webpack environment
environment = Environment(**settings)
config.registry.registerUtility(environment, IWebpackEnvironment)
# Add callbacks
config.add_directive('get_webpack_environment', get_webpack_environment)
config.add_request_method(get_webpack_environment,
'webpack_environment', reify=True)
# Expose a webpack filter to jinja2 environment
webpack_filter = WebpackFilter(environment)
try:
jinja2_env = config.get_jinja2_environment()
except AttributeError:
raise Jinja2EnvironmentMissingException(
'Unable to find jinja2 environment. '
'Try config.commit() after including jinja2')
jinja2_env.filters['webpack'] = webpack_filter
__version__ = '0.1.1'
```
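A minimal sketch of wiring this into a Pyramid application, assuming `pyramid_jinja2` provides the templating; the manifest path is a placeholder:
```python
# Hypothetical application setup; the manifest path is a placeholder.
from pyramid.config import Configurator
settings = {
    "webpack.manifest": "webpack-manifest.json",
    "webpack.publicRoot": "/static/pack",
}
config = Configurator(settings=settings)
config.include("pyramid_jinja2")
config.commit()  # the jinja2 environment must exist before this package is included
config.include("pyramid_jinja2_webpack")
# Templates can now use the registered filter, e.g. {{ "main" | webpack }}.
```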
#### File: pyramid_jinja2_webpack/tests/test_settings.py
```python
import pytest
from jinja2_webpack import DEFAULT_SETTINGS
from pyramid.exceptions import ConfigurationError
from pyramid_jinja2_webpack import webpack_settings_from_settings
def _(settings, **kw):
return webpack_settings_from_settings(settings, **kw)
def test_manifest():
settings = _({'webpack.manifest': 'manifest.json'})
assert settings['manifest'] == 'manifest.json'
def test_useDefaultRenderByExt():
settings = _({'webpack.useDefaultRenderByExt': 'False'})
assert settings['useDefaultRenderByExt'] is False
def test_alternate_prefix():
settings = _({'test_prefix_manifest': 'manifest.json'},
prefixes=['test_prefix_'])
assert settings['manifest'] == 'manifest.json'
def test_multiple_prefixes():
settings = _({'p1.manifest': 'a', 'p2.publicRoot': 'b'},
prefixes=['p1.', 'p2.'])
assert settings['manifest']
assert settings['publicRoot']
def test_error_on_invalid_setting():
with pytest.raises(ConfigurationError):
_({'webpack.fubar': True})
def test_error_on_invalid_setting_toggle():
_({'webpack.fubar': True, 'webpack.errorOnInvalidSetting': False})
FOO = object()
BAR = object()
@pytest.fixture()
def no_imports(monkeypatch):
import pyramid_jinja2_webpack
mapping = {'foo.bar': FOO, 'bar.baz': BAR}
monkeypatch.setattr(pyramid_jinja2_webpack, 'resolve_dotted', mapping.get)
def test_setting_renderByext(no_imports):
settings = _({'webpack.renderByExt': '.js=foo.bar\n.css=bar.baz'})
assert settings['renderByExt']['.js'] == FOO
assert settings['renderByExt']['.css'] == BAR
def test_defaultRenderer(no_imports):
settings = _({'webpack.defaultRenderer': 'foo.bar'})
assert 'defaultRenderer' in DEFAULT_SETTINGS
assert settings['defaultRenderer'] == FOO
def test_badRendererRaisesException():
with pytest.raises(ImportError):
_({'webpack.defaultRenderer': 'foo.bar'})
```
|
{
"source": "JDeuce/python-jinja-webpack",
"score": 3
}
|
#### File: src/jinja2_webpack/__init__.py
```python
from os import path
from . import renderer
from .utils import load_json
DEFAULT_SETTINGS = {
'errorOnInvalidReference': True,
'publicRoot': '/static/pack',
'manifest': 'webpack-manifest.json',
'defaultRenderer': renderer.url,
'useDefaultRenderByExt': False, # this setting is mostly experimental
'renderByExt': {
'.js': renderer.script,
'.png': renderer.image,
'.jpeg': renderer.image,
'.jpg': renderer.image,
'.gif': renderer.image,
'.css': renderer.stylesheet,
}
}
class Asset(object):
""" Asset class.
Might someday expose file access here too so can render assets
inline. For now the url is the interesting attribute """
def __init__(self, filename, url):
self.filename = filename
self.url = url
class AssetNotFoundException(Exception):
""" Thrown when an asset cannot be found,
can be disabled by settings errorOnInvalidReference = False """
pass
class EnvironmentSettings(object):
def __init__(self, **kwargs):
self.__dict__.update(DEFAULT_SETTINGS)
if not kwargs.get('useDefaultRenderByExt', self.useDefaultRenderByExt):
self.renderByExt = {}
self.__dict__.update(kwargs)
class Environment(object):
""" The webpack environment class. Loads the manifest and allows
it to be accessed.
Settings:
* **manifest** - default ``webpack-manifest.json``.
Path to the WebpackManifest file.
* **errorOnInvalidReference** - default ``True``.
True if exception should be thrown when you try to resolve an invalid
asset reference.
* **publicRoot** - default ``/static/pack``.
The public path to prepend to all asset URLs.
"""
def __init__(self, **kwargs):
self.settings = EnvironmentSettings(**kwargs)
if self.settings.manifest:
self.load_manifest(self.settings.manifest)
else:
self._manifest = {}
def _resolve_asset(self, asset):
if not self.settings.publicRoot:
url = asset
elif self.settings.publicRoot.endswith('/'):
url = self.settings.publicRoot + asset
else:
url = '%s/%s' % (self.settings.publicRoot, asset)
return Asset(filename=asset, url=url)
def _resolve_manifest(self, manifest):
result = {}
# Resolve URLs in original manifest items
for name, asset in manifest.items():
result[name] = self._resolve_asset(asset)
# Strip out the extension as well, so if the webpack output
# file is "commons.js" we can use {{ "commons" | webpack }}
for name, asset in manifest.items():
basename, ext = path.splitext(name)
if basename not in result:
result[basename] = result[name]
return result
def load_manifest(self, filename):
manifest = load_json(filename)
self._manifest = self._resolve_manifest(manifest)
def identify_assetspec(self, spec):
""" Lookup an asset from the webpack manifest.
The asset spec is processed such that you might reference an entry
with or without the extension.
Will raise an AssetNotFoundException if the errorOnInvalidReference
setting is enabled and the asset cannot be found.
Note that all files must be have globally unique names,
due to a limitation in the way that WebpackManifestPlugin writes
the data.
"""
nodir = path.basename(spec)
noextension = path.splitext(nodir)[0]
result = self._manifest.get(spec) \
or self._manifest.get(nodir) \
or self._manifest.get(noextension)
if result:
return result
if self.settings.errorOnInvalidReference:
raise AssetNotFoundException(spec)
def register_renderer(self, extension, renderer):
""" Register a new renderer function to the environment """
self.settings.renderByExt[extension] = renderer
def _select_renderer(self, asset):
name, ext = path.splitext(asset.filename)
return self.settings.renderByExt.get(
ext, self.settings.defaultRenderer)
def render_asset(self, asset):
""" Render an asset to a URL or something more interesting,
by looking up the extension in the registered renderers """
renderer = self._select_renderer(asset)
return renderer(asset)
__version__ = '0.2.0'
```
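A short usage sketch, assuming a `webpack-manifest.json` written by WebpackManifestPlugin that maps `main.js` to a hashed file name such as `main.abc123.js`:
```python
# The manifest contents are assumed; only the API calls come from the module above.
from jinja2_webpack import Environment
env = Environment(
    manifest="webpack-manifest.json",
    publicRoot="/static/pack",
    useDefaultRenderByExt=True,  # enable the per-extension renderers listed in DEFAULT_SETTINGS
)
asset = env.identify_assetspec("main")  # the extension may be omitted
print(asset.url)                        # e.g. /static/pack/main.abc123.js
print(env.render_asset(asset))          # e.g. <script src="/static/pack/main.abc123.js"></script>
```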
#### File: src/jinja2_webpack/renderer.py
```python
def script(asset):
return '<script src="%s"></script>' % asset.url
def image(asset):
return '<img src="%s">' % asset.url
def stylesheet(asset):
return '<link rel="stylesheet" href="%s">' % asset.url
def url(asset):
return asset.url
```
#### File: jinja2_webpack/utils/normalize_path.py
```python
import os
def normalize_path(path):
""" normalizes a path to using /, even if you're on windows.
jinja2 assumes all templates are referenced with /
despite the underlying OS, see jinja2.loaders.split_template_path.
"""
return '/'.join(os.path.normpath(path).split(os.sep))
```
#### File: tests/test_scan/test_scan.py
```python
import logging
import os
import runpy
import tempfile
import pytest
from jinja2.exceptions import TemplateError
from jinja2_webpack.scan import build_output, main, scan
HERE = os.path.dirname(os.path.abspath(__file__))
logging.basicConfig(level=logging.DEBUG)
def _normpath(paths):
# On Windows, convert forward slashes to backward slashes,
# so we will reference the command line the proper way for the OS.
return [os.path.normpath(path) for path in paths]
def _scan(directories, templates):
directories = _normpath(directories)
templates = _normpath(templates)
return scan(
reference_root=HERE,
root=HERE,
directories=directories,
templates=templates)
def test_scan_single():
assets = _scan(
directories=['templates1/'],
templates=['templates1/*.jinja2'])
assert assets == ['test1']
def test_scan_multiple():
assets = _scan(
directories=['templates*'],
templates=['template*/*.jinja2'])
assert 'test1' in assets
assert 'test2' in assets
def test_scan_relative():
assets = _scan(
directories=['templates*'],
templates=['template*/*.jinja2'])
assert os.path.join('templates2', 'test.png') in assets
def test_scan_invalid_throws_exception():
with pytest.raises(TemplateError):
_scan(
directories=['invalid'],
templates=['invalid/*.jinja2'])
def test_variable_to_filter():
_scan(
directories=['variable'],
templates=['variable/*.jinja2'])
def test_build_output():
assets = _scan(
directories=['templates*'],
templates=['template*/*.jinja2'])
assert len(assets) >= 3
with tempfile.TemporaryFile('w+') as fp:
build_output(
reference_root=HERE,
assets=assets,
outfile=fp)
fp.seek(0)
content = fp.read()
assert 'require("./templates2/test.png")' in content
def test_main_file():
try:
with tempfile.NamedTemporaryFile(
delete=False, dir=HERE) as fp:
name = fp.name
main([
'--root', HERE,
'--directories', os.path.join(HERE, 'templates*'),
'--outfile', name,
os.path.join('template*', '*.jinja2')
])
with open(name) as fp:
data = fp.read()
finally:
os.unlink(name)
assert 'require("./templates2/test.png")' in data
def test_main_stdout(capsys):
curdir = os.getcwd()
try:
os.chdir(HERE)
main([
'--root', HERE,
'--directories', os.path.join(HERE, 'templates*'),
'--',
os.path.join('template*', '*.jinja2')
])
data, _ = capsys.readouterr()
finally:
os.chdir(curdir)
assert 'require("./templates2/test.png")' in data
def test_main_reference():
try:
with tempfile.NamedTemporaryFile(delete=False) as fp:
name = fp.name
main([
'--root', HERE,
'--reference-root', os.path.join(HERE, '..'),
'--directories', os.path.join(HERE, 'templates*'),
'--outfile', name,
os.path.join('template*', '*.jinja2')
])
with open(name) as fp:
data = fp.read()
finally:
os.unlink(name)
assert 'require("./test_scan/templates2/test.png")' in data
def test_ifmain():
runpy.run_module('jinja2_webpack.scan')
```
|
{
"source": "jdeuschel/DistrShiftsOnFacialData",
"score": 2
}
|
#### File: DistrShiftsOnFacialData/base/base_data_loader.py
```python
import numpy as np
import os
import sys
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils import data
from torchvision import transforms
import logging
from copy import copy
from parse_config import ConfigParser
class BaseDataLoader:
"""
Base class for all data loaders
"""
def __init__(
self,
dataset_train,
dataset_val=None,
dataset_test=None,
batch_size=None,
shuffle=True,
validation_split=0.0,
num_workers=0,
collate_fn=default_collate,
):
self.dataset_train = dataset_train
self.dataset_val = dataset_val
self.dataset_test = dataset_test
self.validation_split = validation_split
self.batch_idx = 0
self.n_samples = len(dataset_train)
self.config = ConfigParser()
self.mode = os.environ["MODE"]
self.drop_last = False
self.shuffle = self.config["data_loader"]["args"]["shuffle"]
if "drop_last" in self.config["data_loader"]["args"]:
self.drop_last = self.config["data_loader"]["args"]["drop_last"]
# Augmentations
self.apply_augmentation = self.training and self.mode == "TRAINING"
self.augmentation_applied = False
self.input_size = self.config["input_size"]
self.current_transform = self.dataset_train.transform # Transform before augmentation
# Set num_classes for model generation
if "num_classes" not in self.config["arch"]["args"]:
self.config["arch"]["args"]["num_classes"] = self.num_classes # From sub class
# Set num_channels for model generation
if "num_channels" not in self.config["arch"]["args"]:
self.config["arch"]["args"]["num_channels"] = self.num_channels # From sub class
if self.dataset_val == None and self.validation_split != 0.0:
self.dataset_train, self.dataset_val = self._split_sampler(self.validation_split)
self.dataset_val.dataset = copy(self.dataset_val.dataset)
# Training
self.init_kwargs = {
"dataset": self.dataset_train,
"batch_size": batch_size,
"shuffle": self.shuffle,
"collate_fn": collate_fn,
"num_workers": num_workers,
"drop_last": self.drop_last,
}
self.training_loader = DataLoader(**self.init_kwargs)
# Test
if self.dataset_test != None:
self.init_kwargs = {
"dataset": self.dataset_test,
"batch_size": batch_size,
"shuffle": False,
"collate_fn": collate_fn,
"num_workers": num_workers,
"drop_last": self.drop_last,
}
self.test_loader = DataLoader(**self.init_kwargs)
# Validation
if self.dataset_val != None:
self.init_kwargs_val = {
"dataset": self.dataset_val,
"batch_size": batch_size,
"shuffle": False,
"collate_fn": collate_fn,
"num_workers": num_workers,
"drop_last": self.drop_last,
}
self.validation_loader = DataLoader(**self.init_kwargs_val)
def get_training_loader(self):
return self.training_loader
def get_validation_loader(self):
try:
return self.validation_loader
except Exception as e:
print("Validation data is not defined:" + repr(e))
def get_test_loader(self):
try:
return self.test_loader
except Exception as e:
print("Test data is not defined:" + repr(e))
@staticmethod
def _get_config():
config = ConfigParser()
return config
@staticmethod
def _get_input_size():
config = ConfigParser()
print("Using custom input size.")
return config["input_size"]
@staticmethod
def _get_data_path():
cluster_used = True if "HOSTNAME" in os.environ else False
DATA_PATH = os.environ["DATA_PATH_NETAPP"] # todo: specify path to data here, replace by "/folder/name/data/"
return DATA_PATH, cluster_used
@staticmethod
def _get_imagenet_normalization():
normalze_imagenet = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
return normalze_imagenet
def _split_sampler(self, split: float):
if split == 0.0:
return self.dataset_train, None
if isinstance(split, int):
assert split > 0
assert split < self.n_samples, "validation set size is configured to be larger than entire dataset."
len_valid = split
else:
len_valid = int(self.n_samples * split)
train_set, val_set = data.random_split(self.dataset_train, [(self.n_samples - len_valid), len_valid])
return train_set, val_set
def split_validation(self):
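# NOTE: this method references attributes (val_set, cutmix, trsfm/trsfm_test) that are
# never set in BaseDataLoader.__init__ above; it assumes they are provided by subclasses
# (or by the template this class was derived from).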
if self.val_set is None:
return None
else:
if self.cutmix:
self.val_set.dataset = copy(self.val_set.dataset.dataset)
self.val_set.dataset = copy(self.val_set.dataset)
if hasattr(self, "trsfm_test"):
self.val_set.dataset.transform = self.trsfm_test
else:
self.val_set.dataset.transform = self.trsfm
val_kwargs = self.init_kwargs
val_kwargs["dataset"] = self.val_set
val_kwargs[
"shuffle"
] = False # NOTE: For validation data no shuffling is used because of bayesian model averaging!
return DataLoader(**val_kwargs)
```
#### File: datasets/affective_computing/fairface.py
```python
from torch.utils.data import Dataset
from torchvision import transforms
import os
from PIL import Image
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from training.data_shifts.rand_augment_fairface import RandAugment_Fairface
from base import BaseDataLoader
import os
import scipy
from random import randrange
from scipy.stats import norm
__all__ = ["FairFace"]
class FairFace(BaseDataLoader):
"""
FairFace Data Loader
=====================
Task: Age, Gender, Race
Output:
Image - [3, x, y] // Various input sizes
Target - [3] (age, gender, race)
"""
def __init__(
self,
data_dir,
batch_size,
shuffle=True,
validation_split=0.12,
num_workers=1,
training=True,
target="age",
**kwargs,
):
self.training = training
if target == "age":
self.num_classes = 9
elif target == "gender":
self.num_classes = 2
elif target == "race":
self.num_classes = 7
else:
raise ValueError(f"Target {target} is not defined")
self.input_size = self._get_input_size()
self.num_channels = 3
data_dir, cluster_used = self._get_data_path()
print("data_dir: ", data_dir)
rgb_mean = (0.4914, 0.4822, 0.4465)
rgb_std = (0.2023, 0.1994, 0.2010)
trsfm = transforms.Compose([
transforms.Resize((224, 224), interpolation=Image.BICUBIC),
transforms.CenterCrop(224),
transforms.RandomCrop(224, padding=4),
RandAugment_Fairface(n=2,m=8),
transforms.RandomHorizontalFlip(),
transforms.RandomGrayscale(p=0.2),
transforms.ToTensor(),
transforms.Normalize(mean=rgb_mean, std=rgb_std)
])
trsfm_test = transforms.Compose(
[
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
# specify folder here
PATH = "FairFace_v2" if cluster_used else "FairFace_v2" #todo: specify folder name depending on hardware. in this case they are called the same.
seed = randrange(1000)
experiment = kwargs['experiment']
# if training:
print(os.path.join(data_dir, PATH))
self.dataset = FairFaceDataset(
root_dir=os.path.join(data_dir, PATH), subset="train", transform=trsfm, experiment=experiment, num_classes=self.num_classes, validation_split=validation_split, seed=seed
)
self.dataset_val = FairFaceDataset(
root_dir=os.path.join(data_dir, PATH), subset="val", transform=trsfm_test, experiment=experiment, num_classes=self.num_classes, validation_split=validation_split, seed=seed
)
self.dataset_test = FairFaceDataset(
root_dir=os.path.join(data_dir, PATH), subset="test", transform=trsfm_test, experiment="", num_classes=self.num_classes, validation_split=validation_split, seed=seed
)
super().__init__(
dataset_train=self.dataset,
dataset_val=self.dataset_val, # None
dataset_test=self.dataset_test,
batch_size=batch_size,
shuffle=shuffle,
validation_split=validation_split,
num_workers=num_workers,
)
class FairFaceDataset(Dataset):
def __init__(
self, root_dir, num_classes, transform=None, subset="train", experiment="", validation_split=0.12, seed=522
):
"""
Parameters
----------
root_dir : str
Location of FairFace
num_classes: int
number of classes depending on target
Type: [9,2,7]
transform : [type], optional
[description], by default None
subset: str
Type: ["train", "val"]
experiment : str, optional
experiment name
default: baseline experiment
validation_split : float, optional
split between training and validation
seed: int, optional
default 522
"""
self.num_classes=num_classes
self.root_dir = root_dir
self.subset = subset
if self.subset == "train" or self.subset == "val":
self.data_list = pd.read_csv(os.path.join(self.root_dir, "fairface_label_train.csv"))
elif self.subset == "test":
self.data_list = pd.read_csv(os.path.join(self.root_dir, "fairface_label_val.csv"))
else:
raise Exception(f"Subset definition {self.subset} does not exist for FairFace")
mapping_age = {
"0-2": 0,
"3-9": 1,
"10-19": 2,
"20-29": 3,
"30-39": 4,
"40-49": 5,
"50-59": 6,
"60-69": 7,
"more than 70": 8,
}
mapping_gender = {"Female": 0, "Male": 1}
mapping_race = {
"Black": 0,
"East Asian": 1,
"Indian": 2,
"Latino_Hispanic": 3,
"Middle Eastern": 4,
"Southeast Asian": 5,
"White": 6,
}
self.data_list = self.data_list.replace({"race": mapping_race})
self.data_list = self.data_list.replace({"gender": mapping_gender})
self.data_list = self.data_list.replace({"age": mapping_age})
if self.subset == "train" or self.subset == "val":
train, val = train_test_split(self.data_list, test_size=validation_split, random_state=573)
if self.subset == "train":
self.data_list = train
elif self.subset == "val":
self.data_list = val
if experiment != "" and not self.subset == "val":
#get all experiments on the same size
train_with_val = True
if train_with_val:
self.sample_size_race = 61410
self.sample_size_age = 40758
self.sample_size_gender = 35703
self.sample_size_race_sp = 48006
else:
self.sample_size_race = 70217
self.sample_size_age = 55592
self.sample_size_gender = 40758
self.sample_size_race_sp = 54482
# a baseline experiment without any modifications
if experiment=="verification_baseline":
self.data_list = self.data_list
# baseline experiments with same number of samples as corresponding shifts, to exclude dependency
elif experiment=="baseline_race":
self.data_list = self.data_list.sample(n=self.sample_size_race, replace=False, random_state=seed)
elif experiment=="baseline_age":
self.data_list = self.data_list.sample(n=self.sample_size_age, replace=False, random_state=seed)
elif experiment=="baseline_gender":
self.data_list = self.data_list.sample(n=self.sample_size_gender, replace=False, random_state=seed)
# spurious correlations with young and race. possible correlations: if young then race, iff young then race
elif experiment == 'spurious_correlations_baseline_young':
# here another baseline is necessary to exclude the sampling bias from the group
old = 6
young = 2
train_data_young = self.data_list[self.data_list["age"]<=young].sample(n=2676, replace=False)
self.data_list = self.data_list[self.data_list["age"]>young].append(train_data_young).sample(n=self.sample_size_race_sp, replace=False)
elif experiment == 'spurious_correlations_baseline_old':
# here another baseline is necessary to exclude the sampling bias from the group
old = 6
young = 2
train_data_old = self.data_list[self.data_list["age"]>=old].sample(n=1243, replace=False)
self.data_list = self.data_list[self.data_list["age"]<old].append(train_data_old).sample(n=self.sample_size_race_sp, replace=False)
# gender shifts: 1: no women, 2: no men, 3: less women, 4: less men
elif experiment in ["split_gen_1", "split_gen_2", "split_gen_3", "split_gen_4"]:
hard_filter_gender1 = self.data_list[(self.data_list['gender']!=0)]
hard_filter_gender2 = self.data_list[(self.data_list['gender']!=1)]
part_soft_gender1, _ = train_test_split(hard_filter_gender1, test_size=round(len(hard_filter_gender2)/2), random_state=seed)
part_soft_gender2, _ = train_test_split(hard_filter_gender2, test_size=round(len(hard_filter_gender1)/2), random_state=seed)
soft_filter_gender1 = hard_filter_gender1.append(part_soft_gender2)
soft_filter_gender2 = hard_filter_gender2.append(part_soft_gender1)
if experiment=="split_gen_1":
try:
self.data_list = hard_filter_gender1.sample(n=self.sample_size_gender, replace=False, random_state=seed)
except:
raise Exception("SAMPLE SIZE TOO LARGE")
elif experiment=="split_gen_2":
try:
self.data_list = hard_filter_gender2.sample(n=self.sample_size_gender, replace=False, random_state=seed)
except:
raise Exception("SAMPLE SIZE TOO LARGE")
elif experiment=="split_gen_3":
self.data_list = soft_filter_gender1.sample(n=self.sample_size_gender, replace=False, random_state=seed)
elif experiment=="split_gen_4":
self.data_list = soft_filter_gender2.sample(n=self.sample_size_gender, replace=False, random_state=seed)
# sampling bias with one underrepresented (partly erased until erased) race group, p={1.0, 0.75, 0.5, 0.25, 0.0}
elif experiment.startswith("data_shift"):
# please write something like experiment="data_shift_Black_0.25"
if "Black" in experiment:
race = 0
elif "East_Asian" in experiment:
race = 1
elif "Indian" in experiment:
race = 2
elif "Latino_Hispanic" in experiment:
race = 3
elif "Middle_Eastern" in experiment:
race = 4
elif "Southeast_Asian" in experiment:
race = 5
elif "White" in experiment:
race = 6
if "0.0" in experiment:
frac = 0.0
elif "0.25" in experiment:
frac = 0.25
elif "0.5" in experiment:
frac = 0.5
elif "0.75" in experiment:
frac = 0.75
elif "1.0" in experiment:
frac = 1.0
try:
self.data_list = self.data_list_drop(data=self.data_list, race=race, frac=frac, seed=seed).sample(n=self.sample_size_race,
replace=False,
random_state=seed)
except:
print("sampling did not work for", race, frac)
raise Exception("SAMPLE SIZE TOO LARGE")
# left label bias: makes a specific race group younger. Uses Gauss distribution with sigma={0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0}
elif experiment.startswith("left_label_shift"):
# write something like: "left_label_shift_Black_1.5"
if "Black" in experiment:
race = [0]
elif "East_Asian" in experiment:
race = [1]
elif "Indian" in experiment:
race = [2]
elif "Latino_Hispanic" in experiment:
race = [3]
elif "Middle_Eastern" in experiment:
race = [4]
elif "Southeast_Asian" in experiment:
race = [5]
elif "White" in experiment:
race = [6]
std = float(experiment[-3:])
self.data_list = self.left_racial_label_bias(self.data_list, race_li=race, std=std, seed=seed).sample(n=self.sample_size_race,
replace=False,
random_state=seed)
elif experiment.startswith('spurious_correlations'):
old = 6
young = 2
self.data_list = self.data_list.reset_index(drop=True)
# 1a: first kind of spurious correlations: if old then race
if experiment.startswith('spurious_correlations_old_0'):
spur_old_0_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] >= old) & (self.data_list['race'] != 0))).flatten()).reset_index(
drop=True)
self.data_list = spur_old_0_all.sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_old_1'):
spur_old_1_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] >= old) & (self.data_list['race'] != 1))).flatten()).reset_index(
drop=True)
self.data_list = spur_old_1_all.sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_old_2'):
spur_old_2_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] >= old) & (self.data_list['race'] != 2))).flatten()).reset_index(
drop=True)
self.data_list = spur_old_2_all.sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_old_3'):
spur_old_3_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] >= old) & (self.data_list['race'] != 3))).flatten()).reset_index(
drop=True)
self.data_list = spur_old_3_all.sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_old_4'):
spur_old_4_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] >= old) & (self.data_list['race'] != 4))).flatten()).reset_index(
drop=True)
self.data_list = spur_old_4_all.sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_old_5'):
spur_old_5_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] >= old) & (self.data_list['race'] != 5))).flatten()).reset_index(
drop=True)
self.data_list = spur_old_5_all.sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_old_6'):
spur_old_6_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] >= old) & (self.data_list['race'] != 6))).flatten()).reset_index(
drop=True)
self.data_list = spur_old_6_all.sample(n=self.sample_size_race_sp, replace=False)
# 1b first kind of spurious correlations: if young then race
elif experiment.startswith('spurious_correlations_young_0'):
spur_young_0_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] <= young) & (self.data_list['race'] != 0))).flatten()).reset_index(
drop=True)
self.data_list = spur_young_0_all.sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_young_1'):
spur_young_1_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] <= young) & (self.data_list['race'] != 1))).flatten()).reset_index(
drop=True)
self.data_list = spur_young_1_all.sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_young_2'):
spur_young_2_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] <= young) & (self.data_list['race'] != 2))).flatten()).reset_index(
drop=True)
self.data_list = spur_young_2_all.sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_young_3'):
spur_young_3_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] <= young) & (self.data_list['race'] != 3))).flatten()).reset_index(
drop=True)
self.data_list = spur_young_3_all.sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_young_4'):
spur_young_4_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] <= young) & (self.data_list['race'] != 4))).flatten()).reset_index(
drop=True)
self.data_list = spur_young_4_all.sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_young_5'):
spur_young_5_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] <= young) & (self.data_list['race'] != 5))).flatten()).reset_index(
drop=True)
self.data_list = spur_young_5_all.sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_young_6'):
spur_young_6_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] <= young) & (self.data_list['race'] != 6))).flatten()).reset_index(
drop=True)
self.data_list = spur_young_6_all.sample(n=self.sample_size_race_sp, replace=False)
# 2a: second kind of spurious correlations: iff old then race
elif experiment.startswith('spurious_correlations_iff_old_0'):
spur_old_0_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] >= old) & (self.data_list['race'] != 0))).flatten()).reset_index(
drop=True)
self.data_list = spur_old_0_all.drop(np.array(
np.where((spur_old_0_all['age'] < old) & (spur_old_0_all['race'] == 0))).flatten()).reset_index(
drop=True).sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_iff_old_1'):
spur_old_1_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] >= old) & (self.data_list['race'] != 1))).flatten()).reset_index(
drop=True)
self.data_list = spur_old_1_all.drop(np.array(
np.where((spur_old_1_all['age'] < old) & (spur_old_1_all['race'] == 1))).flatten()).reset_index(
drop=True).sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_iff_old_2'):
spur_old_2_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] >= old) & (self.data_list['race'] != 2))).flatten()).reset_index(
drop=True)
self.data_list = spur_old_2_all.drop(np.array(
np.where((spur_old_2_all['age'] < old) & (spur_old_2_all['race'] == 2))).flatten()).reset_index(
drop=True).sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_iff_old_3'):
spur_old_3_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] >= old) & (self.data_list['race'] != 3))).flatten()).reset_index(
drop=True)
self.data_list = spur_old_3_all.drop(np.array(
np.where((spur_old_3_all['age'] < old) & (spur_old_3_all['race'] == 3))).flatten()).reset_index(
drop=True).sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_iff_old_4'):
spur_old_4_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] >= old) & (self.data_list['race'] != 4))).flatten()).reset_index(
drop=True)
self.data_list = spur_old_4_all.drop(np.array(
np.where((spur_old_4_all['age'] < old) & (spur_old_4_all['race'] == 4))).flatten()).reset_index(
drop=True).sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_iff_old_5'):
spur_old_5_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] >= old) & (self.data_list['race'] != 5))).flatten()).reset_index(
drop=True)
self.data_list = spur_old_5_all.drop(np.array(
np.where((spur_old_5_all['age'] < old) & (spur_old_5_all['race'] == 5))).flatten()).reset_index(
drop=True).sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_iff_old_6'):
spur_old_6_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] >= old) & (self.data_list['race'] != 6))).flatten()).reset_index(
drop=True)
self.data_list = spur_old_6_all.drop(np.array(
np.where((spur_old_6_all['age'] < old) & (spur_old_6_all['race'] == 6))).flatten()).reset_index(
drop=True).sample(n=self.sample_size_race_sp, replace=False)
# 2b: second kind of spurious correlations: iff young then race
elif experiment.startswith('spurious_correlations_iff_young_0'):
spur_young_0_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] <= young) & (self.data_list['race'] != 0))).flatten()).reset_index(
drop=True)
self.data_list = spur_young_0_all.drop(np.array(np.where(
(spur_young_0_all['age'] > young) & (spur_young_0_all['race'] == 0))).flatten()).reset_index(
drop=True).sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_iff_young_1'):
spur_young_1_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] <= young) & (self.data_list['race'] != 1))).flatten()).reset_index(
drop=True)
self.data_list = spur_young_1_all.drop(np.array(np.where(
(spur_young_1_all['age'] > young) & (spur_young_1_all['race'] == 1))).flatten()).reset_index(
drop=True).sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_iff_young_2'):
spur_young_2_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] <= young) & (self.data_list['race'] != 2))).flatten()).reset_index(
drop=True)
self.data_list = spur_young_2_all.drop(np.array(np.where(
(spur_young_2_all['age'] > young) & (spur_young_2_all['race'] == 2))).flatten()).reset_index(
drop=True).sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_iff_young_3'):
spur_young_3_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] <= young) & (self.data_list['race'] != 3))).flatten()).reset_index(
drop=True)
self.data_list = spur_young_3_all.drop(np.array(np.where(
(spur_young_3_all['age'] > young) & (spur_young_3_all['race'] == 3))).flatten()).reset_index(
drop=True).sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_iff_young_4'):
spur_young_4_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] <= young) & (self.data_list['race'] != 4))).flatten()).reset_index(
drop=True)
self.data_list = spur_young_4_all.drop(np.array(np.where(
(spur_young_4_all['age'] > young) & (spur_young_4_all['race'] == 4))).flatten()).reset_index(
drop=True).sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_iff_young_5'):
spur_young_5_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] <= young) & (self.data_list['race'] != 5))).flatten()).reset_index(
drop=True)
self.data_list = spur_young_5_all.drop(np.array(np.where(
(spur_young_5_all['age'] > young) & (spur_young_5_all['race'] == 5))).flatten()).reset_index(
drop=True).sample(n=self.sample_size_race_sp, replace=False)
elif experiment.startswith('spurious_correlations_iff_young_6'):
spur_young_6_all = self.data_list.drop(np.array(
np.where(
(self.data_list['age'] <= young) & (self.data_list['race'] != 6))).flatten()).reset_index(
drop=True)
self.data_list = spur_young_6_all.drop(np.array(np.where(
(spur_young_6_all['age'] > young) & (spur_young_6_all['race'] == 6))).flatten()).reset_index(
drop=True).sample(n=self.sample_size_race_sp, replace=False)
else:
print("NO SHIFT SELECTED")
raise Exception("NO SHIFT SELECTED")
        self.path_list = np.array(self.data_list.iloc[:, 0].tolist())
        self.age = np.array(self.data_list["age"].tolist())
        self.gender = np.array(self.data_list["gender"].tolist())
        self.race = np.array(self.data_list["race"].tolist())
self.transform = transform
def __len__(self):
return len(self.path_list)
def get_vector(self, targets, nb_classes):
return np.ones(nb_classes)*targets
def data_list_drop(self, data, race, frac, seed=123):
data_list = data.copy(deep=True)
frac = 1-frac
data_drop = data_list[data_list['race'] == race].sample(frac=frac, random_state=seed)
data_ret = data_list.drop(data_drop.index)
return data_ret
def left_racial_label_bias(self, data_label, race_li, seed=123, std=1.5):
def left_labels_normal(ages, std=1.5, seed=123):
np.random.seed(seed)
new_ages = np.zeros(ages.shape, dtype=np.int64)
x = np.unique(ages)
for num, mean in enumerate(ages):
normal = norm(loc=mean, scale=std)
prob = normal.pdf(x)
                prob[np.argwhere(x > mean)] = 0  # set probabilities greater than the mean to zero
prob = prob / prob.sum() # normalize the probabilities so their sum is 1
new_ages[num] = np.random.choice(x, p = prob)
return new_ages
data_my = data_label.copy(deep=True)
for race_i in race_li:
race = data_my[data_my.race.isin([race_i])].copy(deep=True)
race["age"] = left_labels_normal(ages=race["age"], seed=seed, std=std)
data_my.update(race)
data_my["age"] = data_my["age"].astype(np.int64)
data_my["gender"] = data_my["gender"].astype(np.int64)
data_my["race"] = data_my["race"].astype(np.int64)
return data_my
def __getitem__(self, idx):
img_file = self.path_list[idx]
age, gender, race = self.age[idx], self.gender[idx], self.race[idx]
image_path = os.path.join(self.root_dir, img_file)
img = Image.open(image_path)
if self.transform:
img = self.transform(img)
return img, age, self.get_vector(age, self.num_classes), gender, race
```
#### File: DistrShiftsOnFacialData/model/loss.py
```python
import torch.nn.functional as F
from torch import nn
import torch
def nll_loss(output, target):
return F.nll_loss(output, target)
def cross_entropy(output, target):
return F.cross_entropy(output, target)
def mixup(output, target, valid=False):
if valid:
return F.cross_entropy(output, target)
else:
targets1, targets2, lam = target
criterion = nn.CrossEntropyLoss(reduction="mean")
return lam * criterion(output, targets1) + (1 - lam) * criterion(output, targets2)
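# Illustrative sketch (an assumption, not part of the original file): how the
# (targets1, targets2, lam) tuple consumed by `mixup` above is typically built
# by mixup data augmentation on a batch of inputs and integer class labels.
def mixup_batch(inputs, labels, alpha=0.4):
    # Sample the mixing coefficient and a random permutation of the batch.
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    index = torch.randperm(inputs.size(0))
    mixed_inputs = lam * inputs + (1.0 - lam) * inputs[index]
    # The returned target tuple is exactly what `mixup(output, target)` expects.
    return mixed_inputs, (labels, labels[index], lam)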
```
#### File: model/metrics/metric_uncertainty.py
```python
import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score
import torch
import torch.nn.functional as F
from torchvision import transforms
import os
def get_entropy(prob, n_classes=None):
if n_classes is None:
max_entropy = 1 # no normalization made
else:
max_entropy = -np.log(1 / n_classes)
entropy = torch.sum(-prob * torch.log(prob), 1) * 1 / max_entropy
entropy[entropy != entropy] = 0 # nan to zero
return entropy
def softmax_uncertainty(outputs, labels, use_softmax=False):
"""
outputs : torch.Tensor, [n_samples, n_classes]
Logit or softmax outputs of your model. (n_classes)
torch.Tensor, [n_samples]
Ground Truth Labels between (0 - (n_classes-1))
"""
labels = labels.numpy()
if use_softmax:
softmaxes = F.softmax(outputs, 1)
else:
softmaxes = outputs
confidences, predictions = softmaxes.max(1)
confidences = confidences.numpy()
predictions = predictions.numpy()
accuracies = np.equal(predictions, labels).astype(float)
return confidences, accuracies
def ece(confidences, accuracies, n_bins=20):
"""
ECE
Arguments:
confidences {torch.Tensor} -- confidence for each prediction
        accuracies {torch.Tensor} -- vector of True/False indicating whether each prediction was correct
Keyword Arguments:
n_bins {int} -- How many bins should be used for the plot (default: {20})
"""
accuracies = accuracies.numpy().astype(float)
confidences = confidences.numpy()
# Calibration Curve Calculation
bins = np.linspace(0, 1, n_bins + 1)
bins[-1] = 1.0001
width = bins[1] - bins[0]
bin_indices = [
np.greater_equal(confidences, bin_lower) * np.less(confidences, bin_upper)
for bin_lower, bin_upper in zip(bins[:-1], bins[1:])
]
bin_corrects = np.array([np.mean(accuracies[bin_index]) for bin_index in bin_indices]) # Height of bars
bin_scores = np.array([np.mean(confidences[bin_index]) for bin_index in bin_indices]) # confidence range
# ECE Calculation
B = np.sum(np.array(bin_indices), 1)
n = np.sum(B)
weights = B / n
d_acc_conf = np.abs(bin_corrects - bin_scores)
d_acc_conf = np.nan_to_num(d_acc_conf)
ece = np.sum(d_acc_conf * weights)
return ece*100, np.nan_to_num(bin_corrects), np.nan_to_num(bins)
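# Illustrative sketch (an assumption, not part of the original file): wiring
# `softmax_uncertainty` and `ece` together. `logits` and `labels` are assumed
# to be torch tensors of shape [n_samples, n_classes] and [n_samples].
def expected_calibration_error(logits, labels, n_bins=20):
    confidences, accuracies = softmax_uncertainty(logits, labels, use_softmax=True)
    # `ece` above expects torch tensors, so wrap the numpy outputs again.
    ece_percent, _, _ = ece(torch.from_numpy(confidences),
                            torch.from_numpy(accuracies),
                            n_bins=n_bins)
    return ece_percent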
def calculate_auroc(preds_in, preds_out, use_softmax=False, n_classes=10, confidence_measure="max_softmax"):
"""
Calculate AUROC with confidences (max).
Parameters
----------
    preds_in : torch.Tensor, [n_samples, n_classes]
        Predictions on the in-distribution data (with or without softmax applied).
    preds_out : torch.Tensor, [n_samples, n_classes]
        Predictions on the out-of-distribution data (with or without softmax applied).
    use_softmax : bool, optional
        If True, apply softmax to the predictions first, by default False.
Returns
-------
float
AUROC for confidences (max) in range 0-1
"""
with torch.no_grad():
if use_softmax:
preds_in_soft = F.softmax(preds_in, 1)
preds_out_soft = F.softmax(preds_out, 1)
else:
preds_in_soft = preds_in
preds_out_soft = preds_out
if confidence_measure == "max_softmax":
confidences_in, prediction_in = preds_in_soft.max(1)
confidences_out, prediction_out = preds_out_soft.max(1)
else:
confidences_in = 1 - get_entropy(preds_in_soft, n_classes)
confidences_out = 1 - get_entropy(preds_out_soft, n_classes)
confidences_in = confidences_in.numpy()
confidences_out = confidences_out.numpy()
labels_in = np.ones(len(confidences_in))
labels_out = np.zeros(len(confidences_out))
confs = np.concatenate((confidences_in, confidences_out))
labels = np.concatenate((labels_in, labels_out))
auroc = roc_auc_score(y_true=labels, y_score=confs)
return auroc
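# Illustrative sketch (an assumption, not part of the original file): using the
# helper above to score OOD detection between in- and out-of-distribution logits.
def ood_auroc(logits_in, logits_out, n_classes):
    # The entropy-based confidence can be more informative than max-softmax
    # when predictions are diffuse; both variants are supported above.
    return calculate_auroc(logits_in, logits_out, use_softmax=True,
                           n_classes=n_classes, confidence_measure="entropy")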
```
#### File: training/data_shifts/train.py
```python
import os
import argparse
import collections
import torch
# Project Modules
import data_loader.data_loaders as module_data
import model.loss as module_loss
import model.model as module_arch
import model.optim as optim
from parse_config import ConfigParser
from trainer import TrainModel
def main(config):
print("Devices", torch.cuda.device_count())
print(config["lr_scheduler"]["args"]["milestones"])
# Data Loader
experiment = config['group']
print("starting experiment ",experiment)
data_loader_factory = config.init_obj("data_loader", module_data, experiment=experiment)
data_loader = data_loader_factory.get_training_loader()
valid_data_loader = data_loader_factory.get_validation_loader()
num_classes = data_loader_factory.num_classes
# Init Modules
criterion = getattr(module_loss, config["loss"])
# Saving tests
    cluster_used = "HOSTNAME" in os.environ
    if cluster_used:
        path = "/output/data-shifts/" + config["data_loader"]["args"]["target"]
    else:
path = "No output path set"
print(cluster_used)
print(path)
print(os.path.exists("/output/data-shifts/"))
#################################################################
# ENSEMBLE #
#################################################################
# Init Ensemble
num_ensemble = config["method"]["num_ensemble"]
models = []
for idx in range(0, num_ensemble):
model = config.init_obj("arch", module_arch)
models.append(model)
print("Devices", torch.cuda.device_count())
optimizers = []
lr_schedulers = []
for idx, model in enumerate(models):
        # Build the optimizer and learning rate scheduler. Delete every line containing lr_scheduler to disable the scheduler.
trainable_params = filter(lambda p: p.requires_grad, model.parameters())
optimizer = config.init_obj("optimizer", optim, trainable_params)
lr_scheduler = config.init_obj("lr_scheduler", torch.optim.lr_scheduler, optimizer)
optimizers.append(optimizer)
lr_schedulers.append(lr_scheduler)
# # # # # # # # # # # # # # # # # # # # # # #
# Training #
# # # # # # # # # # # # # # # # # # # # # # #
sequential_training = config['method']['sequential']
print("SEQUENTIAL: ", sequential_training)
new_models = []
for idx, _ in enumerate(models):
train_model = TrainModel(config, [models[idx]], [optimizers[idx]], criterion, num_classes, [lr_schedulers[idx]], model_idx=idx)
trainer = train_model.prepare_trainer()
trainer.fit(train_model, data_loader, valid_data_loader)
new_models.append(train_model.model[0])
# # # # # # # # # # # # # # # # # # # # # # #
# Testing #
# # # # # # # # # # # # # # # # # # # # # # #
print(sequential_training)
print(config["trainer"]["csv"])
print("Starting Testing")
path = "/output/data-shifts/" + config["data_loader"]["args"]["target"]
print(os.path.exists(path))
if sequential_training:
if config["trainer"]["csv"] == True:
path = "/output/data-shifts/" + config["data_loader"]["args"]["target"]
print(os.path.exists(path))
print(os.path.join(path, config.method + config.comment + config.temp + ".csv"))
test_model = TrainModel(config, new_models, optimizers, criterion, num_classes, lr_schedulers)
trainer_test = test_model.prepare_trainer()
test_data_loader = data_loader_factory.get_test_loader()
trainer_test.test( model=test_model, test_dataloaders=test_data_loader, ckpt_path=None)
else:
test_data_loader = data_loader_factory.get_test_loader()
trainer.test(test_dataloaders=test_data_loader, ckpt_path=None)
if __name__ == "__main__":
args = argparse.ArgumentParser(description="Experiments")
args.add_argument("-c", "--config", default="configs/config.yml", type=str, help="config file path (default: None)")
args.add_argument("-r", "--resume", default=None, type=str, help="path to latest checkpoint (default: None)")
args.add_argument("-d", "--device", default=None, type=str, help="indices of GPUs to enable (default: all)")
# custom cli options to modify configuration from default values given in json file.
CustomArgs = collections.namedtuple("CustomArgs", "flags type target")
options = [
CustomArgs(["-pretrained", "--pretrained"], type=bool, target="method;pretrained"),
CustomArgs(["-finetuning", "--finetuning"], type=str, target="method;finetuning"),
CustomArgs(["-target", "--target"], type=str, target="data_loader;args;target"),#
CustomArgs(["-temp_scale", "--temperature"], type=str, target="calibration;temp_scaling"),
CustomArgs(["-num_ensemble", "--num_ensemble"], type=int, target="method;num_ensemble"),
CustomArgs(["-n_bins", "--n_bins"], type=int, target="trainer;n_bins"),
CustomArgs(["-lr_milestones", "--lr_milestones"], type=list, target="lr_scheduler;args;milestones"),
CustomArgs(["-comment", "--comment"], type=str, target="comment"),
]
config = ConfigParser.from_args(args, options)
main(config)
```
#### File: data_shifts/utils/metrics.py
```python
import math
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
# Some keys used for the following dictionaries
COUNT = "count"
CONF = "conf"
ACC = "acc"
BIN_ACC = "bin_acc"
BIN_CONF = "bin_conf"
# Calibration error scores in the form of loss metrics
class ECELoss(nn.Module):
"""
Compute ECE (Expected Calibration Error)
"""
def __init__(self, n_bins=15):
super(ECELoss, self).__init__()
bin_boundaries = torch.linspace(0, 1, n_bins + 1)
self.bin_lowers = bin_boundaries[:-1]
self.bin_uppers = bin_boundaries[1:]
def forward(self, softmax_in, labels):
confidences, predictions = torch.max(softmax_in, 1)
accuracies = predictions.eq(labels)
ece = torch.zeros(1, device=softmax_in.device)
for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):
# Calculated |confidence - accuracy| in each bin
in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())
prop_in_bin = in_bin.float().mean()
if prop_in_bin.item() > 0:
accuracy_in_bin = accuracies[in_bin].float().mean()
avg_confidence_in_bin = confidences[in_bin].mean()
ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
return ece
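# Illustrative sketch (an assumption, not part of the original file): ECELoss
# expects softmax probabilities, so raw logits are passed through F.softmax first.
def compute_ece_from_logits(logits, labels, n_bins=15):
    ece_metric = ECELoss(n_bins=n_bins)
    return ece_metric(F.softmax(logits, dim=1), labels).item()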
class AdaptiveECELoss(nn.Module):
"""
Compute Adaptive ECE
"""
def __init__(self, n_bins=15):
super(AdaptiveECELoss, self).__init__()
self.nbins = n_bins
def histedges_equalN(self, x):
npt = len(x)
return np.interp(
np.linspace(0, npt, self.nbins + 1), np.arange(npt), np.sort(x)
)
def forward(self, softmax_in, labels):
confidences, predictions = torch.max(softmax_in, 1)
accuracies = predictions.eq(labels)
n, bin_boundaries = np.histogram(
confidences.cpu().detach(),
self.histedges_equalN(confidences.cpu().detach()),
)
# print(n,confidences,bin_boundaries)
self.bin_lowers = bin_boundaries[:-1]
self.bin_uppers = bin_boundaries[1:]
ece = torch.zeros(1, device=softmax_in.device)
for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):
# Calculated |confidence - accuracy| in each bin
in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())
prop_in_bin = in_bin.float().mean()
if prop_in_bin.item() > 0:
accuracy_in_bin = accuracies[in_bin].float().mean()
avg_confidence_in_bin = confidences[in_bin].mean()
ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
return ece
class ClasswiseECELoss(nn.Module):
"""
Compute Classwise ECE
"""
def __init__(self, n_bins=15):
super(ClasswiseECELoss, self).__init__()
bin_boundaries = torch.linspace(0, 1, n_bins + 1)
self.bin_lowers = bin_boundaries[:-1]
self.bin_uppers = bin_boundaries[1:]
def forward(self, softmax_in, labels):
num_classes = int((torch.max(labels) + 1).item())
per_class_sce = None
for i in range(num_classes):
class_confidences = softmax_in[:, i]
class_sce = torch.zeros(1, device=softmax_in.device)
labels_in_class = labels.eq(
i
) # one-hot vector of all positions where the label belongs to the class i
for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):
in_bin = class_confidences.gt(bin_lower.item()) * class_confidences.le(
bin_upper.item()
)
prop_in_bin = in_bin.float().mean()
if prop_in_bin.item() > 0:
accuracy_in_bin = labels_in_class[in_bin].float().mean()
avg_confidence_in_bin = class_confidences[in_bin].mean()
class_sce += (
torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
)
if i == 0:
per_class_sce = class_sce
else:
per_class_sce = torch.cat((per_class_sce, class_sce), dim=0)
sce = torch.mean(per_class_sce)
return sce
```
|
{
"source": "j-devel/django-slack-events-api",
"score": 2
}
|
#### File: django-slack-events-api/slack/adapter_slacker.py
```python
from django.conf import settings
SLACK_VERIFICATION_TOKEN = settings.SLACK_VERIFICATION_TOKEN
SLACK_BOT_TOKEN = settings.SLACK_BOT_TOKEN
import logging
logging.getLogger().setLevel(logging.INFO)
from pyee import EventEmitter
from slacker import Slacker
CLIENT = Slacker(SLACK_BOT_TOKEN)
class SlackEventAdapter(EventEmitter):
def __init__(self, verification_token):
EventEmitter.__init__(self)
self.verification_token = verification_token
slack_events_adapter = SlackEventAdapter(SLACK_VERIFICATION_TOKEN)
# Example responder to greetings
@slack_events_adapter.on("message")
def handle_message(event_data):
message = event_data["event"]
# If the incoming message contains "hi", then respond with a "Hello" message
if message.get("subtype") is None and "hi" in message.get('text'):
channel = message["channel"]
message = "Hello <@%s>! :tada:" % message["user"]
logging.info("chat.postMessage: channel: %s text: %s" % (channel, message))
CLIENT.chat.post_message(channel, message)
# Example reaction emoji echo
@slack_events_adapter.on("reaction_added")
def reaction_added(event_data):
event = event_data["event"]
emoji = event["reaction"]
channel = event["item"]["channel"]
text = ":%s:" % emoji
logging.info("chat.postMessage: channel: %s text: %s" % (channel, text))
CLIENT.chat.post_message(channel, text)
```
#### File: django-slack-events-api/slack/client_urllib2.py
```python
import urllib
import urllib2
import logging
logging.getLogger().setLevel(logging.INFO)
class Client():
def __init__(self, slack_bot_token):
self.slack_bot_token = slack_bot_token
def chat_post_message(self, channel, text):
# https://api.slack.com/methods/chat.postMessage
# Present these parameters as part of
# an application/x-www-form-urlencoded querystring or POST body.
# application/json is not currently accepted.
data = {
'token': self.slack_bot_token,
'channel': channel,
'text': text
}
req = urllib2.Request('https://slack.com/api/chat.postMessage')
req.add_header('Content-Type', 'application/x-www-form-urlencoded')
req.add_data(urllib.urlencode(data))
response = urllib2.urlopen(req)
logging.info("response: %s" % response.read())
```
|
{
"source": "jdevera/ogma",
"score": 2
}
|
#### File: ogma/ogma/codegen.py
```python
import os
from datetime import datetime, timezone
# Third party imports
from functools import lru_cache
import pystache
# Local imports
from . import utils
from . import version
# ---------------------------------------------------------------------------
def get_abs_path(relpath):
return os.path.abspath(os.path.join(os.path.dirname(__file__), relpath))
class NumericEnumTemplateData:
"""
Hold all necessary data to render all Enum related templates
"""
CONVERTER_SUFFIX = "TypeConverter"
def __init__(self, name):
self.name = name
self._num = 0
self._values = []
self._package = None
self._conv_package = None
self._datetime_cache = None
def compiler_version(self):
return version.VERSION
@lru_cache()
def datetime(self):
return datetime.now(timezone.utc).isoformat()
def code_file_name(self):
return f"{self.name}.java"
def converter_class_name(self):
return f"{self.name}{self.CONVERTER_SUFFIX}"
def converter_file_name(self):
return f"{self.name}{self.CONVERTER_SUFFIX}.java"
def enum_fqn(self):
return f"{self._package}.{self.name}"
def converter_fqn(self):
return f"{self._conv_package}.{self.name}{self.CONVERTER_SUFFIX}"
def _add_value(self, name):
"""
Add a value by its name and assign an increasing numeric value to it
"""
self._values.append(dict(valname=name, valnum=self._num))
self._num += 1
return self
def with_values(self, names):
for name in names:
self._add_value(name)
return self
def with_enum_package(self, package):
self._package = package
return self
def with_enum_converter_package(self, package):
self._conv_package = package
return self
def values(self):
"""
Return all values as dictionaries of name and numeric value. Mark the last one
with the 'last' key to help template rendering
"""
return self._values[:-1] + [dict(self._values[-1], last=True)]
def __repr__(self):
return f"{self.__class__.__name__}({self.name})"
class EnumCodeGenerator:
"""
Code generation logic for Enums and Enum converters
"""
ENUM_CODE_TEMPLATE = "java_enum"
ENUM_CONV_CODE_TEMPLATE = "java_enum_converter"
JOOQ_GEN_CONFIG_TEMPLATE = "jooq_generator_config"
TEMPLATE_DIR = get_abs_path("./templates")
def __init__(
self, enums, code_dir, config_dir, enum_package, converter_package, model_file
):
self.renderer = pystache.Renderer(search_dirs=self.TEMPLATE_DIR)
self.code_dir = os.path.abspath(code_dir)
self.config_dir = os.path.abspath(config_dir)
self.enum_package = enum_package
self.enum_converter_package = converter_package
self.database_model_file_name = model_file
self.enums = self._adjust_enums_from_model(enums)
self.enums_by_name = {enum.name: enum for enum in self.enums}
def _adjust_enums_from_model(self, enums):
"""
Decorate the bare enum definitions from the model with additional attributes
necessary for code generation
"""
return [
NumericEnumTemplateData(enum.name)
.with_values(enum._values)
.with_enum_package(self.enum_package)
.with_enum_converter_package(self.enum_converter_package)
for enum in enums.values()
]
def _prepare_package_dir(self, package):
elements = [self.code_dir] + package.split(".")
directory = os.path.join(*elements)
if not os.path.isdir(directory):
os.makedirs(directory)
return directory
def _new_java_file(self, package, file_name):
directory = self._prepare_package_dir(package)
if not file_name.endswith(".java"):
file_name += ".java"
path = os.path.join(directory, file_name)
return open(path, "w")
def _new_config_file(self, name):
if not os.path.isdir(self.config_dir):
os.makedirs(self.config_dir)
path = os.path.join(self.config_dir, name)
return open(path, "w")
def _render_enum_code(self, enum, package, file_name, template):
data = {
"package": package,
"database_model_file": self.database_model_file_name.replace("\\", "/"),
"file_name": file_name,
}
with self._new_java_file(package, file_name) as jout:
path = jout.name
jout.write(self.renderer.render_name(template, enum, data))
return path
def render_enum(self, enum):
return self._render_enum_code(
enum=enum,
package=self.enum_package,
file_name=enum.code_file_name(),
template=self.ENUM_CODE_TEMPLATE,
)
def render_enum_converter(self, enum):
return self._render_enum_code(
enum=enum,
package=self.enum_converter_package,
file_name=enum.converter_file_name(),
template=self.ENUM_CONV_CODE_TEMPLATE,
)
def render_jooq_gen_config(self, template_data):
return self.renderer.render_name(self.JOOQ_GEN_CONFIG_TEMPLATE, template_data)
def _jooq_forced_type_data(self, table, field, type_name):
return {"expression": f"{table}\\.{field}", "name": type_name}
def generate_enum_java_code(self):
utils.print_section_header("Java Enums And Converters")
for enum in self.enums:
utils.print_action(f"Generating files for enum: {enum.name}")
utils.print_generated_file(self.render_enum(enum))
utils.print_generated_file(self.render_enum_converter(enum))
utils.print_end_action()
def generate_jooq_config(self, type_mappings, dbsettings, schema_name, package):
field_data = []
for table, columns in type_mappings.items():
for column, type_name in columns.items():
type_fqn = type_name
# If the type is an Enum, fully qualify the name
enum = self.enums_by_name.get(type_name, None)
if enum is not None:
type_fqn = enum.enum_fqn()
field_data.append(self._jooq_forced_type_data(table, column, type_fqn))
template_data = {
"dbhost": dbsettings.host,
"dbname": dbsettings.name,
"dbuser": dbsettings.user,
"dbpassword": <PASSWORD>,
"dbport": dbsettings.port,
"schema_name": schema_name,
"codedir": self.code_dir,
"fields": field_data,
"enums": self.enums,
"package": package,
}
utils.print_section_header("jOOQ")
utils.print_action("Generating jOOQ generator configuration file")
config_file_name = "ogma_jooq_gen_config.{}.xml".format(schema_name.lower())
with self._new_config_file(config_file_name) as xout:
xout.write(self.render_jooq_gen_config(template_data))
utils.print_generated_file(xout.name)
utils.print_end_action()
return xout.name
```
#### File: ogma/commands/__init__.py
```python
from .. import modelutils
from .. import utils
from . import common
# Command implementations
from .generate import generate
from .enumtables import enum_tables
from .enumusage import enum_usage
# ---------------------------------------------------------------------------
__all__ = ["generate", "enum_usage", "enum_tables", "get_db_name"]
def get_db_name(args):
print(modelutils.get_new_database_name())
def drop_db(args):
utils.print_action(f"Dropping database: {args.database}")
error = None
try:
args.dbsettings.name = "mysql"
engine = common.get_db_engine(args.dbsettings)
engine.execute(f"DROP DATABASE {args.database}")
except Exception as ex:
error = str(ex)
finally:
utils.print_end_action(error=error)
def create_db(args):
utils.print_action(f"Creating database: {args.dbsettings.name}")
error = None
try:
args.dbmodel.metadata.save(args.dbsettings)
except Exception as ex:
error = str(ex)
finally:
utils.print_end_action(error=error)
```
#### File: ogma/modelutils/stored_procedures.py
```python
from . import ValueHolder
from textwrap import dedent, indent
class ProcParamDirection(ValueHolder):
"""
Hold the direction of a parameter in a stored procedure
"""
def __init__(self, value):
assert value in ("IN", "OUT", "INOUT")
super().__init__(value)
# The available parameter directions:
IN = ProcParamDirection("IN")
OUT = ProcParamDirection("OUT")
INOUT = ProcParamDirection("INOUT")
class ProcSqlBody(ValueHolder):
"""
Hold the SQL code that makes up the body of a stored procedure
"""
pass
class ProcComment(ValueHolder):
"""
    Hold the comment text of a stored procedure
"""
pass
class ProcParam(object):
"""
A parameter in a stored procedure
"""
def __init__(self, name, the_type, direction):
self._name = name
self._type = the_type
self._direction = None
self.direction = direction
@property
def direction(self):
return self._direction
@direction.setter
def direction(self, value):
"""
A custom setter to make sure the value is an instance of
ProcParamDirection
"""
assert isinstance(value, ProcParamDirection)
self._direction = value
@property
def sql(self):
"""
The representation of the parameter in SQL
"""
return "{0.direction.value} {0._name} {0._type}".format(self)
class StoredProcedure(object):
"""
Models a SQL store procedure
"""
def __init__(self, name, *args):
self.name = name
self.comment = None
self.params = []
self.sqlbody = None
self.process_arguments(args)
def process_arguments(self, args):
"""
        Arguments to the constructor can come in any order; their semantics
        are determined by their type, following the way a Table in
        sqlalchemy is created.
"""
for arg in args:
if isinstance(arg, ProcComment):
self.comment = arg()
elif isinstance(arg, ProcSqlBody):
self.sqlbody = arg()
elif isinstance(arg, ProcParam):
self.params.append(arg)
else:
raise ValueError(
"Unexpected argument type for StoredProcedure: {}".type(arg)
)
@property
def sql(self):
"""
MySQL statement of procedure creation based on the current procedure
definition
"""
indented = lambda x: indent(x, 4 * " ")
statement = [f"CREATE OR REPLACE PROCEDURE {self.name}("]
if self.params:
params_text = ",\n".join((param.sql for param in self.params))
statement += ["\n", indented(params_text), "\n"]
statement.append(")\nLANGUAGE SQL")
if self.comment:
statement.append(f"\nCOMMENT '{self.comment}'")
indented_sql = indented(dedent(self.sqlbody))
statement.append(f"\nBEGIN\n{indented_sql}\nEND\n")
return "".join(statement)
@property
def creation_statement(self):
return "\n".join(["DELIMITER //", self.sql + "\n//", "DELIMITER ;"])
def test():
"""
Simple test for definition and text output
"""
proc = StoredProcedure(
"topiccounter",
ProcParam("count", "BIGINT", OUT),
ProcParam("count2", "BIGINT", OUT),
ProcComment("Count the topics"),
ProcSqlBody(
"""
SELECT COUNT(*) INTO count FROM topic;
"""
),
)
print(proc.creation_statement)
if __name__ == "__main__":
test()
```
#### File: ogma/modelutils/value_holder.py
```python
class ValueHolder(object):
"""
Hold one arbitrary value per instance.
This exists so that a module level variable can be propagated down through
imports while keeping value changes.
Instances are callables that return the held value
"""
def __init__(self, value=None):
self.value = value
def __call__(self):
return self.value
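# Illustrative sketch (an assumption, not part of the original file) of the
# usage pattern described in the docstring above.
if __name__ == "__main__":
    DEBUG = ValueHolder(False)  # created in some shared module
    DEBUG.value = True          # mutated elsewhere after import
    assert DEBUG() is True      # importers observe the updated value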
```
|
{
"source": "jdevera/pythoncanarias_web",
"score": 2
}
|
#### File: pythoncanarias_web/about/views.py
```python
import logging
from django.conf import settings
from django.shortcuts import render
from organizations.models import Organization
logger = logging.getLogger(__name__)
def index(request):
pythoncanarias = Organization.objects.get(
name__istartswith=settings.ORGANIZATION_NAME)
return render(request, 'about/index.html',
{'pythoncanarias': pythoncanarias})
def history(request):
return render(request, 'about/history.html', {})
```
#### File: pythoncanarias_web/certificates/utils.py
```python
import os
import re
import subprocess
import sys
import logging
current_module = sys.modules[__name__]
base_dir = os.path.dirname(current_module.__file__)
FORMAT = '%(asctime)-15s %(levelname)s %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
def get_template_full_name(filename, base=base_dir):
return os.path.join(base, 'templates', filename)
def get_output_full_name(filename, base=base_dir):
output_dir = os.path.join(base, 'media')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
return os.path.join(output_dir, filename)
def inkscape_export(source_filename, target_filename, tron=False):
commands = [
"inkscape",
"--export-pdf={}".format(target_filename),
source_filename,
]
if tron:
logger.info(" ".join(commands))
subprocess.call(commands)
def create_certificate(template, output_name, **kwargs):
def extract_value(match):
name = match.group(1)[2:-2].strip()
return kwargs.get(name, 'Value {} not found'.format(name))
pat = re.compile(r'(\{\{.+\}\})')
full_input_name = get_template_full_name('{}.svg'.format(template))
full_output_name = get_output_full_name('{}.svg'.format(output_name))
with open(full_input_name, 'r') as fin:
with open(full_output_name, 'w') as fout:
template = fin.read()
output = pat.sub(extract_value, template)
fout.write(output)
pdf_filename = get_output_full_name('{}.pdf'.format(output_name))
inkscape_export(full_output_name, pdf_filename)
return pdf_filename
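# Illustrative usage sketch (an assumption, not part of the original file): the
# template name and placeholder values below are hypothetical. create_certificate
# substitutes every {{ name }} placeholder in templates/<template>.svg and then
# exports the result to PDF through Inkscape.
if __name__ == '__main__':
    pdf_path = create_certificate('attendance', 'certificate-jane-doe',
                                  name='Jane Doe', event='Example Conf')
    logger.info('Generated %s', pdf_path)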
```
#### File: events/migrations/0007_waitinglist_buy_code.py
```python
from django.db import migrations, models
import uuid
def create_uuid(apps, schema_editor):
WaitingList = apps.get_model('events', 'WaitingList')
for wl in WaitingList.objects.all():
wl.buy_code = uuid.uuid4()
wl.save()
class Migration(migrations.Migration):
dependencies = [
('events', '0006_refund'),
]
operations = [
migrations.AddField(
model_name='waitinglist',
name='buy_code',
field=models.UUIDField(default=None, blank=True, null=True),
),
migrations.RunPython(create_uuid),
migrations.AlterField(
model_name='waitinglist',
name='buy_code',
field=models.UUIDField(default=uuid.uuid4, unique=True),
),
]
```
#### File: pythoncanarias_web/events/test_tasks.py
```python
import datetime
import os
from unittest.mock import Mock
import pytest
from django.core.mail import EmailMessage
from . import tasks
@pytest.fixture
def test_ticket():
event = Mock(
get_long_start_date=Mock(return_value='Aquí va la fecha'),
)
event.name = 'Event name'
category = Mock()
category.name = 'Ticket type name'
article = Mock(
price=10.00,
stock=20,
)
article.category = category
article.event = event
ticket = Mock(
number=9,
customer_name="<NAME>",
customer_surname='<NAME>',
customer_email='<EMAIL>',
sold_at=datetime.datetime(2018, 10, 11, 22, 44, 00),
keycode='18b0b618-7b9e-4857-9f01-39999424ee3f',
)
ticket.article = article
return ticket
def test_get_qrcode_as_svg():
svg_code = tasks.get_qrcode_as_svg('This is a test')
assert svg_code.startswith('<?xml')
assert svg_code.strip().endswith('</svg>')
def test_get_tickets_dir():
path = tasks.get_tickets_dir()
assert 'temporal' in path
assert 'tickets' in path
assert os.path.isdir(path)
def test_create_ticket_pdf(test_ticket):
path = tasks.create_ticket_pdf(test_ticket)
assert test_ticket.keycode in path
assert path.endswith('.pdf')
assert os.path.exists(path)
def test_create_ticket_message(test_ticket):
msg = tasks.create_ticket_message(test_ticket)
assert isinstance(msg, EmailMessage)
def test_send_ticket(test_ticket):
tasks.send_ticket(test_ticket)
if __name__ == '__main__':
pytest.main()
```
#### File: pythoncanarias_web/members/views.py
```python
import logging
from django.conf import settings
from django.shortcuts import render
from organizations.models import Organization
logger = logging.getLogger(__name__)
def join(request):
pythoncanarias = Organization.objects.get(
name__istartswith=settings.ORGANIZATION_NAME)
return render(request, 'members/join.html',
{'pythoncanarias': pythoncanarias})
```
#### File: pythoncanarias_web/organizations/admin.py
```python
from django.contrib import admin
from django.http import HttpResponse
from .models import (Membership, Organization, OrganizationCategory,
OrganizationRole)
class MembershipInline(admin.StackedInline):
model = Membership
extra = 0
autocomplete_fields = ['organization']
fk_name = 'organization'
class OrganizationCategoryInline(admin.StackedInline):
model = OrganizationCategory
extra = 0
@admin.register(Organization)
class OrganizationAdmin(admin.ModelAdmin):
inlines = [MembershipInline]
search_fields = ['name']
list_display = ('name', 'url', 'cif', 'email', 'address')
list_filter = ('memberships__event', 'city')
def memberships(self, obj):
return (', '.join('[{}] {}'.format(x.event, x.category)
for x in obj.memberships.all()))
@admin.register(OrganizationRole)
class OrganizationRoleAdmin(admin.ModelAdmin):
inlines = [OrganizationCategoryInline]
prepopulated_fields = {'code': ('name', ), }
list_display = ('name', 'code', 'display_name')
@admin.register(OrganizationCategory)
class OrganizationCategoryAdmin(admin.ModelAdmin):
prepopulated_fields = {'code': ('name', ), }
list_display = ('name', 'role', 'code', 'display_name')
@admin.register(Membership)
class MembershipAdmin(admin.ModelAdmin):
list_display = ('event', 'organization', 'category', 'amount', 'order')
list_filter = ('category__name', 'event')
search_fields = ['organization__name']
autocomplete_fields = ['organization', 'joint_organization']
def download_emails(self, request, queryset):
content = ','.join([m.get_email() for m in queryset])
filename = 'emails.txt'
response = HttpResponse(content, content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename={}'.format(
filename)
return response
download_emails.short_description = 'Download management emails'
actions = [download_emails, ]
```
#### File: pythoncanarias_web/schedule/admin.py
```python
from django import forms
from django.contrib import admin
from django.http import HttpResponse
from .models import Schedule, Slot, SlotCategory, SlotLevel, SlotTag, Track
from locations.models import Location
class ScheduleInline(admin.StackedInline):
model = Schedule
extra = 0
autocomplete_fields = ['speakers']
@admin.register(SlotCategory)
class SlotCategoryAdmin(admin.ModelAdmin):
prepopulated_fields = {'code': ('name', ), }
list_display = ('name', 'code')
@admin.register(SlotTag)
class SlotTagAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('name', ), }
list_display = ('name', 'slug')
@admin.register(SlotLevel)
class SlotLevelAdmin(admin.ModelAdmin):
list_display = ('name', 'order')
@admin.register(Slot)
class SlotAdmin(admin.ModelAdmin):
def has_slides(self, obj):
return obj.slides != ''
has_slides.boolean = True
inlines = [ScheduleInline]
list_display = ('name', 'has_slides', 'level', '_tags')
search_fields = ['name']
list_filter = ['level', 'tags']
def _tags(self, obj):
return ', '.join(tag.name for tag in obj.tags.all())
@admin.register(Track)
class TrackAdmin(admin.ModelAdmin):
list_display = ('name', 'order')
class LocationChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return f'{obj.name} - {obj.venue}'
@admin.register(Schedule)
class ScheduleAdmin(admin.ModelAdmin):
search_fields = ['event__name', 'location__name', 'slot__name',
'track__name', 'speakers__name', 'speakers__surname']
list_display = ('slot', 'event', 'location', 'start')
autocomplete_fields = ['speakers', 'slot']
list_filter = ['event']
def download_speakers_emails(self, request, queryset):
emails = set()
for schedule in queryset:
schedule_emails = schedule.speakers.all().values_list(
'email', flat=True)
if schedule_emails:
emails.add(*schedule_emails)
content = ','.join(emails)
filename = 'emails.txt'
response = HttpResponse(content, content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename={}'.format(
filename)
return response
download_speakers_emails.short_description = "Download speakers' emails"
actions = [download_speakers_emails, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == 'location':
return LocationChoiceField(
queryset=Location.objects.all())
return super().formfield_for_foreignkey(db_field, request, **kwargs)
```
#### File: pythoncanarias_web/schedule/models.py
```python
from django.db import models
from commons.constants import PRIORITY
class SlotCategory(models.Model):
# Workshop, Talk, Organization, Coffee, Meal, ...
name = models.CharField(max_length=256)
code = models.CharField(max_length=32, unique=True)
description = models.TextField(blank=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'slot categories'
class SlotTag(models.Model):
# Machine Learning, Science, DevOps, ...
name = models.CharField(max_length=256)
slug = models.SlugField(unique=True)
description = models.TextField(blank=True)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
class SlotLevel(models.Model):
# Basic, Intermediate, Advanced, ...
name = models.CharField(max_length=256)
order = models.PositiveIntegerField(
choices=PRIORITY.CHOICES,
default=PRIORITY.MEDIUM,
)
description = models.TextField(blank=True)
def __str__(self):
return self.name
class Slot(models.Model):
name = models.CharField(max_length=256)
description = models.TextField(blank=True)
repo = models.URLField(blank=True)
slides = models.URLField(blank=True)
category = models.ForeignKey(
SlotCategory,
on_delete=models.PROTECT,
related_name='slots',
)
level = models.ForeignKey(
SlotLevel,
on_delete=models.PROTECT,
related_name='slots',
blank=True,
null=True,
)
tags = models.ManyToManyField(SlotTag, related_name='slots', blank=True)
def __str__(self):
return self.name
def get_level(self):
return self.level.name if self.level else 'N/A'
def get_tags(self):
return [t.slug for t in self.tags.all().order_by('slug')]
def is_talk(self):
return self.category_id in (1, 2)
class Track(models.Model):
name = models.CharField(max_length=256)
order = models.PositiveIntegerField(
choices=PRIORITY.CHOICES,
default=PRIORITY.MEDIUM,
)
description = models.TextField(blank=True)
def __str__(self):
return self.name
def schedule_in_range(self, start=None, end=None, event=None):
queryset = self.schedule.all().order_by('start')
if start:
queryset = queryset.filter(start__gte=start)
if end:
queryset = queryset.filter(end__lte=end)
if event:
queryset = queryset.filter(event=event)
return queryset
def get_talks(self, event=None):
qs = self.schedule.select_related('slot')
if event:
qs = qs.filter(event=event)
qs = qs.order_by('start')
return [{
'talk_id': t.slot.pk,
'name': t.slot.name,
'start': t.start.strftime('%H:%M'),
'end': t.end.strftime('%H:%M'),
'description': t.slot.description,
'tags': t.slot.get_tags(),
'language': t.language,
'speakers': t.get_speakers(),
} for t in qs]
class Schedule(models.Model):
SPANISH = 'ES'
ENGLISH = 'EN'
LANGUAGE_CHOICES = ((SPANISH, 'Español'), (ENGLISH, 'Inglés'))
event = models.ForeignKey(
'events.Event',
on_delete=models.PROTECT,
related_name='schedule',
)
location = models.ForeignKey(
'locations.Location',
on_delete=models.PROTECT,
related_name='schedule',
)
# if track is null the slot is plenary
track = models.ForeignKey(
Track,
on_delete=models.PROTECT,
related_name='schedule',
null=True,
blank=True,
)
speakers = models.ManyToManyField(
'speakers.Speaker',
related_name='schedule',
blank=True,
)
slot = models.ForeignKey(
Slot,
on_delete=models.PROTECT,
related_name='schedule',
)
start = models.DateTimeField()
end = models.DateTimeField()
language = models.CharField(
max_length=2,
choices=LANGUAGE_CHOICES,
default=SPANISH,
)
def __str__(self):
return "{} {}-{}".format(
self.start.date(),
self.start.time(),
self.end.time(),
)
def get_speakers(self):
qs = self.speakers.all().order_by('surname', 'name')
result = [{
'speaker_id': speaker.pk,
'name': speaker.name,
'surname': speaker.surname,
'bio': speaker.bio,
'photo': speaker.photo_url,
'social': speaker.socials(),
} for speaker in qs]
return result
@property
def size_for_display(self):
t = round((self.end - self.start) / self.event.default_slot_duration)
return t if t > 0 else 1
def track_name(self):
if self.track:
return self.track.name
else:
return 'No track'
```
#### File: pythoncanarias_web/speakers/models.py
```python
from urllib.parse import urljoin
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.db import models
class Social(models.Model):
class Meta:
verbose_name = 'social network'
verbose_name_plural = 'social networks'
name = models.CharField(max_length=256)
code = models.CharField(max_length=32, unique=True)
base_url = models.CharField(max_length=128)
def __str__(self):
return self.name
class Speaker(models.Model):
name = models.CharField(max_length=256)
surname = models.CharField(max_length=256)
slug = models.SlugField(unique=True)
bio = models.TextField()
email = models.EmailField(blank=True)
phone = models.CharField(max_length=32, blank=True)
photo = models.ImageField(
upload_to='speakers/speaker/',
blank=True,
)
def __str__(self):
return '{} {}'.format(self.name, self.surname)
def socials(self):
return {
c.social.code: c.href
for c in self.contacts.order_by('social__name')
}
def socials_for_display(self):
return [{'code': c.social.code, 'href': c.href}
for c in self.contacts.order_by('social__name')]
@property
def photo_url(self):
if self.photo:
return self.photo.url
else:
return static('speakers/img/noavatar.png')
def talks(self, event=None):
"""Returns a list with all the talks (schedule & slot) for a given speaker.
If we pass an event as a parameter, it returns only a list of the
speaker's talks given in this event.
Params:
        - event (class Event) [optional]: if provided, it filters the talks to
          those given at this event.
Return:
- A list of talks from this speaker (and event, if provided). Empty
list if no talks from this speaker.
"""
qs = self.schedule.select_related('slot')
if event:
qs = qs.filter(event=event)
qs = qs.order_by('slot__name')
return list(qs)
class Contact(models.Model):
social = models.ForeignKey(
Social,
on_delete=models.PROTECT,
related_name='contacts'
)
speaker = models.ForeignKey(
Speaker,
on_delete=models.PROTECT,
related_name='contacts'
)
identifier = models.CharField(max_length=128)
def __str__(self):
return self.href
@property
def href(self):
return urljoin(self.social.base_url, self.identifier)
```
#### File: tickets/services/ticket_maker.py
```python
import os
from django.utils import timezone
from reportlab.graphics import renderPDF
from reportlab.graphics.barcode import qr
from reportlab.graphics.shapes import Drawing
from reportlab.lib.pagesizes import A4
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import cm, mm
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import Paragraph, SimpleDocTemplate, Spacer, Table
class BaseReport:
FONTS = {
'light': 'Existence-Light.ttf',
'bold': 'Amaranth-Bold.ttf',
'title': 'AirAmerica-Regular.ttf',
'far': 'Font Awesome-5-Free-Regular-400.ttf',
'fab': 'Font-Awesome-5-Brands-Regular-400.ttf',
'fas': 'Font-Awesome-5-Free-Solid-900.ttf',
'fira': 'FiraCode-Light.ttf'
}
def __init__(self, ticket, width, height):
self.ticket = ticket
self.width = width
self.height = height
self.styles = getSampleStyleSheet()
self.configure_paths()
self.configure_fonts()
def configure_paths(self):
self.resources_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'resources')
self.fonts_path = os.path.join(self.resources_path, 'fonts')
self.images_path = os.path.join(self.resources_path, 'images')
def configure_fonts(self):
self.fonts = {}
for font_alias, font_filename in BaseReport.FONTS.items():
font_path = os.path.join(self.fonts_path, font_filename)
pdfmetrics.registerFont(TTFont(font_alias, font_path))
def coord(self, x, y, unit=mm):
x, y = x * unit, self.height - y * unit
return x, y
def insert_image(self, image_path, x, y, width, anchor='sw'):
self.canvas.drawImage(
image_path,
x,
y,
width=width,
preserveAspectRatio=True,
anchorAtXY=True,
mask='auto',
anchor=anchor)
def paragraph(self, text):
return Paragraph(text, self.styles['Normal'])
class Header(BaseReport):
def __init__(self, ticket, canvas, x, y, width=17 * cm, height=3 * cm):
BaseReport.__init__(self, ticket, width, height)
self.canvas = canvas
self.x = x
self.y = y
def draw(self):
self.canvas.saveState()
logo_path = os.path.join(self.images_path, 'logo-python-canarias.png')
self.insert_image(
logo_path,
self.x,
self.y,
45 * mm,
anchor='nw')
self.canvas.setLineWidth(0.2 * mm)
y = self.y - 18 * mm
self.canvas.line(self.x, y, self.x + self.width, y)
self.canvas.restoreState()
class Footer(BaseReport):
def __init__(self, ticket, canvas, x, y, width=17 * cm, height=3 * cm):
BaseReport.__init__(self, ticket, width, height)
self.canvas = canvas
self.x = x
self.y = y
def draw(self):
self.canvas.saveState()
self.canvas.setLineWidth(0.2 * mm)
self.canvas.line(self.x, self.y, self.x + self.width, self.y)
# Left side of footer
start_x = self.x
start_y = self.y - 5 * mm
# icon
self.canvas.setFont('fab', 10)
self.canvas.drawString(start_x, start_y, '\uf099')
# message
start_x += 5 * mm
self.canvas.setFont('bold', 10)
self.canvas.drawString(start_x, start_y,
f'Comparte el evento con:')
# hashtag
start_x += 38 * mm
self.canvas.setFont('fira', 9)
self.canvas.drawString(start_x, start_y,
self.ticket.event.qualified_hashtag)
# Right side of footer
start_x = self.x + self.width - 33 * mm
start_y = self.y - 5 * mm
# icon
self.canvas.setFont('fas', 10)
self.canvas.drawString(start_x, start_y, '\uf0c1')
# url
start_x += 5 * mm
self.canvas.setFont('bold', 10)
self.canvas.drawString(start_x, start_y, 'pythoncanarias.es')
self.canvas.restoreState()
class QRCode(BaseReport):
def __init__(self, ticket, canvas, x, y, width=5 * cm, height=5 * cm):
BaseReport.__init__(self, ticket, width, height)
self.canvas = canvas
self.x = x
self.y = y
def draw(self):
self.canvas.saveState()
# qrcode image
qr_code = qr.QrCodeWidget(str(self.ticket.keycode))
qr_code.barWidth = self.width
qr_code.barHeight = self.height
qr_code.qrVersion = 1
d = Drawing()
d.add(qr_code)
renderPDF.draw(d, self.canvas, self.x, self.y)
# qrcode text
self.canvas.setFont('fira', 8)
self.canvas.rotate(90)
self.canvas.drawString(52 * mm, -18 * cm, str(self.ticket.keycode))
self.canvas.restoreState()
class TicketMaker(BaseReport):
def __init__(self, pdf_file, ticket, width=A4[0], height=A4[1]):
super().__init__(ticket, width, height)
self.doc = SimpleDocTemplate(
pdf_file,
pagesize=A4,
rightMargin=3 * cm,
leftMargin=2 * cm,
topMargin=4 * cm,
bottomMargin=3 * cm)
self.elements = []
def create_title(self):
p = self.paragraph(f'''
<para leading=27><font size=25 name=title>{ self.ticket.event }
</font></para>''')
self.elements.append(p)
s = Spacer(1, 1 * cm)
self.elements.append(s)
def create_features(self):
start = timezone.localtime(self.ticket.event.start_datetime())
event_date = start.strftime('%d/%m/%Y')
if start.hour == 0:
event_hour = 'Aún sin definir'
else:
event_hour = start.strftime('%H:%Mh')
data = (
('\uf554', 'asistente', self.ticket.customer_full_name),
('\uf0e0', 'email', self.ticket.customer_email),
('\uf784', 'fecha del evento', event_date),
('\uf017', 'hora de comienzo', event_hour),
('\uf292', 'número de entrada', self.ticket.number),
('\uf810', 'tipo de entrada', self.ticket.article.category.name),
('\uf4c0', 'precio de la entrada',
f'{self.ticket.article.price}€'),
('\uf788', 'método de pago',
self.ticket.get_payment_method_display() or 'Free'),
('\uf07a', 'fecha de compra',
self.ticket.sold_at.strftime('%d/%m/%y @ %H:%Mh')),
('\uf3c5', 'ubicación',
self.paragraph(f'''
<font name=bold>{ self.ticket.event.venue.name }</font>''')),
('\uf14e', 'dirección',
self.paragraph(f'''
<font name=bold>{ self.ticket.event.venue.address }</font>
'''))
)
tblstyle = ([('FONT', (0, 0), (0, -1), 'fas'),
('FONT', (1, 0), (1, -1), 'light'),
('FONT', (2, 0), (2, -1), 'bold'),
('TEXTCOLOR', (0, 0), (0, -1), '#595959'),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('ALIGN', (0, 0), (0, -1), 'CENTER')])
tbl = Table(
data, colWidths=(6 * mm, 4 * cm, 10 * cm), rowHeights=(8 * mm))
tbl.setStyle(tblstyle)
self.elements.append(tbl)
def create_header(self):
header = Header(self.ticket, self.canvas, *self.coord(2, 1, cm))
header.draw()
def create_footer(self):
footer = Footer(self.ticket, self.canvas, *self.coord(2, 27, cm))
footer.draw()
def create_qr(self):
qrcode = QRCode(self.ticket, self.canvas, *self.coord(13, 25, cm))
qrcode.draw()
def draw_flowables(self):
self.create_title()
self.create_features()
def draw_fixed(self, canvas, doc):
self.canvas = canvas
self.create_header()
self.create_qr()
self.create_footer()
def save(self):
self.doc.build(self.elements, onFirstPage=self.draw_fixed)
def create(self):
self.draw_flowables()
self.save()
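# Illustrative usage sketch (an assumption, not part of the original file):
# `ticket` would be a tickets.models.Ticket instance in the real project.
#
#   maker = TicketMaker('/tmp/ticket.pdf', ticket)
#   maker.create()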
```
|
{
"source": "jdevera/splinter",
"score": 2
}
|
#### File: splinter/tests/test_webdriver_chrome.py
```python
import os
import unittest
import pytest
from .fake_webapp import EXAMPLE_APP
from .base import WebDriverTests, get_browser
from selenium.common.exceptions import WebDriverException
def chrome_installed():
try:
Browser("chrome")
except WebDriverException:
return False
return True
class ChromeBase(object):
@pytest.fixture(autouse=True, scope='class')
def teardown(self, request):
request.addfinalizer(self.browser.quit)
class ChromeBrowserTest(WebDriverTests, ChromeBase, unittest.TestCase):
@pytest.fixture(autouse=True, scope='class')
def setup_browser(self, request):
request.cls.browser = get_browser('chrome', fullscreen=False)
@pytest.fixture(autouse=True)
def visit_example_app(self, request):
self.browser.driver.set_window_size(1024, 768)
self.browser.visit(EXAMPLE_APP)
def test_attach_file(self):
"should provide a way to change file field value"
file_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "mockfile.txt"
)
self.browser.attach_file("file", file_path)
self.browser.find_by_name("upload").click()
html = self.browser.html
self.assertIn("text/plain", html)
with open(file_path) as f:
expected = str(f.read().encode("utf-8"))
self.assertIn(expected, html)
def test_should_support_with_statement(self):
with get_browser('chrome'):
pass
class ChromeBrowserFullscreenTest(WebDriverTests, ChromeBase, unittest.TestCase):
@pytest.fixture(autouse=True, scope='class')
def setup_browser(self, request):
request.cls.browser = get_browser('chrome', fullscreen=True)
@pytest.fixture(autouse=True)
def visit_example_app(self):
self.browser.visit(EXAMPLE_APP)
def test_should_support_with_statement(self):
with get_browser('chrome', fullscreen=True):
pass
```
|
{
"source": "JDevlieghere/apple-llvm-infrastructure-tools",
"score": 2
}
|
#### File: git_apple_llvm/am/core.py
```python
from git_apple_llvm.git_tools import git, git_output, get_dev_null, GitError
from typing import Dict, List, Optional
import logging
AM_PREFIX = 'refs/am/changes/'
AM_STATUS_PREFIX = 'refs/am-status/changes/'
log = logging.getLogger(__name__)
class CommitStates:
new = "NEW"
conflict = "CONFLICT"
pending = "PENDING"
started = "STARTED"
passed = "PASSED"
failed = "FAILED"
known_failed = "KNOWN_FAILED" # When Failed was already reported.
all = [new, conflict, pending, started, passed, failed, known_failed]
def has_merge_conflict(commit: str, target_branch: str, remote: str = 'origin') -> bool:
""" Returns true if the given commit hash has a merge conflict with the given target branch.
"""
try:
# Always remove the temporary worktree. It's possible that we got
# interrupted and left it around. This will raise an exception if the
# worktree doesn't exist, which can be safely ignored.
git('worktree', 'remove', '--force', '.git/temp-worktree',
stdout=get_dev_null(), stderr=get_dev_null())
except GitError:
pass
git('worktree', 'add', '.git/temp-worktree', f'{remote}/{target_branch}', '--detach',
stdout=get_dev_null(), stderr=get_dev_null())
try:
git('merge', '--no-commit', commit,
git_dir='.git/temp-worktree', stdout=get_dev_null(), stderr=get_dev_null())
return False
except GitError:
return True
finally:
git('worktree', 'remove', '--force', '.git/temp-worktree',
stdout=get_dev_null(), stderr=get_dev_null())
def compute_unmerged_commits(remote: str, target_branch: str,
upstream_branch: str, format: str = '%H') -> Optional[List[str]]:
""" Returns the list of commits that are not yet merged from upstream to the target branch. """
commit_log_output = git_output(
'log',
'--first-parent',
f'--pretty=format:{format}', '--no-patch',
f'{remote}/{target_branch}..{remote}/{upstream_branch}',
)
if not commit_log_output:
return None
return commit_log_output.split('\n')
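# Illustrative sketch (an assumption, not part of the original file): combining
# the two helpers above to list upstream commits that would conflict with the
# target branch if merged.
def find_conflicting_commits(remote: str, target_branch: str,
                             upstream_branch: str) -> List[str]:
    commits = compute_unmerged_commits(remote, target_branch, upstream_branch)
    if not commits:
        return []
    return [c for c in commits if has_merge_conflict(c, target_branch, remote)]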
def find_inflight_merges(remote: str = 'origin') -> Dict[str, List[str]]:
"""
This function fetches the refs created by the automerger to find
the inflight merges that are currently being processed.
"""
# Delete the previously fetched refs to avoid fetch failures
# where there were force pushes.
existing_refs = git_output('for-each-ref', AM_STATUS_PREFIX,
'--format=%(refname)').split('\n')
for ref in existing_refs:
if not ref:
continue
log.debug(f'Deleting local ref "{ref}" before fetching')
git('update-ref', '-d', ref)
git('fetch', remote,
f'{AM_PREFIX}*:{AM_STATUS_PREFIX}*') # FIXME: handle fetch failures.
refs = git_output('for-each-ref', AM_STATUS_PREFIX,
'--format=%(refname)').split('\n')
inflight_merges: Dict[str, List[str]] = {}
for ref in refs:
if not ref:
continue
assert ref.startswith(AM_STATUS_PREFIX)
merge_name = ref[len(AM_STATUS_PREFIX):]
underscore_idx = merge_name.find('_')
assert underscore_idx != -1
commit_hash = merge_name[:underscore_idx]
dest_branch = merge_name[underscore_idx + 1:]
if dest_branch in inflight_merges:
inflight_merges[dest_branch].append(commit_hash)
else:
inflight_merges[dest_branch] = [commit_hash]
for (m, k) in inflight_merges.items():
log.debug(f'in-flight {m}: {k}')
return inflight_merges
```
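For orientation, here is a minimal driver sketch for the two helpers above. The remote and branch names are invented, and it assumes the script runs inside a clone with an `origin` remote and with `git_apple_llvm` importable:
```python
# Hypothetical driver; remote/branch names are illustrative, not from the repo.
from git_apple_llvm.am.core import compute_unmerged_commits, has_merge_conflict

def report_pending(remote: str, target_branch: str, upstream_branch: str):
    commits = compute_unmerged_commits(remote, target_branch, upstream_branch)
    if not commits:
        print('Nothing left to merge.')
        return
    for commit in commits:
        state = 'CONFLICT' if has_merge_conflict(commit, target_branch, remote) else 'clean'
        print(f'{commit[:12]} -> {target_branch}: {state}')

if __name__ == '__main__':
    report_pending('origin', 'apple/main', 'llvm.org/main')
```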
#### File: git_apple_llvm/am/main.py
```python
import click
import logging
from typing import Optional, List
from git_apple_llvm.git_tools import git
from git_apple_llvm.am.core import CommitStates
from git_apple_llvm.am.am_graph import print_graph
from git_apple_llvm.am.am_status import print_status
from git_apple_llvm.am.oracle import set_state, get_state
log = logging.getLogger(__name__)
is_verbose = False
@click.group()
@click.option('-v', '--verbose', count=True)
def am(verbose):
global is_verbose
# Setup logging. Use verbose flag to determine console output, and log to a file in at debug level.
is_verbose = bool(verbose)
level = logging.WARNING - (verbose * 10)
if level < 1:
raise ValueError("Too verbose.")
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('am.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(level)
# create formatter and add it to the handlers
fh_formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s [%(filename)s:%(lineno)d]')
fh.setFormatter(fh_formatter)
    ch_formatter = logging.Formatter('%(levelname)s: %(message)s [%(filename)s:%(lineno)d] ')
    ch.setFormatter(ch_formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
@am.command()
@click.option('--target', metavar='<branch>', type=str,
default=None,
help='The target branch for which the status should be reported. All branches are shown by default.')
@click.option('--all-commits', is_flag=True, default=False,
help='List all outstanding commits in the merge backlog.')
@click.option('--no-fetch', is_flag=True, default=False,
help='Do not fetch remote (WARNING: status will be stale!).')
@click.option('--ci-status', is_flag=True, default=False,
help='Query additional CI status from Redis.')
def status(target: Optional[str], all_commits: bool, no_fetch: bool, ci_status: bool):
remote = 'origin'
if not no_fetch:
click.echo(f'❕ Fetching "{remote}" to provide the latest status...')
git('fetch', remote, stderr=None)
click.echo('✅ Fetch succeeded!\n')
print_status(remote=remote, target_branch=target, list_commits=all_commits, query_ci_status=ci_status)
@am.command()
@click.option('--format', metavar='<format>', type=str,
default=None,
help='The file format for the generated graph.')
@click.option('--remote', metavar='<remote>',
multiple=True,
help='The remote(s) to graph.')
@click.option('--no-fetch', is_flag=True, default=False,
help='Do not fetch remote (WARNING: status will be stale!).')
@click.option('--ci-status', is_flag=True, default=False,
              help='Query additional CI status from Redis.')
def graph(format: str, remote: List[str], no_fetch: bool, ci_status: bool):
remotes = remote if remote else ['origin']
if not no_fetch:
for r in remotes:
click.echo(f'❕ Fetching "{r}" to provide the latest status...')
git('fetch', r, stderr=None)
print_graph(remotes=remotes, query_ci_status=ci_status, fmt=format)
@am.group()
def result():
""" Set and Get merge results.
"""
pass
@result.command()
@click.argument('merge_id', envvar='MERGE_ID')
@click.argument('status')
def set(merge_id, status):
"""Set the merge status for a merge ID."""
if status not in CommitStates.all:
all_status = ', '.join(CommitStates.all)
raise click.BadArgumentUsage(f"Status must be one of {all_status}.")
set_state(merge_id, status)
click.echo(f"Set {merge_id} to {status}")
@result.command()
@click.argument('merge_id', envvar='MERGE_ID')
def get(merge_id):
"""Get the merge status of a merge_id.
"""
click.echo(get_state(merge_id))
if __name__ == '__main__':
am()
```
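As a small sketch, the click group above can be exercised in-process with click's `CliRunner`; invoking only `--help` keeps git and Redis out of the picture (this assumes the package is installed):
```python
# Smoke-test sketch; only --help is invoked, so no git or Redis access happens.
from click.testing import CliRunner
from git_apple_llvm.am.main import am

runner = CliRunner()
result = runner.invoke(am, ['--help'])
assert result.exit_code == 0
print(result.output)  # lists the status, graph and result subcommands
```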
|
{
"source": "jdev-org/QgisCadastrePlugin",
"score": 2
}
|
#### File: cadastre/dialogs/cadastre_load_dialog.py
```python
__email__ = "<EMAIL>"
import os.path
from pathlib import Path
from qgis.PyQt import uic
from qgis.PyQt.QtWidgets import QDialog
from cadastre.cadastre_loading import cadastreLoading
from cadastre.tools import set_window_title
LOAD_FORM_CLASS, _ = uic.loadUiType(
os.path.join(
str(Path(__file__).resolve().parent.parent),
'forms',
'cadastre_load_form.ui'
)
)
class CadastreLoadDialog(QDialog, LOAD_FORM_CLASS):
""" Load data from database. """
def __init__(self, iface, cadastre_search_dialog, parent=None):
super(CadastreLoadDialog, self).__init__(parent)
self.iface = iface
self.setupUi(self)
self.setWindowTitle('{} {}'.format(self.windowTitle(), set_window_title()))
self.mc = self.iface.mapCanvas()
self.cadastre_search_dialog = cadastre_search_dialog
# common cadastre methods
from cadastre.dialogs.dialog_common import CadastreCommon
self.qc = CadastreCommon(self)
self.ql = cadastreLoading(self)
# spatialite support
self.hasSpatialiteSupport = CadastreCommon.hasSpatialiteSupport()
if not self.hasSpatialiteSupport:
self.liDbType.removeItem(2)
# Set initial values
self.go = True
self.step = 0
self.totalSteps = 0
self.dbType = None
self.dbpluginclass = None
self.connectionName = None
self.connection = None
self.db = None
self.schema = None
self.schemaList = None
self.hasStructure = None
# Get style list
self.getStyleList()
# Signals/Slot Connections
self.liDbType.currentIndexChanged[str].connect(self.qc.updateConnectionList)
self.liDbConnection.currentIndexChanged[str].connect(self.qc.updateSchemaList)
self.btProcessLoading.clicked.connect(self.onProcessLoadingClicked)
self.ql.cadastreLoadingFinished.connect(self.onLoadingEnd)
self.btLoadSqlLayer.clicked.connect(self.onLoadSqlLayerClicked)
self.rejected.connect(self.onClose)
self.buttonBox.rejected.connect(self.onClose)
self.qc.load_default_values()
def onClose(self):
"""
Close dialog
"""
if self.db:
self.db.connector.__del__()
self.close()
def getStyleList(self):
"""
Get the list of style directories
inside the plugin dir
        and add a combobox item for each
"""
spath = os.path.join(self.qc.plugin_dir, "styles/")
dirs = os.listdir(spath)
dirs = [a for a in dirs if os.path.isdir(os.path.join(spath, a))]
dirs.sort()
cb = self.liTheme
cb.clear()
for d in dirs:
cb.addItem('%s' % d, d)
def onProcessLoadingClicked(self):
"""
Activate the loading of layers
from database tables
        when the user clicks the button
"""
if self.connection:
if self.db:
self.ql.processLoading()
def onLoadSqlLayerClicked(self):
"""
        Load a layer
        from the given SQL
        when the user clicks the button
"""
if self.connection:
if self.db:
self.ql.loadSqlLayer()
def onLoadingEnd(self):
"""
Actions to trigger
when all the layers
have been loaded
"""
self.cadastre_search_dialog.checkMajicContent()
self.cadastre_search_dialog.clearComboboxes()
self.cadastre_search_dialog.setupSearchCombobox('commune', None, 'sql')
self.cadastre_search_dialog.setupSearchCombobox('commune_proprietaire', None, 'sql')
# self.cadastre_search_dialog.setupSearchCombobox('section', None, 'sql')
```
#### File: cadastre/dialogs/dialog_common.py
```python
__email__ = "<EMAIL>"
import os.path
import re
import unicodedata
from collections import namedtuple
from pathlib import Path
from db_manager.db_plugins import createDbPlugin
from db_manager.db_plugins.plugin import BaseError
from db_manager.dlg_db_error import DlgDbError
from qgis.core import QgsMapLayer, QgsProject, QgsSettings
from qgis.PyQt.QtCore import QFileInfo, Qt
from qgis.PyQt.QtGui import QTextCursor
from qgis.PyQt.QtWidgets import QApplication, QFileDialog, qApp
import cadastre.cadastre_common_base as common_utils
class CadastreCommon:
""" Import data from EDIGEO and MAJIC files. """
def __init__(self, dialog):
self.dialog = dialog
# plugin directory path
self.plugin_dir = str(Path(__file__).resolve().parent.parent)
# default auth id for layers
self.defaultAuthId = '<PASSWORD>'
# Bind as class properties for compatibility
hasSpatialiteSupport = common_utils.hasSpatialiteSupport
openFile = common_utils.openFile
def updateLog(self, msg):
"""
Update the log
"""
t = self.dialog.txtLog
t.ensureCursorVisible()
prefix = '<span style="font-weight:normal;">'
suffix = '</span>'
t.append('%s %s %s' % (prefix, msg, suffix))
c = t.textCursor()
c.movePosition(QTextCursor.End, QTextCursor.MoveAnchor)
t.setTextCursor(c)
qApp.processEvents()
def updateProgressBar(self):
"""
Update the progress bar
"""
if self.dialog.go:
self.dialog.step += 1
self.dialog.pbProcess.setValue(int(self.dialog.step * 100 / self.dialog.totalSteps))
qApp.processEvents()
def load_default_values(self):
""" Try to load values in the UI which are stored in QGIS settings.
The function will return as soon as it is missing a value in the QGIS Settings.
The order is DB Type, connection name and then the schema.
"""
settings = QgsSettings()
WidgetSettings = namedtuple('WidgetSettings', ('ui', 'settings'))
widgets = [
WidgetSettings('liDbType', 'databaseType'),
WidgetSettings('liDbConnection', 'connection'),
WidgetSettings('liDbSchema', 'schema'),
]
# Default to PostGIS ticket #302
is_postgis = settings.value("cadastre/databaseType", type=str, defaultValue='postgis') == 'postgis'
for widget in widgets:
# Widgets are ordered by hierarchy, so we quit the loop as soon as a value is not correct
if widget.settings == 'schema' and not is_postgis:
return
if not hasattr(self.dialog, widget.ui):
return
value = settings.value("cadastre/" + widget.settings, type=str, defaultValue='')
if not value:
return
combo = getattr(self.dialog, widget.ui)
index = combo.findText(value, Qt.MatchFixedString)
            if index == -1:  # findText returns -1 when the value is not found
                return
combo.setCurrentIndex(index)
def updateConnectionList(self):
"""
Update the combo box containing the database connection list
"""
QApplication.setOverrideCursor(Qt.WaitCursor)
dbType = str(self.dialog.liDbType.currentText()).lower()
self.dialog.liDbConnection.clear()
if self.dialog.liDbType.currentIndex() != 0:
self.dialog.dbType = dbType
# instance of db_manager plugin class
dbpluginclass = createDbPlugin(dbType)
self.dialog.dbpluginclass = dbpluginclass
# fill the connections combobox
self.dialog.connectionDbList = []
for c in dbpluginclass.connections():
self.dialog.liDbConnection.addItem(str(c.connectionName()))
self.dialog.connectionDbList.append(str(c.connectionName()))
            # Show/Hide database specific panel
if hasattr(self.dialog, 'databaseSpecificOptions'):
if dbType == 'postgis':
self.dialog.databaseSpecificOptions.setCurrentIndex(0)
else:
self.dialog.databaseSpecificOptions.setCurrentIndex(1)
self.toggleSchemaList(False)
else:
if hasattr(self.dialog, "inDbCreateSchema"):
self.dialog.databaseSpecificOptions.setTabEnabled(0, False)
self.dialog.databaseSpecificOptions.setTabEnabled(1, False)
QApplication.restoreOverrideCursor()
def toggleSchemaList(self, t):
"""
Toggle Schema list and inputs
"""
self.dialog.liDbSchema.setEnabled(t)
if hasattr(self.dialog, "inDbCreateSchema"):
self.dialog.inDbCreateSchema.setEnabled(t)
self.dialog.btDbCreateSchema.setEnabled(t)
self.dialog.databaseSpecificOptions.setTabEnabled(0, t)
self.dialog.databaseSpecificOptions.setTabEnabled(1, not t)
self.dialog.btCreateNewSpatialiteDb.setEnabled(not t)
def updateSchemaList(self):
"""
Update the combo box containing the schema list if relevant
"""
self.dialog.liDbSchema.clear()
QApplication.setOverrideCursor(Qt.WaitCursor)
connectionName = str(self.dialog.liDbConnection.currentText())
self.dialog.connectionName = connectionName
dbType = str(self.dialog.liDbType.currentText()).lower()
# Deactivate schema fields
self.toggleSchemaList(False)
connection = None
if connectionName:
# Get schema list
dbpluginclass = createDbPlugin(dbType, connectionName)
self.dialog.dbpluginclass = dbpluginclass
try:
connection = dbpluginclass.connect()
except BaseError as e:
DlgDbError.showError(e, self.dialog)
self.dialog.go = False
self.updateLog(e.msg)
QApplication.restoreOverrideCursor()
return
except:
self.dialog.go = False
msg = u"Impossible de récupérer les schémas de la base. Vérifier les informations de connexion."
self.updateLog(msg)
QApplication.restoreOverrideCursor()
return
finally:
QApplication.restoreOverrideCursor()
if connection:
self.dialog.connection = connection
db = dbpluginclass.database()
if db:
self.dialog.db = db
self.dialog.schemaList = []
if dbType == 'postgis':
# Activate schema fields
self.toggleSchemaList(True)
for s in db.schemas():
self.dialog.liDbSchema.addItem(str(s.name))
self.dialog.schemaList.append(str(s.name))
else:
self.toggleSchemaList(False)
else:
self.toggleSchemaList(False)
QApplication.restoreOverrideCursor()
def checkDatabaseForExistingStructure(self):
"""
        Search a database / schema
        to check whether Cadastre structure tables
        already exist in it
"""
hasStructure = False
hasData = False
hasMajicData = False
hasMajicDataProp = False
hasMajicDataParcelle = False
hasMajicDataVoie = False
searchTable = 'geo_commune'
majicTableParcelle = 'parcelle'
majicTableProp = 'proprietaire'
majicTableVoie = 'voie'
if self.dialog.db:
if self.dialog.dbType == 'postgis':
schemaSearch = [s for s in self.dialog.db.schemas() if s.name == self.dialog.schema]
schemaInst = schemaSearch[0]
getSearchTable = [a for a in self.dialog.db.tables(schemaInst) if a.name == searchTable]
if self.dialog.dbType == 'spatialite':
getSearchTable = [a for a in self.dialog.db.tables() if a.name == searchTable]
if getSearchTable:
hasStructure = True
# Check for data in it
sql = 'SELECT * FROM "%s" LIMIT 1' % searchTable
if self.dialog.dbType == 'postgis':
sql = 'SELECT * FROM "{}"."{}" LIMIT 1'.format(self.dialog.schema, searchTable)
data, rowCount, ok = CadastreCommon.fetchDataFromSqlQuery(self.dialog.db.connector, sql)
if ok and rowCount >= 1:
hasData = True
# Check for Majic data in it
sql = 'SELECT * FROM "%s" LIMIT 1' % majicTableParcelle
if self.dialog.dbType == 'postgis':
sql = 'SELECT * FROM "{}"."{}" LIMIT 1'.format(self.dialog.schema, majicTableParcelle)
data, rowCount, ok = CadastreCommon.fetchDataFromSqlQuery(self.dialog.db.connector, sql)
if ok and rowCount >= 1:
hasMajicData = True
hasMajicDataParcelle = True
# Check for Majic data in it
sql = 'SELECT * FROM "%s" LIMIT 1' % majicTableProp
if self.dialog.dbType == 'postgis':
sql = 'SELECT * FROM "{}"."{}" LIMIT 1'.format(self.dialog.schema, majicTableProp)
data, rowCount, ok = CadastreCommon.fetchDataFromSqlQuery(self.dialog.db.connector, sql)
if ok and rowCount >= 1:
hasMajicData = True
hasMajicDataProp = True
# Check for Majic data in it
sql = 'SELECT * FROM "%s" LIMIT 1' % majicTableVoie
if self.dialog.dbType == 'postgis':
sql = 'SELECT * FROM "{}"."{}" LIMIT 1'.format(self.dialog.schema, majicTableVoie)
data, rowCount, ok = CadastreCommon.fetchDataFromSqlQuery(self.dialog.db.connector, sql)
if ok and rowCount >= 1:
hasMajicData = True
hasMajicDataVoie = True
# Set global properties
self.dialog.hasStructure = hasStructure
self.dialog.hasData = hasData
self.dialog.hasMajicData = hasMajicData
self.dialog.hasMajicDataParcelle = hasMajicDataParcelle
self.dialog.hasMajicDataProp = hasMajicDataProp
        self.dialog.hasMajicDataVoie = hasMajicDataVoie
def checkDatabaseForExistingTable(self, tableName, schemaName=''):
"""
Check if the given table
exists in the database
"""
tableExists = False
if not self.dialog.db:
return False
if self.dialog.dbType == 'postgis':
sql = "SELECT * FROM information_schema.tables WHERE table_schema = '%s' AND table_name = '%s'" % (
schemaName, tableName)
if self.dialog.dbType == 'spatialite':
sql = "SELECT name FROM sqlite_master WHERE type='table' AND name='%s'" % tableName
data, rowCount, ok = CadastreCommon.fetchDataFromSqlQuery(self.dialog.db.connector, sql)
if ok and rowCount >= 1:
tableExists = True
return tableExists
# Bind as class properties for compatibility
def getLayerFromLegendByTableProps(*args, **kwargs) -> QgsMapLayer:
return common_utils.getLayerFromLegendByTableProps(QgsProject.instance(), *args, **kwargs)
getConnectionParameterFromDbLayer = common_utils.getConnectionParameterFromDbLayer
setSearchPath = common_utils.setSearchPath
fetchDataFromSqlQuery = common_utils.fetchDataFromSqlQuery
getConnectorFromUri = common_utils.getConnectorFromUri
def normalizeString(self, s):
"""
        Remove all accents from
        the given string and replace
        the 'œ' ligature ("e dans l'o") with 'oe'
"""
p = re.compile('(œ)')
s = p.sub('oe', s)
s = unicodedata.normalize('NFD', s)
s = s.encode('ascii', 'ignore')
s = s.upper()
s = s.decode().strip(' \t\n')
r = re.compile(r"[^ -~]")
s = r.sub(' ', s)
s = s.replace("'", " ")
return s
# Bind as class properties for compatibility
postgisToSpatialite = common_utils.postgisToSpatialite
postgisToSpatialiteLocal10 = common_utils.postgisToSpatialiteLocal10
def createNewSpatialiteDatabase(self):
"""
        Let the user choose a file path,
        create the SQLite database with
        spatial tools and create the QGIS connection
"""
# Let the user choose new file path
ipath, __ = QFileDialog.getSaveFileName(
None,
u"Choisir l'emplacement du nouveau fichier",
str(os.path.expanduser("~").encode('utf-8')).strip(' \t'),
"Sqlite database (*.sqlite)"
)
if not ipath:
self.updateLog(u"Aucune base de données créée (annulation)")
return None
# Delete file if exists (question already asked above)
if os.path.exists(str(ipath)):
os.remove(str(ipath))
# Create the spatialite database
try:
# Create a connection (which will create the file automatically)
from qgis.utils import spatialite_connect
con = spatialite_connect(str(ipath), isolation_level=None)
cur = con.cursor()
sql = "SELECT InitSpatialMetadata(1)"
cur.execute(sql)
con.close()
del con
except:
self.updateLog(u"Échec lors de la création du fichier Spatialite !")
return None
        # Create the QGIS connection
baseKey = "/SpatiaLite/connections/"
settings = QgsSettings()
myName = os.path.basename(ipath)
baseKey += myName
myFi = QFileInfo(ipath)
settings.setValue(baseKey + "/sqlitepath", myFi.canonicalFilePath())
# Update connections combo box and set new db selected
self.updateConnectionList()
listDic = {self.dialog.connectionDbList[i]: i for i in range(0, len(self.dialog.connectionDbList))}
self.dialog.liDbConnection.setCurrentIndex(listDic[myName])
# Bind as class properties for compatibility
getCompteCommunalFromParcelleId = common_utils.getCompteCommunalFromParcelleId
getProprietaireComptesCommunaux = common_utils.getProprietaireComptesCommunaux
getItemHtml = common_utils.getItemHtml
```
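Since `normalizeString` is the one piece of this helper that is pure Python, here is a standalone sketch of the same accent-stripping approach, runnable without QGIS; it is an illustration, not the plugin's own API:
```python
# Standalone re-implementation sketch of the normalisation steps used above:
# replace the lowercase 'œ' ligature, strip accents via NFD decomposition,
# upper-case, drop remaining non-ASCII, and turn apostrophes into spaces.
import re
import unicodedata

def normalize_string(s: str) -> str:
    s = re.sub('œ', 'oe', s)
    s = unicodedata.normalize('NFD', s)
    s = s.encode('ascii', 'ignore').decode().upper().strip(' \t\n')
    s = re.sub(r"[^ -~]", ' ', s)
    return s.replace("'", ' ')

if __name__ == '__main__':
    print(normalize_string("cœur d'Été"))  # COEUR D ETE
```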
#### File: cadastre/processing/provider.py
```python
from os.path import join
from pathlib import Path
from qgis.core import QgsProcessingProvider
from qgis.PyQt.QtGui import QIcon
from cadastre.processing.algorithms.config import ConfigProjectAlgorithm
from cadastre.processing.algorithms.edigeo_downloader import EdigeoDownloader
class CadastreProvider(QgsProcessingProvider):
def loadAlgorithms(self):
self.addAlgorithm(ConfigProjectAlgorithm())
self.addAlgorithm(EdigeoDownloader())
def id(self): # NOQA
return 'cadastre'
def name(self):
return 'Cadastre'
def longName(self):
return 'Outils d\'exploitation des données cadastrale français'
def icon(self):
plugin_dir = str(Path(__file__).resolve().parent.parent)
return QIcon(join(plugin_dir, 'icon.png'))
```
|
{
"source": "jdevries3133/chaotic_christmas_present",
"score": 3
}
|
#### File: custom_protocol_server/multiportserver/schema.py
```python
from random import randint
SCHEMA = {
'hello world / please kill me now': {
'host': '0.0.0.0',
'port': 1050,
'message': (
'Dear god please help me world! These assholes have me replying to '
'every request by hand. This is insanity. No person should have to '
'suffer such a fate!!!'
),
},
}
fibbonaci_ports = [
1597,
2584,
4181,
6765,
10946,
17711,
28657,
46368,
]
fibbonaci_message = 'GOTOJAIL'
assert len(fibbonaci_ports) == len(fibbonaci_message)
for port, message in zip(fibbonaci_ports, fibbonaci_message):
SCHEMA[f'Fibbonaci, port {port}'] = {
'port': port,
'host': '0.0.0.0',
'message': message,
}
SCHEMA['JAIL'] = {
'port': 5245, # JAIL on a number pad
'host': '0.0.0.0',
'message': (
'Nice job buddy ol boy. Go to this link: https://thomasdevri.es'
'/staff/docs/you.might/'
),
}
class GenSchema:
"""
Generate part of schema for round 1 challenge.
"""
    def __init__(self, secret, root_node_port, ports_used):
self.secret = secret
self.root_node = root_node_port
self.ports_used = ports_used
self.last_port = None
self.schema = {}
def gen_schema(self):
"""
        Distribute self.secret across a linked list of TCP ports. Output data
        in a schema which can be used to spin up actual servers.
"""
if self.schema:
return self.schema
port = None
for letter in self.secret[:0:-1]:
port = None
while not port:
tmp_port = randint(1025, 65534)
if tmp_port in self.ports_used:
continue
port = tmp_port
if not self.last_port:
message = f'CHAR: {letter}; GOTO: NULL'
else:
message = f'CHAR: {letter}; GOTO: {self.last_port}'
self.schema[f'__round_1_letter_{letter}_on_port{port}'] = {
'host': '0.0.0.0',
'port': port,
'message': message,
}
self.last_port = port
self.ports_used.add(port)
self.schema['__round_1_root_node'] = {
'host': '0.0.0.0',
'port': self.root_node,
            'message': f'CHAR: {self.secret[0]}; GOTO: {port}'
}
return self.schema
secret = (
'<KEY>'
'<KEY>'
'<KEY>'
'JJAKDtzhPiiFYcLJAtgsmPsXDlfgfyFhiKoBfSotnNPmdqLRYYurOEWpZoprSWXnHpKwtWzYbk'
'gnBr'
)
root_node_port = 45320
ports_used = {i['port'] for i in SCHEMA.values()}
SCHEMA = {
**GenSchema(secret, root_node_port, ports_used).gen_schema(),
**SCHEMA,
}
```
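An illustrative walk-through of `GenSchema` with made-up values (the real secret and ports live in the module above, and the `multiportserver.schema` import path is assumed); it only shows the linked-list shape of the generated schema:
```python
# Toy run of GenSchema; 'HELLO' and port 40000 are invented for the example.
from multiportserver.schema import GenSchema

schema = GenSchema(secret='HELLO', root_node_port=40000, ports_used=set()).gen_schema()
root = schema['__round_1_root_node']
print(root['message'])  # e.g. "CHAR: H; GOTO: 51234"
# Following each GOTO port yields the next character; the final node,
# holding 'O', carries "GOTO: NULL".
```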
#### File: custom_protocol_server/multiportserver/subprocess_server_manager.py
```python
from time import sleep
import signal
import sys
import socket
import subprocess
from pathlib import Path
import logging
from .exceptions import SubprocessServerNotResponding, ImproperlyConfigured
logger = logging.getLogger(__name__)
class SubprocessServer:
"""
Manage a single server running ./server.Server in a subprocess.
"""
def __init__(self, host: str, port: int, message: str):
self.host = host
self.port = port
self.message = message
self.is_healthy = False
self.server = None
def start(self):
self.server = subprocess.Popen(
[
'python3.8',
Path(Path(__file__).parent, 'server.py').resolve(),
'--host', self.host,
'--port', str(self.port),
'--message', self.message,
],
)
self._check_health()
def restart(self):
self.stop()
self.start()
def stop(self):
if not self.server:
self.is_healthy = False
return
self.server.terminate()
self.server.wait()
self.is_healthy = False
def _check_health(self) -> bool:
"""
        Return True when the server responds, or raise an exception
        if there is no response after 5 tries spaced one second apart.
"""
if self.is_healthy:
return True
for _ in range(5):
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.connect((self.host, self.port))
                    received = b''
                    while data := sock.recv(1024):
                        received += data
                    if str(received, 'utf-8') == self.message:
self.is_healthy = True
return True
except (ConnectionRefusedError, OSError):
sleep(1)
raise SubprocessServerNotResponding
class SubprocessServerManager:
"""
Manage many simple TCP servers (instances of SubprocessServer above).
Each server will listen on a single port and echo a fixed message to any
connection.
"""
def __init__(self, schema: dict):
self.is_healthy = False # meaning that servers are up and running
self.servers = {}
signal.signal(signal.SIGINT, self.stop)
self._validate_schema(schema)
for server_name, server in schema.items():
self.servers[server_name] = SubprocessServer(
server['host'],
server['port'],
server['message'],
)
def start(self):
print(f'Starting {len(self.servers)} servers.')
for server_name, server in self.servers.items():
logger.info(
f'Started server: {server_name} on port {server.host}:'
f'{server.port} with message {server.message}'
)
server.start()
self._check_health()
self._wait()
def stop(self, *a, **kw):
print(f'Stopping {len(self.servers)} servers.')
for server_name, server in self.servers.items():
logger.info(
f'Stopped server: {server_name} on port {server.host}:'
f'{server.port} with message {server.message}'
)
server.stop()
def restart(self):
self.stop()
self.start()
def _wait(self):
"""
While subprocesses are running, wait for an exit signal.
"""
try:
while True:
sleep(1)
except KeyboardInterrupt:
self.stop()
sys.exit()
def _check_health(self) -> bool:
"""
        Report whether every subprocess server passed its own startup
        health check.
"""
self.is_healthy = True
for server_name, server in self.servers.items():
if not server.is_healthy:
logger.info(
'Health state set to false because of the server with a '
f'name of {server_name} on {server.host}:{server.port} '
)
self.is_healthy = False
return self.is_healthy
@ staticmethod
def _validate_schema(schema):
"""
Should look like this:
        {
            "server_name": {
                "host": str,
                "port": int,
                "message": str,
                "server": SubprocessServer instance
},
...
}
"""
used_ports = set()
for v in schema.values():
for item, type_ in {
'host': str,
'port': int,
'message': str
}.items():
if item not in v:
raise ImproperlyConfigured
if not isinstance(v[item], type_):
raise ImproperlyConfigured
if v['port'] in used_ports:
raise ImproperlyConfigured(f'Two servers want port {v["port"]}')
used_ports.add(v['port'])
```
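A minimal wiring sketch for the manager above; the two-server schema is invented, and it assumes the `multiportserver` package plus the `python3.8`/`server.py` pair that `SubprocessServer.start` shells out to are available:
```python
# Invented two-server schema matching the shape _validate_schema expects.
from multiportserver.subprocess_server_manager import SubprocessServerManager

schema = {
    'greeter': {'host': '127.0.0.1', 'port': 1050, 'message': 'hello'},
    'fib':     {'host': '127.0.0.1', 'port': 1597, 'message': 'GOTOJAIL'},
}

if __name__ == '__main__':
    manager = SubprocessServerManager(schema)
    manager.start()  # blocks in _wait() until Ctrl-C, then stops every server
```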
|
{
"source": "jdevries3133/ea_internal_tools",
"score": 2
}
|
#### File: ea_internal_tools/authenticate_ea/middleware.py
```python
import re
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.shortcuts import redirect
User = get_user_model()
class EaMiddlewareException(Exception):
pass
class EaAuthMiddleware:
def __init__(self, get_response):
self.get_response = get_response
self.domain_name = settings.EA_AUTHENTICATION.get('domain_name')
self.filter_mode = settings.EA_AUTHENTICATION.get('filter_mode')
self.filter_routes = settings.EA_AUTHENTICATION.get('filter_routes')
self.request = None
self.user = None
def __call__(self, request):
self.request = request
self.user = request.user
if self.need_to_check():
return self.perform_teacher_check()
response = self.get_response(request)
return response
def perform_teacher_check(self):
"""
Check if the user is a teacher. If they are not, redirect them to the
not_validated_yet view.
"""
# just let superusers through
if self.user.is_superuser:
return self.get_response(self.request)
if not self.user.is_authenticated:
messages.add_message(
self.request,
messages.INFO,
'You need to make an account and verify your email before you '
'can access this page.'
)
return redirect('/register/')
if self.user.role != User.TEACHER:
messages.add_message(
self.request,
messages.INFO,
'You need to verify your email before you can access this '
'page.'
)
return redirect('not_verified_yet')
# check has passed
return self.get_response(self.request)
def need_to_check(self) -> bool:
"""
Based on filter_routes and filter_mode, determine if this route needs
to be protected.
"""
for route in self.filter_routes:
if re.search(route, self.request.path):
if self.filter_mode == 'whitelist':
return False # don't need to check if we match on whitelist
if self.filter_mode == 'blacklist':
return True # need to check if we match on blacklist
# there was no match.
# no need to check if it's a blacklist
# do need to check if it's a whitelist.
return self.filter_mode == 'whitelist'
```
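For reference, a sketch of the Django settings this middleware reads; only the `EA_AUTHENTICATION` keys (`domain_name`, `filter_mode`, `filter_routes`) come from the code above, while the concrete values are illustrative and the dotted middleware path simply follows the file layout:
```python
# settings.py (illustrative values; only the key names are dictated by the middleware)
MIDDLEWARE = [
    # ... Django defaults ...
    'authenticate_ea.middleware.EaAuthMiddleware',
]

EA_AUTHENTICATION = {
    'domain_name': 'example.org',                  # assumed school domain
    'filter_mode': 'blacklist',                    # 'whitelist' or 'blacklist'
    'filter_routes': [r'^/zar/', r'^/reports/'],   # regexes matched against request.path
}
```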
#### File: ea_internal_tools/zar/models.py
```python
import logging
from django.db import models
from django.contrib.auth import get_user_model
from django_mysql.models import JSONField
logger = logging.getLogger(__name__)
class UnknownZoomName(models.Model):
zoom_name = models.CharField(max_length=50, null=False)
real_name = models.CharField(max_length=50, null=False)
def __str__(self):
return f'{self.zoom_name} => {self.real_name}'
class MeetingCompletedReport(models.Model):
"""
For updating the frontend of progress. As an alternative to web sockets,
this will be created after each report is processed, and the frontend
will just ping the server for progress every few seconds.
"""
topic = models.CharField(max_length=100)
meeting_time = models.DateField(auto_now=False, auto_now_add=False)
created = models.DateTimeField(auto_now=False, auto_now_add=True)
meeting_set_model = models.ForeignKey(
'zar.MeetingSetModel',
on_delete=models.CASCADE,
related_name='completed_reports',
)
def __str__(self):
return self.topic + ' on ' + self.meeting_time.strftime('%m/%d/%y')
class MeetingSetModel(models.Model):
"""
teacherHelper MeetingSet already has a serialize and deserialize method.
Once I get the prod database going, we'll store it here.
"""
owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
json = JSONField()
is_processed = models.BooleanField(default=False)
needs_name_matching = models.BooleanField(default=True)
created = models.DateTimeField(auto_now=False, auto_now_add=True)
class RawMeetingData(models.Model):
"""
Straight from the csv file.
"""
class Meta:
verbose_name_plural = 'Raw meeting data'
data = models.TextField()
meeting_set_model = models.ForeignKey(
'zar.MeetingSetModel',
on_delete=models.SET_NULL,
null=True,
related_name='data'
)
def __str__(self):
try:
return (
self.data.split('\n')[1].split(',')[1] # topic
+ ' at '
+ self.data.split('\n')[1].split(',')[2] # datetime
)
except Exception as e:
logger.error(
'RawMeetingData string method failed to do sloppy csv parsing. '
f'Exception {e} was thrown'
)
return super().__str__()
class Report(models.Model):
owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
meeting_set_model = models.ForeignKey(
'zar.MeetingSetModel',
on_delete=models.CASCADE,
)
report = models.FileField(upload_to='reports/') # excel workbook
created = models.DateTimeField(auto_now=False, auto_now_add=True)
```
#### File: ea_internal_tools/zar/services.py
```python
from uuid import uuid4
from pathlib import Path
import string
import logging
from typing import List
from django.db.models import Q
from django.shortcuts import redirect
from django.core.files import File
from django.contrib import messages
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from teacherHelper.zoom_attendance_report import MeetingSet, WorkbookWriter
from openpyxl.writer.excel import save_virtual_workbook
from .models import (
MeetingCompletedReport,
MeetingSetModel,
RawMeetingData,
UnknownZoomName,
Report
)
logger = logging.getLogger(__name__)
def queue_meeting_set(*,
data: List[bytes],
user) -> None:
"""
Save raw data, and create an empty MeetingSetModel to signal to the
worker that it has a MeetingSet to process. Filter all non-ascii
characters.
"""
if MeetingSetModel.objects.filter(owner=user, is_processed=False):
raise Exception(
f'{user.email} is creating a new MeetingSet despite already having '
'a MeetingSet in progress.'
)
meeting_set_model = MeetingSetModel.objects.create(owner=user)
data_models = []
for f in data:
raw = str(f.read(), 'utf-8-sig')
processed = ''.join(filter(lambda c: c in string.printable, raw))
data_models.append(RawMeetingData(
meeting_set_model=meeting_set_model,
data=processed,
))
RawMeetingData.objects.bulk_create(data_models)
def process_meeting_set(*,
meeting_set_model: MeetingSetModel,
data: List[str]) -> None:
"""
Take an unprocessed MeetingSet and process it! Will be run by a worker
separate from the web application.
"""
# initialize MeetingSet with all previously provided user matches
known_matches = {
m.zoom_name : m.real_name for m in UnknownZoomName.objects.all()
}
meeting_set = MeetingSet(data, known_matches=known_matches)
logger.debug(
        'Initialized MeetingSet with the following known matches: %(km)s',
{'km': known_matches}
)
# process all data, create progress reports during processing.
for meeting in meeting_set.process():
report = MeetingCompletedReport.objects.create(
topic=meeting.topic,
meeting_time=meeting.datetime.date(),
meeting_set_model=meeting_set_model,
)
logger.debug(f'Processed {report}. MeetingCompletedReport created.')
# update meeting_set_model now that processing is finished.
meeting_set_model.json = meeting_set.get_serializable_data()
meeting_set_model.is_processed = True
meeting_set_model.save()
# cleanup; delete reports now that processing is done
MeetingCompletedReport.objects.filter(
meeting_set_model=meeting_set_model
).delete()
def repair_broken_state(*, request, user): # -> django.http.HttpResponseRedirect
"""
Call this when an attempt to select the single wip report fails. This
means something went wrong and the state of the user's models is broken.
We need to reset and try again.
"""
MeetingSetModel.objects.filter(
Q(owner=user),
Q(needs_name_matching=True) | Q(is_processed=False)
).update(
needs_name_matching=False,
is_processed=True,
)
messages.add_message(
request,
messages.ERROR,
'Something went wrong, please try again.'
)
logger.error('Report processing abandoned. Something went wrong')
return redirect('file_upload')
def generate_excel_report(report_pk: str) -> Report:
"""
Returns Path in the django default_storage where the excel report is.
"""
ms = MeetingSetModel.objects.get(pk=report_pk)
meeting_set = MeetingSet.deserialize(ms.json)
workbook_buf = save_virtual_workbook(
WorkbookWriter(meeting_set).generate_report()
)
cf = ContentFile(workbook_buf)
path = f'{ms.owner.username}__{ms.created.isoformat()}.xlsx'
default_storage.save(path, cf)
report = Report(
owner=ms.owner,
meeting_set_model=ms,
)
report.report.name = path
report.save()
return report
```
|
{
"source": "jdevries3133/python_scripts",
"score": 4
}
|
#### File: jdevries3133/python_scripts/bingo.py
```python
from random import shuffle
from docx import Document
# change me
BINGO_GAME_NAME = 'Musical Instrument Bingo'
# change me
INSTRUMENTS = [
'electric guitar',
'acoustic guitar',
'piano',
'electric bass',
'drum set',
'ukulele',
'piccolo',
'flute',
'clarinet',
'bass clarinet',
'soprano saxophone',
'alto saxophone',
'tenor saxophone',
'baritone saxophone',
'trumpet',
'slide trumpet',
'trombone',
'euphonium',
'tuba',
'violin',
'viola',
'cello',
'string bass',
'timpani',
'cowbell',
'congas',
'timbales',
'maracas',
'guiro',
'clave',
'triangle',
'tambourine',
'theremin',
'marimba',
'xylophone',
'vibraphone',
]
# ---
# script; only change the below if you dare!
# ---
def is_bingo(x, y):
"""Cells at the center of the board are "free" cells"""
return x in range(2, 4) and y in range(2, 4)
def add_bingo_board(doc):
"""Add a single bingo board table to the given document."""
table = doc.add_table(rows=6, cols=6)
table.style = 'TableGrid'
for i, instrument in enumerate(INSTRUMENTS):
x = i % 6
y = i // 6
if is_bingo(x, y):
table.rows[i % 6].cells[i // 6].text = 'FREE'
else:
table.rows[i % 6].cells[i // 6].text = instrument
def main():
    """Create a bingo printout for one class: 26 pages, each with two shuffled boards."""
document = Document()
for _ in range(26):
shuffle(INSTRUMENTS)
document.add_heading('Instrument Bingo')
# add two boards to each page, to allow for a second round
document.add_paragraph('Round One')
add_bingo_board(document)
document.add_paragraph('Round Two')
add_bingo_board(document)
document.add_page_break()
document.save('bingo_board.docx')
if __name__ == '__main__':
main()
```
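A quick, docx-free sketch of the board layout produced above (column-by-column fill with `row = i % 6`, `col = i // 6`, and a 2×2 FREE centre); it is only a layout check, not part of the script itself:
```python
# Layout check mirroring add_bingo_board, without python-docx.
def board_layout(items):
    grid = [['' for _ in range(6)] for _ in range(6)]
    for i, label in enumerate(items[:36]):
        row, col = i % 6, i // 6
        grid[row][col] = 'FREE' if row in (2, 3) and col in (2, 3) else label
    return grid

if __name__ == '__main__':
    for line in board_layout([f'item{n:02d}' for n in range(36)]):
        print(' | '.join(f'{cell:>6}' for cell in line))
```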
#### File: python_scripts/star360/data_entry.py
```python
import csv
import re
import pyperclip as pc
from teacherHelper import Helper
helper = Helper.read_cache()
def entry():
"""
Take the data off each page from the clipboard and also do a first
round of cleaning.
"""
with open('out.csv', 'w') as csvf:
while True:
appends = {'a': [], 'b': [], 'c': []}
append_to = 'a'
for d in pc.paste().split('\n'):
if d == 'User Name':
append_to = 'b'
continue
if d == 'Grade Password':
append_to = 'c'
continue
appends[append_to].append(d)
wr = csv.writer(csvf)
data = []
for i in range(len(appends['a'])):
last, first, *rest = appends['a'][i].split(',')
row = [
f'{first} {last}',
appends['b'][i],
appends['c'][i],
]
# clean names
if not helper.find_nearest_match(row[0], auto_yes=True):
inp = input(f'Fix name "{row[0]}... {rest if rest else ""}" or press enter to keep: ')
row[0] = inp if inp else row[0]
wr.writerow(row)
inp = input('Enter to append again, or type "exit" to exit: ')
if inp.lower() == 'exit':
break
def _clean_name(name: str) -> str:
if ',' in name:
last, first, *rest = name.split(',')
name = f'{first} {last}'
st = helper.find_nearest_match(name, auto_yes=True)
if not st:
print(f'Warning! {name} was not found!')
return st.name if st else name
def _clean_pwd(pwd: str) -> str:
return re.search(
r'Grade \d (.*)',
pwd
)[1]
def _clean_row(row: list) -> list:
"""
Thoroughly clean each individual row.
"""
row = [i.strip() for i in row]
row[0] = _clean_name(row[0])
row[2] = _clean_pwd(row[2])
return row
def clean():
cleaned = []
with open('out.csv', 'r') as csvf:
rd = csv.reader(csvf)
for row in rd:
cleaned.append(_clean_row(row))
with open('clean.csv', 'w') as csvf:
wr = csv.writer(csvf)
wr.writerows(cleaned)
if __name__ == '__main__':
clean()
```
#### File: python_scripts/star360/email.py
```python
import csv
from datetime import datetime
import sys
from pathlib import Path
import logging
from time import sleep
from teacherHelper import Helper, Email
logging.basicConfig(
filename=Path(Path(__file__).parent, "star360_emailer.log"),
level=logging.DEBUG
)
logger = logging.getLogger(__name__)
helper = Helper.read_cache()
class StudentNotFound(Exception):
pass
class Star360MailMerge:
def __init__(self, csv_path: Path, debug=True, skip=None):
self.debug = debug # sends all emails to me
self.DEBUG_EMAIL = '<EMAIL>'
self.csv = csv_path
# allow the ability to skip some students on a second run after smtp
# kick. This should be a list of exact-match student names.
self.skip = skip if skip else []
self.sent_to = [] # successfully sent; a list of names
# saving myself from myself
if not self.debug:
i = input('WARNING: Really send emails to students?')
if i != 'yes':
print(f'input "{i}" != "yes". Exiting...')
sys.exit()
self.students = []
with open(csv_path, 'r') as csvf:
rd = csv.reader(csvf)
# check on headers
assert next(rd) == ['name', 'username', 'password']
for name, username, password in rd:
st = helper.find_nearest_match(name, auto_yes=True)
if not st:
raise StudentNotFound(f'{name} not found')
st.username = username
st.password = password
# LIMIT RECIPIENTS TO THE FOURTH GRADE.
if (
st.grade_level == 4
and not st.homeroom == ['<NAME>','<NAME>']
):
self.students.append(st)
def send_emails(self):
# TODO: break up this big function
sent_to = []
with Email() as eml:
for i, st in enumerate(self.students):
if st.name in self.skip:
continue
if self.debug:
recipient = self.DEBUG_EMAIL
else:
recipient = st.email
message = [
f'Hi {st.first_name},',
'',
'Today, you will be taking your Star360 Assessment. '
'Please complete the steps below:',
'',
'1. Click on this link: <a href="https://global-zone08.renaiss'
'ance-go.com/welcomeportal/6234531">https://global-zone08.rena'
'issance-go.com/welcomeportal/6234531</a>',
'2. Click "I\'m a Student"',
f'3. Type your Username: <b>{st.username}</b>',
                    f'4. Type your Password: <b>{st.password}</b>',
'5. Click "Log In"',
'6. Find the Star Math or Reading test (depending on teacher '
'directions).',
'7. Begin your test. If you need to enter a monitor password, '
'type <b>Admin</b>',
'8. When you finish, send a chat to your teacher.',
'',
'<span style="color: red;">STAY ON THE ZOOM UNTIL WE CONFIRM '
'YOU SUBMITTED YOUR TEST</span>',
'',
'Good luck!',
]
eml.send(
to=recipient,
subject='Star360 Username and Password',
message=message
)
self.sent_to.append(st.name)
logger.info(f'Email sent to {st.name} including username: {st.username}')
logger.debug(f'{i}/{len(self.students)} emails sent')
sent_to.append(st)
```
|
{
"source": "jdevries3133/teacher_helper",
"score": 3
}
|
#### File: src/teacherhelper/entities.py
```python
from datetime import datetime
class EntityException(Exception): ...
class ParentGuardianError(EntityException): ...
class Group:
"""
Group class is used to manage groups for extracurricular activities, field
trips, and other purposes.
"""
def __init__(self, name, grade_level, students):
self.name = name
self.grade_level = grade_level
self.students = students
class Homeroom:
def __init__(self, teacher, grade_level, students):
"""
Ensure that string constants for csv headers of id, student names, and
(if applicable) student emails are correct.
"""
self.teacher = teacher
self.grade_level = grade_level
self.students = students
class Student:
def __init__(self, context):
context.setdefault('groups', [])
context.setdefault('guardians', [])
self.first_name = context.get('first_name')
self.last_name = context.get('last_name')
self.student_id = context.get('student_id')
self.homeroom = context.get('homeroom')
self.grade_level = context.get('grade_level')
self.groups = context.get('groups')
self.email = context.get('email')
self.guardians = context.get('guardians')
# TODO: birthdays
self.name = self.first_name + ' ' + self.last_name
"""
primary_contact is an instance of ParentGuardian, which is assigned
during the parsing of parent / guardian data in the new_school_year
function within OnCourseMixin
"""
self.primary_contact = None
def __str__(self, verbose=False):
"""
        Assemble a nice printout of student information and the student's
        guardian information. Also offer a verbose option, which ties into /shell.py
"""
data = [
'--',
self.name,
self.grade_level,
self.homeroom,
self.email,
'--'
'\nGuardians:'
]
student_data = [str(i) for i in data if i] # filter out NoneTypes
if not self.guardians:
            return '\n'.join(student_data)
# indent
gu_data = ['\n\t'.join(self.primary_contact.__str__().split('\n'))]
cutoff = 3 if not verbose else len(self.guardians) - 1
for gu in self.guardians[1:cutoff]:
gu_data.append('\n\t'.join(gu.__str__().split('\n')))
return '\n'.join(student_data + gu_data)
class ParentGuardian:
def __init__(self, context: dict, verbose=False):
self.student = context.get('student')
self.first_name = context.get('first_name')
self.last_name = context.get('last_name')
self.home_phone = context.get('home_phone')
self.mobile_phone = context.get('mobile_phone')
self.work_phone = context.get('work_phone')
self.email = context.get('email')
self.relationship_to_student = context.get('relationship_to_student')
self.primary_contact = context.get('primary_contact')
self.allow_contact = context.get('allow_contact')
self.student_resides_with = context.get('student_resides_with')
# full name
if self.first_name and self.last_name:
self.name = self.first_name + ' ' + self.last_name
else:
raise ParentGuardianError(
'First and last name were not provided as context to '
'ParentGuardian class. This means that self.name cannot\n'
'be constructed. Please at least pass a first and last name '
'for every guardian into this class.'
)
# warn about missing attributes
for k, v in self.__dict__.items():
if not v:
if verbose:
print(
f'WARNING: Guardian\t{self.name}\thas no value for\t{k}'
)
# type checking
if not isinstance(self.student, Student):
raise ParentGuardianError(
'Student was a string, not a Student object.'
)
for k, v in self.__dict__.items():
if 'phone' in k and v and not isinstance(v, int):
raise ParentGuardianError(
'Phone number should be of type int.'
)
def __str__(self):
outs = [
self.relationship_to_student,
self.name,
f'Contact allowed: {self.allow_contact}',
f'Is primary contact: {self.primary_contact}',
f'Mobile Phone: {self.mobile_phone}',
f'Email Address: {self.email}',
'\n',
]
return '\n'.join([str(i) for i in outs if i])
```
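A self-contained sketch of how the two context dicts fit together; the names and numbers are invented, only the dictionary keys come from the classes above, and the `teacherhelper` import path is assumed to be installed:
```python
# Invented example data; only illustrates the expected context keys.
from teacherhelper.entities import Student, ParentGuardian

student = Student({
    'first_name': 'Ada',
    'last_name': 'Lovelace',
    'student_id': '12345',
    'homeroom': 'Byron',
    'grade_level': 6,
    'email': 'ada@example.com',
})
parent = ParentGuardian({
    'student': student,
    'first_name': 'Anne',
    'last_name': 'Lovelace',
    'mobile_phone': 5551234567,
    'email': 'anne@example.com',
    'relationship_to_student': 'Mother',
    'primary_contact': True,
    'allow_contact': True,
})
student.guardians.append(parent)
student.primary_contact = parent
print(student)
```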
#### File: teacherhelper/tests/test_make_mocks.py
```python
import csv
import unittest
from unittest.mock import patch
from pathlib import Path
from .make_mocks import (
make_students_csv,
make_parents_csv,
random_class
)
with open(
Path(Path(__file__).parent, 'random_names.csv'),
'r'
) as csvf:
rd = csv.reader(csvf)
names = [r for r in rd]
# smaller sample for fast test
@ patch('teacherhelper.tests.make_mocks.names', names[:100])
class TestMakeMocks(unittest.TestCase):
def test_make_students_csv(self):
data = make_students_csv()
assert data # no empty list
for row in data:
# check that student names come from list of random names
assert ' '.join(row[:2]) in [' '.join(i) for i in names]
# check that none of the teachers are also students
teacher_last, teacher_first = row[3].split(', ')
assert ' '.join((teacher_first, teacher_last)) not in [
' '.join(i) for i in names
]
# check that homerooms are all in the same grade
groups_by_teacher = {}
for row in data:
groups_by_teacher.setdefault(row[3], set())
groups_by_teacher[row[3]].add(row[2])
for set_ in groups_by_teacher.values():
assert len(set_) == 1
def test_make_parents_csv(self):
students = make_students_csv()
parents = make_parents_csv(students)
assert parents # no empty list
for row in parents:
# every row has 13 items
assert len(row) == 13
# row is all strings
for i in row:
assert isinstance(i, str)
# unpack parent name variables
parent_first_nm, parent_last_nm, *_ = row
parent_name = ' '.join((
parent_first_nm,
parent_last_nm
))
# no parent shares a name with a student
assert parent_name not in [' '.join(n) for n in names]
# no parent shares a name with a teacher
teachers = set([' '.join(r[3].split(', ')[::-1]) for r in students])
assert parent_name not in teachers
# two parents for each student
assert len(parents) / 2 == len(students)
def test_random_class(self):
"""
Test that students from a random class are actually all in the same
class and grade level.
"""
students = make_students_csv()
class_ = random_class(students)
for strow in class_:
assert strow[2] == class_[0][2]
assert strow[3] == class_[0][3]
```
#### File: teacherhelper/tools/csv_parser.py
```python
from difflib import ndiff
class IterCsvError(Exception): ...
class IterCsv:
def __init__(self, acceptable_headers, rows, strip_data=True):
"""
        Iterating over an instance yields a fetch callable for each data row.
        Internally, context is a dict in which the key is one of the acceptable
        headers, and the value is the index at which that header's field can be
        found in each row.
"""
self.current_row = 0
self.rows = rows
self.context = {}
self._acceptable_headers = acceptable_headers
self._strip_data = strip_data
self._assign_context()
def _assign_context(self):
"""Context is a mapping of header labels to row indices, so that the
row items can later be fetched by name."""
for i, raw_header in enumerate(self.rows[0]):
raw_header = raw_header.lower()
for clean_header in self._acceptable_headers:
if clean_header == raw_header:
# create mapping and check for duplicate match
before = len(self.context)
self.context[clean_header] = i
after = len(self.context)
if before == after:
raise IterCsvError(
'There are two or more headers with the value '
f'{raw_header}. Edit the csv to differentiate '
'between these columns to continue.'
)
if not (a := list(self.context.keys())) == (b := self._acceptable_headers):
diff = '\n\t'.join(ndiff(a, b))
msg = ('A match was not found for all headers:\n'
f'DIFF:\n\t{diff}')
raise IterCsvError(msg)
self._validate_context(self.context)
def fetch(self, name: str):
"""Fetch an item from a row by name during iteration."""
if (index := self.context.get(name)) is None:
raise IterCsvError(f'{name} does not exist in csv context')
value = self.rows[self.current_row][index]
if self._strip_data:
return value.strip()
return value
def __iter__(self):
return self
def __next__(self):
self.current_row += 1
if self.current_row < len(self.rows) - 1:
return self.fetch
raise StopIteration
@ staticmethod
def _validate_context(context):
"""Because this field is misspelled in OnCourse....."""
is_parent_spreadsheet = 'guardian <NAME>' in context
is_header_misspelled = context.get('student resides with') is None
if is_parent_spreadsheet and is_header_misspelled:
raise IterCsvError(
'Remember, "student resides with" is misspelled in '
'OnCourse. Fix it in the CSV you downloaded.'
)
```
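An in-memory usage sketch of `IterCsv`; the rows are invented, and note that iteration deliberately stops one row short of the end (per `__next__` above), so a padding row is included:
```python
# Invented rows; assumes the module is importable as teacherhelper.tools.csv_parser.
from teacherhelper.tools.csv_parser import IterCsv

rows = [
    ['First Name', 'Last Name', 'Grade Level'],
    ['Ada', 'Lovelace', '6'],
    ['Alan', 'Turing', '7'],
    ['pad', 'pad', 'pad'],  # __next__ never yields the final row
]
for fetch in IterCsv(['first name', 'last name', 'grade level'], rows):
    print(fetch('first name'), fetch('grade level'))
# Ada 6
# Alan 7
```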
|
{
"source": "jdewald/lispy",
"score": 3
}
|
#### File: jdewald/lispy/lispy.py
```python
import sys
import re
from environment import standard_env, Env
List = list
Number = (int, float)
class InPort(object):
"""An input port. Retains a line of chars."""
tokenizer = r'''\s*(,@|[('`,)]|"(?:[\\].|[^\\"])*"|;.*|[^\s('"`,;)]*)(.*)'''
def __init__(self, file):
        self.file = file
self.line = ''
def next_token(self):
"Return the next token, reading new text into line buffer if needed."
while True:
if self.line == '': self.line = self.file.readline()
if self.line == '': return eof_object
token, self.line = re.match(InPort.tokenizer, self.line).groups()
if token != '' and not token.startswith(';'):
return token
class Symbol(str): pass
def Sym(s, symbol_table={}):
"""Find or create unique symbol entry for str s in symbol table."""
if s not in symbol_table: symbol_table[s] = Symbol(s)
return symbol_table[s]
_quote, _if, _set, _define, _lambda, _begin, _definemacro, = map(Sym,
"quote if set! define lambda begin define-macro".split())
_quasiquote, _unquote, _unquotesplicing = map(Sym,
"quasiquote unquote unquote-splicing".split())
_load = Sym("load")
global_env = standard_env()
eof_object = Symbol('#<eof-object>') # note that it's using Symbol, not Sym, so it's unreadable
def schemestr(exp):
"""Convert a python Object back into a Scheme-readable string."""
if isinstance(exp, List):
return '(%s)' % (' '.join(map(schemestr, exp)))
else:
return str(exp)
def parse(program):
"""Read a scheme expression from a string."""
return read(program)
def tokenize(chars):
"""Convert a string of characters into a list of tokens."""
return chars.replace('(', ' ( ').replace(')', ' ) ').split()
def readchar(inport):
"""Read the next character from an input port."""
if inport.line != '':
ch, inport.line = inport.line[0], inport.line[1:]
return ch
else:
return inport.file.read(1) or eof_object
def read(inport):
"""Read a Scheme expression from an input port."""
def read_ahead(token):
if '(' == token:
L = []
while True:
token = inport.next_token()
if token == ')': return L
else: L.append(read_ahead(token))
elif ')' == token: raise SyntaxError('unexpected )')
elif token in quotes: return [quotes[token], read(inport)]
elif token is eof_object: raise SyntaxError("unexpected EOF in list")
else: return atom(token)
# body of read:
token1 = inport.next_token()
return eof_object if token1 is eof_object else read_ahead(token1)
quotes = {"'": _quote, "`": _quasiquote, ",": _unquote, ",@": _unquotesplicing}
def read_from_tokens(tokens):
"""Read an expression from a sequence of tokens."""
if len(tokens) == 0:
raise SyntaxError("Unexpected EOF while reading")
token = tokens.pop(0)
if '(' == token:
L = []
while tokens[0] != ')':
L.append(read_from_tokens(tokens))
tokens.pop(0) # last thing we saw was ')', pop it
return L
elif ')' == token:
raise SyntaxError('unexpected )')
else:
return atom(token)
def atom(token):
"""Numbers become numbers; every other token is a symbol."""
if token == '#t': return True
if token == '#f': return False
elif token[0] == '"': return token[1:-1].decode('string_escape')
try:
return int(token)
except ValueError:
try:
return float(token)
except ValueError:
try:
return complex(token.replace('i', 'j', 1))
except ValueError:
return Sym(token)
def to_string(x):
"""Convert a Python object back into a Lisp-readable string."""
if x is True: return "#t"
elif x is False: return "#f"
elif isinstance(x, Symbol): return x
elif isinstance(x, str): return '"%s"' % x.encode('string_escape').replace('"', r'\"')
elif isinstance(x, list): return '('+' '.join(map(to_string, x))+')'
elif isinstance(x, complex): return str(x).replace('j', 'i')
else: return str(x)
def load(filename):
"""Eval every expression from a file."""
repl(None, InPort(open(filename)), None)
# This will likely get moved to it's own module
def eval(x, env=global_env):
"""Evaluate an expression in an environment"""
print "(DEBUG: %s )" % (x, )
if isinstance(x, Symbol):
# variable reference: http://www.schemers.org/Documents/Standards/R5RS/HTML/r5rs-Z-H-7.html#%_sec_4.1.1
return env.find(x)[x]
elif not isinstance(x, List):
# constant literal: http://www.schemers.org/Documents/Standards/R5RS/HTML/r5rs-Z-H-7.html#%_sec_4.1.2
return x
elif x[0] is _if:
# conditional: http://www.schemers.org/Documents/Standards/R5RS/HTML/r5rs-Z-H-7.html#%_sec_4.1.5
# assumes (if test conseq alt)
(_, test, conseq, alt) = x
exp = (conseq if eval(test, env) else alt)
return eval(exp, env)
elif x[0] is _define:
# definition: http://www.schemers.org/Documents/Standards/R5RS/HTML/r5rs-Z-H-8.html#%_sec_5.2
        # this is assuming it's syntactically valid: (define var expr)
# if it's of the form (define (f params) expr) then we want to treat it as:
# (define f (lambda (params) expr)
_ = x[0]
var = x[1]
exp = x[2:] if len(x) > 3 else x[2]
        print "(DEBUG: \tvar is %s)" % (type(var), )
if isinstance(var, List):
print "(DEBUG: \tswitching to lambda)"
return eval([_,
var[0], [_lambda, var[1:], exp]], env)
else:
env[var] = eval(exp, env)
return env[var]
elif x[0] is _set:
        # assignment: http://www.schemers.org/Documents/Standards/R5RS/HTML/r5rs-Z-H-7.html#%_sec_4.1.6
(_, var, expression) = x
env.find(var)[var] = eval(expression, env)
return
elif x[0] is _lambda:
# lambda procedure: http://www.schemers.org/Documents/Standards/R5RS/HTML/r5rs-Z-H-7.html#%_sec_4.1.4
(_, params, body) = x
return Procedure(params, body, env)
elif x[0] is _load:
# my own special form, though mit-scheme has it, so it probably is defined
(_, filename) = x
load(filename)
else:
# procedure call: http://www.schemers.org/Documents/Standards/R5RS/HTML/r5rs-Z-H-7.html#%_sec_4.1.3
proc = eval(x[0], env)
args = [eval(arg, env) for arg in x[1:]]
return proc(*args)
def repl(prompt="lisp.py> ", inport=InPort(sys.stdin), out=sys.stdout):
"""A prompt-read-eval-print loop."""
sys.stderr.write("Lispy version 2.0 (jdewald)\n")
while True:
#try:
if prompt: sys.stderr.write(prompt)
x = parse(inport)
if x is eof_object: return
val = eval(x)
if val is not None and out: print >> out, to_string(val)
#except Exception as e:
# print '%s: %s' % (type(e).__name__, e)
def main():
repl()
class Procedure(object):
"""A user-defined Scheme procedure
http://www.schemers.org/Documents/Standards/R5RS/HTML/r5rs-Z-H-7.html#%_sec_4.1.4
"""
def __init__(self, params, body, env):
self.params, self.body, self.env = params, body, env
# A procedure can actually be a set of Internal Definitions
# followed by the actual body. Here we aren't actually validating
# that but assuming that an array will be valid
if not isinstance(self.body[0], List):
self.body = [self.body]
def __call__(self, *args):
env = Env(self.params, args, self.env)
ret = None
for body in self.body:
ret = eval(body, env)
return ret
if __name__ == "__main__":
main()
```
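A tiny parsing-only sketch (nothing is evaluated, so the environment is untouched); it assumes `lispy.py` and its companion `environment.py` are importable and that it runs under the Python 2 interpreter the module targets:
```python
# Parse a form from a flat string and print it back; no evaluation happens.
import lispy

tokens = lispy.tokenize("(define (square x) (* x x))")
ast = lispy.read_from_tokens(tokens)
print(lispy.to_string(ast))  # (define (square x) (* x x))
```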
|
{
"source": "jdewasseige/Crux",
"score": 2
}
|
#### File: Crux/components/user.py
```python
import socket
import json
import time
from petlib.ec import *
import binascii
import sys
import numpy
import math
from includes import config as conf
from includes import utilities
from includes import Classes
from includes import SocketExtend as SockExt
from includes import parser as p
#Globals
G = EcGroup(nid=conf.EC_GROUP)
def remote_encrypt(ip, port, value):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, int(port)))
data = {'request':'encrypt', 'contents': {'value':value}}
SockExt.send_msg(s, json.dumps(data))
result = json.loads(SockExt.recv_msg(s))
data = json.loads(result['return'])
cipher_obj = Classes.Ct(EcPt.from_binary(binascii.unhexlify(data['pub']),G), EcPt.from_binary(binascii.unhexlify(data['a']),G), EcPt.from_binary(binascii.unhexlify(data['b']),G), Bn.from_hex(data['k']), None)
s.close()
return cipher_obj
def remote_decrypt(ip, port, cipher_obj):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, int(port)))
json_obj_str = cipher_obj.to_JSON()
data = {'request':'decrypt', 'contents': json_obj_str}
SockExt.send_msg(s, json.dumps(data))
result = json.loads(SockExt.recv_msg(s))
s.close()
return result['return']
def comp_median(fn, sheet, column_1, column_2, column_3):
rows = p.get_rows(fn, sheet, 1, 0) #determine which rows correspond to relay
values = p.read_xls_cell(fn, sheet, column_1, column_2, column_3, rows) #load values from xls
median = numpy.median(values)
return median
def comp_mean(fn, sheet, column_1, column_2, column_3):
rows = p.get_rows(fn, sheet, 1, 0) #determine which rows correspond to relay
values = p.read_xls_cell(fn, sheet, column_1, column_2, column_3, rows) #load values from xls
mean = numpy.mean(values)
return mean
def comp_variance(fn, sheet, column_1, column_2, column_3):
rows = p.get_rows(fn, sheet, 1, 0) #determine which rows correspond to relay
values = p.read_xls_cell(fn, sheet, column_1, column_2, column_3, rows) #load values from xls
variance = numpy.var(values)
return variance
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='User interface for privacy preserving statistics queries to the ToR network')
#ip/port
parser.add_argument('-s', '--server', type=str, help='Server name', required=True)
parser.add_argument('-p', '--port', type=str, help='Port number', required=True, default='8888')
#action
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--ping', action='store_true', help='Ping server')
group.add_argument('--pub', action='store_true', help='Request the public key from authority')
group.add_argument('--stat', help='Run size tests', nargs='+') #choices=['mean', 'median', 'variance']
group.add_argument('--test', action='store_true', help='Verifies that the remote encryption and decryption work properly')
#Experiments/Unit Tests
group.add_argument('--sketch_size', action='store_true', help='Sketch size vs quality vs time')
group.add_argument('--DP', action='store_true', help='DP vs quality')
group.add_argument('--mean_test', action='store_true', help='Unit test for mean computation')
args = parser.parse_args()
#print comp_median('data/data_large.xls', 'iadatasheet2', 'Lone Parents', 'Lone parents not in employment', '2011')
#print comp_variance('data/data_large.xls', 'iadatasheet2', 'Lone Parents', 'Lone parents not in employment', '2011')
ip = args.server
port = args.port
print ip
if args.ping:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, int(port)))
print utilities.ping(s)
s.close()
elif args.pub:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, int(port)))
#pubkey
data = {'request':'pubkey'}
SockExt.send_msg(s, json.dumps(data))
result = json.loads(SockExt.recv_msg(s))
print EcPt.from_binary(binascii.unhexlify(result['return']), G)
s.close()
elif args.test:
value = 12345
tmp_obj = remote_encrypt(ip, port, value)
new_value = remote_decrypt(ip, port, tmp_obj)
#print new_value
if value==new_value:
print "Test Successful!"
else:
print "Test failed."
elif args.stat:
if args.stat[0] == 'mean':
tic = time.clock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, int(port)))
#data = {'request':'stat', 'contents': {'type':'mean', 'dp':'True', 'attributes':{'file':'data/data_large.xls', 'sheet':'iadatasheet2', 'column_1':'Adults in Employment', 'column_2':'No adults in employment in household: With dependent children', 'column_3':'2011'}}}
            data = {'request':'stat', 'contents': {'type':'mean', 'dp':'True', 'attributes':{'file':'data/data_large.xls', 'sheet':'iadatasheet2', 'column_1':args.stat[1], 'column_2':args.stat[2], 'column_3':args.stat[3]}}}
            data['contents']['attributes']['epsilon'] = str(conf.EPSILON)
            SockExt.send_msg(s, json.dumps(data))
            print "Request Sent"
            data = json.loads(SockExt.recv_msg(s))
print "Response:"
result = data['return']
if result['success']=='True':
approx_res = result['value']
cor_res = comp_mean('data/data_large.xls', 'iadatasheet2', args.stat[1], args.stat[2], args.stat[3])
toc = time.clock()
dt = (toc - tic)
elif args.stat[0] == 'median':
tic = time.clock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, int(port)))
#data = {'request':'stat', 'contents': {'type':'median', 'dp':'True', 'attributes':{'file':'data/data_large.xls', 'sheet':'iadatasheet2', 'column_1':'Adults in Employment', 'column_2':'No adults in employment in household: With dependent children', 'column_3':'2011'}}}
data = {'request':'stat', 'contents': {'type':'median', 'dp':'True', 'attributes':{'file':'data/data_large.xls', 'sheet':'iadatasheet2', 'column_1':args.stat[1], 'column_2':args.stat[2], 'column_3':args.stat[3]}}}
#Compute sketch parameters
tmp_w = int(math.ceil(math.e / conf.EPSILON))
tmp_d = int(math.ceil(math.log(1.0 / conf.DELTA)))
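            #A worked sizing example for the sketch parameters above (the numbers
            #are illustrative, not taken from includes/config.py): with EPSILON =
            #0.1 and DELTA = 0.05, the width is w = ceil(e / 0.1) = 28 and the
            #depth is d = ceil(ln(1 / 0.05)) = 3, i.e. a 28x3 count-min style
            #sketch whose estimation error scales with epsilon and fails with
            #probability at most delta.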
#print conf.EPSILON
data['contents']['attributes']['epsilon'] = str(conf.EPSILON)
data['contents']['attributes']['delta'] = str(conf.DELTA)
data['contents']['attributes']['sk_w'] = tmp_w
data['contents']['attributes']['sk_d'] = tmp_d
SockExt.send_msg(s, json.dumps(data))
print "Request Sent"
data = json.loads(SockExt.recv_msg(s))
print "Response:"
result = data['return']
if result['success']=='True':
approx_res = result['value']
cor_res = comp_median('data/data_large.xls', 'iadatasheet2', args.stat[1], args.stat[2], args.stat[3])
toc = time.clock()
dt = (toc - tic)
elif args.stat[0] == 'variance':
tic = time.clock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, int(port)))
data = {'request':'stat', 'contents': {'type':'variance', 'dp':'True', 'attributes':{'file':'data/data_large.xls', 'sheet':'iadatasheet2', 'column_1':args.stat[1], 'column_2':args.stat[2], 'column_3':args.stat[3]}}}
SockExt.send_msg(s, json.dumps(data))
print "Request Sent"
data = json.loads(SockExt.recv_msg(s))
print "Response:"
result = data['return']
if result['success']=='True':
approx_res = result['value']
cor_res = comp_variance('data/data_large.xls', 'iadatasheet2', args.stat[1], args.stat[2], args.stat[3])
toc = time.clock()
dt = (toc - tic)
#Print stats results
if result['success']=='True':
print "The %s of %s is: %s" %(result['type'] , result['attribute'], approx_res)
print "The correct result is: " + str(cor_res)
print "The err is: " + str(abs(float(approx_res) - float(cor_res)))
print "Total time: " + str(dt)
else:
print "Stat could not be computed."
elif args.sketch_size:
wd_time = {}
wd_error = {}
cor_res = comp_median('data/data_large.xls', 'iadatasheet2', 'Adults in Employment', 'No adults in employment in household: With dependent children', '2011')
#[0.5, 0.35, 0.25, 0.15, 0.1, 0.05, 0.025, 0.01]: #, 0.005, 0.001]:
for param_d in range(1,10):
wd_time[str(param_d)] = {}
wd_error[str(param_d)] = {}
print "d: " + str(param_d)
for param_w in range(3,70):
tic = time.time()
#print 'tic: ' + str(tic)
print "w: " + str(param_w)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, int(port)))
data = {'request':'stat', 'contents': {'type':'median', 'dp':'True', 'attributes':{'file':'data/data_large.xls', 'sheet':'iadatasheet2', 'column_1':'Adults in Employment', 'column_2':'No adults in employment in household: With dependent children', 'column_3':'2011'}}}
#data = {'request':'stat', 'contents': {'type':'median', 'dp':'True', 'attributes':{'file':'data/data_large.xls', 'sheet':'iadatasheet2', 'column_1':args.stat[1], 'column_2':args.stat[2], 'column_3':args.stat[3]}}}
#Compute sketch parameters
#tmp_w = int(math.ceil(math.e / param_w))
#tmp_d = int(math.ceil(math.log(1.0 / param_d)))
print "Sketch bins: tmp_w " + str(param_w) + " tmp_d " + str(param_d)
data['contents']['attributes']['sk_w'] = param_w
data['contents']['attributes']['sk_d'] = param_d
data['contents']['attributes']['epsilon'] = 0
data['contents']['attributes']['delta'] = 0
SockExt.send_msg(s, json.dumps(data))
#print "Request Sent"
data = json.loads(SockExt.recv_msg(s))
#print "Response:"
result = data['return']
if result['success']=='True':
approx_res = result['value']
#cor_res = comp_median('data/data_large.xls', 'iadatasheet2', 'Adults in Employment', 'No adults in employment in household: With dependent children', '2011')
toc = time.time()
#print 'toc: ' + str(toc)
dt = (toc - tic)
print "The %s of %s is: %s" %(result['type'] , result['attribute'], approx_res)
#print "The correct result is: " + str(cor_res)
print "The err is: " + str(abs(float(approx_res) - float(cor_res)))
print "Total time: " + str(dt)
print "-----------"
wd_time[str(param_d)][str(param_w)] = str(dt)
wd_error[str(param_d)][str(param_w)] = str(abs(float(approx_res) - float(cor_res)))
utilities.dict_to_csv("time.csv", wd_time)
utilities.dict_to_csv("errors.csv", wd_error)
elif args.mean_test:
tic = time.clock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, int(port)))
data = {'request':'stat', 'contents': {'type':'mean', 'dp':'False', 'attributes':{'file':'data/data_large.xls', 'sheet':'iadatasheet2', 'column_1':'Adults in Employment', 'column_2':'No adults in employment in household: With dependent children', 'column_3':'2011'}}}
#data = {'request':'stat', 'contents': {'type':'mean', 'dp':'False', 'attributes':{'file':'data/data_large.xls', 'sheet':'iadatasheet2', 'column_1':args.stat[1], 'column_2':args.stat[2], 'column_3':args.stat[3]}}}
SockExt.send_msg(s, json.dumps(data))
#print "Request Sent"
data = json.loads(SockExt.recv_msg(s))
#print "Response:"
result = data['return']
if result['success']=='True':
approx_res = result['value']
cor_res = comp_mean('data/data_large.xls', 'iadatasheet2', 'Adults in Employment', 'No adults in employment in household: With dependent children', '2011')
toc = time.clock()
dt = (toc - tic)
if abs(float(approx_res)-float(cor_res))<=0.1:
print "Mean accuracy test passed!"
else:
print "Mean accuracy test failed."
print cor_res
print approx_res
```
|
{
"source": "jdeweese1/ksu_acacia_server",
"score": 2
}
|
#### File: acacia_server/public/api.py
```python
import httpx
from flask_restful import Resource, reqparse
from acacia_server import utils
from slack import WebClient
from acacia_server import settings
import json
from flask import request
import threading
user_ids_that_can_post_cleaning_duties = [] # TODO Add the Ids of the people that can post cleaning duties
class InfoBot(Resource):
def __init__(self):
super().__init__()
self.parser = reqparse.RequestParser()
self.parser.add_argument('challenge')
self.parser.add_argument('event')
def post(self):
args = self.parser.parse_args()
event = json.loads(args['event'].replace("'", '"'))
assert event['channel_type'] == 'im'
if 'user' in event.keys():
client = WebClient(token=settings.SLACK_TOKEN)
message_text = '''ADD IMPORTANT LINKS HERE FOR YOUR ORGANIZATION'''
print(args)
print(request.headers)
def target():
return client.\
chat_postMessage(channel=event['channel'], text=message_text)
t1 = threading.Thread(group=None, target=target)
t1.start()
return {'status': 'ok'}, 200, {'X-Slack-No-Retry': 1}
class CleaningDuties(Resource):
def __init__(self):
super().__init__()
self.parser = reqparse.RequestParser()
self.parser.add_argument('user_id')
self.parser.add_argument('command')
self.parser.add_argument('response_url')
def post(self):
args = self.parser.parse_args()
user_id = args['user_id']
response_url = args.get('response_url', None)
resp_msg = ''
rtn_val = ()
try:
if user_id in user_ids_that_can_post_cleaning_duties:
utils.post_duties()
resp_msg = 'duties posted'
rtn_val = {'status': 'ok'}, 200
else:
rtn_val = {'status': 'bad', 'reason': 'you are not authorized to post cleaning duties'}
resp_msg = 'you are not authorized'
except: # noqa E722
rtn_val = {'status': 'bad', 'reason': 'could not post duties'}, 500
resp_msg = 'an error occurred when posting'
finally:
data = {'text': resp_msg}
if response_url:
                httpx.post(response_url, json=data)
return rtn_val
```
|
{
"source": "jdewells/jschon",
"score": 3
}
|
#### File: jschon/jschon/uri.py
```python
import rfc3986
import rfc3986.exceptions
import rfc3986.misc
import rfc3986.validators
from jschon.exceptions import URIError
__all__ = [
'URI',
]
class URI:
def __init__(self, value: str) -> None:
self._uriref = rfc3986.uri_reference(value)
def __str__(self) -> str:
return self._uriref.unsplit()
def __repr__(self) -> str:
return f"URI({str(self)!r})"
def __len__(self) -> int:
return len(str(self))
def __hash__(self) -> int:
return hash(self._uriref)
def __eq__(self, other) -> bool:
if isinstance(other, URI):
return self._uriref == other._uriref
if other is None:
return False
return self._uriref.__eq__(other)
@property
def scheme(self) -> str:
return self._uriref.scheme
@property
def authority(self) -> str:
return self._uriref.authority
@property
def path(self) -> str:
return self._uriref.path
@property
def query(self) -> str:
return self._uriref.query
@property
def fragment(self) -> str:
return self._uriref.fragment
def is_absolute(self) -> bool:
return self._uriref.is_absolute()
def has_absolute_base(self) -> bool:
return self.copy(fragment=False).is_absolute()
def resolve(self, base_uri: 'URI') -> 'URI':
"""Produce a new URI by resolving self against the given base URI."""
uri = object.__new__(URI)
uri._uriref = self._uriref.resolve_with(base_uri._uriref)
return uri
def copy(
self,
scheme=True,
authority=True,
path=True,
query=True,
fragment=True,
) -> 'URI':
"""Produce a new URI composed of the specified components of self.
- True => use existing
- False/None => remove
- Otherwise => replace
"""
uri = object.__new__(URI)
uri._uriref = self._uriref.copy_with(
scheme=rfc3986.misc.UseExisting if scheme is True else None if scheme is False else scheme,
authority=rfc3986.misc.UseExisting if authority is True else None if authority is False else authority,
path=rfc3986.misc.UseExisting if path is True else None if path is False else path,
query=rfc3986.misc.UseExisting if query is True else None if query is False else query,
fragment=rfc3986.misc.UseExisting if fragment is True else None if fragment is False else fragment,
)
return uri
def validate(
self,
require_scheme: bool = False,
require_normalized: bool = False,
allow_fragment: bool = True,
allow_non_empty_fragment: bool = True,
) -> None:
"""Validate self.
:raise URIError: if self fails validation
"""
validator = rfc3986.validators.Validator()
if require_scheme:
validator = validator.require_presence_of('scheme')
try:
validator.validate(self._uriref)
except rfc3986.exceptions.ValidationError as e:
msg = f"'{self}' is not a valid URI"
if require_scheme:
msg += " or does not contain a scheme"
raise URIError(msg) from e
if require_normalized and self._uriref != self._uriref.normalize():
raise URIError(f"'{self}' is not normalized")
if not allow_fragment and self._uriref.fragment is not None:
raise URIError(f"'{self}' has a fragment")
if not allow_non_empty_fragment and self._uriref.fragment:
raise URIError(f"'{self}' has a non-empty fragment")
```
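A minimal usage sketch of the `URI` wrapper above (the module path `jschon.uri` is assumed from the file location, and the example values are illustrative):
```python
from jschon.uri import URI

base = URI("https://example.com/schemas/root.json")
ref = URI("child.json#/definitions/x")

resolved = ref.resolve(base)
print(resolved)                            # https://example.com/schemas/child.json#/definitions/x

absolute = resolved.copy(fragment=False)   # same URI with the fragment removed
absolute.validate(require_scheme=True)     # raises URIError on failure
print(absolute.has_absolute_base())        # True
```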
|
{
"source": "JDewhirst/advent-of-code",
"score": 4
}
|
#### File: 2020/day_12/day_12.py
```python
import numpy as np
from math import sin, cos, radians
def ReadCommands(filename):
with open(filename,"r") as file:
data = [line.strip("\n") for line in file.readlines()]
return data
class Part1:
def __init__(self):
# Begins at (0,0) facing East
self.position = ([0.0],[0.0])
self.vector = ([1.0],[0.0])
def __str__(self):
return (f"position={self.position}, facing ={self.vector}")
def rotate(self,direction,angle):
        # L turns left by the given number of degrees
        # R turns right by the given number of degrees
if direction == "R":
angle = -1.0*angle
elif direction == "L":
angle = 1.0*angle
else:
print(f"Rotation {direction+angle} not recognised")
rot = ([cos(radians(angle)), -sin(radians(angle))],
[sin(radians(angle)),cos(radians(angle))])
self.vector = np.matmul(rot,self.vector)
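        # Worked example of the rotation above (up to floating-point error):
        # facing east, self.vector is ([1],[0]); the command R90 gives
        # angle = -90, so rot = [[0, 1], [-1, 0]] and the matmul yields
        # ([0],[-1]), i.e. the ship now faces south.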
def command(self,command):
order = command[0]
value = float(command[1:])
# N move north (0,1)
if order == "N":
self.position = np.add( np.dot(value,([0.0],[1.0])),self.position)
# S move south (0,-1)
elif order == "S":
self.position = np.add( np.dot(value,([0.0],[-1.0])),self.position)
# E move east (1,0)
elif order == "E":
self.position = np.add( np.dot(value,([1.0],[0.0])),self.position)
# W move west (-1,0)
elif order == "W":
self.position = np.add( np.dot(value,([-1.0],[0.0])),self.position)
# F move forward in current facing by given value
elif order == "F":
self.position = np.add( np.dot(value,self.vector),self.position)
elif order == "R" or order == "L":
self.rotate(order,value)
def manhattandist(self):
return self.position
class Part2:
def __init__(self):
# Begins at (0,0) with waypoint at (10,1) from the ship
self.position = ([0.0],[0.0])
self.waypoint = ([10.0],[1.0])
def __str__(self):
return (f"position={self.position}, waypoint ={self.waypoint}")
def rotate(self,direction,angle):
        # L turns left by the given number of degrees
        # R turns right by the given number of degrees
if direction == "R":
angle = -1.0*angle
elif direction == "L":
angle = 1.0*angle
else:
print(f"Rotation {direction+angle} not recognised")
rot = ([cos(radians(angle)), -sin(radians(angle))],
[sin(radians(angle)),cos(radians(angle))])
self.waypoint = np.matmul(rot,self.waypoint)
def command(self,command):
order = command[0]
value = float(command[1:])
# N move north (0,1)
if order == "N":
self.waypoint = np.add( np.dot(value,([0.0],[1.0])),self.waypoint)
# S move south (0,-1)
elif order == "S":
self.waypoint = np.add( np.dot(value,([0.0],[-1.0])),self.waypoint)
# E move east (1,0)
elif order == "E":
self.waypoint = np.add( np.dot(value,([1.0],[0.0])),self.waypoint)
# W move west (-1,0)
elif order == "W":
self.waypoint = np.add( np.dot(value,([-1.0],[0.0])),self.waypoint)
# F move forward in current facing by given value
elif order == "F":
self.position = np.add( np.dot(value,self.waypoint),self.position)
elif order == "R" or order == "L":
self.rotate(order,value)
def manhattandist(self):
return self.position
if __name__=="__main__":
data = ReadCommands("input.txt")
ferry = Part1()
for item in data:
#print(item)
ferry.command(item)
#print(ferry)
print(f"Part 1 Manhattan Distance = {abs(ferry.position[0][0])+abs(ferry.position[1][0])}")
ferry2 = Part2()
for item in data:
#print(item)
ferry2.command(item)
#print(ferry2)
print(f"Part 2 Manhattan Distance = {abs(ferry2.position[0][0])+abs(ferry2.position[1][0])}")
```
#### File: 2020/day_1/solution.py
```python
def findtwo2020(filename):
f = open(filename)
data = f.readlines()
f.close()
data = [int(item.strip("\n")) for item in data]
for i in range(len(data)):
for j in range(i+1,len(data)):
#print(data[i],data[j],data[i]+data[j])
if data[i]+data[j] == 2020:
return data[i]*data[j]
return "Did not find it"
def findthree2020(filename):
f = open(filename)
data = f.readlines()
f.close()
data = [int(item.strip("\n")) for item in data]
for i in range(len(data)):
for j in range(i+1,len(data)):
for k in range(j+1,len(data)):
#print(data[i]+data[j]+data[k])
if data[i]+data[j]+data[k] == 2020:
return data[i]*data[j]*data[k]
return "Did not find it"
```
#### File: 2021/day_1/solution.py
```python
import re
def ReadData(filename):
with open(filename) as f:
lines = f.readlines()
lines = [int(item.strip("\n")) for item in lines]
return lines
def NumOfIncreases(data):
count = 0
for i in range(1,len(data)):
if data[i-1] < data[i]:
count +=1
return count
def NumOfIncreasesWindow(data, window):
count = 0
for i in range(1,len(data)-window+1):
#print(data[i:i+window])
#print(data[i+window:i+window])
if sum(data[i-1:i-1+window]) < sum(data[i:i+window]):
count += 1
return count
if __name__=="__main__":
sonarReadings = ReadData("input.txt")
print(f'Part 1: {NumOfIncreases(sonarReadings)}')
print(f'Part 2: {NumOfIncreasesWindow(sonarReadings,3)}')
```
#### File: 2021/day_3/solution.py
```python
def readData(filename):
with open(filename) as f:
data = f.readlines()
data = [list(item.strip("\n")) for item in data]
return data
def CountBits(input):
bits = len(input[0])
bitCount = [0 for i in range(bits)]
for entry in input:
for i in range(len(bitCount)):
bitCount[i] += int(entry[i])
return bitCount
def FindRates(bitCount, numEntries):
gammaRate = [ '0' for i in range(len(bitCount)) ]
for i in range(len(bitCount)):
if bitCount[i] > numEntries/2:
gammaRate[i] = '1'
epsilonRate = ['0' if i == '1' else '1' for i in gammaRate]
print(gammaRate, epsilonRate)
return (''.join(gammaRate),''.join(epsilonRate))
def BinaryToDecimal(num, expo=1):
if num== 0:
return 0
else:
digit= num % 10
num= int(num / 10)
digit= digit * expo
return digit + BinaryToDecimal(num, expo * 2)
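# Worked example of the recursion above: BinaryToDecimal(1011) peels digits off
# the right and weights them by expo = 1, 2, 4, 8, ..., giving
# 1*1 + 1*2 + 0*4 + 1*8 = 11.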
if __name__=="__main__":
input = readData("input.txt")
for item in input:
print(item)
numEntries = len(input)
bitCount = CountBits(input)
print(bitCount)
rates = FindRates(bitCount, numEntries)
print(f'Gamma Rate = {BinaryToDecimal(int(rates[0]))}, Epsilon Rate = {BinaryToDecimal(int(rates[1]))}; Solution {BinaryToDecimal(int(rates[0])) * BinaryToDecimal(int(rates[1]))}')
```
|
{
"source": "jdewinne/pyspinnaker",
"score": 2
}
|
#### File: pyspinnaker/tests/test_client.py
```python
from spinnaker import SpinnakerClient
class TestClient(object):
def test_init(self):
x = SpinnakerClient("http://localhost")
assert x is not None
```
|
{
"source": "jdewinne/xlr-sdelements-plugin",
"score": 2
}
|
#### File: test/jython/test_get_task.py
```python
import json
import requests_mock
from nose.tools import eq_, raises
from responses import GET_TASK_RESPONSE
from sdelements.SDEClient import SDEClient
@requests_mock.Mocker()
class TestGetTask(object):
def test_get_task_basic_auth(self, m):
sde_client = SDEClient("http://localhost/sde", "Basic", None, "admin", "admin", None)
m.register_uri('GET', SDEClient.GET_TASK_URI % (sde_client.url, '1', '1-T2'), json=json.loads(GET_TASK_RESPONSE))
eq_(json.loads(GET_TASK_RESPONSE), sde_client.get_task('1', '1-T2'))
def test_get_task_token_auth(self, m):
sde_client = SDEClient("http://localhost/sde", "PAT", None, None, None, "1234abcd")
m.register_uri('GET', SDEClient.GET_TASK_URI % (sde_client.url, '1', '1-T2'), json=json.loads(GET_TASK_RESPONSE))
eq_(json.loads(GET_TASK_RESPONSE), sde_client.get_task('1', '1-T2'))
@raises(Exception)
def test_get_unknown_task(self, m):
sde_client = SDEClient("http://localhost/sde", "Basic", None, "admin", "admin", None)
        m.register_uri('GET', SDEClient.GET_TASK_URI % (sde_client.url, '1', 'FAILED'), status_code=403)
sde_client.get_task('1', 'FAILED')
@raises(Exception)
def test_get_unknown_authentication_method(self, m):
sde_client = SDEClient("http://localhost/sde", "Unknown", None, None, None, None)
m.register_uri('GET', SDEClient.GET_TASK_URI % (sde_client.url, '1', '1-T2'), json=json.loads(GET_TASK_RESPONSE))
sde_client.get_task('1', 'FAILED')
```
|
{
"source": "jdey4/DF-CNN",
"score": 2
}
|
#### File: jdey4/DF-CNN/exp_5000.py
```python
import pickle
import numpy as np
from itertools import product
import os
import tensorflow as tf
import tensorflow.keras as keras
import warnings
warnings.filterwarnings("default", category=DeprecationWarning)
# In[ ]:
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo)
return dict
def change_label(label, task):
labels = label.copy()
lbls_to_change = range(0,10,1)
lbls_to_transform = range((task-1)*10,task*10,1)
for count, i in enumerate(lbls_to_change):
indx = np.where(labels == i)
labels[indx] = -lbls_to_transform[count]
for count, i in enumerate(lbls_to_transform):
indx = np.where(labels == i)
labels[indx] = lbls_to_change[count]
indx = np.where(labels<0)
labels[indx] = -labels[indx]
return labels
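# Worked example of the relabelling above: with task=3 the label block 20-29 is
# swapped with 0-9, so an original label 21 becomes 1 and an original label 1
# becomes 21; the temporary negative values keep the two passes from clobbering
# each other.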
# In[ ]:
def cross_val_data(data_x, data_y, num_points_per_task, slot_no, total_task=10, shift=1):
x = data_x.copy()
y = data_y.copy()
idx = [np.where(data_y == u)[0] for u in np.unique(data_y)]
sample_per_class = num_points_per_task//total_task
for task in range(total_task):
for class_no in range(task*10,(task+1)*10,1):
indx = np.roll(idx[class_no],(shift-1)*100)
if class_no==0 and task==0:
train_x = x[indx[slot_no*sample_per_class:(slot_no+1)*sample_per_class],:]
train_y = y[indx[slot_no*sample_per_class:(slot_no+1)*sample_per_class]]
else:
train_x = np.concatenate((train_x, x[indx[slot_no*sample_per_class:(slot_no+1)*sample_per_class],:]), axis=0)
train_y = np.concatenate((train_y, y[indx[slot_no*sample_per_class:(slot_no+1)*sample_per_class]]), axis=0)
if class_no==0:
test_x = x[indx[500:600],:]
test_y = y[indx[500:600]]
else:
test_x = np.concatenate((test_x, x[indx[500:600],:]), axis=0)
test_y = np.concatenate((test_y, y[indx[500:600]]), axis=0)
return train_x, train_y, test_x, test_y
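# With the values used later in this script (num_points_per_task = 500,
# total_task = 10), each class contributes sample_per_class = 500 // 10 = 50
# training images, so every 10-class task trains on 500 points, while indices
# 500:600 of each class form the fixed 100-image test split.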
# In[ ]:
def experiment():
get_ipython().system('python ./main_train_cl.py --gpu 0 --data_type CIFAR100_10 --data_percent 100 --model_type DFCNN --lifelong --save_mat_name CIFAR_res2.mat')
#get_ipython().system('python ./main_train_cl.py --gpu 1 --data_type CIFAR100_10 --data_percent 100 --model_type PROG --lifelong --save_mat_name CIFAR_res2.mat')
#!python ./main_train_cl.py --data_type CIFAR100_10 --data_percent 100 --model_type PROG --lifelong --save_mat_name CIFAR_res.mat
# In[ ]:
(X_train, y_train), (X_test, y_test) = keras.datasets.cifar100.load_data()
data_x = np.concatenate([X_train, X_test])
data_y = np.concatenate([y_train, y_test])
data_y = data_y[:, 0]
# In[ ]:
#saving the file
alg = ['Prog_NN', 'DF_CNN']
task_to_complete = 10
with open('./task_count.pickle','wb') as f:
pickle.dump(task_to_complete, f)
# In[ ]:
filename = './slot_res'
tmp_file = './tmp'
data_folder = './Data/cifar-100-python'
if not os.path.exists(filename):
os.mkdir(filename)
if not os.path.exists(tmp_file):
os.mkdir(tmp_file)
if not os.path.exists(data_folder):
os.mkdir('Data')
os.mkdir(data_folder)
num_points_per_task = 500
slot_fold = [1] #range(10)
shift_fold = range(1,2,1)
algs = range(2)
for shift in shift_fold:
for slot in slot_fold:
tmp_train = {}
tmp_test = {}
train_x, train_y, test_x, test_y = cross_val_data(
data_x,data_y,num_points_per_task,slot,shift=shift
)
tmp_train[b'data'] = train_x
tmp_train[b'fine_labels'] = train_y
tmp_test[b'data'] = test_x
tmp_test[b'fine_labels'] = test_y
with open('./Data/cifar-100-python/train.pickle', 'wb') as f:
pickle.dump(tmp_train, f)
with open('./Data/cifar-100-python/test.pickle', 'wb') as f:
pickle.dump(tmp_test, f)
get_ipython().system('rm ./Data/cifar100_mtl_data_group_410_80_1000_10.pkl')
experiment()
res = unpickle('./tmp/res.pickle')
with open(filename+'/'+alg[1]+str(shift)+'_'+str(slot)+'.pickle','wb') as f:
pickle.dump(res,f)
#get_ipython().system('sudo shutdown now')
```
#### File: jdey4/DF-CNN/main_train_cl.py
```python
from os import getcwd, listdir, mkdir
from utils.utils_env_cl import num_data_points, model_setup
from classification.gen_data import mnist_data, mnist_data_print_info, cifar10_data, cifar100_data, cifar_data_print_info, officehome_data, officehome_data_print_info
from classification.train_wrapper import train_run_for_each_model
def main():
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--gpu', help='GPU device ID', type=int, default=-1)
parser.add_argument('--data_type', help='Type of Data (MNIST5/MNIST10/CIFAR10_5/CIFAR10_10/CIFAR100_10/CIFAR100_20/OfficeHome)', type=str, default='MNIST5')
parser.add_argument('--data_percent', help='Percentage of train data to be used', type=int, default=50)
parser.add_argument('--model_type', help='Architecture of Model(STL/SNN/HPS/TF/PROG/DEN/DFCNN/DFCNN_direct/DFCNN_tc2)', type=str, default='STL')
parser.add_argument('--save_mat_name', help='Name of file to save training results', type=str, default='delete_this.mat')
parser.add_argument('--cnn_padtype_valid', help='Set CNN padding type VALID', action='store_false', default=True)
parser.add_argument('--lifelong', help='Train in lifelong learning setting', action='store_true', default=False)
parser.add_argument('--saveparam', help='Save parameter of NN', action='store_true', default=False)
parser.add_argument('--savegraph', help='Save graph of NN', action='store_true', default=False)
parser.add_argument('--tensorfactor_param_path', help='Path to parameters initializing tensor factorized model(below Result, above run0/run1/etc', type=str, default=None)
args = parser.parse_args()
gpu_device_num = args.gpu
if gpu_device_num > -1:
use_gpu = True
else:
use_gpu = False
do_lifelong = args.lifelong
if not 'Result' in listdir(getcwd()):
mkdir('Result')
## Name of .mat file recording all information of training and evaluation
mat_file_name = args.save_mat_name
data_type, data_percent = args.data_type.lower(), args.data_percent
data_hyperpara = {}
    data_hyperpara['num_train_group'] = 5 # number of pre-processed data sets (each set follows the same experimental setting but contains a different random selection of images)
data_hyperpara['multi_class_label'] = False # Binary classification vs multi-class classification
train_hyperpara = {}
train_hyperpara['improvement_threshold'] = 1.002 # for accuracy (maximizing it)
train_hyperpara['patience_multiplier'] = 1.5 # for early-stopping
if 'mnist' in data_type:
## MNIST case
data_hyperpara['image_dimension'] = [28, 28, 1]
data_hyperpara['num_train_data'], data_hyperpara['num_valid_data'], data_hyperpara['num_test_data'] = num_data_points(data_type, data_percent)
if '5' in data_type:
## Heterogeneous MTL/LL (each sub-task has distinct set of image classes)
data_hyperpara['num_tasks'] = 5
elif '10' in data_type:
            ## Homogeneous MTL/LL (each sub-task has the same set of image classes; only which class is labelled True differs)
data_hyperpara['num_tasks'] = 10
data_file_name = 'mnist_mtl_data_group_' + str(data_hyperpara['num_train_data']) + '_' + str(data_hyperpara['num_valid_data']) + '_' + str(data_hyperpara['num_test_data']) + '_' + str(data_hyperpara['num_tasks']) + '.pkl'
train_hyperpara['num_run_per_model'] = 5
train_hyperpara['train_valid_data_group'] = list(range(5)) + list(range(5))
train_hyperpara['lr'] = 0.001
train_hyperpara['lr_decay'] = 1.0/250.0
train_hyperpara['learning_step_max'] = 500
train_hyperpara['patience'] = 500
train_data, validation_data, test_data = mnist_data(data_file_name, data_hyperpara['num_train_data'], data_hyperpara['num_valid_data'], data_hyperpara['num_test_data'], data_hyperpara['num_train_group'], data_hyperpara['num_tasks'], data_percent)
mnist_data_print_info(train_data, validation_data, test_data)
elif ('cifar10' in data_type) and not ('cifar100' in data_type):
## CIFAR-10 case
data_hyperpara['image_dimension'] = [32, 32, 3]
data_hyperpara['num_train_data'], data_hyperpara['num_valid_data'], data_hyperpara['num_test_data'] = num_data_points(data_type, data_percent)
if '_5' in data_type:
## Heterogeneous MTL/LL (each sub-task has distinct set of image classes)
data_hyperpara['num_tasks'] = 5
elif '_10' in data_type:
            ## Homogeneous MTL/LL (each sub-task has the same set of image classes; only which class is labelled True differs)
data_hyperpara['num_tasks'] = 10
data_file_name = 'cifar10_mtl_data_group_' + str(data_hyperpara['num_train_data']) + '_' + str(data_hyperpara['num_valid_data']) + '_' + str(data_hyperpara['num_test_data']) + '_' + str(data_hyperpara['num_tasks']) + '.pkl'
train_hyperpara['num_run_per_model'] = 5
train_hyperpara['train_valid_data_group'] = range(5)
train_hyperpara['lr'] = 0.00025
train_hyperpara['lr_decay'] = 1.0/1000.0
train_hyperpara['learning_step_max'] = 2000
train_hyperpara['patience'] = 2000
train_data, validation_data, test_data = cifar10_data(data_file_name, data_hyperpara['num_train_data'], data_hyperpara['num_valid_data'], data_hyperpara['num_test_data'], data_hyperpara['num_train_group'], data_hyperpara['num_tasks'], multiclass=data_hyperpara['multi_class_label'], data_percent=data_percent)
cifar_data_print_info(train_data, validation_data, test_data)
elif 'cifar100' in data_type:
## CIFAR-100 case
data_hyperpara['multi_class_label'] = True
data_hyperpara['image_dimension'] = [32, 32, 3]
data_hyperpara['num_train_data'], data_hyperpara['num_valid_data'], data_hyperpara['num_test_data'] = num_data_points(data_type, data_percent)
if '_10' in data_type:
## Heterogeneous MTL/LL (each sub-task has distinct set of image classes)
data_hyperpara['num_tasks'] = 10
elif '_20' in data_type:
## Half-homogeneous MTL/LL (there are pairs of sub-tasks which share 5 classes of images)
data_hyperpara['num_tasks'] = 20
data_file_name = 'cifar100_mtl_data_group_' + str(data_hyperpara['num_train_data']) + '_' + str(data_hyperpara['num_valid_data']) + '_' + str(data_hyperpara['num_test_data']) + '_' + str(data_hyperpara['num_tasks']) + '.pkl'
train_hyperpara['num_run_per_model'] = 5
train_hyperpara['train_valid_data_group'] = range(5)
train_hyperpara['lr'] = 0.0001
train_hyperpara['lr_decay'] = 1.0/4000.0
train_hyperpara['patience'] = 200
train_hyperpara['learning_step_max'] = data_hyperpara['num_tasks'] * train_hyperpara['patience']
train_data, validation_data, test_data = cifar100_data(data_file_name, data_hyperpara['num_train_data'], data_hyperpara['num_valid_data'], data_hyperpara['num_test_data'], data_hyperpara['num_train_group'], data_hyperpara['num_tasks'], multiclass=data_hyperpara['multi_class_label'])
cifar_data_print_info(train_data, validation_data, test_data)
elif 'officehome' in data_type:
## Office-Home case
data_hyperpara['multi_class_label'] = True
data_hyperpara['image_dimension'] = [128, 128, 3]
data_hyperpara['num_train_data'], data_hyperpara['num_valid_data'], data_hyperpara['num_test_data'] = 0.6, 0.1, 0.3
data_hyperpara['num_classes'] = 13
data_hyperpara['num_tasks'] = 10
data_file_name = 'officehome_mtl_data_group_' + str(data_hyperpara['num_train_data']) + '_' + str(data_hyperpara['num_valid_data']) + '_' + str(data_hyperpara['num_test_data']) + '_t' + str(data_hyperpara['num_tasks']) + '_c' + str(data_hyperpara['num_classes']) + '_i' + str(data_hyperpara['image_dimension'][0]) + '.pkl'
train_hyperpara['num_run_per_model'] = 5
train_hyperpara['train_valid_data_group'] = list(range(5)) + list(range(5))
train_hyperpara['lr'] = 5e-6
train_hyperpara['lr_decay'] = 1.0/1000.0
train_hyperpara['patience'] = 1000
train_hyperpara['learning_step_max'] = data_hyperpara['num_tasks'] * train_hyperpara['patience']
train_data, validation_data, test_data = officehome_data(data_file_name, data_hyperpara['num_train_data'], data_hyperpara['num_valid_data'], data_hyperpara['num_test_data'], data_hyperpara['num_train_group'], data_hyperpara['image_dimension'])
officehome_data_print_info(train_data, validation_data, test_data)
## Model Set-up
model_architecture, model_hyperpara = model_setup(data_type, data_hyperpara['image_dimension'], args.model_type, args.cnn_padtype_valid)
train_hyperpara['num_tasks'] = data_hyperpara['num_tasks']
save_param_path = None
if args.saveparam:
if not 'params' in listdir(getcwd()+'/Result'):
mkdir('./Result/params')
save_param_dir_name = data_type + '_' + str(data_percent) + 'p_' + args.model_type
while save_param_dir_name in listdir(getcwd()+'/Result/params'):
## Add dummy characters to directory name to avoid overwriting existing parameter files
save_param_dir_name += 'a'
save_param_path = getcwd()+'/Result/params/'+save_param_dir_name
mkdir(save_param_path)
print(model_architecture)
if ('tensorfactor' in model_architecture) and (args.tensorfactor_param_path is not None):
tensorfactor_param_path = getcwd()+'/Result/'+args.tensorfactor_param_path
else:
tensorfactor_param_path = None
## Training the Model
saved_result = train_run_for_each_model(model_architecture, model_hyperpara, train_hyperpara, [train_data, validation_data, test_data], data_type, mat_file_name, saved_result=None, useGPU=use_gpu, GPU_device=gpu_device_num, doLifelong=do_lifelong, saveParam=args.saveparam, saveParamDir=save_param_path, saveGraph=args.savegraph, tfInitParamPath=tensorfactor_param_path)
if __name__ == '__main__':
main()
```
|
{
"source": "jdey4/progressive-learning",
"score": 2
}
|
#### File: experiments/random_class_exp/random_class_exp.py
```python
import warnings
warnings.simplefilter("ignore")
import random
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras as keras
from itertools import product
import pandas as pd
import numpy as np
import pickle
from sklearn.model_selection import StratifiedKFold
from math import log2, ceil
import sys
sys.path.append("../../src/")
from lifelong_dnn import LifeLongDNN
from joblib import Parallel, delayed
from multiprocessing import Pool
import tensorflow as tf
from sklearn.model_selection import train_test_split
#%%
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
#%%
def LF_experiment(data_x, data_y, ntrees, shift, slot, model, num_points_per_task, acorn=None):
df = pd.DataFrame()
shifts = []
slots = []
accuracies_across_tasks = []
train_x_task0, train_y_task0, test_x_task0, test_y_task0 = cross_val_data(data_x, data_y, num_points_per_task, total_task=10, shift=shift, slot=slot)
lifelong_forest = LifeLongDNN(model = model, parallel = True if model == "uf" else False)
lifelong_forest.new_forest(
train_x_task0,
train_y_task0,
max_depth=ceil(log2(num_points_per_task)), n_estimators=ntrees
)
task_0_predictions=lifelong_forest.predict(
test_x_task0, representation='all', decider=0
)
shifts.append(shift)
slots.append(slot)
accuracies_across_tasks.append(np.mean(
task_0_predictions == test_y_task0
))
print(accuracies_across_tasks)
for task_ii in range(29):
train_x, train_y, _, _ = cross_val_data(data_x, data_y, num_points_per_task, total_task=10, shift=shift, slot=slot, task = task_ii)
print("Starting Task {} For Fold {} For Slot {}".format(task_ii, shift, slot))
lifelong_forest.new_forest(
train_x,
train_y,
max_depth=ceil(log2(num_points_per_task)), n_estimators=ntrees
)
task_0_predictions=lifelong_forest.predict(
test_x_task0, representation='all', decider=0
)
shifts.append(shift)
slots.append(slot)
accuracies_across_tasks.append(np.mean(
task_0_predictions == test_y_task0
))
print(accuracies_across_tasks)
df['data_fold'] = shifts
df['slot'] = slots
df['accuracy'] = accuracies_across_tasks
file_to_save = 'result/'+model+str(ntrees)+'_'+str(shift)+'_'+str(slot)+'.pickle'
with open(file_to_save, 'wb') as f:
pickle.dump(df, f)
#%%
def cross_val_data(data_x, data_y, num_points_per_task, total_task=10, shift=1, slot=0, task=0):
skf = StratifiedKFold(n_splits=6)
for _ in range(shift + 1):
train_idx, test_idx = next(skf.split(data_x, data_y))
data_x_train, data_y_train = data_x[train_idx], data_y[train_idx]
data_x_test, data_y_test = data_x[test_idx], data_y[test_idx]
    selected_classes = np.random.choice(range(0, 100), 10, replace=False)
train_idxs_of_selected_class = np.array([np.where(data_y_train == y_val)[0] for y_val in selected_classes])
num_points_per_class_per_slot = [int(len(train_idxs_of_selected_class[class_idx]) // 10) for class_idx in range(len(selected_classes))]
selected_idxs = np.concatenate([np.random.choice(train_idxs_of_selected_class[class_idx], num_points_per_class_per_slot[class_idx]) for class_idx in range(len(selected_classes))])
train_idxs = np.random.choice(selected_idxs, num_points_per_task)
data_x_train = data_x_train[train_idxs]
data_y_train = data_y_train[train_idxs]
test_idxs_of_selected_class = np.concatenate([np.where(data_y_test == y_val)[0] for y_val in selected_classes])
data_x_test = data_x_test[test_idxs_of_selected_class]
data_y_test = data_y_test[test_idxs_of_selected_class]
return data_x_train, data_y_train, data_x_test, data_y_test
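# Rough sizes under the setup below: CIFAR-100 has 600 images per class, so the
# 6-fold stratified split leaves about 500 per class in the train fold; with 10
# selected classes, num_points_per_class_per_slot is about 500 // 10 = 50 and
# the task's 500 training points are drawn from those 10 * 50 = 500 candidate
# indices.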
#%%
def run_parallel_exp(data_x, data_y, n_trees, model, num_points_per_task, slot=0, shift=1):
if model == "dnn":
with tf.device('/gpu:'+str(shift % 4)):
LF_experiment(data_x, data_y, n_trees, shift, slot, model, num_points_per_task, acorn=12345)
else:
LF_experiment(data_x, data_y, n_trees, shift, slot, model, num_points_per_task, acorn=12345)
#%%
### MAIN HYPERPARAMS ###
model = "dnn"
num_points_per_task = 500
########################
(X_train, y_train), (X_test, y_test) = keras.datasets.cifar100.load_data()
data_x = np.concatenate([X_train, X_test])
if model == "uf":
data_x = data_x.reshape((data_x.shape[0], data_x.shape[1] * data_x.shape[2] * data_x.shape[3]))
data_y = np.concatenate([y_train, y_test])
data_y = data_y[:, 0]
#%%
slot_fold = range(3, 10)
if model == "uf":
shift_fold = range(1,7,1)
n_trees=[10]
iterable = product(n_trees,shift_fold, slot_fold)
Parallel(n_jobs=-2,verbose=1)(
delayed(run_parallel_exp)(
data_x, data_y, ntree, model, num_points_per_task, slot=slot, shift=shift
) for ntree,shift,slot in iterable
)
elif model == "dnn":
'''
print("Performing Stage 1 Shifts")
for slot in slot_fold:
def perform_shift(shift):
return run_parallel_exp(data_x, data_y, 0, model, num_points_per_task, slot=slot, shift=shift)
stage_1_shifts = range(1, 5)
with Pool(4) as p:
p.map(perform_shift, stage_1_shifts)
'''
print("Performing Stage 2 Shifts")
for slot in slot_fold:
def perform_shift(shift):
return run_parallel_exp(data_x, data_y, 0, model, num_points_per_task, slot=slot, shift=shift)
stage_2_shifts = range(5, 7)
with Pool(4) as p:
p.map(perform_shift, stage_2_shifts)
# %%
```
#### File: progressive-learning/replaying/jovo_exp.py
```python
import random
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
import tensorflow.keras as keras
from itertools import product
import pandas as pd
import numpy as np
import pickle
import matplotlib
from sklearn.model_selection import StratifiedKFold
from math import log2, ceil
import sys
sys.path.append("../src_sampling/")
#sys.path.append("../src/")
from lifelong_dnn import LifeLongDNN
from joblib import Parallel, delayed
from multiprocessing import Pool
import tensorflow as tf
# %%
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def get_colors(colors, inds):
c = [colors[i] for i in inds]
return c
def generate_2d_rotation(theta=0, acorn=None):
if acorn is not None:
np.random.seed(acorn)
R = np.array([
[np.cos(theta), np.sin(theta)],
[-np.sin(theta), np.cos(theta)]
])
return R
def generate_gaussian_parity(n, mean=np.array([-1, -1]), cov_scale=1, angle_params=None, k=1, acorn=None):
if acorn is not None:
np.random.seed(acorn)
d = len(mean)
if mean[0] == -1 and mean[1] == -1:
mean = mean + 1 / 2**k
mnt = np.random.multinomial(n, 1/(4**k) * np.ones(4**k))
cumsum = np.cumsum(mnt)
cumsum = np.concatenate(([0], cumsum))
Y = np.zeros(n)
X = np.zeros((n, d))
for i in range(2**k):
for j in range(2**k):
temp = np.random.multivariate_normal(mean, cov_scale * np.eye(d),
size=mnt[i*(2**k) + j])
temp[:, 0] += i*(1/2**(k-1))
temp[:, 1] += j*(1/2**(k-1))
X[cumsum[i*(2**k) + j]:cumsum[i*(2**k) + j + 1]] = temp
if i % 2 == j % 2:
Y[cumsum[i*(2**k) + j]:cumsum[i*(2**k) + j + 1]] = 0
else:
Y[cumsum[i*(2**k) + j]:cumsum[i*(2**k) + j + 1]] = 1
if d == 2:
if angle_params is None:
angle_params = np.random.uniform(0, 2*np.pi)
R = generate_2d_rotation(angle_params)
X = X @ R
else:
raise ValueError('d=%i not implemented!'%(d))
return X, Y.astype(int)
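# With the arguments used below (k=1, mean=[-1,-1], which the k-shift moves to
# [-0.5,-0.5]), the four Gaussian blobs are centred at (+/-0.5, +/-0.5); the two
# blobs on the main diagonal share label 0, giving the Gaussian XOR pattern, and
# angle_params=pi/2 rotates it into the N-XOR task.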
#%%
def produce_heatmap_data(leaf_profile, posterior, delta=0.001):
x = np.arange(leaf_profile[0][0],leaf_profile[0][1],step=delta)
y = np.arange(leaf_profile[1][0],leaf_profile[1][1],step=delta)
#print(leaf_profile[0][0],leaf_profile[0][1],leaf_profile[1][0],leaf_profile[1][1])
x,y = np.meshgrid(x,y)
'''points = np.concatenate(
(
x.reshape(-1,1),
y.reshape(-1,1)
),
axis=1
)'''
if x.shape[0] == 1:
x = np.concatenate(
(x,x),
axis=0
)
if x.shape[1] == 1:
x = np.concatenate(
(x,x),
axis=1
)
if y.shape[0] == 1:
y = np.concatenate(
(y,y),
axis=0
)
if y.shape[1] == 1:
y = np.concatenate(
(y,y),
axis=1
)
prob = posterior*np.ones(
x.shape,
dtype=float
)
#print(x.shape,prob.shape)
return x, y, prob
# %%
reps = 100
max_depth = 200
sample_no = 750
err = np.zeros(reps,dtype=float)
fte = np.zeros(reps,dtype=float)
bte = np.zeros(reps,dtype=float)
#np.random.seed(1)
for i in range(reps):
xor, label_xor = generate_gaussian_parity(sample_no,cov_scale=0.1,angle_params=0)
test_xor, test_label_xor = generate_gaussian_parity(1000,cov_scale=0.1,angle_params=0)
''' min_xor = np.min(xor)
xor = (xor - min_xor)
max_xor = np.max(xor)
xor = xor/max_xor
test_xor = (test_xor-min_xor)/max_xor'''
nxor, label_nxor = generate_gaussian_parity(150,cov_scale=0.1,angle_params=np.pi/2)
test_nxor, test_label_nxor = generate_gaussian_parity(1000,cov_scale=0.1,angle_params=np.pi/2)
'''min_nxor = np.min(nxor)
nxor = (nxor - min_nxor)
max_nxor = np.max(nxor)
nxor = nxor/max_nxor
test_nxor = (test_nxor-min_nxor)/max_nxor'''
l2f = LifeLongDNN(parallel=False)
#np.random.seed(2)
l2f.new_forest(xor, label_xor, n_estimators=1, max_depth=max_depth)
delta = .001
#sample the grid
x = np.arange(-1,1,step=delta)
y = np.arange(-1,1,step=delta)
x,y = np.meshgrid(x,y)
sample = np.concatenate(
(
x.reshape(-1,1),
y.reshape(-1,1)
),
axis=1
)
sample_label = l2f._estimate_posteriors(sample, representation='all', decider=0)
l2f.X_across_tasks[0] = sample
l2f.y_across_tasks[0] = sample_label
#np.random.seed(3)
l2f.new_forest(nxor, label_nxor, n_estimators=1, max_depth=max_depth)
l2f_task1 = l2f.predict(test_xor, representation='all', decider=0)
uf_task1 = l2f.predict(test_xor, representation=0, decider=0)
l2f_task2 = l2f.predict(test_nxor, representation='all', decider=1)
uf_task2 = l2f.predict(test_nxor, representation=1, decider=1)
fte = (1-np.mean(uf_task2 == test_label_nxor))/(1-np.mean(l2f_task2 == test_label_nxor))
bte = (1-np.mean(uf_task1 == test_label_xor))/(1-np.mean(l2f_task1 == test_label_xor))
print(np.mean(fte), np.mean(bte))
# %%
#make the heatmap data matrix
task_no = len(l2f.voters_across_tasks_matrix)
sns.set_context("talk")
fig, ax = plt.subplots(2,2, figsize=(16,16))#, sharex=True, sharey=True)
for task_id in range(task_no):
for voter_id in range(task_no):
#print(task_id, voter_id)
current_voter = l2f.voters_across_tasks_matrix[task_id][voter_id]
posterior_map = current_voter.tree_idx_to_node_ids_to_posterior_map
leaf_map = current_voter.tree_id_to_leaf_profile
for tree_id in list(leaf_map.keys()):
tree_leaf_map = leaf_map[tree_id]
for no, leaf_id in enumerate(list(tree_leaf_map.keys())):
x, y, prb = produce_heatmap_data(
tree_leaf_map[leaf_id],
posterior_map[tree_id][leaf_id][0]
)
'''if no == 0:
x = points
y = prb
else:
x = np.concatenate((x,points),axis=0)
y = np.concatenate((y,prb),axis=0)'''
axs = ax[task_id][voter_id].contourf(x,y,prb,cmap='gray')#,alpha=prb[0][0])
ax[task_id][voter_id].set_xticks([0,.2,.4,.6,.8,1])
ax[task_id][voter_id].set_yticks([0,.2,.4,.6,.8,1])
#data = pd.DataFrame(data={'x':x[:,0], 'y':x[:,1], 'z':y})
#data = data.pivot(index='x', columns='y', values='z')
#ax = sns.heatmap(data,ax=axes[task_id][voter_id], vmin=0, vmax=1,)
#ax.set_xticklabels(['0','' , '', '', '', '', '','','','.5','','' , '', '', '', '', '','','1'])
#ax.set_yticklabels(['0','' , '', '', '', '', '','','','','','.5','','' , '', '', '', '', '','','','','1'])
#ax.set_xlabel('transformer task '+str(voter_id+1)+' decider task '+str(task_id+1),fontsize=20)
#ax.set_ylabel('')
#ax.set_xticks([0,.5,1])
fig.colorbar(matplotlib.cm.ScalarMappable(cmap='gray'),ax=ax[0][1]).set_ticklabels([0,.2,.4,.6,.8,1])
fig.colorbar(matplotlib.cm.ScalarMappable(cmap='gray'),ax=ax[1][1]).set_ticklabels([0,.2,.4,.6,.8,1])
#plt.savefig('result/figs/heatmap_mapping'+str(max_depth)+'_'+str(sample_no)+'.pdf')
# %%
```
#### File: progressive-learning/replaying/parity_experiment.py
```python
import random
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras as keras
import seaborn as sns
import timeit
import numpy as np
import pickle
from sklearn.model_selection import StratifiedKFold
from math import log2, ceil
import sys
sys.path.append("../src_mapping_3/")
from lifelong_dnn import LifeLongDNN
from joblib import Parallel, delayed
# %%
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def generate_parity(low, high, n, d, type='xor',acorn=None):
r'''
    A function that generates d-dimensional parity data with n samples,
    where each dimension is sampled i.i.d., i.e.,
    X_1, ..., X_d ~ U(low, high)
'''
if acorn != None:
np.random.seed(acorn)
#loop through each dimension to make them iid
x = np.random.uniform(
low=low,high=high,size=(n,1)
)
for d_ in range(d-1):
x = np.concatenate(
(
x, np.random.uniform(
low=low,high=high,size=(n,1)
)
),
axis=1
)
positive_value_per_sample = np.sum((x>0),axis=1)
y = positive_value_per_sample%2
if type =='nxor':
y = y - (y==1) + (y==0)
return x, y
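# Illustrative example (values not tied to the experiment below): for the sample
# x = [0.3, -0.2, 0.7], two coordinates are positive, so y = 2 % 2 = 0 under
# 'xor', and the label flips to 1 under 'nxor'.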
# %%
def experiment(n, d, n_test, n_trees, reps, acorn=None):
if acorn != None:
np.random.seed(acorn)
depth = ceil(log2(n))
xor_err = np.zeros((reps,2),dtype=float)
nxor_err = np.zeros((reps,2),dtype=float)
time_elapsed = np.zeros(reps,dtype=float)
for rep in range(reps):
#train data
xor, label_xor = generate_parity(-1,1,n,d)
nxor, label_nxor = generate_parity(-1,1,n,d,type='nxor')
#test data
xor_test, label_xor_test = generate_parity(
-1,1,n_test,d
)
nxor_test, label_nxor_test = generate_parity(
-1,1,n_test,d,type='nxor'
)
        start = timeit.default_timer()
l2f = LifeLongDNN(parallel=False)
l2f.new_forest(
xor, label_xor, n_estimators=n_trees, max_depth=depth
)
        end = timeit.default_timer()
time_train_first_task = end-start
predict_xor = l2f.predict(
xor_test, representation=0, decider=0
)
xor_err[rep,0] = np.mean(predict_xor!=label_xor_test)
################################
        start = timeit.default_timer()
l2f.new_forest(
nxor, label_nxor, n_estimators=n_trees, max_depth=depth
)
        end = timeit.default_timer()
time_elapsed[rep] = time_train_first_task + (end-start)
predict_xor = l2f.predict(
xor_test, representation='all', decider=0
)
        xor_err[rep,1] = np.mean(predict_xor!=label_xor_test)
################################
predict_nxor = l2f.predict(
nxor_test, representation=1, decider=1
)
        nxor_err[rep,0] = np.mean(predict_nxor!=label_nxor_test)
################################
predict_nxor = l2f.predict(
nxor_test, representation='all', decider=1
)
        nxor_err[rep,1] = np.mean(predict_nxor!=label_nxor_test)
return np.mean(xor_err,axis=0), np.mean(nxor_err,axis=0), np.std(xor_err,ddof=1,axis=0), np.std(nxor_err,ddof=1,axis=0), np.mean(time_elapsed)
# %%
#main hyperparameters#
#######################
n = 1000
n_test = 1000
n_trees = 10
reps = 100
max_dim = 1000
# %%
result = Parallel(n_jobs=-1,verbose=1)(
delayed(experiment)(n, d, n_test, n_trees, reps, acorn=d) for d in range(2,max_dim)
)
with open('result/parity_without_replay.pickle', 'wb') as f:
pickle.dump(result,f)
```
#### File: progressive-learning/replaying/xor_nxor_pdf.py
```python
import random
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras as keras
import seaborn as sns
import numpy as np
import pickle
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from math import log2, ceil
#%%
def pdf(x):
mu01 = np.array([-0.5,0.5])
mu02 = np.array([0.5,-0.5])
mu11 = np.array([0.5,0.5])
mu12 = np.array([-0.5,-0.5])
cov = 0.1 * np.eye(2)
inv_cov = np.linalg.inv(cov)
p0 = (
np.exp(-(x - mu01)@inv_cov@(x-mu01).T)
+ np.exp(-(x - mu02)@inv_cov@(x-mu02).T)
)/(2*np.pi*np.sqrt(np.linalg.det(cov)))
p1 = (
np.exp(-(x - mu11)@inv_cov@(x-mu11).T)
+ np.exp(-(x - mu12)@inv_cov@(x-mu12).T)
)/(2*np.pi*np.sqrt(np.linalg.det(cov)))
return p0/(p0+p1)
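# pdf(x) above is the class-0 posterior for two equally weighted classes, each an
# isotropic two-component Gaussian mixture with covariance 0.1*I: class 0 is
# centred at (-0.5, 0.5) and (0.5, -0.5), class 1 at (0.5, 0.5) and (-0.5, -0.5),
# so the heatmaps below show P(y=0 | x) = p0 / (p0 + p1) rather than a density.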
# %%
delta = 0.01
x = np.arange(-1,1,step=delta)
y = np.arange(-1,1,step=delta)
x,y = np.meshgrid(x,y)
sample = np.concatenate(
(
x.reshape(-1,1),
y.reshape(-1,1)
),
axis=1
)
z = np.zeros(len(sample),dtype=float)
for ii,x in enumerate(sample):
z[ii] = pdf(x)
data = pd.DataFrame(data={'x':sample[:,0], 'y':sample[:,1], 'z':z})
data = data.pivot(index='x', columns='y', values='z')
sns.set_context("talk")
fig, ax = plt.subplots(1,1, figsize=(8,8))
cmap= sns.diverging_palette(240, 10, n=9)
ax1 = sns.heatmap(data, ax=ax, vmin=0, vmax=1,cmap=cmap)
ax1.set_xticklabels(['-1','' , '', '', '', '', '','','','','0','','','','','','','','','1'])
ax1.set_yticklabels(['-1','' , '', '', '', '', '','','','','','','0','','','','','','','','','','','','1'])
#ax1.set_yticklabels(['-1','' , '', '', '', '', '','','','' , '', '', '', '', '', '','','','','', '0','','' , '', '', '', '', '','','','','','','','','','','','','1'])
ax.set_title('True PDF of xor-nxor simulation data',fontsize=24)
ax.invert_yaxis()
plt.savefig('result/figs/true_pdf.pdf')
# %%
def generate_2d_rotation(theta=0, acorn=None):
if acorn is not None:
np.random.seed(acorn)
R = np.array([
[np.cos(theta), np.sin(theta)],
[-np.sin(theta), np.cos(theta)]
])
return R
# %%
delta = 0.01
x = np.arange(-1,1,step=delta)
y = np.arange(-1,1,step=delta)
x,y = np.meshgrid(x,y)
sample = np.concatenate(
(
x.reshape(-1,1),
y.reshape(-1,1)
),
axis=1
)
z = np.zeros(len(sample),dtype=float)
R = generate_2d_rotation(theta=np.pi*45/180)
for ii,x in enumerate(sample):
z[ii] = pdf(R@x)
data = pd.DataFrame(data={'x':sample[:,0], 'y':sample[:,1], 'z':z})
data = data.pivot(index='x', columns='y', values='z')
sns.set_context("talk")
fig, ax = plt.subplots(1,1, figsize=(8,8))
cmap= sns.diverging_palette(240, 10, n=9)
ax1 = sns.heatmap(data, ax=ax, vmin=0, vmax=1,cmap=cmap)
ax1.set_xticklabels(['-1','' , '', '', '', '', '','','','','0','','','','','','','','','1'])
ax1.set_yticklabels(['-1','' , '', '', '', '', '','','','','','','0','','','','','','','','','','','','1'])
#ax1.set_yticklabels(['-1','' , '', '', '', '', '','','','' , '', '', '', '', '', '','','','','', '0','','' , '', '', '', '', '','','','','','','','','','','','','1'])
ax.set_title('True PDF of xor-rxor simulation data',fontsize=24)
ax.invert_yaxis()
plt.savefig('result/figs/true_pdf_xor_rxor.pdf')
# %%
```
#### File: progressive-learning/slides/NLP_plot.py
```python
import pickle
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
import numpy as np
import pandas as pd
from itertools import product
import seaborn as sns
import matplotlib.gridspec as gridspec
import matplotlib
#%%
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def get_error_matrix(single_task_err,multitask_err,task_num):
err = [[] for _ in range(task_num)]
single_err = np.zeros(task_num,dtype=float)
for ii in range(task_num):
single_err[ii] = 1-single_task_err[ii]
for jj in range(ii+1):
#print(multitask_err[jj])
tmp = 1 - multitask_err[jj][ii-jj]
if tmp==0:
tmp = 1e-6
err[ii].append(tmp)
return single_err, err
def get_fte_bte(err, single_err, task_num):
bte = [[] for i in range(task_num)]
te = [[] for i in range(task_num)]
fte = []
for i in range(task_num):
for j in range(i,task_num):
#print(err[j][i],j,i)
bte[i].append(err[i][i]/err[j][i])
te[i].append(single_err[i]/err[j][i])
for i in range(task_num):
fte.append(single_err[i]/err[i][i])
return fte,bte,te
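# Illustrative numbers for the ratios above (made up, not taken from the result
# files): if task 1 has single-task error 0.20, error 0.18 right after it is
# learned, and error 0.15 after all tasks have been seen, then
# FTE = 0.20/0.18, roughly 1.11, and the final BTE = 0.18/0.15 = 1.2; values
# above 1 indicate positive transfer.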
def calc_mean_bte(btes,task_num=10,reps=6):
mean_bte = [[] for i in range(task_num)]
for j in range(task_num):
tmp = 0
for i in range(reps):
tmp += np.array(btes[i][j])
tmp=tmp/reps
mean_bte[j].extend(tmp)
return mean_bte
def calc_mean_te(tes,task_num=10,reps=6):
mean_te = [[] for i in range(task_num)]
for j in range(task_num):
tmp = 0
for i in range(reps):
tmp += np.array(tes[i][j])
tmp=tmp/reps
mean_te[j].extend(tmp)
return mean_te
def calc_mean_fte(ftes,task_num=10,reps=6):
fte = np.asarray(ftes)
return list(np.mean(np.asarray(fte),axis=0))
#%%
task_num1=15
task_num2=10
filename1 = '../experiments/NLP/result/PL_llf_language_random_overlap_batches.p'
filename2 = '../experiments/NLP/result/satori_llf_overlap_random_batch.p'
res1 = unpickle(filename1)
res2 = unpickle(filename2)
# %%
reps = 15
count = 0
bte_tmp = [[] for _ in range(reps)]
fte_tmp = [[] for _ in range(reps)]
te_tmp = [[] for _ in range(reps)]
for seed,single_task_err,multitask_err in res1:
single_err, err = get_error_matrix(single_task_err,multitask_err,task_num=task_num1)
fte, bte, te = get_fte_bte(err,single_err,task_num=task_num1)
bte_tmp[count].extend(bte)
fte_tmp[count].extend(fte)
te_tmp[count].extend(te)
count+=1
btes1 = calc_mean_bte(bte_tmp,task_num=task_num1,reps=reps)
ftes1 = calc_mean_fte(fte_tmp,task_num=task_num1,reps=reps)
tes1 = calc_mean_te(te_tmp,task_num=task_num1,reps=reps)
# %%
reps = 10
count = 0
bte_tmp = [[] for _ in range(reps)]
fte_tmp = [[] for _ in range(reps)]
te_tmp = [[] for _ in range(reps)]
for seed,single_task_err,multitask_err in res2:
single_err, err = get_error_matrix(single_task_err,multitask_err,task_num=task_num2)
fte, bte, te = get_fte_bte(err,single_err,task_num=task_num2)
bte_tmp[count].extend(bte)
fte_tmp[count].extend(fte)
te_tmp[count].extend(te)
count+=1
btes2 = calc_mean_bte(bte_tmp,task_num=task_num2,reps=reps)
ftes2 = calc_mean_fte(fte_tmp,task_num=task_num2,reps=reps)
tes2 = calc_mean_te(te_tmp,task_num=task_num2,reps=reps)
# %%
fig = plt.figure(figsize=(8,8))
gs = fig.add_gridspec(6, 6)
fontsize=30
ticksize=26
legendsize=14
ax = fig.add_subplot(gs[:6,:6])
for i in range(task_num1-1):
et = np.zeros((1,task_num1-i))
ns = np.arange(i + 1, task_num1 + 1)
ax.plot(ns, btes1[i], marker='.', markersize=8, color='r', linewidth = 3)
ax.plot(task_num1, btes1[task_num1-1], marker='.', markersize=8, color='r', linewidth = 3)
ax.set_xlabel('Number of tasks seen', fontsize=fontsize)
ax.set_ylabel('Backward Transfer Efficiency', fontsize=fontsize)
ax.tick_params(labelsize=ticksize)
#ax.set_yticks([.4,.6,.8,.9,1, 1.1,1.2])
ax.set_xticks(np.arange(1,task_num1+1,2))
#ax.set_ylim(0.99, 1.2)
ax.tick_params(labelsize=ticksize)
#ax[0][1].grid(axis='x')
ax.set_ylim([.5,1.5])
right_side = ax.spines["right"]
right_side.set_visible(False)
top_side = ax.spines["top"]
top_side.set_visible(False)
ax.hlines(1, 1,task_num1, colors='grey', linestyles='dashed',linewidth=1.5)
plt.savefig('figs/language.svg')
#%%
fig = plt.figure(figsize=(8,8))
gs = fig.add_gridspec(7, 6)
fontsize=30
ticksize=26
legendsize=14
ax = fig.add_subplot(gs[1:7,:6])
for i in range(task_num2-1):
et = np.zeros((1,task_num2-i))
ns = np.arange(i + 1, task_num2 + 1)
ax.plot(ns, btes2[i], marker='.', markersize=8, color='r', linewidth = 3)
ax.plot(task_num2, btes2[task_num2-1], marker='.', markersize=8, color='r', linewidth = 3)
ax.set_xlabel('Number of tasks seen', fontsize=fontsize)
ax.set_ylabel('Backward Transfer Efficiency', fontsize=fontsize)
ax.tick_params(labelsize=ticksize)
ax.set_yticks([0.98,1,1.02,1.04,1.06])
ax.set_xticks(np.arange(1,task_num2+1,2))
#ax.set_ylim(0.99, 1.2)
ax.tick_params(labelsize=ticksize)
#ax[0][1].grid(axis='x')
ax.set_ylim([.98,1.06])
right_side = ax.spines["right"]
right_side.set_visible(False)
top_side = ax.spines["top"]
top_side.set_visible(False)
ax.hlines(1, 1,task_num2, colors='grey', linestyles='dashed',linewidth=1.5)
plt.savefig('figs/web.svg')
# %%
```
#### File: progressive-learning/src_mapping_3/lifelong_dnn.py
```python
from sklearn.base import clone
import numpy as np
from joblib import Parallel, delayed
class LifeLongDNN():
def __init__(self, acorn = None, verbose = False, model = "uf", parallel = True, n_jobs = None):
self.X_across_tasks = []
self.y_across_tasks = []
self.transformers_across_tasks = []
#element [i, j] votes on decider from task i under representation from task j
self.voters_across_tasks_matrix = []
self.n_tasks = 0
self.classes_across_tasks = []
#self.estimators_across_tasks = []
self.tree_profile_across_transformers = []
if acorn is not None:
np.random.seed(acorn)
self.verbose = verbose
self.model = model
self.parallel = parallel
self.n_jobs = n_jobs
def check_task_idx_(self, task_idx):
if task_idx >= self.n_tasks:
raise Exception("Invalid Task IDX")
def new_forest(self,
X,
y,
epochs = 100,
lr = 5e-4,
n_estimators = 100,
max_samples = .63,
bootstrap = False,
max_depth = 30,
min_samples_leaf = 1,
acorn = None,
parallel = False,
n_jobs = None):
if self.model == "dnn":
from honest_dnn import HonestDNN
if self.model == "uf":
from uncertainty_forest import UncertaintyForest
self.X_across_tasks.append(X)
self.y_across_tasks.append(y)
if self.model == "dnn":
new_honest_dnn = HonestDNN(verbose = self.verbose)
new_honest_dnn.fit(X, y, epochs = epochs, lr = lr)
if self.model == "uf":
new_honest_dnn = UncertaintyForest(n_estimators = n_estimators,
max_samples = max_samples,
bootstrap = bootstrap,
max_depth = max_depth,
min_samples_leaf = min_samples_leaf,
parallel = parallel,
n_jobs = n_jobs)
new_honest_dnn.fit(X, y)
new_transformer = new_honest_dnn.get_transformer()
new_voter = new_honest_dnn.get_voter()
new_classes = new_honest_dnn.classes_
new_tree_profile = new_honest_dnn.tree_id_to_leaf_profile
self.tree_profile_across_transformers.append(new_tree_profile)
#self.estimators_across_tasks.append(new_honest_dnn.ensemble.estimators_)
self.transformers_across_tasks.append(new_transformer)
self.classes_across_tasks.append(new_classes)
#add n_tasks voters to new task voter list under previous transformations
new_voters_under_previous_task_transformation = []
for task_idx in range(self.n_tasks):
transformer_of_task = self.transformers_across_tasks[task_idx]
if self.model == "dnn":
X_under_task_transformation = transformer_of_task.predict(X)
if self.model == "uf":
X_under_task_transformation = transformer_of_task(X)
unfit_new_task_voter_under_task_transformation = clone(self.voters_across_tasks_matrix[task_idx][0])
if self.model == "uf":
unfit_new_task_voter_under_task_transformation.classes_ = new_voter.classes_
new_task_voter_under_task_transformation = unfit_new_task_voter_under_task_transformation.fit(
nodes_across_trees=X_under_task_transformation,
y=y
)
new_voters_under_previous_task_transformation.append(new_task_voter_under_task_transformation)
#make sure to add the voter of the new task under its own transformation
new_voters_under_previous_task_transformation.append(new_voter)
self.voters_across_tasks_matrix.append(new_voters_under_previous_task_transformation)
'''if self.n_tasks==1:
print(
self.voters_across_tasks_matrix[1][0].tree_idx_to_node_ids_to_sample_count_map
)'''
#add one voter to previous task voter lists under the new transformation
for task_idx in range(self.n_tasks):
X_of_task, y_of_task = self.X_across_tasks[task_idx], self.y_across_tasks[task_idx]
if self.model == "dnn":
X_of_task_under_new_transform = new_transformer.predict(X_of_task)
#if self.model == "uf":
#X_of_task_under_new_transform = new_transformer(X_of_task)
# estimators_of_task = self.estimators_across_tasks[task_idx]
unfit_task_voter_under_new_transformation = clone(new_voter)
#posterior_map_to_be_mapped = self.voters_across_tasks_matrix[task_idx][task_idx].tree_idx_to_node_ids_to_posterior_map
voters_to_be_mapped = []
for voter_id in range(task_idx+1):
voters_to_be_mapped.append(self.voters_across_tasks_matrix[task_idx][voter_id])
sample_map_to_scale_current_task_data = [new_voter,
self.voters_across_tasks_matrix[self.n_tasks][task_idx]]
if self.model == "uf":
unfit_task_voter_under_new_transformation.classes_ = self.voters_across_tasks_matrix[task_idx][0].classes_
task_voter_under_new_transformation = unfit_task_voter_under_new_transformation.fit(
voters_to_be_mapped=voters_to_be_mapped,
current_task_voters=new_voters_under_previous_task_transformation,
map=True
)
# print(
# self.voters_across_tasks_matrix[task_idx][0].tree_idx_to_node_ids_to_posterior_map, 'hi'
#)
self.voters_across_tasks_matrix[task_idx].append(task_voter_under_new_transformation)
'''print(
self.voters_across_tasks_matrix[0][0].tree_idx_to_node_ids_to_sample_count_map
)'''
self.n_tasks += 1
def _estimate_posteriors(self, X, representation = 0, decider = 0):
self.check_task_idx_(decider)
if representation == "all":
representation = range(self.n_tasks)
elif isinstance(representation, int):
representation = np.array([representation])
def worker(transformer_task_idx):
transformer = self.transformers_across_tasks[transformer_task_idx]
voter = self.voters_across_tasks_matrix[decider][transformer_task_idx]
if self.model == "dnn":
return voter.predict_proba(transformer.predict(X))
if self.model == "uf":
return voter.predict_proba(transformer(X))
if self.parallel:
posteriors_across_tasks = np.array(
Parallel(n_jobs=self.n_jobs if self.n_jobs != None else len(representation))(
delayed(worker)(transformer_task_idx) for transformer_task_idx in representation
)
)
else:
#print(worker(0).shape, representation)
posteriors_across_tasks = np.array([worker(transformer_task_idx) for transformer_task_idx in representation])
return np.mean(posteriors_across_tasks, axis = 0)
def predict(self, X, representation = 0, decider = 0):
task_classes = self.classes_across_tasks[decider]
return task_classes[np.argmax(self._estimate_posteriors(X, representation, decider), axis = -1)]
```
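A minimal usage sketch of the `LifeLongDNN` class above, assuming `lifelong_dnn.py` and its `uncertainty_forest` dependency are importable together and using the default `model="uf"` path; the data is synthetic and only illustrates the call order (one `new_forest` per task, then `predict` with a chosen representation and decider):
```python
import numpy as np
from lifelong_dnn import LifeLongDNN

# One toy task with 2-D features and binary labels (synthetic data).
rng = np.random.default_rng(0)
X, y = rng.normal(size=(100, 2)), rng.integers(0, 2, size=100)

learner = LifeLongDNN(model="uf", parallel=False)
learner.new_forest(X, y, n_estimators=10)          # task 0
# Further tasks are added the same way; each call cross-fits voters so that
# any (representation, decider) pair can be queried afterwards.
print(learner.predict(X, representation="all", decider=0)[:10])
```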
#### File: progressive-learning/src_mapping_3/uncertainty_forest.py
```python
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
#Infrastructure
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import NotFittedError
#Data Handling
from sklearn.utils.validation import (
check_X_y,
check_array,
NotFittedError,
)
from sklearn.utils.multiclass import check_classification_targets
#Utils
from joblib import Parallel, delayed
import numpy as np
def _finite_sample_correction(posteriors, num_points_in_partition, num_classes):
'''
encourage posteriors to approach uniform when there is low data
'''
correction_constant = 1 / (num_classes * num_points_in_partition)
zero_posterior_idxs = np.where(posteriors == 0)[0]
posteriors[zero_posterior_idxs] = correction_constant
posteriors /= sum(posteriors)
return posteriors
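# Worked example of the correction above (hypothetical numbers): with
# num_classes = 2 and num_points_in_partition = 3 the constant is 1/6, so a raw
# posterior of [1.0, 0.0] first becomes [1.0, 1/6] and, after dividing by its
# new sum (7/6), ends up at roughly [0.857, 0.143] -- an empty class never
# keeps an exact zero posterior.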
class UncertaintyForest(BaseEstimator, ClassifierMixin):
'''
based off of https://arxiv.org/pdf/1907.00325.pdf
'''
def __init__(
self,
max_depth=30,
min_samples_leaf=1,
max_samples = 0.63,
max_features_tree = "auto",
n_estimators=100,
bootstrap=False,
parallel=True,
n_jobs = None):
#Tree parameters.
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.max_features_tree = max_features_tree
#Bag parameters
self.n_estimators = n_estimators
self.bootstrap = bootstrap
self.max_samples = max_samples
#Model parameters.
self.parallel = parallel
if self.parallel and n_jobs == None:
self.n_jobs = self.n_estimators
else:
self.n_jobs = n_jobs
self.fitted = False
def _check_fit(self):
'''
raise a NotFittedError if the model isn't fit
'''
if not self.fitted:
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator."
)
raise NotFittedError(msg % {"name": type(self).__name__})
def transform(self, X):
'''
get the estimated posteriors across trees
'''
X = check_array(X)
def worker(tree_idx, tree):
#get the nodes of X
# Drop each estimation example down the tree, and record its 'y' value.
return tree.apply(X)
if self.parallel:
return np.array(
Parallel(n_jobs=self.n_jobs)(
delayed(worker)(tree_idx, tree) for tree_idx, tree in enumerate(self.ensemble.estimators_)
)
)
else:
return np.array(
[worker(tree_idx, tree) for tree_idx, tree in enumerate(self.ensemble.estimators_)]
)
# function added to do partition mapping
def _profile_leaf(self):
self.tree_id_to_leaf_profile = {}
leaf_profile = {}
#print('hi')
def worker(node, children_left, children_right, feature, threshold, profile_mat):
if children_left[node] == children_right[node]:
profile_mat_ = profile_mat.copy()
leaf_profile[node] = profile_mat_
#print(node,'nodes')
else:
feature_indx = feature[node]
profile_mat_ = profile_mat.copy()
profile_mat_[feature_indx,1] = threshold[node]
worker(
children_left[node],
children_left,
children_right,
feature,
threshold,
profile_mat_
)
profile_mat_ = profile_mat.copy()
profile_mat_[feature_indx,0] = threshold[node]
worker(
children_right[node],
children_left,
children_right,
feature,
threshold,
profile_mat_
)
profile_mat = np.concatenate(
(
np.zeros((self._feature_dimension,1),dtype=float),
np.ones((self._feature_dimension,1),dtype=float)
),
axis = 1
)
for tree_id, estimator in enumerate(self.ensemble.estimators_):
leaf_profile = {}
feature = estimator.tree_.feature
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
threshold = estimator.tree_.threshold
#print(children_left,children_right)
worker(
0,
children_left,
children_right,
feature,
threshold,
profile_mat.copy()
)
self.tree_id_to_leaf_profile[tree_id] = leaf_profile
#print(self.tree_id_to_leaf_profile,'gdgfg')
def get_transformer(self):
return lambda X : self.transform(X)
def vote(self, nodes_across_trees):
return self.voter.predict(nodes_across_trees)
def get_voter(self):
return self.voter
def fit(self, X, y):
#format X and y
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
#define the ensemble
self.ensemble = BaggingClassifier(
DecisionTreeClassifier(
max_depth=self.max_depth,
min_samples_leaf=self.min_samples_leaf,
max_features=self.max_features_tree
),
n_estimators=self.n_estimators,
max_samples=self.max_samples,
bootstrap=self.bootstrap,
n_jobs = self.n_jobs
)
#fit the ensemble
self.ensemble.fit(X, y)
self._feature_dimension = X.shape[1]
#profile trees for partition mapping
self._profile_leaf()
class Voter(BaseEstimator):
def __init__(self, estimators, estimators_samples_, classes, tree_id_to_leaf_profile, parallel, n_jobs):
self.estimators = estimators
self.n_estimators = len(estimators_samples_)
self.classes_ = classes
self.tree_id_to_leaf_profile = tree_id_to_leaf_profile
self.parallel = parallel
self.estimators_samples_ = estimators_samples_
self.n_jobs = n_jobs
def fit(self, nodes_across_trees=None, y=None, voters_to_be_mapped=None, current_task_voters=None, fitting = False, map=False):
self.tree_idx_to_node_ids_to_posterior_map = {}
self.tree_idx_to_node_ids_to_sample_count_map = {}
if map == False:
def worker(tree_idx):
nodes = nodes_across_trees[tree_idx]
oob_samples = np.delete(range(len(nodes)), self.estimators_samples_[tree_idx])
cal_nodes = nodes[oob_samples] if fitting else nodes
y_cal = y[oob_samples] if fitting else y
#create a map from the unique node ids to their classwise posteriors
node_ids_to_posterior_map = {}
node_ids_to_sample_count_map = {}
#fill in the posteriors
for node_id in np.unique(nodes):
cal_idxs_of_node_id = np.where(cal_nodes == node_id)[0]
cal_ys_of_node = y_cal[cal_idxs_of_node_id]
class_counts = [len(np.where(cal_ys_of_node == y)[0]) for y in np.unique(y) ]
sample_no = np.sum(class_counts)
if sample_no != 0:
posteriors = np.nan_to_num(np.array(class_counts) / sample_no)
else:
posteriors = np.zeros(len(self.classes_),dtype=float)
#finite sample correction
total_samples = len(cal_idxs_of_node_id)
if total_samples == 0:
total_samples = 1
posteriors_corrected = _finite_sample_correction(posteriors, total_samples, len(self.classes_))
node_ids_to_posterior_map[node_id] = posteriors_corrected
node_ids_to_sample_count_map[node_id] = sample_no
#add the node_ids_to_posterior_map to the overall tree_idx map
self.tree_idx_to_node_ids_to_posterior_map[tree_idx] = node_ids_to_posterior_map
self.tree_idx_to_node_ids_to_sample_count_map[tree_idx] = node_ids_to_sample_count_map
for tree_idx in range(self.n_estimators):
worker(tree_idx)
return self
else:
node_ids_to_posterior_map = {}
_leaf_posteriors = []
_leaf_sample_covered = []
_leaf_current_task_data_correction = []
def worker(
node,
feature, children_left,
children_right,
threshold,
mul,
profile_mat,
posterior_map,
sample_map,
profile_map,
current_task_posterior_map,
current_task_sample_map,
current_task_mul
):
if children_left[node] == children_right[node]:
if node in list(posterior_map.keys()):
k1 = np.prod(
(profile_mat[:,1]-profile_mat[:,0])/(profile_map[node][:,1]- profile_map[node][:,0])
)
#print(profile_mat, profile_map[node], 'kukuta')
#print(mul, 'kutta')
#print(node, current_task_posterior_map,'osovyo')
if node in list(current_task_posterior_map.keys()):
current_task_cls_sample_count = current_task_posterior_map[node]*current_task_sample_map[node]
total_current_task_cls = len(current_task_mul)
individual_count = sample_map[node]*posterior_map[node]
correction = np.zeros(len(individual_count), dtype=float)
for idx, cls_count in enumerate(list(individual_count)):
#print(np.sum(((cls_count*mul*current_task_mul)/current_task_cls_sample_count)/total_current_task_cls), correction.shape)
correction[idx] = np.sum(((cls_count*mul*current_task_mul)/current_task_cls_sample_count)/total_current_task_cls)
#print(correction)
_leaf_posteriors.append(
k1*sample_map[node]*posterior_map[node]*correction
)
_leaf_sample_covered.append(
k1*sample_map[node]*np.sum(correction)
)
else:
_leaf_posteriors.append(
k1*sample_map[node]*posterior_map[node]
)
_leaf_sample_covered.append(
k1*sample_map[node]
)
'''target_task = voter_sample_map[2].tree_dx
current_task_data_correction = '''
else:
profile_mat_left = profile_mat.copy()
profile_mat_right = profile_mat.copy()
current_feature = feature[node]
current_threshold = threshold[node]
feature_range = profile_mat[current_feature]
if current_threshold>feature_range[0] and current_threshold<feature_range[1]:
profile_mat_left[current_feature][1] = current_threshold
profile_mat_right[current_feature][0] = current_threshold
mul_left = mul*(
current_threshold - feature_range[0]
)/(
feature_range[1] - feature_range[0]
)
mul_right = mul*(
feature_range[1] - current_threshold
)/(
feature_range[1] - feature_range[0]
)
worker(
children_left[node],
feature,
children_left,
children_right,
threshold,
mul_left,
profile_mat_left,
posterior_map,
sample_map,
profile_map,
current_task_posterior_map,
current_task_sample_map,
current_task_mul
)
worker(
children_right[node],
feature,
children_left,
children_right,
threshold,
mul_right,
profile_mat_right,
posterior_map,
sample_map,
profile_map,
current_task_posterior_map,
current_task_sample_map,
current_task_mul
)
elif current_threshold <= feature_range[0]:
return worker(
children_right[node],
feature,
children_left,
children_right,
threshold,
mul,
profile_mat_right,
posterior_map,
sample_map,
profile_map,
current_task_posterior_map,
current_task_sample_map,
current_task_mul
)
elif current_threshold >= feature_range[1]:
return worker(
children_left[node],
feature,
children_left,
children_right,
threshold,
mul,
profile_mat_left,
posterior_map,
sample_map,
profile_map,
current_task_posterior_map,
current_task_sample_map,
current_task_mul
)
def map_leaf(voters_to_be_mapped,leaf,profile,current_task_mul):
#print(leaf,'leaf id')
#print(voters_to_be_mapped)
for ids, current_voter in enumerate(voters_to_be_mapped):
estimators = current_voter.estimators
#print(estimators[0].tree_, 'eije ami asi')
posteriors_to_be_mapped = current_voter.tree_idx_to_node_ids_to_posterior_map
sample_count_map = current_voter.tree_idx_to_node_ids_to_sample_count_map
profile_map = current_voter.tree_id_to_leaf_profile
corresponding_current_task_voter = current_task_voters[ids]
corresponding_current_task_voter_sample_map = corresponding_current_task_voter.tree_idx_to_node_ids_to_sample_count_map
corresponding_current_task_voter_posterior_map = corresponding_current_task_voter.tree_idx_to_node_ids_to_posterior_map
#print(posteriors_to_be_mapped,'modon',corresponding_current_task_voter_posterior_map,'hello')
posterior = np.zeros(
(len(estimators), len(current_voter.classes_)),
dtype = float
)
for tree_id,tree in enumerate(estimators):
feature = tree.tree_.feature
children_left = tree.tree_.children_left
children_right = tree.tree_.children_right
threshold = tree.tree_.threshold
worker(
0,
feature,
children_left,
children_right,
threshold,
1,
profile,
posteriors_to_be_mapped[tree_id],
sample_count_map[tree_id],
profile_map[tree_id],
corresponding_current_task_voter_posterior_map[tree_id],
corresponding_current_task_voter_sample_map[tree_id],
current_task_mul
)
num = np.sum(
np.array(_leaf_posteriors),
axis = 0
)
den = np.sum(
np.array(_leaf_sample_covered)
)
if den == 0:
'''print(np.ones(
self.classes_)/self.classes_,self.classes_,'hlw')'''
posterior[tree_id] = np.ones(
len(self.classes_),
dtype=float
)/len(self.classes_)
else:
posterior[tree_id] = num/den
_leaf_posteriors.clear()
_leaf_sample_covered.clear()
_leaf_current_task_data_correction.clear()
#print(posterior[tree_id],'kukuta')
if ids == 0:
posterior_ = posterior
else:
posterior_ = np.concatenate(
(
posterior_,posterior
),
axis=0
)
node_ids_to_posterior_map[leaf] = np.mean(
posterior_, axis=0
)
#################################################################################
tree_idx = list(self.tree_id_to_leaf_profile.keys())
node_ids_to_posterior_map = {}
current_task_new_voter_sample_count = current_task_voters[-1].tree_idx_to_node_ids_to_sample_count_map
current_task_new_voter_posterior_map = current_task_voters[-1].tree_idx_to_node_ids_to_posterior_map
for idx in tree_idx:
leaf_id = list(self.tree_id_to_leaf_profile[idx].keys())
for leaf in leaf_id:
#print(leaf,'fervebgtr')
current_task_mul = current_task_new_voter_sample_count[idx][leaf]*current_task_new_voter_posterior_map[idx][leaf]
profile = self.tree_id_to_leaf_profile[idx][leaf]
map_leaf(
voters_to_be_mapped,
leaf,
profile,
current_task_mul
)
#print(profile,'profile',leaf, idx)
#print(node_ids_to_posterior_map,'jerhubiruu')
self.tree_idx_to_node_ids_to_posterior_map[idx] = node_ids_to_posterior_map
#node_ids_to_posterior_map.clear()
#print(self.tree_idx_to_node_ids_to_posterior_map,'hello')
return self
def predict_proba(self, nodes_across_trees):
def worker(tree_idx):
#get the node_ids_to_posterior_map for this tree
node_ids_to_posterior_map = self.tree_idx_to_node_ids_to_posterior_map[tree_idx]
#get the nodes of X
nodes = nodes_across_trees[tree_idx]
posteriors = []
node_ids = node_ids_to_posterior_map.keys()
#loop over nodes of X
for node in nodes:
#if we've seen this node before, simply get the posterior
if node in node_ids:
posteriors.append(node_ids_to_posterior_map[node])
#if we haven't seen this node before, simply use the uniform posterior
else:
posteriors.append(np.ones((len(np.unique(self.classes_)))) / len(self.classes_))
return posteriors
if self.parallel:
return np.mean(
Parallel(n_jobs=self.n_jobs)(
delayed(worker)(tree_idx) for tree_idx in range(self.n_estimators)
), axis = 0
)
else:
return np.mean(
[worker(tree_idx) for tree_idx in range(self.n_estimators)], axis = 0)
#get the nodes of the calibration set
nodes_across_trees = self.transform(X)
self.voter = Voter(
estimators = self.ensemble.estimators_,
estimators_samples_ = self.ensemble.estimators_samples_,
classes = self.classes_,
tree_id_to_leaf_profile = self.tree_id_to_leaf_profile,
parallel = self.parallel,
n_jobs = self.n_jobs
)
self.voter.fit(
nodes_across_trees = nodes_across_trees,
y=y,
fitting = True
)
self.fitted = True
def predict(self, X):
return self.classes_[np.argmax(self.predict_proba(X), axis=-1)]
def predict_proba(self, X):
return self.voter.predict_proba(self.transform(X))
```
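The `UncertaintyForest` above follows the usual scikit-learn fit/predict pattern; a minimal sketch with synthetic data and a small ensemble (the leaf-profiling and voter internals are taken as given):
```python
import numpy as np
from uncertainty_forest import UncertaintyForest

rng = np.random.default_rng(42)
X = rng.normal(size=(200, 4))
y = (X[:, 0] + X[:, 1] > 0).astype(int)      # a simple separable rule

uf = UncertaintyForest(n_estimators=10, parallel=False)
uf.fit(X, y)
print(uf.predict_proba(X[:5]))               # per-class posteriors, averaged over trees
print(uf.predict(X[:5]))                     # argmax of those posteriors
```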
|
{
"source": "jdeyton/forge-keeper",
"score": 3
}
|
#### File: src/test/test_app_factory.py
```python
import sys
import unittest
from unittest.mock import Mock, call
import flask
from digital.forge.app.abstract_decorator import Decorator
from digital.forge.app.factory import Factory
class TestFactory(unittest.TestCase):
"""
This class tests the app factory and its use of decorators.
"""
def test_add_valid_decorator(self):
"""
Valid decorators should be added to the internal list.
"""
factory = Factory()
self.assertListEqual(
factory._decorators,
[]
)
decorator = Decorator()
decorator2 = Decorator()
factory.add_decorator(decorator)
self.assertListEqual(
factory._decorators,
[decorator]
)
factory.add_decorator(decorator2)
self.assertListEqual(
factory._decorators,
[decorator, decorator2]
)
factory.add_decorator(decorator)
self.assertListEqual(
factory._decorators,
[decorator, decorator2, decorator]
)
def test_add_invalid_decorator(self):
"""
Invalid decorators should throw exceptions when added.
"""
factory = Factory()
with self.assertRaises(ValueError):
factory.add_decorator(None)
with self.assertRaises(ValueError):
factory.add_decorator('protomolecule')
self.assertListEqual(
factory._decorators,
[]
)
def test_create_app(self):
"""
Creating an app should return a Flask app with the input name.
"""
factory = Factory()
app = factory.create_app('Detective Miller')
self.assertIsInstance(app, flask.Flask)
self.assertEqual('Detective Miller', app.name)
def test_create_app_with_invalid_name(self):
"""
Creating an app with an invalid name should raise an exception.
"""
factory = Factory()
with self.assertRaises(ValueError):
factory.create_app(None)
def test_create_decorated_app(self):
"""
Creating an app with decorators should call the decorator function on
each added decorator (including duplicates) in the order they were
added.
"""
factory = Factory()
decorate1 = Mock(name='decorate')
decorate2 = Mock(name='decorate')
decorator1 = Decorator()
decorator2 = Decorator()
decorator1.decorate = decorate1
decorator2.decorate = decorate2
manager = Mock()
manager.attach_mock(decorate1, 'alex_kamal')
manager.attach_mock(decorate2, 'amos_burton')
factory.add_decorator(decorator1)
factory.add_decorator(decorator2)
factory.add_decorator(decorator1) # duplicate is OK!
app = factory.create_app('Rocinante')
decorate1.assert_called_with(app)
decorate2.assert_called_once_with(app)
self.assertListEqual(
manager.mock_calls,
[call.alex_kamal(app), call.amos_burton(app), call.alex_kamal(app)]
)
def test_start_app(self):
"""
Starting an app should start up a server using the app with the input
(or default) host and port.
"""
factory = Factory()
factory.add_decorator(Decorator())
app = factory.create_app('<NAME>')
app.run = Mock(name='run')
factory.start_app(app)
app.run.assert_called_once()
def test_start_app_with_host_and_port(self):
"""
Starting an app should start up a server using the app with the input
(or default) host and port.
"""
factory = Factory()
factory.add_decorator(Decorator())
app = factory.create_app('<NAME>')
app.run = Mock(name='run')
for host, port in [
(None, None),
(None, 1337),
('127.0.0.1', None),
('localhost', 8080),
('0.0.0.0', 8081),
('192.168.0.1', 54321),
('outer.space', 65432),
]:
msg = 'Valid host/port: '
msg += 'None' if host is None else host
msg += ', '
msg += 'None' if port is None else str(port)
print(msg, file=sys.stderr)
factory.start_app(app)
app.run.assert_called_once()
# TODO - Check the host and port.
app.run.reset_mock()
def test_start_invalid_app(self):
"""
Starting an invalid app object should raise an exception.
"""
factory = Factory()
with self.assertRaises(ValueError):
factory.start_app(None)
with self.assertRaises(ValueError):
factory.start_app('Star Helix')
def test_start_app_with_bad_host_or_port(self):
"""
Starting an app with an invalid host/port should raise an exception.
"""
factory = Factory()
factory.add_decorator(Decorator())
app = factory.create_app('Naomi Nagata')
app.run = Mock(name='run')
for host, port in [
('', None),
('.', None),
('256.0.0.0', None),
('192.168.0.-1', None),
('192.168.256.0', None),
('#$%!)(|2', None),
(None, -1),
(None, 0),
(None, 65536),
(None, 3.14),
]:
msg = 'Invalid host/port: '
msg += 'None' if host is None else host
msg += ', '
msg += 'None' if port is None else str(port)
print(msg, file=sys.stderr)
with self.assertRaises(ValueError):
factory.start_app(app, host, port)
```
#### File: data/models/event.py
```python
from __future__ import absolute_import
from datetime import datetime
from digital.forge.data.models.base_model_ import Model
from digital.forge.data import util
class Event(Model):
"""NOTE: This class is auto generated by OpenAPI Generator.
(https://openapi-generator.tech)
Do not edit the class manually.
"""
openapi_types = {
'archive_uuid': str,
'drone_uuid': str,
'event_time': datetime,
'event_value': str
}
attribute_map = {
'archive_uuid': 'archiveUUID',
'drone_uuid': 'droneUUID',
'event_time': 'eventTime',
'event_value': 'eventValue'
}
def __init__(self, archive_uuid=None, drone_uuid=None, event_time=None, event_value=None): # noqa: E501
"""Event - a model defined in OpenAPI
:param archive_uuid: The archive_uuid of this Event. # noqa: E501
:type archive_uuid: str
:param drone_uuid: The drone_uuid of this Event. # noqa: E501
:type drone_uuid: str
:param event_time: The event_time of this Event. # noqa: E501
:type event_time: datetime
:param event_value: The event_value of this Event. # noqa: E501
:type event_value: str
"""
if archive_uuid is None:
raise ValueError('`archive_uuid` is a required value')
self._archive_uuid = archive_uuid
if drone_uuid is None:
raise ValueError('`drone_uuid` is a required value')
self._drone_uuid = drone_uuid
if event_time is None:
raise ValueError('`event_time` is a required value')
self._event_time = event_time
if event_value is None:
raise ValueError('`event_value` is a required value')
self._event_value = event_value
@classmethod
def from_dict(cls, dikt) -> 'Event':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Event of this Event. # noqa: E501
:rtype: Event
"""
return util.deserialize_model(dikt, cls)
@property
def archive_uuid(self):
"""Gets the archive_uuid of this Event.
A unique identifier for the archive in which this data is stored. # noqa: E501
:return: The archive_uuid of this Event.
:rtype: str
"""
return self._archive_uuid
@archive_uuid.setter
def archive_uuid(self, archive_uuid):
"""Sets the archive_uuid of this Event.
A unique identifier for the archive in which this data is stored. # noqa: E501
:param archive_uuid: The archive_uuid of this Event.
:type archive_uuid: str
"""
if archive_uuid is None:
raise ValueError("Invalid value for `archive_uuid`, must not be `None`")
if archive_uuid is not None and not isinstance(archive_uuid, str):
raise ValueError("Invalid value for `archive_uuid`, must be a `str`")
self._archive_uuid = archive_uuid
@property
def drone_uuid(self):
"""Gets the drone_uuid of this Event.
A unique identifier for the drone that observed this event. # noqa: E501
:return: The drone_uuid of this Event.
:rtype: str
"""
return self._drone_uuid
@drone_uuid.setter
def drone_uuid(self, drone_uuid):
"""Sets the drone_uuid of this Event.
A unique identifier for the drone that observed this event. # noqa: E501
:param drone_uuid: The drone_uuid of this Event.
:type drone_uuid: str
"""
if drone_uuid is None:
raise ValueError("Invalid value for `drone_uuid`, must not be `None`")
if drone_uuid is not None and not isinstance(drone_uuid, str):
raise ValueError("Invalid value for `drone_uuid`, must be a `str`")
self._drone_uuid = drone_uuid
@property
def event_time(self):
"""Gets the event_time of this Event.
The time of the data measurement/collection. # noqa: E501
:return: The event_time of this Event.
:rtype: datetime
"""
return self._event_time
@event_time.setter
def event_time(self, event_time):
"""Sets the event_time of this Event.
The time of the data measurement/collection. # noqa: E501
:param event_time: The event_time of this Event.
:type event_time: datetime
"""
if event_time is None:
raise ValueError("Invalid value for `event_time`, must not be `None`")
if event_time is not None and not isinstance(event_time, datetime):
raise ValueError("Invalid value for `event_time`, must be a `datetime`")
self._event_time = event_time
@property
def event_value(self):
"""Gets the event_value of this Event.
The archived data point collected by the drone at the event time. # noqa: E501
:return: The event_value of this Event.
:rtype: str
"""
return self._event_value
@event_value.setter
def event_value(self, event_value):
"""Sets the event_value of this Event.
The archived data point collected by the drone at the event time. # noqa: E501
:param event_value: The event_value of this Event.
:type event_value: str
"""
if event_value is None:
raise ValueError("Invalid value for `event_value`, must not be `None`")
if event_value is not None and not isinstance(event_value, str):
raise ValueError("Invalid value for `event_value`, must be a `str`")
self._event_value = event_value
```
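A short sketch of constructing the generated `Event` model directly; the UUIDs and timestamp are made-up values, and the import path follows the module's own package layout:
```python
from datetime import datetime
from digital.forge.data.models.event import Event

event = Event(
    archive_uuid='9f1c2d3e-0000-0000-0000-000000000001',   # hypothetical archive
    drone_uuid='d42cd874-d6a2-4b93-9398-7650a4f81170',     # drone id reused from the monitor script
    event_time=datetime(2021, 1, 1, 12, 0, 0),
    event_value='21.5',
)
print(event.archive_uuid, event.event_time)
```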
#### File: data/server/__main__.py
```python
import argparse
from pathlib import Path
import sys
import connexion
import validators
from digital.forge.app.factory import Factory as AppFactory
from digital.forge.app.decorator.cors import CORSDecorator
from digital.forge.app.decorator.sqlalchemy import SQLAlchemyDecorator
from digital.forge.data.server import encoder
def _get_secret(name):
secret_file = Path('/run/secrets').joinpath(name)
with open(secret_file) as file:
return file.readline().strip()
def main(argv=None):
"""
The standard main method.
"""
if argv is None:
argv = sys.argv[1:]
args = _parse_args(argv)
factory = AppFactory()
factory.add_decorator(CORSDecorator())
db_user = _get_secret('psql-conductor-user')
db_pass = _get_secret('psql-conductor-pass')
db_host = _get_secret('psql-host')
db_port = _get_secret('psql-port')
db_name = _get_secret('psql-name')
factory.add_decorator(SQLAlchemyDecorator(
'postgresql://%s:%s@%s:%s/%s' %
(db_user, db_pass, db_host, db_port, db_name)
))
app = factory.create_app(name=__name__)
app.json_encoder = encoder.JSONEncoder
connexion_app = connexion.App(__name__, specification_dir='../openapi/')
# Set these since we want to replace connexion's built-in flask app and run
# method.
connexion_app.app = app
connexion_app.host = args.host
connexion_app.port = args.port
connexion_app.add_api(
'openapi.yaml',
arguments={'title': 'Forge Keeper - Conductor'},
pythonic_params=True
)
factory.start_app(app, host=args.host, port=args.port)
def _parse_args(argv):
main_parser = argparse.ArgumentParser(add_help=False)
main_parser.add_argument(
'-h', '--host',
default='0.0.0.0',
type=_parse_host,
)
main_parser.add_argument(
'-p', '--port',
default=50080,
type=_parse_port,
)
return main_parser.parse_args(argv)
def _parse_host(value):
if not validators.domain(value) and not validators.ipv4(value):
raise argparse.ArgumentTypeError(
'Host must be an FQDN or IPv4 address'
)
return value
def _parse_port(value):
try:
value = int(value)
if not 1 <= value <= 65535:
raise ValueError
except ValueError:
raise argparse.ArgumentTypeError(
'Port must be an integer between 1 and 65535'
)
return int(value)
if __name__ == '__main__':
main()
```
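The host and port options above are validated through `argparse` type callbacks; a standalone sketch of the same pattern (not the project's actual entry point) showing how a bad port is rejected:
```python
import argparse

def parse_port(value):
    # Mirrors _parse_port above: an integer in [1, 65535], else a usage error.
    try:
        port = int(value)
        if not 1 <= port <= 65535:
            raise ValueError
    except ValueError:
        raise argparse.ArgumentTypeError('Port must be an integer between 1 and 65535')
    return port

parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', default=50080, type=parse_port)
print(parser.parse_args(['-p', '8080']).port)    # 8080
# parser.parse_args(['-p', '99999']) would exit with the error message above.
```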
#### File: src/test/__init__.py
```python
import logging
import connexion
from flask_testing import TestCase
from digital.forge.data.server.encoder import JSONEncoder
class BaseTestCase(TestCase):
"""
The base class for unit tests (which in Python are classes, of course).
"""
def create_app(self):
logging.getLogger('connexion.operation').setLevel('ERROR')
openapi_dir = '/home/x88/data/git/forge-keeper/src/data-server/src/digital/forge/data/openapi'
app = connexion.App(__name__, specification_dir=openapi_dir)
app.app.json_encoder = JSONEncoder
app.add_api('openapi.yaml', pythonic_params=True)
return app.app
```
#### File: src/monitor/__main__.py
```python
import argparse
import contextlib
from datetime import datetime, timedelta
import getpass
import sys
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from sqlalchemy import create_engine
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import sessionmaker
from digital.forge.data.sql.model import Archive, Event
from monitor import __version__
@contextlib.contextmanager
def _connection(Session):
"""
A context manager for database connections.
"""
session = Session()
try:
yield session
session.commit()
except SQLAlchemyError:
session.rollback()
raise
finally:
session.close()
def main(argv=None):
"""
The main function!
"""
if argv is None:
argv = sys.argv[1:]
args = _parse_args(argv)
# Set up a way to connect to the database.
db_pass = getpass.getpass(prompt='Password: ')
uri = 'postgresql://%s:%s@%s:%s/%s' % \
(args.db_user, db_pass, args.db_host, args.db_port, args.db_name)
engine = create_engine(uri)
Session = sessionmaker(bind=engine)
# Filter inputs:
drone = 'd42cd874-d6a2-4b93-9398-7650a4f81170'
data_start = datetime.now() + timedelta(days=-1)
# Query the relevant Events.
# This could be simpler if we establish the 'relationship' in the ORM pkg.
humidity = {'times': [], 'values': []}
temp = {'times': [], 'values': []}
with _connection(Session) as db:
for record in db.query(Event.event_time, Event.event_value, Archive.units, Archive.name).\
join(Archive, Event.archive_uuid == Archive.archive_uuid).\
filter(Event.drone_uuid == drone).\
filter(Event.event_time >= data_start).\
order_by(Event.event_time.asc()):
# Store the data elsewhere for use.
store = temp
if record.name == 'humidity':
store = humidity
store['times'].append(record.event_time)
store['values'].append(record.event_value)
figure = make_subplots(specs=[[{"secondary_y": True}]])
figure.add_trace(go.Scatter(x=temp['times'], y=temp['values'], mode='lines', name='temperature'), secondary_y=False)
    figure.add_trace(go.Scatter(x=humidity['times'], y=humidity['values'], mode='lines', name='humidity'), secondary_y=True)
figure.update_layout(
showlegend=True,
title_text='Temperature and Humidity',
)
figure.update_xaxes(title_text='Time')
figure.update_yaxes(title_text='Temperature', secondary_y=False)
figure.update_yaxes(title_text='Humidity', secondary_y=True)
figure.write_image("test.png")
def _parse_args(argv):
main_parser = argparse.ArgumentParser(
        description='This utility does something neat.',
)
main_parser.add_argument(
'-v', '--version',
action='version',
help='Get the version',
version='%(prog)s ' + __version__
)
main_parser.add_argument(
'--db-host',
default='db',
help='The database host.',
metavar='DB_HOST',
type=str,
)
main_parser.add_argument(
'--db-port',
default='5432',
help='The database port.',
metavar='DB_PORT',
type=int,
)
main_parser.add_argument(
'--db-user',
default='postgres',
help='The database user.',
metavar='DB_USER',
type=str,
)
main_parser.add_argument(
'--db-name',
default='postgres',
help='The database name.',
metavar='DB_NAME',
type=str,
)
return main_parser.parse_args(argv)
if __name__ == '__main__':
main()
```
#### File: forge/drone/reader.py
```python
from datetime import datetime
import re
import sys
from queue import Queue
from threading import Thread
import serial
from serial.serialutil import SerialException
class Reader(Thread):
"""
Instances of this class transfer sensor data to a queue for further
processing by other classes.
"""
def __init__(self, data_queue=None, port=None, rate=9600, **kwargs):
"""
The default constructor.
:param data_queue: The data_queue that will receive timestamped data.
:type data_queue: queue.Queue
:param port: The serial port to read.
:type port: str
:param rate: The symbol/modulation rate for the serial comms (in bauds)
:type rate: int
"""
super().__init__(kwargs=kwargs)
if data_queue is None:
raise ValueError('`data_queue` is a required argument')
if not isinstance(data_queue, Queue):
raise ValueError('`data_queue` must be a queue.Queue')
if port is None:
raise ValueError('`port` is a required argument')
        if not re.match(r'/dev/tty(ACM|S|USB)[0-3]$', str(port)):
            raise ValueError('`port` must be a Linux serial port (/dev/ttyS, /dev/ttyACM, or /dev/ttyUSB, 0-3)')
valid_rates = [
300, 600, 1200, 2400, 4800, 9600,
14400, 19200, 28800, 38400, 57600, 115200,
]
if rate not in valid_rates:
            message = ('`rate` must be a valid rate in bauds: ' +
                       ', '.join(str(rate) for rate in valid_rates))
            raise ValueError(message)
self._data_queue = data_queue
self._port = port
self._rate = rate
self._run = False
def run(self):
"""
Starts reading sensor data.
"""
try:
with serial.Serial(self._port, self._rate, timeout=5) as conn:
self._run = True
while self._run:
data = conn.readline()
if data:
time = datetime.now()
self._data_queue.put((time, data), block=True)
except SerialException as err:
print(
'Error reading {}: {}'.format(self._port, str(err)),
file=sys.stderr
)
def stop(self, timeout=None):
"""
Stops the thread. timeout is the standard thread join timeout arg. In
other words, set it to a floating point number in seconds after which
the calling thread will unblock and continue.
"""
self._run = False
self.join(timeout)
```
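A minimal sketch of wiring the `Reader` above to a consumer; the import path and the serial device name are assumptions, and nothing arrives on the queue unless matching hardware is attached:
```python
from queue import Queue, Empty
from digital.forge.drone.reader import Reader   # assumed package path

data_queue = Queue(maxsize=100)
reader = Reader(data_queue=data_queue, port='/dev/ttyACM0', rate=9600)
reader.start()                                  # run() opens the port and reads lines
try:
    timestamp, raw = data_queue.get(timeout=10) # (datetime, bytes) pairs from run()
    print(timestamp, raw)
except Empty:
    print('no data received within 10 seconds')
finally:
    reader.stop(timeout=1.0)                    # clears the run flag and joins the thread
```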
|
{
"source": "jdeyton/Sandbox",
"score": 3
}
|
#### File: foo/pyground/LicenseFixer.py
```python
import sys
# Used to check path validity.
import os.path
# Import the logging utility.
import logging as log
# Import the regex library.
import re
# Used for running git commands and getting their output.
import subprocess
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
from datetime import datetime
__all__ = []
__version__ = 0.1
__date__ = '2015-05-28'
__updated__ = '2015-05-28'
DEBUG = 1
TESTRUN = 0
PROFILE = 0
class LicenseFixer():
'''
This class provides the functionality required to generate licenses for the
specified source files. Callers should use the method fixLicense(path) to
update the license for a particular file.
'''
# ---- global configuration ---- #
# These should only be set once.
'''
The license format string read in from the file. It should include the date,
copyright owner(s), initial author, and contributor tags for fast
replacement when generating a license for a file.
'''
_licenseFormat = None
'''
The author dictionary maps names that commonly appear in the existing
documentation or in the git history to preferred user names.
'''
_authorDictionary = None
'''
The employer dictionary maps author names to their employers. Employer names
should be appended in parentheses after the author's name in the license
text.
'''
_employerDictionary = None
'''
The default contributor set provides a list of contributors for a given
employer. All contributors should be listed in cases where the history of
the file pre-dates the git repo and has no history since the git repo's
inception.
'''
_defaultContributorSet = None
'''
The string of copyright owners to apply to each license.
'''
_defaultCopyrightOwners = None
# TODO Add the cutoff date of Nov. 4, 2014. That's when most everything was
# migrated over.
# ------------------------------ #
# ---- git metadata ---- #
_dateList = None
_authorSet = None
_isOld = False
# ---------------------- #
# ---- existing documentation metadata ---- #
_existingDateList = None
_existingAuthorSet = None
# ----------------------------------------- #
_charLimit = 80
_commentBlocks = None
def __init__(self, licenseFile, authorsFile, contributorsFile):
'''
Constructs a new LicenseFixer instance.
@param licenseFile: The path to a file containing a license format. This
file should not include any comment characters, but may include the
following format strings that will be replaced:
{DATE} - The start/end years for the license.
{COPYRIGHT_OWNERS} - All copyright owners, usually in a comma
separated list.
{AUTHOR} - The initial author that committed the work.
{CONTRIBUTORS} - Any additional contributors go here.
@param authorsFile: The path to a file containing a row of
comma-separated values for common authors. The first value in a row
should be the preferred name of an author in the documentation, while
subsequent values in the row are common names used elsewhere in the
documentation.
@param contributorsFile: The path to a file containing all "default"
contributors to add when the history of the file is unknown past a
certain point. Each contributor name should be on a separate line.
'''
# Log the specified input files.
log.debug('license file: {0}'.format(licenseFile))
log.debug('authors file: {0}'.format(authorsFile))
log.debug('contributors file: {0}'.format(contributorsFile))
# Set up the defaults. These shouldn't change.
self._createLicenseFormat(licenseFile)
self._createAuthorDictionary(authorsFile)
self._createContributorSet(contributorsFile)
self._defaultCopyrightOwners = 'UT-Battelle, LLC.'
return
def _createLicenseFormat(self, licenseFile):
'''
Reads in the content from the specified file into a string.
@param licenseFile: The file from which to read. If not a file or cannot
be opened, no license format is returned.
@return: The license text format read from the file, or the empty string
if the license text file could not be read.
'''
licenseFormat = ""
# If possible, read the license format from the file.
if os.path.isfile(licenseFile):
# The with here ensures the file is properly closed afterwards.
with open(licenseFile, 'r') as lFile:
for line in lFile:
licenseFormat += line
            # the with-statement closes the file automatically
# Otherwise, post a warning message.
else:
            log.warn('The specified file "{0}" is not a file or cannot be read.'.format(licenseFile))
# Post a debug message showing the license format read from the file.
log.debug('License format: {0}{1}'.format(os.linesep, licenseFormat))
# Set the global value.
self._licenseFormat = licenseFormat
return licenseFormat
def _createAuthorDictionary(self, authorsFile):
'''
Reads in the content from the specified file into a dictionary of
authors. Each name will be mapped to its preferred name. Alternate names
        are separated by commas after the first (preferred) name. Empty rows in
the file are ignored.
@param authorsFile: The file from which to read. If not a file or cannot
be opened, no preferred names will be stored, and all committers listed
in the git history will be listed as contributors.
@return: A dictionary of author names. The keys will be names that
appear in the metadata, while the values will be the preferred name.
'''
authorDictionary = {}
authorCount = 0;
# If possible, read the author list from the file.
if os.path.isfile(authorsFile):
# The with here ensures the file is properly closed afterwards.
with open(authorsFile, 'r') as aFile:
# Each line should be a comma-separated list of names. The
# preferred name is the first one.
for line in aFile:
names = line.split(',')
# Get the preferred name. This is the first value in the row
# that is a non-empty string after trimming spaces.
for name in names:
trimmedName = name.strip()
if trimmedName:
authorCount += 1
preferredName = trimmedName
# Map each listed, non-empty name to the preferred
# one in the dictionary.
for name in names:
trimmedName = name.strip();
if trimmedName:
# Print out the read name to the debug log.
log.debug('Common name "{0}" mapped to preferred name "{1}".'.format(trimmedName, preferredName))
authorDictionary[trimmedName] = preferredName
break
            # the with-statement closes the file automatically
# Otherwise, post a warning message.
else:
log.warn('The specified file "{0}" is not a file or cannot be read.'.format(authorsFile))
# Post a debug message showing the number of authors and common names
# read from the file.
log.debug('Authors: {0} authors, {1} total names.'.format(authorCount, len(authorDictionary)))
self._authorDictionary = authorDictionary
self._employerDictionary = {} # TODO
return authorDictionary
def _createContributorSet(self, contributorsFile):
'''
Reads in the list of default contributors from a file. This list will be
placed after the author line in the license provided the git history is
incomplete.
@param contributorsFile: The file from which to read. If not a file or
cannot be opened, no contributors will be listed when the git history is
incomplete.
@return: A set of contributor names. May be empty if the file was empty
or could not be read.
'''
contributorSet = set()
contributorCount = 0;
# If possible, read the contributor list from the file.
if os.path.isfile(contributorsFile):
# The with here ensures the file is properly closed afterwards.
with open(contributorsFile, 'r') as aFile:
# Each line can represent a list of contributors for a given
# employer. Only add non-empty lines to the set.
for line in aFile:
trimmedLine = line.strip()
if trimmedLine:
contributorSet.add(trimmedLine)
contributorCount += len(trimmedLine.split(','))
            # the with-statement closes the file automatically
# Otherwise, post a warning message.
else:
log.warn('The specified file "{0}" is not a file or cannot be read.'.format(contributorsFile))
# Post a debug message showing the number of contributors read from the
# file.
log.debug('Contributors: {0} total representing {1} employers.'.format(contributorCount, len(contributorSet)))
# Set the global value.
self._defaultContributorSet = contributorSet
return contributorSet
def generateLicense(self, path):
licenseText = ""
# Check the input path before proceeding.
if not os.path.isfile(path):
            log.warn('The specified file "{0}" is not a file or cannot be read.'.format(path))
log.warn('Generating an empty license.')
return licenseText
log.debug('Generating license for file:{0}'.format(path))
# The line separator. This is useful in various places in this method.
sep = '\n' # os.linesep
# Grab all of the comments from the file.
self._findComments(path)
# Gather all possible metadata from the file and its git history.
self._findDocMetadata(path)
self._findGitMetadata(path)
# Determine the substitutions that need to go in the license text.
dates = self._getDates()
copyrightOwners = self._getCopyrightOwners()
initialAuthor = self._getInitialAuthor()
contributorList = self._getContributors()
# Convert the contributors into a (perhaps multi-line) string.
contributorsString = ""
for contributor in contributorList:
contributorsString += contributor
contributorsString += sep
# Replace the format keys in the license format with the determined
# values for the file.
licenseFormat = self._licenseFormat
pattern = re.compile('{DATE}')
licenseFormat = pattern.sub(dates, licenseFormat);
pattern = re.compile('{COPYRIGHT_OWNERS}')
licenseFormat = pattern.sub(copyrightOwners, licenseFormat);
pattern = re.compile('{AUTHOR}')
licenseFormat = pattern.sub(initialAuthor, licenseFormat);
pattern = re.compile('{CONTRIBUTORS}')
licenseFormat = pattern.sub(contributorsString, licenseFormat);
# Build the license text using the appropriate multiline comment
# characters for the source file.
starter = self._getMultilineCommentLineStarter(path)
licenseText = self._getMultilineCommentFirstLine(path) + sep
for line in licenseFormat.splitlines():
# Get the next line of the output license text.
outputLine = starter + line
# If the line is longer than the character limit, split it over
# multiple lines. There's also logic here to not break words.
while len(outputLine) > self._charLimit:
i = outputLine.rfind(' ', 0, self._charLimit) + 1
licenseText += outputLine[:i] + sep
outputLine = starter + outputLine[i:]
# Add the last (or only) output line.
licenseText += outputLine + sep
licenseText += self._getMultilineCommentLastLine()
return licenseText
def _findComments(self, path):
'''
Finds all comment blocks and places them (excluding the comment opener,
line openers (if present), and the comment ender) as separate strings,
one for each block, in self._commentBlocks
@param path: The path to the source file in question.
@return: Nothing. _commentBlocks is modified.
'''
# Opens the file and searches for all content in multiline comments.
# Note: This is potentially dangerous as the files contents are read
# into memory. Although this is (at least currently) highly unusual for
# a source file to be beyond a few thousand lines.
with open(path, 'r') as f:
self._commentBlocks = re.findall('/\*+(.*?)\*+/', f.read(), re.DOTALL)
        # the with-statement closes the file automatically
# Replace all leading asterisks. We shouldn't destroy empty lines, hence
# the \n is omitted from the second amount of whitespace characters.
regex = re.compile('^\s*\*+[ \t\r\f\v]*', re.MULTILINE)
for i in range(len(self._commentBlocks)):
self._commentBlocks[i] = regex.sub('', self._commentBlocks[i])
return
def _findDocMetadata(self, path):
'''
Constructs all metadata that can be obtained from the specified file's
existing documentation. This includes clearing and updating
_existingDateList and _existingAuthorSet.
@param path: The path to the file whose documentation will be scanned.
@return: Nothing. _existingDateList and _existingAuthorSet are modified.
'''
# Clear out the previous metadata.
self._existingDateList = []
self._existingAuthorSet = set()
# If the header comment contains the copyright date info, try to get the
# first (and last year, if available) from it.
if len(self._commentBlocks) > 0:
headerComment = self._commentBlocks[0]
result = re.match('^.*Copyright.*?\s+(\d{4})(,\s*(\d{4}))?.*$', headerComment, re.MULTILINE)
if result:
self._existingDateList.append(int(result.group(1)))
                if result.group(3):
                    self._existingDateList.append(int(result.group(3)))
# Print the found dates to the debug log.
if len(self._existingDateList) == 0:
log.debug('Found no existing copyright date.')
else:
log.debug('Found existing copyright dates: {0}'.format(self._existingDateList))
# Find the authors for the @author tags.
regex = re.compile('^author')
for commentBlock in self._commentBlocks:
# Split the comment block into sections by @ tags.
tagSplit = commentBlock.split('@')
# Process each section after an @ sign where the first string is
# 'author' (this is in the pre-compiled regex).
for i in range(1, len(tagSplit)):
tagBlock = tagSplit[i]
result = regex.match(tagBlock)
# An author tag was found!
if result:
# Ignore the 'author' part of the tag "block".
authors = tagBlock.split()
authors.pop(0)
# Replaces all whitespace with a single space.
authors = ' '.join(authors).split(',')
# Loops over each found author and either adds their
# preferred name or the trimmed name to the set of existing
# authors.
for author in authors:
author = author.strip()
if author in self._authorDictionary:
self._existingAuthorSet.add(self._authorDictionary[author])
else:
self._existingAuthorSet.add(author)
# Print the found authors to the debug log.
if len(self._existingAuthorSet) == 0:
log.debug('Found no existing authors from author tags.')
else:
log.debug('Found existing authors from author tags: {0}'.format(self._existingAuthorSet))
return
def _findGitMetadata(self, path):
'''
Constructs all metadata that can be obtained from the specified file's
git history. This includes clearing and updating _dateList, _authorSet,
and _isOld.
@param path: The path to the file whose git history will be queried.
@return: Nothing. _dateList, _authorSet, and _isOld are modified.
'''
# Clear out the previous metadata.
self._dateList = []
self._authorSet = set()
self._isOld = False
# Call git log on the file. We need to pass --pretty=format:"%ci,%an" to
# get the log output in a simple format: yyyy-mm-dd -gmtdiff,<author>
directory = path[:path.rfind(os.sep)]
result = subprocess.check_output(['git', '-C', directory, 'log', '--pretty=format:"%ci,%an"', path], stderr=subprocess.STDOUT)
commits = result.replace('"', '').splitlines()
# Determine the years for the first and last commits.
commit = commits[0]
lastYear = int(commit[:commit.find('-')])
commit = commits[len(commits) - 1]
firstYear = int(commit[:commit.find('-')])
# Update self._dateList to hold the first (and last year if different).
self._dateList.append(firstYear)
if firstYear != lastYear:
self._dateList.append(lastYear)
# Print the found first/last date(s) to the log.
log.debug('Found the dates from the git history: {0}'.format(self._dateList))
# Determine whether the file is old.
firstDateString = commit.split()[0].split('-')
firstMonth = int(firstDateString[1])
firstDay = int(firstDateString[2])
if datetime(firstYear, firstMonth, firstDay) < datetime(2014, 11, 4):
self._isOld = True
# Print out whether or not the file is old to the log.
if self._isOld:
log.debug('The file predates the repo relocation.')
else:
log.debug('The file is more recent than the repo relocation.')
# Add all authors from the commit log to the set of authors. Use the
# preferred name if available.
authorSet = set()
for commit in commits:
authorSet.add(commit.split(',')[1])
for author in authorSet:
if author in self._authorDictionary:
self._authorSet.add(self._authorDictionary[author])
else:
self._authorSet.add(author)
# Print out the added authors to the log.
log.debug('Found authors from the git history: {0}'.format(self._authorSet))
return
def _getDates(self):
'''
Uses the current metadata for the file to determine the proper date
string to use for the file's license. If the dates span multiple years,
the returned string will be of the format "first_year, last_year".
Otherwise, the returned string will be of the format "first_year".
This method should not return a null value.
'''
dates = ""
# TODO
dates = "2001 a space odyssey"
return dates
def _getCopyrightOwners(self):
'''
Determines the copyright owner string to use for the file's license.
This method should not return a null value.
'''
copyrightOwners = self._defaultCopyrightOwners
return copyrightOwners
def _getInitialAuthor(self):
'''
Uses the current metadata for the file to determine the proper initial
author string to use for the file's license. If the file pre-dates the
move to the git repo, then the author provided by the class author tag
will be used. If the file has no such author specified, the default
author will be used.
This method should not return a null value.
'''
author = ""
# TODO
author = "PRIMECUTMIGGITYMOEMACKDADDYJIZZYBANGDOGGYDOGDAWG"
return author
def _getContributors(self):
'''
Uses the current metadata for the file to determine the proper list of
contributors to use for the file's license. If the file pre-dates the
move to the git repo AND has no history after the move, then the default
list of contributors will be used.
This method returns a *list*. It method should not return a null value,
but may return an empty list.
'''
contributors = []
# TODO
for contributor in self._defaultContributorSet:
contributors.append(contributor)
return contributors
# TODO Make these better.
def _getMultilineCommentFirstLine(self, path):
return "/*******************************************************************************"
def _getMultilineCommentLineStarter(self, path):
return " * "
def _getMultilineCommentLastLine(self):
return " *******************************************************************************/"
def fixLicense(self, path):
log.info('Processing file: {0}'.format(path))
licenseText = self.generateLicense(path)
log.info('License text:{0}{1}'.format(os.linesep, licenseText))
return
#### Main ####
def main(argv=None): # IGNORE:C0111
'''Command line options.'''
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
program_name = os.path.basename(sys.argv[0])
program_version = "v%s" % __version__
program_build_date = str(__updated__)
program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
program_license = '''%s
Created by user_name on %s.
Copyright 2015 organization_name. All rights reserved.
Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
try:
# Setup argument parser
parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-l", "--license", dest="licenseFile", default="defaultLicense.txt")
parser.add_argument("-a", "--authors", dest="authorsFile", default="defaultAuthors.txt")
parser.add_argument("-c", "--contributors", dest="contributorsFile", default="defaultContributors.txt")
parser.add_argument("-v", "--verbose", dest="verbose", action="count", help="set verbosity level [default: %(default)s]")
parser.add_argument(dest="paths", help="paths to folder(s) with source file(s) [default: %(default)s]", metavar="path", nargs='+')
# Process arguments
args = parser.parse_args()
paths = args.paths
verbose = args.verbose
licenseFile = args.licenseFile
authorsFile = args.authorsFile
contributorsFile = args.contributorsFile
# Set up the log level.
logLevel = log.INFO
if verbose > 0:
logLevel = log.DEBUG
log.basicConfig(format="%(levelname)s: %(message)s", level=logLevel)
licenseFixer = LicenseFixer(licenseFile, authorsFile, contributorsFile)
for inpath in paths:
licenseFixer.fixLicense(inpath)
return 0
except KeyboardInterrupt:
### handle keyboard interrupt ###
return 0
except Exception as e:
if DEBUG or TESTRUN:
raise(e)
indent = len(program_name) * " "
sys.stderr.write(program_name + ": " + repr(e) + "\n")
sys.stderr.write(indent + " for help use --help")
return 2
if __name__ == "__main__":
if DEBUG:
sys.argv.append("-v")
if TESTRUN:
import doctest
doctest.testmod()
if PROFILE:
import cProfile
import pstats
profile_filename = 'com.bar.foo.pyground.LicenseFixer_profile.txt'
cProfile.run('main()', profile_filename)
statsfile = open("profile_stats.txt", "wb")
p = pstats.Stats(profile_filename, stream=statsfile)
stats = p.strip_dirs().sort_stats('cumulative')
stats.print_stats()
statsfile.close()
sys.exit(0)
sys.exit(main())
```
|
{
"source": "jdf18/PirateBot",
"score": 3
}
|
#### File: jdf18/PirateBot/utils.py
```python
from json import dumps, loads
from textwrap import indent
from typing_extensions import Self
class EnvironmentContainer:
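# Reads a JSON-formatted .env file and exposes each key as an attribute;
# when the file is missing it falls back to prompting for the required keys.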
def __init__(self, filename='.env', required=()):
try:
with open(filename) as file:
variables = loads(file.read())
for key, value in variables.items():
self.__setattr__(key, value)
except FileNotFoundError:
for key in required:
value = input(f'Enter {key}: ')
self.__setattr__(key, value)
except Exception as e:
raise e
class LangContatiner:
class SubContainer:
def __init__(self, data):
self.data = data
def exists(self, name):
return bool(name in self.data)
def update(self, dictionary):
self.data.update(dictionary)
def __getattr__(self, name):
return self.data[name]
def __getitem__(self, name):
return self.data[name]
def __init__(self, locale='en_GB.lang'):
if not locale:
import locale, ctypes
locale = locale.windows_locale[ctypes.windll.kernel32.GetUserDefaultUILanguage()] + '.lang'
self.locale = locale
self.data = {}
try:
with open(locale) as file:
lines = file.readlines()
except FileNotFoundError:
lines = []  # missing locale file: leave the container empty instead of raising NameError below
except Exception as e:
raise e
for line in lines:
if line.find('#') != -1:
line = line[:line.index('#')]
if line.strip() == '':
continue
key = line[:line.index('=')].strip()
value = line[line.index('=')+1:].strip()  # strip() drops the trailing newline
parts = key.split('.')
current = self.data
for i in range(len(parts)):
part = parts[i]
if i+1 < len(parts): next_part = parts[i+1]
try:
current = current[part]
except:
needs_creating = parts[i:]
needs_creating.reverse()
curr = value
for p in needs_creating[:-1]:
curr = self.SubContainer({p:curr})
current.update({needs_creating[-1]: curr})
def __getattr__(self, name):
return self.data[name]
```
|
{
"source": "jdf3/CtCI",
"score": 4
}
|
#### File: python/10-sorting-and-searching/searchinrotatedarray.py
```python
def find_in_rotated(n, a):
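# Binary search in a rotated sorted array. When a[l] == a[m] the ordered half
# cannot be identified (duplicates are possible), so both halves are searched
# and the larger index found is returned.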
def r(n, a, l, h):
if l == h:
if a[l] == n: return l
return -1
m = (l + h) // 2
if n == a[m]:
return m
elif n < a[m]:
if a[l] < n:
return r(n, a, l, m - 1)
elif a[l] == a[m]:
return max(r(n, a, l, m-1), r(n, a, m+1, h))
else:
return r(n, a, m + 1, h)
else:
if a[h] > n:
return r(n, a, m + 1, h)
elif a[h] == a[m]:
return max(r(n, a, l, m-1), r(n, a, m+1, h))
else:
return r(n, a, l, m - 1)
return r(n, a, 0, len(a) - 1)
print("Should be 8:", find_in_rotated(5, [15, 16, 19, 20, 25, 1, 3, 4, 5, 7, 10, 14]))
```
#### File: python/10-sorting-and-searching/sortedsearchnosize.py
```python
class Listy():
def __init__(self, array):
self.len = len(array)
self.array = array
def element_at(self, i):
if i >= self.len:
return -1
else:
return self.array[i]
def sorted_no_size(listy, n):
def binary_search(a, n, l, r):
if l > r: return -1
elif l == r:
if a.element_at(l) == n: return l
else: return -1
m = (l + r) // 2
if a.element_at(m) == n:
return m
elif a.element_at(m) > n or a.element_at(m) == -1:
return binary_search(a, n, l, m - 1)
else:
return binary_search(a, n, m + 1, r)
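# Listy exposes no length, so the upper bound is doubled until element_at()
# returns the -1 sentinel, then a binary search (treating -1 as "past the end")
# runs over [0, i - 1].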
i = 1
while listy.element_at(i) != -1:
i <<= 1
return binary_search(listy, n, 0, i - 1)
print(sorted_no_size(Listy([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), 13))
```
#### File: python/8-recursion-and-dynamic-programming/eightqueens.py
```python
def queens():
# Assuming that (x, y) positions given by columns[x] = y are valid,
# determine whether placing a piece at (row, col) is valid
# We don't need to be concerned about sentinel values, since this method only
# looks to rows below "row".
def isvalid(columns, row, col):
for r in range(row):
if col == columns[r]: return False
if (row - r) == abs(col - columns[r]): return False
return True
def place(row, columns):
if row == 8:
return 1
else:
ways = 0
for col in range(8):
if isvalid(columns, row, col):
columns[row] = col
ways += place(row + 1, columns)
return ways
return place(0, [0]*8)
def printboard(columns):
for col in columns:  # columns[row] already holds the column index for that row
print(" "*col + "x" + " "*(8 - 1 - col))
print()
print(queens())
```
#### File: python/8-recursion-and-dynamic-programming/parens.py
```python
def parens(n):
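# h() yields every valid suffix given the number of currently unmatched "("
# and the opens/closes still available; a ")" is emitted only while at least
# one "(" is unmatched, which guarantees well-formed output.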
def h(current_open, opens, closes):
if opens == 0 and closes == 0:
yield ""
return
if opens > 0:
yield from map(lambda s: "(" + s, h(current_open + 1, opens - 1, closes))
if current_open > 0 and closes > 0:
yield from map(lambda s: ")" + s, h(current_open - 1, opens, closes - 1))
return h(0, n, n)
print(list(parens(2)))
print(list(parens(3)))
```
#### File: python/8-recursion-and-dynamic-programming/powerset.py
```python
def powersets(s):
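# Recursive generator: every subset of s[1:] is yielded as-is and once more
# with s[0] added, enumerating all 2**len(s) subsets lazily.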
if len(s) == 0:
yield []
return
for ps in powersets(s[1:]):
yield ps
ps2 = ps[:]
ps2.append(s[0])
yield ps2
print("HI")
print(list(powersets([1, 2, 3])))
# does not assume such things! but also, does not return an iterator
def powersets_iter(collection):
lists = [[]]
for item in collection:
clone = []
for l in lists:
clone.append(l[:])
for l in clone:
l.append(item)
lists.extend(clone)
return lists
print(list(powersets_iter([1, 2, 3])))
```
#### File: python/8-recursion-and-dynamic-programming/recursivemultiply.py
```python
def mult(a, b):
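# Shift-and-add multiplication: for each set bit at position pos of the smaller
# factor, add (big << pos) to the product, so the loop runs once per bit.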
small, big = sorted([a, b])
product = 0
pos = 0
while small > 0:
if small & 1:
product += (big << pos)
small >>= 1
pos += 1
return product
print("100 x 16 =", mult(100, 16), "\n(Should be", 100*16, ")")
print("653 x 242 =", mult(653, 242), "\n(Should be", 653*242, ")")
def multr(a, b):
def h(l, g):
if l == 0: return 0
result = 0
if l & 1:
result += g
return result + (h(l >> 1, g) << 1)
small, big = sorted([a, b])
return h(small, big)
print("100 x 16 =", multr(100, 16), "\n(Should be", 100*16, ")")
print("653 x 242 =", multr(653, 242), "\n(Should be", 653*242, ")")
# for improvement, think about how this performs for (e.g.) powers of 2, or
# powers of 2 minus 1. Can I use subtraction somehow?
```
|
{
"source": "jdfalk/xlsx-converter",
"score": 3
}
|
#### File: jdfalk/xlsx-converter/xlsx_converter.py
```python
import argparse
import logging
import os
import sys
import pandas as PD
def main():
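# Walks args.dir recursively, reads one sheet from every .xls/.xlsx workbook it
# finds and appends the rows to a single output CSV whose first line is args.header.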
parser = argparse.ArgumentParser()
parser.add_argument(
'-d', '--dir', help='Directory to start in', default=os.getcwd())
parser.add_argument('-o', '--output', help='File path and name for output')
parser.add_argument('-e', '--header', help='Header row for file', default="foo,bar")
parser.add_argument('-s', '--sheet', help='Identify Sheet in spreadsheet')
parser.add_argument('-l', '--log-level', help='Logging level (default WARNING)',
default='WARNING')
args = parser.parse_args()
if args.output is None:
sys.exit("Need output file. See xlsx_converter.py --help")
# assuming loglevel is bound to the string value obtained from the
# command line argument. Convert to upper case to allow the user to
# specify --log=DEBUG or --log=debug
numeric_level = getattr(logging, args.log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.log_level)
logging.basicConfig(
level=numeric_level,
format='%(asctime)s %(levelname)s:%(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
# converting the path this program receives to
# what python understands
output_file = os.path.abspath(args.output)
header = args.header + "\n"
# sets the header row of the output file
# open with mode="a" (append); the default mode "r" is read-only
with open(output_file, mode="a") as f:
f.write(header)
# walk the directory tree rooted at args.dir; os.walk yields
# (root, dirs, files) for every folder it visits
for root, _, files in os.walk(args.dir):
for file in files:
if file.endswith((".xls", ".xlsx")):
# declares the meaning of variable "full_path" used in this code
full_path = os.path.join(root, file)
# adding logging.debug allows the program to print the list if you want
# to see it when you enable debug logging on
# the program "full_path is: "
# is a string that displays the variable "full_path" and
# the added characters "is: "
# logging.debug uses c style formatting, the %s defines
# whatever goes there as a string.
logging.debug("full_path is: %s", full_path)
# read excel file into a dataframe, declare details about the dataframe.
# We assumed all default values.
try:
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_excel.html
data_xls = PD.read_excel(
io=full_path,
sheet_name=int(args.sheet),
skiprows=0,
header=1
)
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_csv.html
data_xls.to_csv(
output_file,
mode='a',
header=False,
index=False
)
except IndexError as err:
logging.error("Error occurred: %s", err)
logging.error("Bad file %s. Skipping and continuing", full_path)
continue
if __name__ == '__main__':
main()
```
|
{
"source": "jdfekete/progressivis",
"score": 2
}
|
#### File: progressivis/benchmarks/benchmarkit.py
```python
import numpy as np
import pandas as pd
import six
from memory_profiler import memory_usage
import sys, os, time
from collections import OrderedDict, Iterable, namedtuple
from sqlalchemy import (Table, Column, Integer, Float, String, Sequence, BLOB,
MetaData, ForeignKey, create_engine, select,
UniqueConstraint)
#from multiprocessing import Process
import matplotlib
import matplotlib.pyplot as plt
import numpy.distutils.cpuinfo as cpuinfo
import platform
import json
import cProfile
import subprocess, time
LoopVarDesc = namedtuple("LoopVarDesc", "type, title, desc, func")
# lvd = LoopVarDesc(type=str,title="X", desc="Blah, blah", func=lambda x: x.upper())
multi_processing = True # use False only for debug!
if multi_processing:
from multiprocessing import Process
else:
class Process(object):
def __init__(self, target, args):
target(*args)
def start(self):
pass
def join(self):
pass
def get_cpu_info():
return json.dumps(cpuinfo.cpu.info)
def table_exists(tbl, conn):
cur = conn.cursor()
return list(cur.execute(
"""SELECT name FROM sqlite_master WHERE type=? AND name=?""",
('table', tbl)))
def describe_table(tbl, conn):
cur = conn.cursor()
return list(cur.execute("PRAGMA table_info([{}])".format(tbl)))
def dump_table(table, db_name):
#lst_table=['bench_tbl', 'case_tbl','measurement_tbl']
engine = create_engine('sqlite:///' + db_name, echo=False)
metadata = MetaData(bind=engine)
tbl = Table(table, metadata, autoload=True)
conn = engine.connect()
#print(metadata.tables[tbl])
s = tbl.select() #select([[tbl]])
df = pd.read_sql_query(s, conn)
print(df)
class BenchEnv(object):
def __init__(self, db_name, append_mode=True):
self._db_name = db_name
self._engine = create_engine('sqlite:///' + db_name, echo=False)
self._metadata = MetaData(bind=self._engine)
self._bench_tbl = None
self._case_tbl = None
self._measurement_tbl = None
self._append_mode = append_mode
self.create_tables_if()
@property
def engine(self):
return self._engine
@property
def db_name(self):
return self._db_name
@property
def bench_tbl(self):
return self._bench_tbl
@property
def measurement_tbl(self):
return self._measurement_tbl
@property
def bench_list(self):
tbl = self._bench_tbl
s = (select([tbl]).with_only_columns([tbl.c.name]))
with self.engine.connect() as conn:
rows = conn.execute(s).fetchall()
return [e[0] for e in rows]
def create_tables_if(self):
if 'bench_tbl' in self.engine.table_names():
self._bench_tbl = Table('bench_tbl', self._metadata, autoload=True)
self._case_tbl = Table('case_tbl', self._metadata, autoload=True)
self._measurement_tbl = Table('measurement_tbl', self._metadata, autoload=True)
return
self._bench_tbl = Table('bench_tbl', self._metadata,
Column('id', Integer, Sequence('user_id_seq'), primary_key=True),
Column('name', String, unique=True),
Column('description', String),
Column('py_version', String),
Column('py_compiler', String),
Column('py_platform', String),
Column('py_impl', String),
Column('cpu_info', String),
Column('repr_type', String),
Column('user_col_label', String),
autoload=False)
self._case_tbl = Table('case_tbl', self._metadata,
Column('id', Integer, Sequence('user_id_seq'), primary_key=True),
Column('name', String, unique=True),
Column('bench_id', Integer, ForeignKey('bench_tbl.id')),
Column('corrected_by', Integer), # TODO: ref integrity
Column('description', String),
UniqueConstraint('name', 'bench_id', name='uc1'),
autoload=False)
self._measurement_tbl = Table('measurement_tbl', self._metadata,
Column('id', Integer, Sequence('user_id_seq'), primary_key=True),
Column('case_id', Integer),
Column('i_th', Integer),
Column('user_col_str', String),
Column('user_col_int', Integer),
Column('user_col_float', Float),
Column('mem_usage', Float),
Column('elapsed_time', Float),
Column('sys_time', Float),
Column('user_time', Float),
Column('ld_avg_1', Float),
Column('ld_avg_5', Float),
Column('ld_avg_15', Float),
Column('prof', BLOB),
autoload=False)
self._metadata.create_all(self._engine, checkfirst=self._append_mode)
def default_loop_var_proc(loop_var):
args = loop_var if isinstance(loop_var, tuple) else (loop_var,)
cols = {}
if isinstance(loop_var, (six.integer_types, np.integer)):
cols['user_col_int'] = int(loop_var)
elif isinstance(loop_var, float):
cols['user_col_float'] = float(loop_var)
else:
cols['user_col_str'] = str(loop_var)
return args, {}, cols
# InputProc = namedtuple("InputProc", "input_type, label, desc, inp_to_args, inp_to_col")
# lvd = LoopVarDesc(type=str,title="X", desc="Blah, blah", func=lambda x: x.upper())
class InputProc(object):
def __init__(self, label, repr_type=str):
if repr_type not in (int, float, str):
raise ValueError("{} type not allowed".format(repr_type))
type_dict = {float: 'user_col_float', int: 'user_col_int',
str: 'user_col_str'}
self._repr_type = repr_type
self._user_col = type_dict[repr_type]
self._label = label
def to_args(self, inp):
return (self._repr_type(inp),), {}
def to_value(self, inp):
return self._repr_type(inp)
def to_dict(self, inp):
return {self.user_col: self.to_value(inp)}
@property
def user_col(self):
return self._user_col
@property
def label(self):
return self._label
class BenchmarkIt(object):
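# Hypothetical usage sketch, inferred from __enter__/__call__/__exit__ below
# (names are illustrative only):
#
#   env = BenchEnv(db_name='bench.db')
#   with BenchmarkIt(env=env, name='MyBench', loop=range(10)) as bench:
#       @bench(case='fast')
#       def run_fast(n):
#           ...
#
# Every registered case is measured (time, memory, profile) once per loop
# value and per repeat when the "with" block exits.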
def __init__(self, env, name, loop, repeat=1, desc="",
input_proc=InputProc(repr_type=str, label='UserCol'),
time_bm=True, memory_bm=True, prof=True, after_loop_func=None):
self._env = env
self._name = name
self._loop = loop
self._repeat = repeat
self._desc = desc
self._input_proc = input_proc
self._time_flag = time_bm
self._mem_flag = memory_bm
self._prof_flag = prof
self._after_loop_func = after_loop_func
self._sql_id = None
self._cases = []
self._corrections = []
@staticmethod
def load(env, name):
bm = BenchmarkIt(env, name, -1)
tbl = env._bench_tbl
s = (select([tbl]).
where(tbl.c.name==bm._name))
with env.engine.connect() as conn:
row = conn.execute(s).fetchone()
row = dict(row)
repr_type = {'str':str, 'int':int, 'float':float}[row['repr_type']]
label = row['user_col_label']
bm._input_proc = InputProc(repr_type=repr_type, label=label)
return bm
def __enter__(self):
tbl = self._env._bench_tbl
ins = tbl.insert().values(name=self._name, description=self._desc,
py_version=platform.python_version(),
py_compiler=platform.python_compiler(),
py_platform=platform.platform(),
py_impl=platform.python_implementation(),
cpu_info=get_cpu_info(),
repr_type=self._input_proc._repr_type.__name__,
user_col_label=self._input_proc._label
)
with self.env.engine.connect() as conn:
conn.execute(ins)
## s = (select([tbl]).with_only_columns([tbl.c.id]).
## where(tbl.c.name==self._name))
## #http://www.rmunn.com/sqlalchemy-tutorial/tutorial.html
## with self.env.engine.connect() as conn:
## row = conn.execute(s).fetchone()
## self._sql_id = row[0]
return self
@property
def sql_id(self):
if self._sql_id is not None:
return self._sql_id
tbl = self._env._bench_tbl
s = (select([tbl]).with_only_columns([tbl.c.id, tbl.c.name]).
where(tbl.c.name==self._name))
#http://www.rmunn.com/sqlalchemy-tutorial/tutorial.html
with self.env.engine.connect() as conn:
row = conn.execute(s).fetchone()
self._sql_id = row[0]
return self._sql_id
def __call__(self, case, corrected_by=None):
self._corrections.append((case, corrected_by))
tbl = self._env._case_tbl
ins = tbl.insert().values(name=case, bench_id=self.sql_id)
with self.env.engine.connect() as conn:
conn.execute(ins)
## s = (select([tbl]).with_only_columns([tbl.c.id]).
## where(tbl.c.name==self._name))
## with self.env.engine.connect() as conn:
## row = conn.execute(s).fetchone()
## case_id = row[0]
case_id = self.get_case_id(case)
def fun(func):
runner = BenchRunner(func, case_id, corrected_by, self)
self._cases.append(runner)
return fun
def _update_corrections(self):
for case, corr_by in self._corrections:
if corr_by is None:
continue
case_id = self.get_case_id(case)
corr_id = self.get_case_id(corr_by)
tbl = self._env._case_tbl
stmt = (tbl.update().where(tbl.c.id==case_id).
values(corrected_by=corr_id))
with self.env.engine.connect() as conn:
conn.execute(stmt)
def get_case_id(self, case):
tbl = self._env._case_tbl
s = (select([tbl]).with_only_columns([tbl.c.id]).
where(tbl.c.name==case).
where(tbl.c.bench_id==self.sql_id))
with self.env.engine.connect() as conn:
row = conn.execute(s).fetchone()
return row[0]
def get_case_corrector(self, case):
if isinstance(case, six.string_types):
case_id = self.get_case_id(case)
else:
case_id = case
tbl = self._env._case_tbl
s = (select([tbl]).with_only_columns([tbl.c.corrected_by]).
where(tbl.c.id==case_id))
with self.env.engine.connect() as conn:
row = conn.execute(s).fetchone()
return row[0]
@property
def correctors(self):
tbl = self._env._case_tbl
s = select([tbl]).with_only_columns([tbl.c.corrected_by])
with self.env.engine.connect() as conn:
rows = conn.execute(s).fetchall()
return [e[0] for e in rows if e[0] is not None]
def __exit__(self, exc_type, exc_value, traceback):
if exc_value:
raise
#self.create_tables_if()
self._update_corrections()
if isinstance(self._loop, Iterable):
loop_ = self._loop
if isinstance(self._loop, (six.integer_types, np.integer)):
loop_ = range(self._loop)
for elt in self._cases:
for arg in loop_:
elt.run(arg)
if callable(self._after_loop_func):
self._after_loop_func()
@property
def _col_dict(self):
return {'case': 'Case', 'corrected_by':'Corrected by', 'i_th':'Measure',
'mem_usage': 'Memory usage', 'elapsed_time': 'Elapsed time',
'sys_time': 'System time', 'user_time': 'User time',
'ld_avg_1':'Load avg.(-1s)',
'ld_avg_5':'Load avg.(-5s)','ld_avg_15':'Load avg.(-15s)',
self.input_proc.user_col: self.input_proc.label}
@property
def case_names(self):
tbl = self._env._case_tbl
s = (select([tbl]).with_only_columns([tbl.c.name]).
where(tbl.c.bench_id==self.sql_id))
with self.env.engine.connect() as conn:
rows = conn.execute(s).fetchall()
return set([elt.name for elt in rows])
#df = pd.read_sql_query(s, conn)
#return set(df['name'].values)
def user_col_df(self, case_id):
projection = [self.input_proc.user_col]
tbl = self.env.measurement_tbl
s = (select([tbl]).with_only_columns(projection).
where(tbl.c.case_id==case_id).
order_by(tbl.c.id))
conn = self.env.engine.connect()
return pd.read_sql_query(s, conn)
def df_subtract(self, this, other, sub_cols, raw_header):
ret = []
for col in this.columns:
if col in sub_cols:
arr = this[col].values - other[col].values
else:
arr = this[col].values
key = col if raw_header else self._col_dict.get(col,col)
ret.append((key, arr))
return pd.DataFrame.from_items(ret)
def pretty_header(self, df, raw_header):
if raw_header:
return df
header = [self._col_dict.get(col,col) for col in df.columns]
df.columns = header
return df
def to_df(self, case, with_mem=True, with_times=True,
with_user_col=True, corrected=True, raw_header=False):
if isinstance(case, six.string_types):
case_id = self.get_case_id(case)
else:
case_id = case
projection = ['i_th']
projection += [self.input_proc.user_col] if with_user_col==True else []
if with_mem:
projection.append('mem_usage')
if with_times:
projection += ['elapsed_time', 'sys_time', 'user_time']
projection += ['ld_avg_1', 'ld_avg_5', 'ld_avg_15']
tbl = self.env.measurement_tbl
only_columns = [col.name for col in tbl.columns if col.name in projection]
s = (select([tbl]).with_only_columns(only_columns).
where(tbl.c.case_id==case_id).
order_by(tbl.c.id))
conn = self.env.engine.connect()
df = pd.read_sql_query(s, conn)#, index_col=index_col)
if not corrected:
return self.pretty_header(df, raw_header)
corr = self.get_case_corrector(case_id)
if corr is None:
return self.pretty_header(df, raw_header)
corr_df = self.to_df(corr, corrected=False, raw_header=True)
return self.df_subtract(df, corr_df, ['mem_usage','elapsed_time',
'sys_time', 'user_time'],
raw_header)
def plot(self, cases=None, x=None, y=None, corrected=True, plot_opt='all'):
if cases is None:
cases = self.case_names
elif isinstance(cases, six.string_types):
cases = set([cases])
else:
cases = set(cases)
if not cases.issubset(self.case_names):
raise ValueError("Unknown case(s): {}".format(cases))
#df = self.to_df(raw_header=True)
from matplotlib.lines import Line2D
favorite_colors = ['red', 'blue', 'magenta', 'orange', 'grey',
'yellow', 'black']
colors = list(matplotlib.colors.cnames.keys())
customized_colors = (favorite_colors +
[c for c in colors if c not in favorite_colors])
Bplot = namedtuple('Bplot','key, title, ylabel')
mode = "corrected mode, show " if corrected else "raw mode, show "
mode += plot_opt
plots = [Bplot('mem_usage', 'Memory usage ({})'.format(mode),
'Used memory (Mb)'),
Bplot('elapsed_time', 'Elapsed time ({})'.format(mode),
'Time (ms)'),
Bplot('sys_time', 'System time ({})'.format(mode),
'Time (ms)'),
Bplot('user_time', 'User time ({})'.format(mode),
'Time (ms)'),]
correctors_ = self.correctors
for bp in plots:
for i, case in enumerate(cases):
if corrected and self.get_case_id(case) in correctors_:
continue
df = self.to_df(case=case, raw_header=True, corrected=corrected)
repeat = df['i_th'].values.max() + 1
if x is None:
#x = df[self.input_proc.user_col]
x = range(1, len(self.user_col_df(self.get_case_id(case)))//repeat+1)
kw = {'label': case}
if plot_opt == 'all':
for r in range(repeat):
dfq = df.query('i_th=={}'.format(r))
y = dfq[bp.key].values
plt.plot(x, y, customized_colors[i], **kw)
kw = {}
elif plot_opt == 'mean':
y = df.groupby([self.input_proc.user_col])[bp.key].mean().values
plt.plot(x, y, customized_colors[i], **kw)
elif plot_opt == 'min':
y = df.groupby([self.input_proc.user_col])[bp.key].min().values
plt.plot(x, y, customized_colors[i], **kw)
elif plot_opt == 'max':
y = df.groupby([self.input_proc.user_col])[bp.key].max().values
plt.plot(x, y, customized_colors[i], **kw)
plt.title(bp.title)
plt.ylabel(bp.ylabel)
plt.xlabel(self.input_proc.label)
plt.legend()
plt.show()
def prof_stats(self, case, step='first', measurement=0):
tbl = self.env.measurement_tbl
df = self.to_df(case, with_mem=False, with_times=False,
with_user_col=True, corrected=False, raw_header=True)
if step=='last' or step==-1:
step_ = df[self.input_proc.user_col].iloc[-1]
elif step=='first' or step==0:
step_ = df[self.input_proc.user_col].iloc[0]
else:
step_ = step
case_id = self.get_case_id(case)
stmt = (tbl.select().with_only_columns([tbl.c.prof]).
where(tbl.c.case_id==case_id).
where(tbl.c.i_th==measurement).
where(tbl.c[self.input_proc.user_col]==step_)
)
with self.env.engine.connect() as conn:
row = conn.execute(stmt).fetchone()
# TODO: use a REAL tmp file
tmp_file_name = '/tmp/benchmarkit_out.prof'
with open(tmp_file_name, 'wb') as tmp_file:
tmp_file.write(row[0])
# snakeviz is launched this way for virtualenv/anaconda compatibility
c_opt = 'import sys, snakeviz.cli;sys.exit(snakeviz.cli.main())'
cmd_ = [sys.executable, '-c', c_opt, tmp_file_name]
p = subprocess.Popen(cmd_)
time.sleep(3)
p.terminate()
@property
def name(self):
return self._name
@property
def env(self):
return self._env
@property
def loop_var_proc(self):
return self._loop_var_proc
@property
def time_flag(self):
return self._time_flag
@property
def mem_flag(self):
return self._mem_flag
@property
def prof_flag(self):
return self._prof_flag
@property
def repeat(self):
return self._repeat
@property
def input_proc(self):
return self._input_proc
class BenchRunner():
def __init__(self, func, case_id, corrected_by, bench):
self._func = func
self._case_id = case_id
self._corrected_by = corrected_by
self._bench = bench
self._args = None
self._kwargs = None
@property
def bench(self):
return self._bench
@property
def env(self):
return self._bench.env
def run_times(self, args, kwargs, v, i_th):
ut, st, cut, cst, et = os.times()
self._func(*args, **kwargs)
ut_, st_, cut_, cst_, et_ = os.times()
elapsed_time = et_ - et
sys_time = st_ - st
user_time = ut_ - ut
ld_avg_1, ld_avg_5, ld_avg_15 = os.getloadavg()
## engine = create_engine('sqlite:///' + self.env.db_name, echo=True)
## metadata = MetaData()
## metadata.reflect(bind=engine)
## measurement_tbl = metadata.tables['measurement_tbl']
##if self.bench.input_proc.user_col == 'user_col_str'
stmt = (self.env.measurement_tbl.update().
where(self.env.measurement_tbl.c.case_id==self._case_id).
where(self.env.measurement_tbl.c.i_th==i_th).
where(self.env.measurement_tbl.c[self.bench.input_proc.user_col]==v).
values(
elapsed_time=elapsed_time,
sys_time=sys_time,
user_time=user_time,
ld_avg_1=ld_avg_1,
ld_avg_5=ld_avg_5,
ld_avg_15=ld_avg_15,
)
)
with self.env.engine.connect() as conn:
conn.execute(stmt)
def run_mem(self, args, kwargs, v, i_th):
mem = memory_usage((self._func, args, kwargs), max_usage=True)[0]
stmt = (self.env.measurement_tbl.update().
where(self.env.measurement_tbl.c.case_id==self._case_id).
where(self.env.measurement_tbl.c.i_th==i_th).
where(self.env.measurement_tbl.c[self.bench.input_proc.user_col]==v).
values(
mem_usage=mem,
)
)
with self.env.engine.connect() as conn:
conn.execute(stmt)
def run_prof(self, args, kwargs, v, i_th):
def to_profile():
self._func(*args, **kwargs)
# TODO: use a REAL tmp file
tmp_file_name = '/tmp/benchmarkit.prof'
cProfile.runctx('to_profile()', globals(), locals(), tmp_file_name)
with open(tmp_file_name, 'rb') as tmp_file:
prof_blob = tmp_file.read()
stmt = (self.env.measurement_tbl.update().
where(self.env.measurement_tbl.c.case_id==self._case_id).
where(self.env.measurement_tbl.c.i_th==i_th).
where(self.env.measurement_tbl.c[self.bench.input_proc.user_col]==v).
values(
prof=prof_blob,
)
)
with self.env.engine.connect() as conn:
conn.execute(stmt)
def run(self, t):
args, kwargs = self.bench.input_proc.to_args(t)
values_ = self.bench.input_proc.to_dict(t)
v = self.bench.input_proc.to_value(t)
values_.update(dict(case_id=self._case_id))
l_val = [dict(i_th=i) for i in six.moves.range(self.bench.repeat)]
#map(lambda d: d.update(values_), l_val)
for d in l_val:
d.update(values_)
ins = self.env.measurement_tbl.insert() #.values(**values_)
with self.env.engine.connect() as conn:
conn.execute(ins, l_val)
for i_th in six.moves.range(self.bench.repeat):
if self.bench.time_flag:
p = Process(target=BenchRunner.run_times, args=(self, args, kwargs, v, i_th))
p.start()
p.join()
#self.run_times(args, kwargs, v)
if self.bench.mem_flag:
p = Process(target=BenchRunner.run_mem, args=(self, args, kwargs, v, i_th))
p.start()
p.join()
if self.bench.prof_flag:
p = Process(target=BenchRunner.run_prof, args=(self, args, kwargs, v, i_th))
p.start()
p.join()
def banner(s, c='='):
hr = c*(len(s) + 2) + '\n'
s2 = ' ' + s + ' \n'
return hr + s2 + hr
def print_banner(s, c='='):
print(banner(s, c))
if __name__ == '__main__':
import argparse
import sys
from tabulate import tabulate
parser = argparse.ArgumentParser()
parser.add_argument("--db", help="measurements database", nargs=1,
required=True)
## parser.add_argument("--bench", help="bench to visualize", nargs=1,
## required=False)
parser.add_argument("--cases", help="case(s) to visualize", nargs='+',
required=False)
parser.add_argument("--summary", help="List of cases in the DB",
action='store_const', const=42, required=False)
parser.add_argument("--plot", help="Plot measurements", #action='store_const',
nargs=1, required=False, choices=['min', 'max', 'mean', 'all'])
parser.add_argument("--pstats", help="Profile stats for: case[:first] | case:last | case:step", #action='store_const',
nargs=1, required=False)
parser.add_argument("--no-corr", help="No correction", action='store_const',
const=1, required=False)
args = parser.parse_args()
db_name = args.db[0]
if args.plot:
plot_opt = args.plot[0]
benv = BenchEnv(db_name=db_name)
bench_name = benv.bench_list[0]
if args.summary:
bench = BenchmarkIt.load(env=benv, name=bench_name)
print("List of cases: {}".format(", ".join(bench.case_names)))
sys.exit(0)
corr = True
if args.no_corr:
corr = False
bench = BenchmarkIt.load(env=benv, name=bench_name)
if args.cases:
cases = args.cases
else:
cases = bench.case_names
print_banner("Cases: {}".format(" ".join(cases)))
for case in cases:
print_banner("Case: {}".format(case))
df = bench.to_df(case=case, corrected=corr)
print(tabulate(df, headers='keys', tablefmt='psql'))
if args.plot:
bench.plot(cases=cases, corrected=corr, plot_opt=plot_opt)
#bench.prof_stats("P10sH5MinMax")
#dump_table('measurement_tbl', 'prof.db')
if args.pstats:
case_step = args.pstats[0]
if ':' not in case_step:
case = case_step
step = 'first'
else:
case, step = case_step.split(':', 1)
bench.prof_stats(case, step)
```
#### File: progressivis/examples/test_multiclass_k_clusters.py
```python
from progressivis import Scheduler, Every#, log_level
from progressivis.cluster import MBKMeans, MBKMeansFilter
from progressivis.io import CSVLoader
from progressivis.vis import MCScatterPlot
from progressivis.datasets import get_dataset
from progressivis.stats import RandomTable
from progressivis.utils.psdict import PsDict
import pandas as pd
import numpy as np
import os.path
import tempfile
from progressivis.datasets.random import generate_random_multivariate_normal_csv as gen_csv
from progressivis.core import aio  # aio.run() is used at the bottom of this script
try:
s = scheduler
except NameError:
s = Scheduler()
#log_level(package="progressivis.cluster")
#dir_name = tempfile.mkdtemp(prefix='progressivis_tmp_')
dir_name = os.path.join(tempfile.gettempdir(), 'progressivis_tmp_')
os.makedirs(dir_name, exist_ok=True)
file_name = os.path.join(dir_name, "foobar.csv")
gen_csv(file_name, rows=99999, reset=True) #, header='_0,_1', reset=False)
data = CSVLoader(file_name, skipinitialspace=True, header=None, index_col=False,scheduler=s)
n_clusters = 3
mbkmeans = MBKMeans(columns=['_0', '_1'], n_clusters=n_clusters, batch_size=100, tol=0.01, is_input=False, scheduler=s)
classes = []
for i in range(n_clusters):
cname = f"k{i}"
filt = MBKMeansFilter(i)
filt.create_dependent_modules(mbkmeans, data, 'table')
classes.append({'name': cname, 'x_column': '_0',
'y_column': '_1', 'sample': mbkmeans if i==0 else None,
'input_module': filt, 'input_slot': 'table'})
sp = MCScatterPlot(scheduler=s, classes=classes)
sp.create_dependent_modules()
for i in range(n_clusters):
cname = f"k{i}"
sp[cname].min_value._table = PsDict({'_0': -np.inf, '_1': -np.inf})
sp[cname].max_value._table = PsDict({'_0': np.inf, '_1': np.inf})
mbkmeans.input.table = data.output.table
mbkmeans.create_dependent_modules()
sp.move_point = mbkmeans.moved_center # for input management
def myprint(d):
if d['convergence']!='unknown':
print(d)
else:
print('.', end='')
prn = Every(scheduler=s, proc=print)
prn.input.df = mbkmeans.output.conv
if __name__ == '__main__':
#data.start()
#s.join()
aio.run(s.start())
```
#### File: progressivis/examples/test_scatterplot.py
```python
from progressivis import Scheduler, Every
from progressivis.vis import ScatterPlot
from progressivis.io import CSVLoader
from progressivis.datasets import get_dataset
def filter_(df):
l = df['pickup_longitude']
return df[(l < -70) & (l > -80) ]
def print_len(x):
if x is not None:
print(len(x))
#log_level()
try:
s = scheduler
except:
s = Scheduler()
csv = CSVLoader(get_dataset('bigfile'),header=None,index_col=False,force_valid_ids=True,scheduler=s)
pr = Every(scheduler=s)
pr.input.df = csv.output.table
scatterplot = ScatterPlot(x_column='_1', y_column='_2', scheduler=s)
scatterplot.create_dependent_modules(csv,'table')
if __name__=='__main__':
csv.start()
s.join()
print(len(csv.df()))
```
#### File: progressivis/core/changemanager_dict.py
```python
from .changemanager_base import BaseChangeManager
from ..utils.psdict import PsDict
from ..table.tablechanges import TableChanges
from .slot import Slot
import copy
class DictChangeManager(BaseChangeManager):
"""
Manage changes that occured in a DataFrame between runs.
"""
def __init__(self,
slot,
buffer_created=True,
buffer_updated=True,
buffer_deleted=True,
buffer_exposed=False,
buffer_masked=False):
super(DictChangeManager, self).__init__(
slot,
buffer_created,
buffer_updated,
buffer_deleted,
buffer_exposed,
buffer_masked)
self._last_dict = None
data = slot.data()
if data.changes is None:
data.changes = TableChanges()
def reset(self, name=None):
super(DictChangeManager, self).reset(name)
self._last_dict = None
def update(self, run_number, data, mid):
# pylint: disable=unused-argument
assert isinstance(data, PsDict)
if data is None or (run_number != 0 and
run_number <= self._last_update):
return
data.fix_indices()
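# Diff the current dict against the copy kept from the previous run to fill
# the created/updated/deleted change buffers.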
last_dict = self._last_dict
if last_dict is None:
data.changes.add_created(data.ids)
else:
data.changes.add_created(data.new_indices(last_dict))
data.changes.add_updated(data.updated_indices(last_dict))
data.changes.add_deleted(data.deleted_indices(last_dict))
changes = data.compute_updates(self._last_update, run_number, mid)
self._last_dict = copy.copy(data)
self._last_update = run_number
self._row_changes.combine(changes,
self.created.buffer,
self.updated.buffer,
self.deleted.buffer)
Slot.add_changemanager_type(PsDict, DictChangeManager)
```
#### File: progressivis/core/config.py
```python
import os
from contextlib import contextmanager
options = {}
default_values = {}
def get_default_val(pat):
return default_values.get(pat)
def _get_option(pat, default_val=None):
return options[pat] if pat in options else default_val
def _set_option(pat, val, default_val=None):
options[pat] = val
if default_val is not None:
default_values[pat] = default_val
def _register_option(pat, val, default_val=None):
_set_option(pat, val, default_val)
get_option = _get_option
set_option = _set_option
register_option = _register_option
class option_context(object):
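# Sketch of intended use (the keys must already be registered, see bottom of file):
#   with option_context('display.max_rows', 100, 'display.precision', 3):
#       ...  # previous values are restored on exit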
def __init__(self, *args):
if not (len(args) % 2 == 0 and len(args) >= 2):
raise ValueError('Need to invoke as '
'option_context(pat, val, [(pat, val), ...]).')
self.ops = list(zip(args[::2], args[1::2]))
self.undo = None
def __enter__(self):
undo = []
for pat, val in self.ops:
undo.append((pat, get_option(pat)))
self.undo = undo
for pat, val in self.ops:
set_option(pat, val)
def __exit__(self, *args):
if self.undo:
for pat, val in self.undo:
set_option(pat, val)
@contextmanager
def config_prefix(prefix):
global get_option, set_option, register_option
def wrap(func):
def inner(key, *args, **kwds):
pkey = '%s.%s' % (prefix, key)
return func(pkey, *args, **kwds)
return inner
__get_option = get_option
__set_option = set_option
__register_option = register_option
set_option = wrap(set_option)
get_option = wrap(get_option)
register_option = wrap(register_option)
yield None
set_option = __set_option
get_option = __get_option
register_option = __register_option
storage_ = os.getenv('PROGRESSIVIS_STORAGE')
if storage_ is None:
storage_ = 'mmap'
if len(options) == 0:
register_option('display.precision', 6)
register_option('display.float_format', None)
register_option('display.column_space', 12)
register_option('display.max_rows', 12)
register_option('display.max_columns', 20)
register_option('storage.default', storage_)
```
#### File: progressivis/core/scheduler.py
```python
import logging
import functools
from timeit import default_timer
import time
from .dataflow import Dataflow
from . import aio
from ..utils.errors import ProgressiveError
logger = logging.getLogger(__name__)
__all__ = ['Scheduler']
KEEP_RUNNING = 5
SHORTCUT_TIME = 1.5
class Scheduler(object):
"Base Scheduler class, runs progressive modules"
# pylint: disable=too-many-public-methods,too-many-instance-attributes
default = None
_last_id = 0
@classmethod
def or_default(cls, scheduler):
"Return the specified scheduler of, in None, the default one."
return scheduler or cls.default
def __init__(self, interaction_latency=1):
if interaction_latency <= 0:
raise ProgressiveError('Invalid interaction_latency, '
'should be strictly positive: %s'
% interaction_latency)
# same as clear below
Scheduler._last_id += 1
self._name = Scheduler._last_id
self._modules = {}
self._dependencies = None
self._running = False
self._stopped = True
self._runorder = None
self._new_modules = None
self._new_dependencies = None
self._new_runorder = None
self._new_reachability = None
self._start = None
self._step_once = False
self._run_number = 0
self._tick_procs = []
self._tick_once_procs = []
self._idle_procs = []
self.version = 0
self._run_list = []
self._run_index = 0
self._module_selection = None
self._selection_target_time = -1
self.interaction_latency = interaction_latency
self._reachability = {}
self._start_inter = 0
self._hibernate_cond = None
self._keep_running = KEEP_RUNNING
self.dataflow = Dataflow(self)
self.module_iterator = None
self._enter_cnt = 1
self._lock = None
self._task = None
# self.runners = set()
self.shortcut_evt = None
self.coros = []
async def shortcut_manager(self):
if self.shortcut_evt is None:
self.shortcut_evt = aio.Event()
while True:
await self.shortcut_evt.wait()
if self._stopped or not self._run_list:
break
await aio.sleep(SHORTCUT_TIME)
self._module_selection = None
self.shortcut_evt.clear()
def new_run_number(self):
self._run_number += 1
return self._run_number
def __enter__(self):
if self.dataflow is None:
self.dataflow = Dataflow(self)
self._enter_cnt = 1
else:
self._enter_cnt += 1
return self.dataflow
def __exit__(self, exc_type, exc_value, traceback):
self._enter_cnt -= 1
if exc_type is None:
if self._enter_cnt == 0:
self._commit(self.dataflow)
self.dataflow = None
else:
logger.info('Aborting Dataflow with exception %s', exc_type)
if self._enter_cnt == 0:
self.dataflow.aborted()
self.dataflow = None
@property
def name(self):
"Return the scheduler id"
return str(self._name)
def timer(self):
"Return the scheduler timer."
if self._start is None:
self._start = default_timer()
return 0
return default_timer()-self._start
def run_queue_length(self):
"Return the length of the run queue"
return len(self._run_list)
def to_json(self, short=True):
"Return a dictionary describing the scheduler"
msg = {}
mods = {}
for (name, module) in self.modules().items():
mods[name] = module.to_json(short=short)
modules = sorted(mods.values(),
key=functools.cmp_to_key(self._module_order))
msg['modules'] = modules
msg['is_running'] = self.is_running()
msg['is_terminated'] = self.is_terminated()
msg['run_number'] = self.run_number()
msg['status'] = 'success'
return msg
def _repr_html_(self):
html_head = "<div type='schedule'>"
html_head = """
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>"""
html_end = "</div>"
html_head += """
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th>Id</th><th>Class</th><th>State</th><th>Last Update</th><th>Order</th>
</tr>
</thead>
<tbody>"""
columns = ['id', 'classname', 'state', 'last_update', 'order']
for mod in self._run_list:
values = mod.to_json(short=True)
html_head += "<tr>"
html_head += "".join(["<td>%s</td>" %
(values[column]) for column in columns])
html_end = "</tbody></table>"
return html_head + html_end
@staticmethod
def set_default():
"Set the default scheduler."
if not isinstance(Scheduler.default, Scheduler):
Scheduler.default = Scheduler()
def _before_run(self):
logger.debug("Before run %d", self._run_number)
def _after_run(self):
pass
async def start_impl(self, tick_proc=None, idle_proc=None, coros=()):
if self._lock is None:
self._lock = aio.Lock()
async with self._lock:
if self._task is not None:
raise ProgressiveError('Trying to start scheduler task'
' inside scheduler task')
self._task = True
self.coros = list(coros)
if tick_proc:
assert callable(tick_proc) or aio.iscoroutinefunction(tick_proc)
self._tick_procs = [tick_proc]
else:
self._tick_procs = []
if idle_proc:
assert callable(idle_proc)
self._idle_procs = [idle_proc]
else:
self._idle_procs = []
await self.run()
async def start(self, tick_proc=None, idle_proc=None, coros=(), persist=False):
if not persist:
return await self.start_impl(tick_proc, idle_proc, coros)
try:
from ..storage import init_temp_dir_if, cleanup_temp_dir, temp_dir
itd_flag = init_temp_dir_if()
if itd_flag:
print("Init TEMP_DIR in start()", temp_dir())
return await self.start_impl(tick_proc, idle_proc, coros)
finally:
if itd_flag:
cleanup_temp_dir()
def task_start(self, *args, **kwargs):
return aio.create_task(self.start(*args, **kwargs))
def _step_proc(self, s, run_number):
# pylint: disable=unused-argument
self.task_stop()
async def step(self):
"Start the scheduler for on step."
await self.start(tick_proc=self._step_proc)
def on_tick(self, tick_proc):
"Set a procedure to call at each tick."
assert callable(tick_proc)
self._tick_procs.append(tick_proc)
def remove_tick(self, tick_proc):
"Remove a tick callback"
self._tick_procs.remove(tick_proc)
def on_tick_once(self, tick_proc):
"""
Add a oneshot function that will be run at the next scheduler tick.
This is especially useful for setting up module connections.
"""
assert callable(tick_proc)
self._tick_once_procs.append(tick_proc)
def remove_tick_once(self, tick_proc):
"Remove a tick once callback"
self._tick_once_procs.remove(tick_proc)
def on_idle(self, idle_proc):
"Set a procedure that will be called when there is nothing else to do."
assert callable(idle_proc)
self._idle_procs.append(idle_proc)
def remove_idle(self, idle_proc):
"Remove an idle callback."
assert callable(idle_proc)
self._idle_procs.remove(idle_proc)
def idle_proc(self):
pass
async def run(self):
"Run the modules, called by start()."
global KEEP_RUNNING
# from .sentinel import Sentinel
# sl = Sentinel(scheduler=self)
if self.dataflow:
assert self._enter_cnt == 1
self._commit(self.dataflow)
self.dataflow = None
self._enter_cnt = 0
self._stopped = False
self._running = True
self._start = default_timer()
self._before_run()
# if self._new_modules:
# self._update_modules()
runners = [self._run_loop(), self.shortcut_manager()]
runners.extend([aio.create_task(coro)
for coro in self.coros])
# runners.append(aio.create_task(self.unlocker(), "unlocker"))
# TODO: find the "right" initialisation value ...
KEEP_RUNNING = min(50, len(self._run_list) * 3)
self._keep_running = KEEP_RUNNING
await aio.gather(*runners)
modules = [self._modules[m] for m in self._runorder]
for module in reversed(modules):
module.ending()
self._running = False
self._stopped = True
self._after_run()
self.done()
async def _run_loop(self):
"""Main scheduler loop."""
# pylint: disable=broad-except
if self._hibernate_cond is None:
self._hibernate_cond = aio.Condition()
for module in self._next_module():
if self.no_more_data() and self.all_blocked() and \
self.is_waiting_for_input():
if not self._keep_running:
async with self._hibernate_cond:
await self._hibernate_cond.wait()
if self._keep_running:
self._keep_running -= 1
if not self._consider_module(module):
logger.info("Module %s not scheduled"
" because of interactive mode",
module.name)
continue
# increment the run number, even if we don't call the module
self._run_number += 1
# import pdb; pdb.set_trace()
module.prepare_run(self._run_number)
if not(module.is_ready()
or self.has_input()
or module.is_greedy()):
logger.info("Module %s not scheduled"
" because not ready and has no input",
module.name)
continue
await self._run_tick_procs()
module.run(self._run_number)
await module.after_run(self._run_number)
await aio.sleep(0)
if self.shortcut_evt is not None:
self.shortcut_evt.set()
def _next_module(self):
"""
Generator that yields a possibly infinite sequence of modules.
Handles order recomputation and starting logic if needed.
"""
self._run_index = 0
first_run = self._run_number
input_mode = self.has_input()
self._start_inter = 0
while not self._stopped:
# Apply changes in the dataflow
if self._new_modules:
self._update_modules()
self._run_index = 0
first_run = self._run_number
# If run_list empty, we're done
if not self._run_list:
break
# Check for interactive input mode
if input_mode != self.has_input():
if input_mode: # end input mode
logger.info('Ending interactive mode after %s s',
default_timer()-self._start_inter)
self._start_inter = 0
input_mode = False
else:
self._start_inter = default_timer()
logger.info('Starting interactive mode at %s',
self._start_inter)
input_mode = True
# Restart from beginning
self._run_index = 0
first_run = self._run_number
module = self._run_list[self._run_index]
self._run_index += 1 # allow it to be reset
yield module
if self._run_index >= len(self._run_list): # end of modules
self._end_of_modules(first_run)
first_run = self._run_number
def all_blocked(self):
"Return True if all the modules are blocked, False otherwise"
from .module import Module
for module in self._run_list:
if module.state != Module.state_blocked:
return False
return True
def is_waiting_for_input(self):
"Return True if there is at least one input module"
for module in self._run_list:
if module.is_input():
return True
return False
def no_more_data(self):
"Return True if at least one module has data input."
for module in self._run_list:
if module.is_data_input():
return False
return True
def _commit(self, dataflow):
assert dataflow.version == self.version
self._new_runorder = dataflow.order_modules() # raises if invalid
self._new_modules = dataflow.modules()
self._new_dependencies = dataflow.inputs
dataflow._compute_reachability(self._new_dependencies)
self._new_reachability = dataflow.reachability
self.dataflow.committed()
self.version += 1 # only increment if valid
# The slots in the module,_modules, and _runorder will be updated
# in _update_modules when the scheduler decides it is time to do so.
if not self._running: # no need to delay updating the scheduler
self._update_modules()
def _update_modules(self):
if not self._new_modules:
return
prev_keys = set(self._modules.keys())
modules = {module.name: module for module in self._new_modules}
keys = set(modules.keys())
added = keys - prev_keys
deleted = prev_keys - keys
if deleted:
logger.info("Scheduler deleted modules %s", deleted)
for mid in deleted:
self._modules[mid].ending()
self._modules = modules
if not(deleted or added):
logger.info("Scheduler updated with no new module(s)")
self._dependencies = self._new_dependencies
self._new_dependencies = None
self._reachability = self._new_reachability
self._new_reachability = None
logger.info("New dependencies: %s", self._dependencies)
for mid, slots in self._dependencies.items():
modules[mid].reconnect(slots)
if added:
logger.info("Scheduler adding modules %s", added)
for mid in added:
modules[mid].starting()
self._new_modules = None
self._run_list = []
self._runorder = self._new_runorder
self._new_runorder = None
logger.info("New modules order: %s", self._runorder)
for i, mid in enumerate(self._runorder):
module = self._modules[mid]
self._run_list.append(module)
module.order = i
def _end_of_modules(self, first_run):
# Reset interaction mode
#self._proc_interaction_opts()
self._selection_target_time = -1
new_list = [m for m in self._run_list if not m.is_terminated()]
self._run_list = new_list
if first_run == self._run_number: # no module ready
has_run = False
for proc in self._idle_procs:
# pylint: disable=broad-except
try:
logger.debug('Running idle proc')
proc(self, self._run_number)
has_run = True
except Exception as exc:
logger.error(exc)
if not has_run:
logger.info('sleeping %f', 0.2)
time.sleep(0.2)
self._run_index = 0
async def idle_proc_runner(self):
has_run = False
for proc in self._idle_procs:
# pylint: disable=broad-except
try:
logger.debug('Running idle proc')
if aio.iscoroutinefunction(proc):
await proc(self, self._run_number)
else:
proc(self, self._run_number)
has_run = True
except Exception as exc:
logger.error(exc)
if not has_run:
logger.info('sleeping %f', 0.2)
await aio.sleep(0.2)
async def _run_tick_procs(self):
# pylint: disable=broad-except
for proc in self._tick_procs:
logger.debug('Calling tick_proc')
try:
if aio.iscoroutinefunction(proc):
await proc(self, self._run_number)
else:
proc(self, self._run_number)
except Exception as exc:
logger.warning(exc)
for proc in self._tick_once_procs:
try:
if aio.iscoroutinefunction(proc):
await proc()
else:
proc(self, self._run_number)
except Exception as exc:
logger.warning(exc)
self._tick_once_procs = []
async def stop(self):
"Stop the execution."
if self.shortcut_evt is not None:
self.shortcut_evt.set()
async with self._hibernate_cond:
self._keep_running = KEEP_RUNNING
self._hibernate_cond.notify()
self._stopped = True
def task_stop(self):
if self.is_running():
return aio.create_task(self.stop())
def is_running(self):
"Return True if the scheduler is currently running."
return self._running
def is_stopped(self):
"Return True if the scheduler is stopped."
return self._stopped
def is_terminated(self):
"Return True if the scheduler is terminated."
for module in self.modules().values():
if not module.is_terminated():
return False
return True
def done(self):
self._task = None
logger.info('Task finished')
def __len__(self):
return len(self._modules)
def exists(self, moduleid):
"Return True if the moduleid exists in this scheduler."
return moduleid in self
def modules(self):
"Return the dictionary of modules."
return self._modules
def __getitem__(self, mid):
if self.dataflow:
return self.dataflow[mid]
return self._modules.get(mid, None)
def __delitem__(self, name):
if self.dataflow:
del self.dataflow[name]
else:
raise ProgressiveError('Cannot delete module %s'
'outside a context' % name)
def __contains__(self, name):
if self.dataflow:
return name in self.dataflow
return name in self._modules
def run_number(self):
"Return the last run number."
return self._run_number
async def for_input(self, module):
"""
Notify this scheduler that the module has received input
that should be served fast.
"""
async with self._hibernate_cond:
self._keep_running = KEEP_RUNNING
self._hibernate_cond.notify()
sel = self._reachability.get(module.name, False)
if sel:
if not self._module_selection:
logger.info('Starting input management')
self._module_selection = set(sel)
self._selection_target_time = (self.timer() +
self.interaction_latency)
else:
self._module_selection.update(sel)
logger.debug('Input selection for module: %s',
self._module_selection)
self.shortcut_evt.set()
return self.run_number()+1
def has_input(self):
"Return True of the scheduler is in input mode"
if self._module_selection is None:
return False
if not self._module_selection: # empty, cleanup
logger.info('Finishing input management')
self._module_selection = None
self._selection_target_time = -1
return False
return True
def _consider_module(self, module):
# FIXME: for now, accept all modules in input management
if not self.has_input():
return True
if module.name in self._module_selection:
# self._module_selection.remove(module.name)
logger.debug('Module %s ready for scheduling', module.name)
return True
logger.debug('Module %s NOT ready for scheduling', module.name)
return False
def time_left(self):
"Return the time left to run for this slot."
if self._selection_target_time <= 0 and not self.has_input():
logger.error('time_left called with no target time')
return 0
return max(0, self._selection_target_time - self.timer())
def fix_quantum(self, module, quantum):
"Fix the quantum of the specified module"
if self.has_input() and module.name in self._module_selection:
quantum = self.time_left() / len(self._module_selection)
if quantum == 0:
quantum = 0.1
logger.info('Quantum is 0 in %s, setting it to'
' a reasonable value', module.name)
return quantum
def close_all(self):
"Close all the resources associated with this scheduler."
for mod in self.modules().values():
# pylint: disable=protected-access
if (hasattr(mod, '_table') and
mod._table is not None and
mod._table.storagegroup is not None):
mod._table.storagegroup.close_all()
if (hasattr(mod, '_params') and
mod._params is not None and
mod._params.storagegroup is not None):
mod._params.storagegroup.close_all()
if (hasattr(mod, 'storagegroup') and
mod.storagegroup is not None):
mod.storagegroup.close_all()
@staticmethod
def _module_order(x, y):
if 'order' in x:
if 'order' in y:
return x['order']-y['order']
return 1
if 'order' in y:
return -1
return 0
if Scheduler.default is None:
Scheduler.default = Scheduler()
```
#### File: progressivis/core/tracer_base.py
```python
from abc import ABCMeta, abstractmethod
class Tracer(metaclass=ABCMeta):
default = None
@abstractmethod
def start_run(self, ts, run_number, **kwds):
pass
@abstractmethod
def end_run(self, ts, run_number, **kwds):
pass
@abstractmethod
def run_stopped(self, ts, run_number, **kwds):
pass
@abstractmethod
def before_run_step(self, ts, run_number, **kwds):
pass
@abstractmethod
def after_run_step(self, ts, run_number, **kwds):
pass
@abstractmethod
def exception(self, ts, run_number, **kwds):
pass
@abstractmethod
def terminated(self, ts, run_number, **kwds):
pass
@abstractmethod
def trace_stats(self, max_runs=None):
_ = max_runs # keeps pylint mute
return []
```
#### File: progressivis/datasets/random.py
```python
import numpy as np
import csv
import os
import os.path
# filename='data/bigfile.csv'
# rows = 1000000
# cols = 30
def generate_random_csv(filename, rows, cols, seed=1234):
if os.path.exists(filename):
return filename
try:
with open(filename, 'w') as csvfile:
writer = csv.writer(csvfile)
np.random.seed(seed=seed)
for _ in range(0,rows):
row=list(np.random.normal(loc=0.5, scale=0.8, size=cols))
writer.writerow(row)
except (KeyboardInterrupt, SystemExit):
os.remove(filename)
raise
return filename
def generate_random_multivariate_normal_csv(filename, rows, seed=1234, header=None, reset=False):
"""
Adapted from: https://github.com/e-/PANENE/blob/master/examples/kernel_density/online.py
Author: <NAME>
Date: February 2019
"""
if isinstance(filename, str) and os.path.exists(filename) and not reset:
return filename
def mv(n, mean, cov):
return np.random.multivariate_normal(mean, cov, size=(n)).astype(np.float32)
N = rows//3
X = np.concatenate((
mv(N, [0.1, 0.3], [[0.01, 0], [0, 0.09]]),
mv(N, [0.7, 0.5], [[0.04, 0], [0, 0.01]]),
mv(N, [-0.4, -0.3], [[0.09, 0.04], [0.04, 0.02]])
), axis=0)
np.random.shuffle(X)
kw = {} if header is None else dict(header=header, comments='')
np.savetxt(filename, X, delimiter=',', **kw)
return filename
```
#### File: progressivis/io/vec_loader.py
```python
import numpy as np
import re
import logging
logger = logging.getLogger(__name__)
from bz2 import BZ2File
from gzip import GzipFile
from progressivis.table.table import Table
from progressivis.table.module import TableModule
from sklearn.feature_extraction import DictVectorizer
PATTERN = re.compile(r'\(([0-9]+),([-+.0-9]+)\)[ ]*')
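# A .vec document line is a sequence of "(term_index,term_weight)" pairs;
# PATTERN captures the index and the weight of one pair per match.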
def vec_loader(filename, dtype=np.float64):
"""Loads a tf-idf file in .vec format (or .vec.bz2).
Loads a file and returns a scipy sparse matrix of document features.
>>> from progressivis.datasets import get_dataset
>>> mat,features=vec_loader(get_dataset('warlogs'))
>>> mat.shape
(3077, 4337)
"""
openf=open
if filename.endswith('.bz2'):
openf=BZ2File
elif filename.endswith('.gz') or filename.endswith('.Z'):
openf=GzipFile
with openf(filename) as f:
dataset = []
for d in f:
doc = {}
for match in re.finditer(PATTERN, d.decode('ascii', errors='ignore')):
termidx = int(match.group(1))
termfrx = dtype(match.group(2))
doc[termidx] = termfrx
if len(doc)!=0:
dataset.append(doc)
dictionary = DictVectorizer(dtype=dtype,sparse=True)
return (dictionary.fit_transform(dataset), dictionary.get_feature_names())
class VECLoader(TableModule):
def __init__(self, filename, dtype=np.float64, **kwds):
super(VECLoader, self).__init__(**kwds)
self._dtype = dtype
self.default_step_size = kwds.get('chunksize', 10) # initial guess
openf=open
if filename.endswith('.bz2'):
openf=BZ2File
elif filename.endswith('.gz') or filename.endswith('.Z'):
openf=GzipFile
self.f = openf(filename)
# When created with a specified chunksize, it returns the parser
self._rows_read = 0
self._csr_matrix = None
self.result = Table(self.generate_table_name('documents'),
dshape="{document: var * float32}",
fillvalues={'document': 0},
storagegroup=self.storagegroup)
def rows_read(self):
return self._rows_read
def toarray(self):
if self._csr_matrix is None:
docs = self.table()['document']
dv=DictVectorizer()
#TODO: race condition when using threads, cleanup_run can reset between
#setting the value here and returning it at the next instruction
self._csr_matrix = dv.fit_transform(docs)
return self._csr_matrix
def cleanup_run(self, run_number):
self._csr_matrix = None
super(VECLoader, self).cleanup_run(run_number)
def run_step(self,run_number,step_size, howlong):
if step_size==0: # bug
logger.error('Received a step_size of 0')
#return self._return_run_step(self.state_ready, steps_run=0, creates=0)
print('step_size %d'%step_size)
if self.f is None:
raise StopIteration()
dataset = []
dims = 0
try:
while len(dataset) < step_size:
line = next(self.f)
line=line.rstrip(b'\n\r')
if len(line)==0:
continue
doc = {}
                for match in re.finditer(PATTERN, line.decode('ascii', errors='ignore')):
termidx = int(match.group(1))
termfrx = self._dtype(match.group(2))
doc[termidx] = termfrx
dims = max(dims, termidx)
if len(doc)!=0:
dataset.append(doc)
except StopIteration:
self.f.close()
self.f = None
creates = len(dataset)
if creates==0:
raise StopIteration()
dims += 1
documents = self.result['document']
if self._rows_read == 0:
documents.set_shape((dims,))
else:
current_dims = documents.shape[1]
if current_dims < dims:
documents.set_shape((dims,))
else:
dims = current_dims
self.result.resize(self._rows_read+creates)
        tmp = np.zeros(dims, dtype=np.float64)  # np.float is removed in recent numpy
i = self._rows_read
#with self.lock:
if True:
for row in dataset:
tmp[:] = 0
for (col, val) in row.items():
tmp[col] = val
documents[i] = tmp
i += 1
self._rows_read += creates
return self._return_run_step(self.state_ready, steps_run=creates, creates=creates)
if __name__ == "__main__":
import doctest
doctest.testmod()
```
#### File: progressivis/linalg/linear_map.py
```python
import numpy as np
from ..core.utils import indices_len, fix_loc, filter_cols
from ..table.module import TableModule
from ..table.table import Table
from ..table.dshape import dshape_projection
from ..core.decorators import process_slot, run_if_any
from .. import SlotDescriptor
class LinearMap(TableModule):
inputs = [SlotDescriptor('vectors', type=Table, required=True),
SlotDescriptor('transformation', type=Table, required=True)]
def __init__(self, transf_columns=None, **kwds):
super().__init__(**kwds)
self._k_dim = len(self._columns) if self._columns else None
self._transf_columns = transf_columns
self._kwds = {} # self._filter_kwds(kwds, ufunc)
self._transf_cache = None
def reset(self):
if self.result is not None:
self.result.resize(0)
self._transf_cache = None
@process_slot("vectors", "transformation", reset_cb="reset")
@run_if_any
def run_step(self, run_number, step_size, howlong):
"""
vectors: (n, k)
transf: (k, m)
result: (n, m)
"""
with self.context as ctx:
vectors = ctx.vectors.data()
if not self._k_dim:
self._k_dim = len(vectors.columns)
trans = ctx.transformation
transformation = trans.data()
trans.clear_buffers()
if len(transformation) < self._k_dim:
if trans.output_module.state <= self.state_blocked:
return self._return_run_step(self.state_blocked,
steps_run=0)
else: # transformation.output_module is zombie etc.=> no hope
raise ValueError("vectors size don't match "
"the transformation matrix shape")
elif len(transformation) > self._k_dim:
raise ValueError("vectors size don't match "
" the transformation matrix shape (2)")
# here len(transformation) == self._k_dim
if self._transf_cache is None:
tf = filter_cols(transformation, self._transf_columns)
self._transf_cache = tf.to_array()
indices = ctx.vectors.created.next(step_size) # returns a slice
steps = indices_len(indices)
if steps == 0:
return self._return_run_step(self.state_blocked, steps_run=0)
vs = self.filter_columns(vectors, fix_loc(indices))
vs = vs.to_array()
res = np.matmul(vs, self._transf_cache)
if self.result is None:
dshape_ = dshape_projection(transformation,
self._transf_columns)
self.result = Table(self.generate_table_name('linear_map'),
dshape=dshape_, create=True)
self.result.append(res)
return self._return_run_step(self.next_state(ctx.vectors),
steps_run=steps)
```
#### File: progressivis/linalg/nexpr.py
```python
import numpy as np
from ..core.utils import indices_len, fix_loc, filter_cols
from ..table.module import TableModule
from ..table.table import Table
import numexpr as ne
def make_local(df, px):
arr = df.to_array()
result = {}
class _Aux: pass
aux = _Aux()
for i, n in enumerate(df.columns):
key = f'_{px}__{i}'
result[key] = arr[:, i]
setattr(aux, n, key)
return aux, result
class NumExprABC(TableModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ref_expr = self.expr
def reset(self):
if self.result is not None:
self.result.resize(0)
def run_step(self, run_number, step_size, howlong):
"""
"""
reset_all = False
for slot in self._input_slots.values():
if slot is None:
continue
if slot.updated.any() or slot.deleted.any():
reset_all = True
break
if reset_all:
for slot in self._input_slots.values():
slot.reset()
slot.update(run_number)
self.reset()
for slot in self._input_slots.values():
if slot is None:
continue
if not slot.has_buffered():
return self._return_run_step(self.state_blocked, steps_run=0)
if self.result is None:
if self.has_output_datashape("result"):
dshape_ = self.get_output_datashape("result")
else:
dshape_ = self.get_datashape_from_expr()
self.ref_expr = {k.split(":")[0]:v for (k, v) in self.expr.items()}
            self.result = Table(self.generate_table_name('num_expr'),
dshape=dshape_, create=True)
local_env = {}
vars_dict = {}
for n, sl in self._input_slots.items():
if n == '_params':
continue
step_size = min(step_size, sl.created.length())
data_ = sl.data()
if step_size == 0 or data_ is None:
return self._return_run_step(self.state_blocked, steps_run=0)
first_slot = None
for n, sl in self._input_slots.items():
if n == '_params':
continue
if first_slot is None:
first_slot = sl
indices = sl.created.next(step_size)
df = self.filter_columns(sl.data(), fix_loc(indices), n)
fobj, dict_ = make_local(df, n)
local_env.update(dict_)
vars_dict[n] = fobj
result = {}
steps = None
for c in self.result.columns:
col_expr_ = self.ref_expr[c]
col_expr_ = col_expr_.format(**vars_dict)
result[c] = ne.evaluate(col_expr_, local_dict=local_env)
if steps is None:
steps = len(result[c])
self.result.append(result)
return self._return_run_step(self.next_state(first_slot), steps_run=steps)
```
#### File: progressivis/stats/histogram1d.py
```python
from ..core.utils import indices_len, fix_loc, integer_types
from ..core.slot import SlotDescriptor
from ..table.module import TableModule
from ..table.table import Table
from ..utils.psdict import PsDict
from ..core.decorators import *
import numpy as np
import logging
logger = logging.getLogger(__name__)
class Histogram1D(TableModule):
"""
"""
parameters = [('bins', np.dtype(int), 128),
('delta', np.dtype(float), -5)] # 5%
inputs = [
SlotDescriptor('table', type=Table, required=True),
SlotDescriptor('min', type=PsDict, required=True),
SlotDescriptor('max', type=PsDict, required=True)
]
schema = "{ array: var * int32, min: float64, max: float64, time: int64 }"
def __init__(self, column, **kwds):
super(Histogram1D, self).__init__(dataframe_slot='table', **kwds)
self.column = column
self.total_read = 0
self.default_step_size = 1000
self._histo = None
self._edges = None
self._bounds = None
self._h_cnt = 0
self.result = Table(self.generate_table_name('Histogram1D'),
dshape=Histogram1D.schema,
chunks={'array': (16384, 128)},
create=True)
def reset(self):
self._histo = None
self._edges = None
self._bounds = None
self.total_read = 0
self._h_cnt = 0
if self.result:
self.result.resize(0)
def is_ready(self):
if self._bounds and self.get_input_slot('table').created.any():
return True
return super(Histogram1D, self).is_ready()
@process_slot("table", reset_cb="reset")
@process_slot("min", "max", reset_if=False)
@run_if_any
def run_step(self, run_number, step_size, howlong):
with self.context as ctx:
dfslot = ctx.table
min_slot = ctx.min
max_slot = ctx.max
if not (dfslot.created.any() or min_slot.has_buffered() or max_slot.has_buffered()):
logger.info('Input buffers empty')
return self._return_run_step(self.state_blocked, steps_run=0)
min_slot.clear_buffers()
max_slot.clear_buffers()
bounds = self.get_bounds(min_slot, max_slot)
if bounds is None:
logger.debug('No bounds yet at run %d', run_number)
return self._return_run_step(self.state_blocked, steps_run=0)
bound_min, bound_max = bounds
if self._bounds is None:
delta = self.get_delta(*bounds)
self._bounds = (bound_min - delta, bound_max + delta)
logger.info("New bounds at run %d: %s", run_number, self._bounds)
else:
(old_min, old_max) = self._bounds
delta = self.get_delta(*bounds)
if(bound_min < old_min or bound_max > old_max) \
or bound_min > (old_min + delta) or bound_max < (old_max - delta):
self._bounds = (bound_min - delta, bound_max + delta)
logger.info('Updated bounds at run %d: %s', run_number, self._bounds)
dfslot.reset()
dfslot.update(run_number)
min_slot.reset()
min_slot.update(run_number)
max_slot.reset()
max_slot.update(run_number)
self.reset()
return self._return_run_step(self.state_blocked, steps_run=0)
(curr_min, curr_max) = self._bounds
if curr_min >= curr_max:
logger.error('Invalid bounds: %s', self._bounds)
return self._return_run_step(self.state_blocked, steps_run=0)
input_df = dfslot.data()
indices = dfslot.created.next(step_size) # returns a slice or ... ?
steps = indices_len(indices)
logger.info('Read %d rows', steps)
self.total_read += steps
column = input_df[self.column]
column = column.loc[fix_loc(indices)]
bins = self._edges if self._edges is not None else self.params.bins
histo = None
if len(column) > 0:
histo, self._edges = np.histogram(column,
bins=bins,
range=[curr_min, curr_max],
density=False)
self._h_cnt += len(column)
if self._histo is None:
self._histo = histo
elif histo is not None:
self._histo += histo
values = {'array': [self._histo], 'min': [curr_min], 'max': [curr_max], 'time': [run_number]}
self.result['array'].set_shape((self.params.bins,))
self.result.append(values)
return self._return_run_step(self.next_state(dfslot), steps_run=steps)
def get_bounds(self, min_slot, max_slot):
min_df = min_slot.data()
if len(min_df) == 0 and self._bounds is None:
return None
min_ = min_df[self.column]
max_df = max_slot.data()
if len(max_df) == 0 and self._bounds is None:
return None
max_ = max_df[self.column]
return (min_, max_)
    def get_delta(self, min_, max_):
        delta = self.params['delta']
        extent = max_ - min_
        if delta < 0:
            return extent * delta / -100.0  # a negative delta means a percentage of the extent
        return delta
def get_histogram(self):
min_ = self._bounds[0] if self._bounds else None
max_ = self._bounds[1] if self._bounds else None
edges = self._edges
if edges is None:
edges = []
elif isinstance(edges, integer_types):
edges = [edges]
else:
edges = edges.tolist()
return {"edges": edges,
"values": self._histo.tolist() if self._histo else [],
"min": min_,
"max": max_}
def is_visualization(self):
return True
def get_visualization(self):
return "histogram1d"
def to_json(self, short=False):
json = super(Histogram1D, self).to_json(short)
if short:
return json
return self._hist_to_json(json)
def _hist_to_json(self, json):
json['histogram'] = self.get_histogram()
return json
```
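A minimal wiring sketch for `Histogram1D`, mirroring the pattern used by the `Histograms` visualization and the tests later in this document; the CSV path, the `'_1'` column name and the `Min`/`Max` imports are assumptions, not taken from this file.
```python
from progressivis import Scheduler, Print
from progressivis.io import CSVLoader
from progressivis.stats import Min, Max, Histogram1D

s = Scheduler()
csv = CSVLoader('data/bigfile.csv', index_col=False, header=None, scheduler=s)
mn = Min(scheduler=s)
mx = Max(scheduler=s)
mn.input.table = csv.output.result
mx.input.table = csv.output.result
histo = Histogram1D('_1', bins=64, scheduler=s)   # '_1' is an assumed column name
histo.input.table = csv.output.result
histo.input.min = mn.output.result
histo.input.max = mx.output.result
prt = Print(proc=print, scheduler=s)
prt.input.df = histo.output.result
# s.start() is a coroutine; run it with progressivis.core.aio as in the tests below
```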
#### File: progressivis/stats/random_table.py
```python
from collections import OrderedDict
import logging
import numpy as np
from ..utils.errors import ProgressiveError, ProgressiveStopIteration
from ..table.module import TableModule
from ..table.table import Table
from ..table.constant import Constant
from ..utils.psdict import PsDict
from ..core.utils import integer_types
logger = logging.getLogger(__name__)
RAND = np.random.rand
class RandomTable(TableModule):
"Random table generator module"
    def __init__(self, columns, rows=-1, random=RAND, dtype='float64', throttle=False, **kwds):
super(RandomTable, self).__init__(**kwds)
self.default_step_size = 1000
if isinstance(columns, integer_types):
self.columns = ["_%d"%i for i in range(1, columns+1)]
elif isinstance(columns, (list, np.ndarray)):
self.columns = columns
else:
raise ProgressiveError('Invalid type for columns')
self.rows = rows
self.random = random
if throttle and isinstance(throttle, integer_types+(float,)):
self.throttle = throttle
else:
self.throttle = False
dshape = ", ".join([f"{col}: {dtype}" for col in self.columns])
dshape = "{" + dshape + "}"
self.result = Table(self.generate_table_name('table'),
dshape=dshape,
create=True)
self.columns = self.result.columns
def is_source(self):
return True
def run_step(self, run_number, step_size, howlong):
if step_size == 0:
logger.error('Received a step_size of 0')
return self._return_run_step(self.state_ready, steps_run=0)
logger.info('generating %d lines', step_size)
if self.throttle:
step_size = np.min([self.throttle, step_size])
if self.rows >= 0 and (len(self.result)+step_size) > self.rows:
step_size = self.rows - len(self.result)
logger.info('truncating to %d lines', step_size)
if step_size <= 0:
raise ProgressiveStopIteration
values = OrderedDict()
for column in self.columns:
s = self.random(step_size)
values[column] = s
self.result.append(values)
if len(self.result) == self.rows:
next_state = self.state_zombie
elif self.throttle:
next_state = self.state_blocked
else:
next_state = self.state_ready
return self._return_run_step(next_state, steps_run=step_size)
class RandomDict(Constant):
def __init__(self, columns, **kwds):
keys = [f'_{i}' for i in range(1, columns+1)]
vals = np.random.rand(columns)
super().__init__(PsDict(dict(zip(keys, vals))), **kwds)
```
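A wiring sketch for `RandomTable` (assumptions: the class is imported from its file path above rather than a package re-export, and `Print` accepts a `proc` callback as in the tests below).
```python
from progressivis import Scheduler, Print
from progressivis.stats.random_table import RandomTable

s = Scheduler()
rnd = RandomTable(3, rows=10000, scheduler=s)   # creates columns _1, _2, _3
prt = Print(proc=print, scheduler=s)
prt.input.df = rnd.output.result
# Running the scheduler fills rnd.result with 10000 random rows, after which the
# module reaches state_zombie (see the next_state logic in run_step above).
```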
#### File: progressivis/storage/base.py
```python
from abc import ABCMeta, abstractmethod, abstractproperty
from contextlib import contextmanager
class StorageObject(metaclass=ABCMeta):
@abstractproperty
def name(self):
pass
@abstractproperty
def attrs(self):
pass
@abstractproperty
def __len__(self):
pass
class Attribute(metaclass=ABCMeta):
@abstractmethod
def __getitem__(self, name):
pass
@abstractmethod
def __setitem__(self, name, value):
pass
@abstractmethod
def __delitem__(self, name):
pass
@abstractmethod
def __len__(self):
pass
@abstractmethod
def __iter__(self):
pass
@abstractmethod
def __contains__(self, name):
pass
class DatasetFactory(StorageObject):
@abstractmethod
def create_dataset(self, name, shape=None, dtype=None, data=None, **kwds):
pass
@abstractmethod
def require_dataset(self, name, shape, dtype, exact=False, **kwds):
pass
@abstractmethod
def __getitem__(self, name):
pass
@abstractmethod
def __delitem__(self, name):
pass
@abstractmethod
def __contains__(self, name):
pass
class Group(DatasetFactory):
default = None
default_internal = None
@abstractmethod
def create_dataset(self, name, shape=None, dtype=None, data=None, **kwds):
pass
@abstractmethod
def require_dataset(self, name, shape, dtype, exact=False, **kwds):
pass
@abstractmethod
def require_group(self, name):
pass
@abstractmethod
def __getitem__(self, name):
pass
@abstractmethod
def __delitem__(self, name):
pass
@abstractmethod
def __contains__(self, name):
pass
    @staticmethod
    def close_all():
        pass
class Dataset(StorageObject):
@abstractproperty
def shape(self):
pass
@abstractproperty
def dtype(self):
pass
@abstractproperty
def maxshape(self):
pass
@abstractproperty
def fillvalue(self):
pass
@abstractproperty
def chunks(self):
pass
@abstractmethod
def resize(self, size, axis=None):
pass
@abstractproperty
def size(self):
pass
@abstractmethod
def __getitem__(self, args):
pass
@abstractmethod
def __setitem__(self, args, val):
pass
def read_direct(self, dest, source_sel=None, dest_sel=None):
dest[dest_sel] = self[source_sel]
class StorageEngine(Group):
_engines = dict()
default = None
def __init__(self, name, create_dataset_kwds=None):
# print('# creating storage engine %s'% name)
# import pdb; pdb.set_trace()
assert name not in StorageEngine._engines
self._name = name
StorageEngine._engines[name] = self
if StorageEngine.default is None:
StorageEngine.default = self.name
self._create_dataset_kwds = create_dataset_kwds or {}
@staticmethod
@contextmanager
def default_engine(engine):
if engine not in StorageEngine._engines:
            raise ValueError('Unknown storage engine %s' % engine)
saved = StorageEngine.default
try:
StorageEngine.default = engine
yield saved
finally:
StorageEngine.default = saved
@property
def create_dataset_kwds(self):
return self._create_dataset_kwds
@property
def name(self):
return self._name
def open(self, name, flags, **kwds):
pass
def close(self, name, flags, **kwds):
pass
def flush(self):
pass
@staticmethod
def lookup(engine):
default=StorageEngine._engines.get(StorageEngine.default)
return StorageEngine._engines.get(engine, default)
@staticmethod
def engines():
return StorageEngine._engines
```
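Usage sketch for the `default_engine` context manager above; it assumes an engine named `'mmap'` has been registered, as exercised by `test_00_storageengine.py` later in this document.
```python
from progressivis.storage.base import StorageEngine

# Temporarily make 'mmap' the default engine; tables created inside the block use it.
with StorageEngine.default_engine('mmap') as previous:
    engine = StorageEngine.lookup(StorageEngine.default)
    group = engine.require_group('demo')
# On exit, StorageEngine.default is restored to `previous`.
```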
#### File: progressivis/table/hist_index.py
```python
import operator
import logging
import numpy as np
from progressivis.core.bitmap import bitmap
from progressivis.core.slot import SlotDescriptor
from progressivis.core.utils import (slice_to_arange, fix_loc)
from .module import TableModule
from . import Table
from ..core.utils import indices_len
from . import TableSelectedView
APPROX = False
logger = logging.getLogger(__name__)
class _HistogramIndexImpl(object):
"Implementation part of Histogram Index"
# pylint: disable=too-many-instance-attributes
def __init__(self, column, table, e_min, e_max, nb_bin):
self.column = table[column]
self.e_min = e_min
self.e_max = e_max
self.bitmaps = None
self.bins = None
self._sampling_size = 1000
self._perm_deviation = 0.1 # permitted deviation
self._divide_threshold = 1000 # TODO: make it settable!
self._divide_coef = 5 # TODO: make it settable!
self._merge_threshold = 1000 # TODO: make it settable!
self._merge_coef = 5 # TODO: make it settable!
self._min_hist_size = 64
self._max_hist_size = 256
self._init_histogram(e_min, e_max, nb_bin)
self.update_histogram(created=table.index)
def _init_histogram(self, e_min, e_max, nb_bin):
self.bins = np.linspace(e_min, e_max, nb_bin, endpoint=True)
assert len(self.bins) == nb_bin
self.bitmaps = [bitmap() for _ in range(nb_bin+1)]
def _needs_division(self, size):
len_c = len(self.column)
len_b = len(self.bins)
mean = float(len_c)/len_b
if size < self._divide_threshold:
return False
return size > self._divide_coef*mean
# def __reshape__still_inconclusive_variant(self, i):
# "Change the bounds of the index if needed"
# prev_ = sum([len(bm) for bm in self.bitmaps[:i]])
# p_ = (prev_ + len(self.bitmaps[i])/2.0)/len(self.column) * 100
# v = self._tdigest.percentile(p_)
# try:
# assert self.bins[i-1] < v < self.bins[i]
# except:
# import pdb;pdb.set_trace()
# ids = np.array(self.bitmaps[i], np.int64)
# values = self.column.loc[ids]
# lower_bin = bitmap(ids[values < v])
# upper_bin = self.bitmaps[i] - lower_bin
# np.insert(self.bins, i, v)
# self.bitmaps.insert(i, lower_bin)
# self.bitmaps[i] = upper_bin
def show_histogram(self):
"Print the histogram on the display"
for i, bm in enumerate(self.bitmaps):
print(i, len(bm), "="*len(bm))
def find_bin(self, elt):
res = []
for i, bm in enumerate(self.bitmaps):
if elt in bm:
res.append(i)
return res # if len(res)>1: BUG
def _is_merging_required(self):
return len(self.bitmaps) > self._max_hist_size
def _is_mergeable_pair(self, bm1, bm2, merge_cnt):
if len(self.bitmaps) - merge_cnt < self._min_hist_size:
return False
len_c = len(self.column)
len_b = len(self.bins)
mean = float(len_c)/len_b
return len(bm1)+len(bm2) < max(self._merge_coef*mean,
self._merge_threshold)
def merge_once(self):
assert len(self.bins)+1 == len(self.bitmaps), "unexpected # of bins"
bins_1 = list(self.bins)
bins_1.append(None)
bin_tuples = list(zip(self.bitmaps, bins_1))
merge_cnt = 0
if len(bin_tuples) <= 2:
return merge_cnt
merged_tuples = []
prev_bm, prev_sep = bin_tuples[0]
for bm, sep in bin_tuples[1:]:
if self._is_mergeable_pair(prev_bm, bm, merge_cnt):
prev_bm = prev_bm | bm
prev_sep = sep
merge_cnt += 1
else:
merged_tuples.append((prev_bm, prev_sep))
prev_bm, prev_sep = bm, sep
assert prev_sep is None
merged_bitmaps, merged_bins = zip(*merged_tuples)
merged_bitmaps = list(merged_bitmaps) + [prev_bm]
self.bins = np.array(merged_bins)
self.bitmaps = merged_bitmaps
return merge_cnt
def reshape(self):
for i, bm in enumerate(self.bitmaps):
if self._needs_division(len(bm)):
self.divide_bin(i)
if self._is_merging_required():
self.merge_once()
def divide_bin(self, i):
"Change the bounds of the index if needed"
ids = np.array(self.bitmaps[i], np.int64)
if self._sampling_size*1.2 < len(ids):
samples = np.random.choice(ids, self._sampling_size, replace=False)
else:
samples = ids
s_vals = self.column.loc[samples]
v = np.median(s_vals)
if i >= len(self.bins):
assert self.bins[i-1] < v
else:
assert (self.bins[i-1] < v < self.bins[i]
if i > 0 else v < self.bins[i])
values = self.column.loc[ids]
lower_bin = bitmap(ids[values < v])
upper_bin = self.bitmaps[i] - lower_bin
lower_len = len(lower_bin)
upper_len = len(upper_bin)
t = len(ids)*self._perm_deviation
if abs(lower_len - upper_len) > t:
print("DIFF: ", lower_len, upper_len,
float(abs(lower_len - upper_len))/len(ids))
self.bins = np.insert(self.bins, i, v)
if i+1 >= len(self.bins):
assert self.bins[i-1] < self.bins[i]
else:
assert (self.bins[i-1] < self.bins[i] < self.bins[i+1] if i > 0
else self.bins[i] < self.bins[i+1])
self.bitmaps.insert(i, lower_bin)
self.bitmaps[i+1] = upper_bin
print('*', end='')
def _get_bin(self, val):
i = np.digitize(val, self.bins)
return self.bitmaps[int(i)]
def update_histogram(self, created, updated=(), deleted=()):
"Update the histogram index"
created = bitmap.asbitmap(created)
updated = bitmap.asbitmap(updated)
deleted = bitmap.asbitmap(deleted)
# if deleted:
# self._tdigest_is_valid = False
if deleted or updated:
to_remove = updated | deleted
for i, bm in enumerate(self.bitmaps):
self.bitmaps[i] = bm - to_remove
if created or updated:
to_add = created | updated
ids = np.array(to_add, np.int64)
values = self.column.loc[to_add]
bins = np.digitize(values, self.bins)
counts = np.bincount(bins)
for i in np.nonzero(counts)[0]:
bm = self.bitmaps[i]
selection = (bins == i) # boolean mask of values in bin i
bm.update(ids[selection]) # add them to the bitmap
def query(self, operator_, limit, approximate=APPROX): # blocking...
"""
Return the list of rows matching the query.
For example, returning all values less than 10 (< 10) would be
`query(operator.__lt__, 10)`
"""
pos = np.digitize(limit, self.bins)
detail = bitmap()
if not approximate:
ids = np.array(self.bitmaps[pos], np.int64)
values = self.column.loc[ids]
selected = ids[operator_(values, limit)]
detail.update(selected)
if operator_ in (operator.lt, operator.le):
for bm in self.bitmaps[:pos]:
detail.update(bm)
else:
for bm in self.bitmaps[pos + 1:]:
detail.update(bm)
return detail
def restricted_query(self, operator_, limit, only_locs,
approximate=APPROX): # blocking...
"""
Returns the subset of only_locs matching the query.
"""
only_locs = bitmap.asbitmap(only_locs)
pos = np.digitize(limit, self.bins)
detail = bitmap()
if not approximate:
ids = np.array(self.bitmaps[pos] & only_locs, np.int64)
values = self.column.loc[ids]
selected = ids[operator_(values, limit)]
detail.update(selected)
if operator_ in (operator.lt, operator.le):
for bm in self.bitmaps[:pos]:
detail.update(bm & only_locs)
else:
for bm in self.bitmaps[pos + 1:]:
detail.update(bm & only_locs)
return detail
def range_query(self, lower, upper, approximate=APPROX):
"""
Return the bitmap of all rows with values in range [`lower`, `upper`[
"""
if lower > upper:
lower, upper = upper, lower
pos = np.digitize([lower, upper], self.bins)
detail = bitmap()
if not approximate:
ids = np.array(self.bitmaps[pos[0]], np.int64)
values = self.column.loc[ids]
if pos[0] == pos[1]:
selected = ids[(lower <= values) & (values < upper)]
else:
selected = ids[lower <= values]
detail.update(selected)
ids = np.array(self.bitmaps[pos[1]], np.int64)
values = self.column.loc[ids]
selected = ids[values < upper]
detail.update(selected)
for bm in self.bitmaps[pos[0] + 1:pos[1]]:
detail.update(bm)
return detail
def range_query_aslist(self, lower, upper, approximate=APPROX):
"""
Return the list of bitmaps with values in range [`lower`, `upper`[
"""
if lower > upper:
lower, upper = upper, lower
pos = np.digitize([lower, upper], self.bins)
detail = bitmap()
res = self.bitmaps[pos[0] + 1:pos[1]]
if not approximate:
ids = np.array(self.bitmaps[pos[0]], np.int64)
values = self.column.loc[ids]
if pos[0] == pos[1]:
selected = ids[(lower <= values) & (values < upper)]
else:
selected = ids[lower <= values]
detail.update(selected)
ids = np.array(self.bitmaps[pos[1]], np.int64)
values = self.column.loc[ids]
selected = ids[values < upper]
detail.update(selected)
res.append(detail)
return res
def restricted_range_query(self, lower, upper, only_locs,
approximate=APPROX):
"""
Return the bitmap of only_locs rows in range [`lower`, `upper`[
"""
if lower > upper:
lower, upper = upper, lower
only_locs = bitmap.asbitmap(only_locs)
pos = np.digitize([lower, upper], self.bins)
detail = bitmap()
if not approximate:
ids = np.array(self.bitmaps[pos[0]] & only_locs, np.int64)
values = self.column.loc[ids]
if pos[0] == pos[1]:
selected = ids[(lower <= values) & (values < upper)]
else:
selected = ids[lower <= values]
detail.update(selected)
ids = np.array(self.bitmaps[pos[1]] & only_locs, np.int64)
values = self.column.loc[ids]
selected = ids[values < upper]
detail.update(selected)
for bm in self.bitmaps[pos[0] + 1:pos[1]]:
detail.update(bm & only_locs)
return detail
def get_min_bin(self):
if self.bitmaps is None:
return None
for bm in self.bitmaps:
if bm:
return bm
return None
def get_max_bin(self):
if self.bitmaps is None:
return None
for bm in reversed(self.bitmaps):
if bm:
return bm
return None
class HistogramIndex(TableModule):
"""
    Compute and maintain a histogram index
"""
parameters = [
('bins', np.dtype(int), 126), # actually 128 with "-inf" and "inf"
('init_threshold', int, 1),
]
inputs = [SlotDescriptor('table', type=Table, required=True)]
outputs = [
SlotDescriptor('min_out', type=Table, required=False),
SlotDescriptor('max_out', type=Table, required=False)
]
def __init__(self, column, **kwds):
super(HistogramIndex, self).__init__(**kwds)
self.column = column
self._impl = None # will be created when the init_threshold is reached
self.selection = bitmap() # will be filled when the table is read
        # so that realistic initial values for min and max are available
self.input_module = None
self.input_slot = None
self._input_table = None
self._min_table = None
self._max_table = None
def compute_bounds(self, input_table):
values = input_table[self.column]
return values.min(), values.max()
def get_data(self, name):
if name in ('min_out', 'max_out'):
if self._impl is None:
return None
if self._input_table is None:
return None
if name == 'min_out':
if self.get_min_bin() is None:
return None
if self._min_table is None:
self._min_table = TableSelectedView(self._input_table,
bitmap([]))
self._min_table.selection = self.get_min_bin()
return self._min_table
if name == 'max_out':
if self.get_max_bin() is None:
return None
if self._max_table is None:
self._max_table = TableSelectedView(self._input_table,bitmap([]))
self._max_table.selection = self.get_max_bin()
return self._max_table
return super(HistogramIndex, self).get_data(name)
def get_min_bin(self):
if self._impl is None:
return None
return self._impl.get_min_bin()
def get_max_bin(self):
if self._impl is None:
return None
return self._impl.get_max_bin()
def run_step(self, run_number, step_size, howlong):
input_slot = self.get_input_slot('table')
# input_slot.update(run_number)
steps = 0
input_table = input_slot.data()
self._input_table = input_table
if input_table is None \
or len(input_table) < self.params.init_threshold:
# there are not enough rows. it's not worth building an index yet
return self._return_run_step(self.state_blocked, steps_run=0)
if self._impl is None:
input_slot.reset()
input_slot.update(run_number)
input_slot.clear_buffers()
bound_min, bound_max = self.compute_bounds(input_table)
self._impl = _HistogramIndexImpl(self.column,
input_table,
bound_min, bound_max,
self.params.bins)
self.selection = bitmap(input_table.index)
self.result = TableSelectedView(input_table, self.selection)
return self._return_run_step(self.state_blocked,
len(self.selection))
else:
            # Maybe not always, or should the implementation decide?
self._impl.reshape()
deleted = None
if input_slot.deleted.any():
deleted = input_slot.deleted.next(as_slice=False)
# steps += indices_len(deleted) # deleted are constant time
steps = 1
deleted = fix_loc(deleted)
self.selection -= deleted
created = None
if input_slot.created.any():
created = input_slot.created.next(step_size, as_slice=False)
created = fix_loc(created)
steps += indices_len(created)
self.selection |= created
updated = None
if input_slot.updated.any():
updated = input_slot.updated.next(step_size, as_slice=False)
updated = fix_loc(updated)
steps += indices_len(updated)
if steps == 0:
return self._return_run_step(self.state_blocked, steps_run=0)
input_table = input_slot.data()
# self._table = input_table
self._impl.update_histogram(created, updated, deleted)
self.result.selection = self.selection
return self._return_run_step(
self.next_state(input_slot), steps_run=steps)
def _eval_to_ids(self, operator_, limit, input_ids=None):
input_slot = self.get_input_slot('table')
table_ = input_slot.data()
if input_ids is None:
input_ids = table_.index
else:
input_ids = fix_loc(input_ids)
x = table_[self.column].loc[input_ids]
mask_ = operator_(x, limit)
arr = slice_to_arange(input_ids)
return bitmap(arr[np.nonzero(mask_)[0]])
def query(self, operator_, limit, approximate=APPROX):
"""
Return the list of rows matching the query.
For example, returning all values less than 10 (< 10) would be
`query(operator.__lt__, 10)`
"""
if self._impl:
return self._impl.query(operator_, limit, approximate)
        # there is no histogram because init_threshold hasn't been reached yet
# so we query the input table directly
return self._eval_to_ids(operator_, limit)
def restricted_query(self, operator_, limit, only_locs, approximate=APPROX):
"""
Return the list of rows matching the query.
For example, returning all values less than 10 (< 10) would be
`query(operator.__lt__, 10)`
"""
if self._impl:
return self._impl.restricted_query(operator_, limit, only_locs,
approximate)
        # there is no histogram because init_threshold hasn't been reached yet
# so we query the input table directly
return self._eval_to_ids(operator_, limit, only_locs)
def range_query_aslist(self, lower, upper, approximate=APPROX):
"""
Return the list of rows with values in range [`lower`, `upper`[
"""
if self._impl:
return self._impl.range_query_aslist(lower, upper, approximate)
return None
def range_query(self, lower, upper, approximate=APPROX):
"""
Return the list of rows with values in range [`lower`, `upper`[
"""
if self._impl:
return self._impl.range_query(lower, upper, approximate)
        # there is no histogram because init_threshold hasn't been reached yet
# so we query the input table directly
return (self._eval_to_ids(operator.__lt__, upper) & # optimize later
self._eval_to_ids(operator.__ge__, lower))
def restricted_range_query(self, lower, upper, only_locs,
approximate=APPROX):
"""
Return the list of rows with values in range [`lower`, `upper`[
among only_locs
"""
if self._impl:
return self._impl.restricted_range_query(lower, upper, only_locs,
approximate)
        # there is no histogram because init_threshold hasn't been reached yet
# so we query the input table directly
return (self._eval_to_ids(operator.__lt__, upper, only_locs) &
# optimize later
self._eval_to_ids(operator.__ge__, lower, only_locs))
def create_dependent_modules(self, input_module, input_slot, **kwds):
self.input_module = input_module
self.input_slot = input_slot
hist_index = self
hist_index.input.table = input_module.output[input_slot]
# hist_index.input.min = min_.output.result
# hist_index.input.max = max_.output.result
return hist_index
```
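`update_histogram` above relies on a vectorized numpy idiom: `np.digitize` assigns each value to a bucket, then `np.bincount` plus boolean masks group the row ids per bucket. A standalone illustration with plain arrays (no progressivis objects involved):
```python
import numpy as np

bins = np.array([0.0, 0.25, 0.5, 0.75, 1.0])    # separators -> len(bins)+1 buckets
ids = np.array([10, 11, 12, 13, 14], np.int64)  # row ids being inserted
values = np.array([0.05, 0.30, 0.31, 0.90, 1.50])

buckets = np.digitize(values, bins)             # bucket index for every value
counts = np.bincount(buckets)                   # population of each bucket
for i in np.nonzero(counts)[0]:
    selection = buckets == i                    # mask of the rows landing in bucket i
    print(i, ids[selection].tolist())           # these ids would be added to bitmaps[i]
```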
#### File: progressivis/table/liteselect.py
```python
from ..core.utils import indices_len
from ..core.slot import SlotDescriptor
from .module import TableModule
from .table import Table
from ..core.bitmap import bitmap
from . import TableSelectedView
import logging
logger = logging.getLogger(__name__)
class LiteSelect(TableModule):
inputs = [SlotDescriptor('table', type=Table, required=True),
SlotDescriptor('select', type=bitmap, required=True)]
def __init__(self, **kwds):
super(LiteSelect, self).__init__(**kwds)
self.default_step_size = 1000
def run_step(self, run_number, step_size, howlong):
step_size = 1000
table_slot = self.get_input_slot('table')
table = table_slot.data()
# table_slot.update(run_number,
# buffer_created=False,
# buffer_updated=True,
# buffer_deleted=False,
# manage_columns=False)
select_slot = self.get_input_slot('select')
# select_slot.update(run_number,
# buffer_created=True,
# buffer_updated=False,
# buffer_deleted=True)
steps = 0
if self._table is None:
self._table = TableSelectedView(table, bitmap([]))
if select_slot.deleted.any():
indices = select_slot.deleted.next(step_size, as_slice=False)
s = indices_len(indices)
print("LITESELECT: -", s)
logger.info("deleting %s", indices)
self._table.selection -= bitmap.asbitmap(indices)
# step_size -= s//2
if step_size > 0 and select_slot.created.any():
indices = select_slot.created.next(step_size, as_slice=False)
s = indices_len(indices)
logger.info("creating %s", indices)
steps += s
# step_size -= s
self._table.selection |= bitmap.asbitmap(indices)
return self._return_run_step(self.next_state(select_slot),
steps_run=steps)
```
#### File: progressivis/table/merge_dict.py
```python
from progressivis.core.utils import Dialog
from progressivis.core.slot import SlotDescriptor
from ..table.module import TableModule
from ..utils.psdict import PsDict
class MergeDict(TableModule):
"""
    Binary join module to join two dicts and return a third one.
Slots:
first : Table module producing the first dict to join
second : Table module producing the second dict to join
Args:
kwds : argument to pass to the join function
"""
inputs = [SlotDescriptor('first', type=PsDict, required=True),
SlotDescriptor('second', type=PsDict, required=True)]
def __init__(self, **kwds):
super().__init__(**kwds)
# self.join_kwds = self._filter_kwds(kwds, join)
self._dialog = Dialog(self)
def run_step(self, run_number, step_size, howlong):
first_slot = self.get_input_slot('first')
# first_slot.update(run_number)
second_slot = self.get_input_slot('second')
first_dict = first_slot.data()
second_dict = second_slot.data()
if first_dict is None or second_dict is None:
return self._return_run_step(self.state_blocked, steps_run=0)
# second_slot.update(run_number)
first_slot.created.next()
second_slot.created.next()
first_slot.updated.next()
second_slot.updated.next()
first_slot.deleted.next()
second_slot.deleted.next()
if self.result is None:
self.result = PsDict(**first_dict, **second_dict)
else:
self.result.update(first_dict)
self.result.update(second_dict)
return self._return_run_step(self.next_state(first_slot), steps_run=1)
```
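A wiring sketch for `MergeDict`; it assumes `Constant` (imported in `random_table.py` above) simply exposes the dict it was given on its `result` output.
```python
from progressivis import Scheduler, Print
from progressivis.table.constant import Constant
from progressivis.table.merge_dict import MergeDict
from progressivis.utils.psdict import PsDict

s = Scheduler()
first = Constant(PsDict(a=1, b=2), scheduler=s)
second = Constant(PsDict(c=3), scheduler=s)
merge = MergeDict(scheduler=s)
merge.input.first = first.output.result
merge.input.second = second.output.result
prt = Print(proc=print, scheduler=s)
prt.input.df = merge.output.result
# Once the scheduler has run, merge.result should hold {'a': 1, 'b': 2, 'c': 3}.
```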
#### File: progressivis/table/paging_helper.py
```python
from .table import Table
import numpy as np
from progressivis.core.utils import remove_nan
class PagingHelper:
def __init__(self, tbl):
self._table = tbl
self._index = tbl.index
if not isinstance(tbl, Table):
self._index = np.array(list(self._table.index))
elif not tbl.is_identity:
self._index = self._table.index
def get_page(self, start, end):
ret = []
columns = self._table.columns
for i in self._index[start:end]:
row = [i]
for name in columns:
col = self._table[name]
row.append(remove_nan(col.loc[i]))
ret.append(row)
return ret
```
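A small sketch of `PagingHelper` on an in-memory `Table`; the `Table` constructor call follows `test_00_storageengine.py` below, and relying on the default storage engine is an assumption.
```python
from progressivis.table.table import Table
from progressivis.table.paging_helper import PagingHelper

t = Table('paging_demo', dshape='{a: int64, b: real}',
          data={'a': list(range(100)), 'b': [i / 10 for i in range(100)]})
helper = PagingHelper(t)
page = helper.get_page(0, 10)   # rows 0..9, each returned as [index, a, b]
```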
#### File: progressivis/table/paste.py
```python
"Binary Join module."
from progressivis.core.slot import SlotDescriptor
from .table import Table
from .module import TableModule
from .join import join
from .dshape import dshape_join
from collections import OrderedDict
class Paste(TableModule):
"""
Binary join module to join two tables and return a third one.
Slots:
first : Table module producing the first table to join
second : Table module producing the second table to join
Args:
kwds : argument to pass to the join function
"""
inputs = [SlotDescriptor('first', type=Table, required=True),
SlotDescriptor('second', type=Table, required=True)]
def __init__(self, **kwds):
super(Paste, self).__init__(**kwds)
self.join_kwds = self._filter_kwds(kwds, join)
def run_step(self, run_number, step_size, howlong):
first_slot = self.get_input_slot('first')
# first_slot.update(run_number)
second_slot = self.get_input_slot('second')
first_table = first_slot.data()
second_table = second_slot.data()
if first_table is None or second_table is None:
return self._return_run_step(self.state_blocked, steps_run=0)
# second_slot.update(run_number)
if first_slot.deleted.any() or second_slot.deleted.any():
first_slot.reset()
second_slot.reset()
if self.result is not None:
self.result.resize(0)
first_slot.update(run_number)
second_slot.update(run_number)
first_slot.created.next(step_size)
second_slot.created.next(step_size)
first_slot.updated.next(step_size)
second_slot.updated.next(step_size)
if self.result is None:
dshape, rename = dshape_join(first_table.dshape,
second_table.dshape)
self.result = Table(name=None, dshape=dshape)
if len(first_table) == 0 or len(second_table) == 0:
return self._return_run_step(self.state_blocked, steps_run=0)
col_0 = first_table.columns[0]
col_1 = second_table.columns[0]
if len(self.result) == 0:
self.result.append(OrderedDict([(col_0, first_table.last(col_0)),
(col_1, second_table.last(col_1))]),
indices=[0])
else:
assert len(self.result) == 1
if first_table.last(col_0) != self.result.last(col_0):
self.result[col_0].loc[0] = first_table.last(col_0)
if second_table.last(col_1) != self.result.last(col_1):
self.result[col_1].loc[0] = second_table.last(col_1)
return self._return_run_step(self.next_state(first_slot), steps_run=1)
```
#### File: progressivis/table/reduce.py
```python
from .nary import NAry
from progressivis import Scheduler
class Reduce(NAry):
"Reduce binary modules over multiple inputs"
# def __init__(self, binary_module, left_in, right_in, outp, **kwds):
# super(Reduce, self).__init__(**kwds)
# self._binary_module = binary_module
# self._left_in = left_in
# self._right_in = right_in
# self._outp = outp
# self.binary_module_kwds = self._filter_kwds(kwds,
# binary_module.__init__)
# self.out_module = None
# def _expand(self):
# "Expand the Reduce module into several binary modules"
# slots = self.get_input_slot_multiple()
# if len(slots) < 2:
# raise ValueError("Reduce needs at least two unputs")
# prev_slot = slots[0]
# with self.scheduler():
# for slot in slots[1:]:
# bin_mod = self._binary_module(**self.binary_module_kwds)
# bin_mod.input[self._left_in] = prev_slot
# bin_mod.input[self._right_in] = slot
# prev_slot = bin_mod.output[self._outp]
# self.out_module = bin_mod
# return bin_mod
# def run_step(self, run_number, step_size, howlong):
# if self.out_module is None:
# self._expand()
# if self._table is None:
# self._table = self._out_module.table()
# return self._return_run_step(self.state_blocked, steps_run=0)
@staticmethod
def expand(binary_module, left_in, right_in, outp, slots,
**binary_module_kwds):
if len(slots) < 2:
raise ValueError("Reduce needs at least two unputs")
scheduler = binary_module_kwds.get("scheduler")
if scheduler is None:
scheduler = Scheduler.default
binary_module_kwds["scheduler"] = scheduler
prev_slot = slots[0]
with scheduler:
for slot in slots[1:]:
bin_mod = binary_module(**binary_module_kwds)
bin_mod.input[left_in] = prev_slot
bin_mod.input[right_in] = slot
prev_slot = bin_mod.output[outp]
return bin_mod
```
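A sketch of `Reduce.expand`, using `Paste` (defined above) as the binary module purely for illustration; the `RandomTable` sources and the `result` slot names are assumptions based on the other files in this document.
```python
from progressivis import Scheduler
from progressivis.stats.random_table import RandomTable
from progressivis.table.paste import Paste
from progressivis.table.reduce import Reduce

s = Scheduler()
sources = [RandomTable(2, rows=1000, scheduler=s) for _ in range(3)]
slots = [m.output.result for m in sources]
last = Reduce.expand(Paste, 'first', 'second', 'result', slots, scheduler=s)
# `last` is the final Paste module; its 'result' output chains all sources pairwise.
```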
#### File: progressivis/table/tablechanges_base.py
```python
from abc import ABCMeta, abstractmethod
class BaseChanges(metaclass=ABCMeta):
"Base class for object keeping track of changes in a Table"
def __str__(self):
        return str(type(self))
@abstractmethod
def add_created(self, locs):
"Add ids of items created in the Table"
pass
@abstractmethod
def add_updated(self, locs):
"Add ids of items updated in the Table"
pass
@abstractmethod
def add_deleted(self, locs):
"Add ids of items deleted from the Table"
pass
@abstractmethod
def compute_updates(self, last, now, mid, cleanup=True):
"Compute and return the list of changes as an IndexUpdate or None"
return None
```
#### File: progressivis/utils/errors.py
```python
class ProgressiveError(Exception):
"Errors from ProgressiVis."
def __init__(self, message=None, details=None):
self.message = message
self.details = details
class ProgressiveStopIteration(Exception):
"Stop Iteration for coroutines"
def __init__(self, message=None, details=None):
self.message = message
self.details = details
```
#### File: progressivis/vis/histograms.py
```python
import logging
import numbers
import numpy as np
from progressivis.table.nary import NAry
from progressivis.core.slot import SlotDescriptor
from progressivis.stats import Histogram1D
from progressivis.table.table import BaseTable
logger = logging.getLogger(__name__)
class Histograms(NAry):
"Visualize a table with multiple histograms"
parameters = [('bins', np.dtype(int), 128),
('delta', np.dtype(float), -5)] # 5%
inputs = [SlotDescriptor('min', type=BaseTable, required=True),
SlotDescriptor('max', type=BaseTable, required=True)]
outputs = [SlotDescriptor('min', type=BaseTable, required=False),
SlotDescriptor('max', type=BaseTable, required=False)]
def __init__(self, columns=None, **kwds):
super(Histograms, self).__init__(**kwds)
self.default_step_size = 1
self._columns = columns
self._histogram = {}
def table(self):
"Return the table"
return self.get_input_slot('table').data()
def get_data(self, name):
if name == 'min':
return self.get_input_slot('min').data()
if name == 'max':
return self.get_input_slot('max').data()
return super(Histograms, self).get_data(name)
def predict_step_size(self, duration):
return 1
def run_step(self, run_number, step_size, howlong):
dfslot = self.get_input_slot('table')
input_df = dfslot.data()
# dfslot.update(run_number)
dfslot.clear_buffers()
col_changes = dfslot.column_changes
if col_changes is not None:
self._create_columns(col_changes.created, input_df)
self._delete_columns(col_changes.deleted)
return self._return_run_step(self.state_blocked, steps_run=1)
def _create_columns(self, columns, df):
bins = self.params.bins
delta = self.params.delta # crude
inp = self.get_input_module('table')
minmod = self.get_input_module('min')
maxmod = self.get_input_module('max')
for column in columns:
logger.debug('Creating histogram1d %s', column)
dtype = df[column].dtype
if not np.issubdtype(dtype, numbers.Number):
# only create histograms for number columns
continue
histo = Histogram1D(group=self.name, column=column,
bins=bins, delta=delta,
                                scheduler=self.scheduler())
histo.input.table = inp.output.result
histo.input.min = minmod.output.result
histo.input.max = maxmod.output.result
self.input.table = histo.output._trace # will become table.1 ...
self._histogram[column] = histo
def _delete_columns(self, columns):
for column in columns:
logger.debug('Deleting histogram1d %s', column)
histo = self._histogram[column]
del self._histogram[column]
histo.destroy()
def is_visualization(self):
return True
def get_visualization(self):
return "histograms"
def to_json(self, short=False):
json = super(Histograms, self).to_json(short)
if short:
return json
return self._histograms_to_json(json)
def _histograms_to_json(self, json):
histo_json = {}
for (column, value) in self._histogram.items():
column = str(column)
histo_json[column] = value.get_histogram()
json['histograms'] = histo_json
return json
```
#### File: progressivis/scripts/svmlight2csv.py
```python
import click
from sys import stdin
@click.command()
@click.option('--n_samples', default=10, help='number of samples')
@click.option('--n_features', default=3, help='number of features')
@click.option('--with_labels', default="class", help='if not empty=>label col')
@click.option('--with_header', default="_", help='if not empty=>header pattern')
def main(n_samples, n_features, with_labels, with_header):
if with_header:
patt = f"{with_header}{{}}"
header = ','.join([patt.format(i) for i in range(n_features)])
if with_labels:
header += f',{with_labels}'
print(header)
keys_ = [str(i) for i in range(n_features)]
for i, line in zip(range(n_samples), stdin):
lab, *kw = line.split(' ')
pairs = [elt.strip().split(':') for elt in kw]
dict_ = dict(pairs)
s = ','.join([dict_.get(k, '0') for k in keys_])
print(s, end='')
if with_labels:
print(f',{lab}')
else:
print('')
#print(lab)
if __name__ == '__main__':
main()
```
#### File: stool/examples/make_load_csv_reference.py
```python
import pandas as pd
import csv
from progressivis import Scheduler
from progressivis.io import CSVLoader
import sys
import subprocess
import glob
import dask.dataframe as dd
from progressivis.core.utils import RandomBytesIO
from stool import BenchmarkCase, bench
import unittest
GIGA = 1000000000
class BenchLoadCsv(BenchmarkCase):
def __init__(self, testMethod='runTest'):
super(BenchLoadCsv, self).__init__(testMethod)
self.nb_step = 8
self.random_file = None
def tearDown(self):
for d in glob.glob('/tmp/progressivis_*'):
subprocess.call(['/bin/rm','-rf', d])
def setUp(self):
self.set_step_header("Gigabytes")
def setUpStep(self, step):
self.set_step_info("{} Gb".format(step))
def tearDownStep(self, step):
pass
@bench(name="Nop")
def none_read_csv(self):
for _ in RandomBytesIO(cols=30, size=self.current_step*GIGA):
pass
@bench(name="Progressivis", corrected_by="Nop")
def p10s_read_csv(self):
s=Scheduler()
module=CSVLoader(RandomBytesIO(cols=30, size=self.current_step*GIGA), index_col=False, header=None, scheduler=s)
module.start()
@bench(name="Pandas", corrected_by="Nop")
def pandas_read_csv(self):
pd.read_csv(RandomBytesIO(cols=30, size=self.current_step*GIGA))
#@bench(name="Dask", corrected_by="Nop")
#def dask_read_csv(self):
# dd.read_csv(RandomBytesIO(cols=30, size=self.current_step*GIGA)).compute()
@bench(name="Naive", corrected_by="Nop")
def naive_read_csv(self):
res = {}
reader = RandomBytesIO(cols=30, size=self.current_step*GIGA)
first = next(reader)
for i, cell in enumerate(first.split(',')):
res[i] = [float(cell)]
for row in reader:
for i, cell in enumerate(row.split(',')):
res[i].append(float(cell))
return res
def runTest(self):
self.runBench()
self.save(db_name='bench_refs.db', name='load_csv')
if __name__ == '__main__':
unittest.main()
```
#### File: progressivis/stool/main.py
```python
from .case import BenchmarkCase
from .utils import banner, print_banner, BenchEnv
def main():
import argparse
import sys
from tabulate import tabulate
parser = argparse.ArgumentParser()
parser.add_argument("--db", help="measurements database", nargs=1,
required=True)
parser.add_argument("--bench", help="bench to visualize", nargs=1,
required=False)
parser.add_argument("--cases", help="case(s) to visualize", nargs='+',
required=False)
parser.add_argument("--summary", help="List of cases in the DB",
action='store_const', const=42, required=False)
parser.add_argument("--plot", help="Plot measurements", #action='store_const',
nargs=1, required=False, choices=['min', 'max', 'mean', 'all'])
parser.add_argument("--pstats", help="Profile stats for: case[:first] | case:last | case:step", #action='store_const',
nargs=1, required=False)
parser.add_argument("--no-corr", help="No correction", action='store_const',
const=1, required=False)
args = parser.parse_args()
db_name = args.db[0]
if args.plot:
plot_opt = args.plot[0]
if args.bench:
bench_name = args.bench[0]
else:
benv = BenchEnv(db_name=db_name)
bench_name = benv.bench_list[0]
if args.summary:
bench = BenchmarkCase.load(db_name=db_name, name=bench_name)
print("List of cases: {}".format(", ".join(bench.case_names)))
sys.exit(0)
corr = True
if args.no_corr:
corr = False
bench = BenchmarkCase.load(db_name=db_name, name=bench_name)
if args.cases:
cases = args.cases
print(cases)
else:
cases = bench.case_names
print_banner("Cases: {}".format(" ".join(cases)))
for case in cases:
print_banner("Case: {}".format(case))
df = bench.to_df(case=case, corrected=corr)
print(tabulate(df, headers='keys', tablefmt='psql'))
if args.plot:
bench.plot(cases=cases, corrected=corr, plot_opt=plot_opt)
#bench.prof_stats("P10sH5MinMax")
#dump_table('measurement_tbl', 'prof.db')
if args.pstats:
case_step = args.pstats[0]
if ':' not in case_step:
case = case_step
step = 'first'
else:
case, step = case_step.split(':', 1)
bench.prof_stats(case, step)
```
#### File: progressivis/tests/test_00_storageengine.py
```python
from . import ProgressiveTest, skip
from progressivis.storage.base import StorageEngine, Group, Dataset #, Attribute
from progressivis.table.table import Table
import numpy as np
@skip
class TestStorageEngine(ProgressiveTest):
def test_storage_engine(self):
e = StorageEngine.default
self.assertIsNotNone(e)
se = StorageEngine.lookup(e)
g = se['/']
self.assertIsNotNone(g)
self.assertIsInstance(g, Group)
g2 = g.create_group('g2')
self.assertIsNotNone(g2)
self.assertIsInstance(g2, Group)
d1 = g.create_dataset('d1', shape=(10,), dtype=np.int32)
self.assertIsNotNone(d1)
self.assertIsInstance(d1, Dataset)
def test_storage_engines(self):
print('Engines detected: ', list(StorageEngine.engines().keys()))
for e in StorageEngine.engines():
s = StorageEngine.lookup(e)
self.assertIsNotNone(s)
g = s['/']
self.assertIsNotNone(g)
self.assertIsInstance(g, Group)
g2 = g.create_group('g_'+e)
self.assertIsNotNone(g2)
self.assertIsInstance(g2, Group)
d1 = g.create_dataset('d_'+e, shape=(10,), dtype=np.int32)
self.assertIsNotNone(d1)
self.assertIsInstance(d1, Dataset)
arr = d1[:]
self.assertIsInstance(arr, np.ndarray)
self.assertEqual(len(arr), 10)
self.assertEqual(arr.dtype, np.int32)
s = StorageEngine.lookup(e)
group = s.require_group('table')
t = self._create_table(e, group)
self.assertEqual(t.storagegroup, group)
# for e in StorageEngine.engines():
# with StorageEngine.default_engine(e) as _:
# t = self._create_table(None)
# self.assertEqual(t.storagegroup, e)
def _create_table(self, storageengine, group):
if storageengine == "mmap":
t = Table('table_'+str(storageengine),
dshape='{a: int64, b: real}',
data={'a': [1,2,3], 'b': [0.1, 0.2, 0.3]},
storagegroup=group)
else:
t = Table('table_'+str(storageengine),
dshape='{a: int64, b: real, c: string}',
data={'a': [1,2,3], 'b': [0.1, 0.2, 0.3], 'c': [u'one', u'two', u'three']},
storagegroup=group)
self.assertEqual(len(t), 3)
return t
```
#### File: progressivis/tests/test_01_dataflow.py
```python
from progressivis import Print
from progressivis.io import CSVLoader
from progressivis.stats import Min, Max
from progressivis.datasets import get_dataset
from progressivis.core import aio
from . import ProgressiveTest
class TestDataflow(ProgressiveTest):
def test_dataflow_0(self):
scheduler = self.scheduler()
saved_inputs = None
saved_outputs = None
with scheduler as dataflow:
csv = CSVLoader(get_dataset('smallfile'), name='csv',
index_col=False, header=None,
scheduler=scheduler)
self.assertIs(scheduler['csv'], csv)
self.assertEqual(dataflow.validate_module(csv), [])
m = Min(name="min", scheduler=scheduler)
self.assertIs(dataflow[m.name], m)
self.assertEqual(dataflow.validate_module(m),
['Input slot "table" missing in module "min"'])
prt = Print(proc=self.terse,
name='print',
scheduler=scheduler)
self.assertIs(dataflow[prt.name], prt)
self.assertEqual(dataflow.validate_module(prt),
['Input slot "df" missing in module "print"'])
m.input.table = csv.output.result
prt.input.df = m.output.result
self.assertEqual(len(dataflow), 3)
self.assertEqual(dataflow.dir(), ['csv', 'min', 'print'])
errors = dataflow.validate()
self.assertEqual(errors, [])
deps = dataflow.order_modules()
self.assertEqual(deps, ['csv', m.name, prt.name])
saved_inputs = dataflow.inputs
saved_outputs = dataflow.outputs
# dataflow.__exit__() is called here
# print('Old modules:', end=' ')
# pprint(scheduler._modules)
scheduler._update_modules() # force modules in the main loop
# print('New modules:', end=' ')
# pprint(scheduler.modules())
with scheduler as dataflow:
# nothing should change when nothing is modified in dataflow
self.assertEqual(len(dataflow), 3)
deps = dataflow.order_modules()
self.assertEqual(deps, ['csv', m.name, prt.name])
self.assertEqual(dataflow.inputs, saved_inputs)
self.assertEqual(dataflow.outputs, saved_outputs)
scheduler._update_modules() # force modules in the main loop
with scheduler as dataflow:
dataflow.remove_module(prt)
self.assertEqual(len(dataflow), 2)
deps = dataflow.order_modules()
self.assertEqual(deps, ['csv', m.name])
# pprint(dataflow.inputs)
# pprint(dataflow.outputs)
# print('Old modules:')
# pprint(scheduler._new_modules)
scheduler._update_modules() # force modules in the main loop
# print('New modules:')
# pprint(scheduler.modules())
with scheduler as dataflow:
self.assertEqual(len(dataflow), 2)
deps = dataflow.order_modules()
self.assertEqual(deps, ['csv', m.name])
prt = Print(proc=self.terse,
name="print",
scheduler=scheduler)
self.assertIs(dataflow[prt.name], prt)
self.assertEqual(dataflow.validate_module(prt),
['Input slot "df" missing in module "print"'])
prt.input.df = m.output.result
scheduler._update_modules() # force modules in the main loop
def test_dataflow_1_dynamic(self):
scheduler = self.scheduler(clean=True)
csv = CSVLoader(get_dataset('bigfile'), name='csv',
index_col=False, header=None,
scheduler=scheduler)
m = Min(name="min", scheduler=scheduler)
prt = Print(proc=self.terse,
name='print_min',
scheduler=scheduler)
m.input.table = csv.output.result
prt.input.df = m.output.result
started = False
def proc(x):
nonlocal started
print("proc max called")
started = True
async def _add_max(csv, scheduler, proc):
await aio.sleep(2)
with scheduler:
print('adding new modules')
m = Max(name="max", scheduler=scheduler)
prt = Print(name='print_max',
proc=proc,
scheduler=scheduler)
m.input.table = csv.output.result
prt.input.df = m.output.result
t = _add_max(csv, scheduler, proc=proc)
aio.run_gather(scheduler.start(), t)
self.assertTrue(started)
def test_dataflow_2_add_remove(self):
scheduler = self.scheduler(clean=True)
csv = CSVLoader(get_dataset('bigfile'), name='csv',
index_col=False, header=None,
scheduler=scheduler)
m = Min(name="min", scheduler=scheduler)
prt = Print(proc=self.terse,
name='print_min',
scheduler=scheduler)
m.input.table = csv.output.result
prt.input.df = m.output.result
started = False
def proc(x):
nonlocal started
print("proc max called")
started = True
async def _add_max_remove_min(csv, scheduler, proc):
await aio.sleep(2)
with scheduler:
print('removing min module')
del scheduler['min']
print('adding new modules')
m = Max(name="max", scheduler=scheduler)
prt = Print(name='print_max',
proc=proc,
scheduler=scheduler)
m.input.table = csv.output.result
prt.input.df = m.output.result
t = _add_max_remove_min(csv, scheduler, proc=proc)
aio.run_gather(scheduler.start(), t)
self.assertTrue(started)
if __name__ == '__main__':
ProgressiveTest.main()
```
#### File: progressivis/tests/test_01_input.py
```python
from . import ProgressiveTest
from progressivis import Print
from progressivis.io.input import Input
import numpy as np
from progressivis.core import aio
async def _do_line(inp, s):
await aio.sleep(2)
for r in range(10):
await inp.from_input('line#%d' % r)
await aio.sleep(np.random.random())
await aio.sleep(1)
await s.stop()
class TestInput(ProgressiveTest):
def test_input(self):
s = self.scheduler()
with s:
inp = Input(scheduler=s)
pr = Print(proc=self.terse, scheduler=s)
pr.input.df = inp.output.result
aio.run_gather(s.start(), _do_line(inp, s))
self.assertEqual(len(inp.result), 10)
if __name__ == '__main__':
ProgressiveTest.main()
```
#### File: progressivis/tests/test_02_changemanager_column.py
```python
from . import ProgressiveTest
from progressivis.table.column import Column
from progressivis.table.changemanager_column import ColumnChangeManager
from progressivis.table.tablechanges import TableChanges
import numpy as np
class FakeSlot(object):
def __init__(self, table):
self.table = table
def data(self):
return self.table
class TestColumnChangeManager(ProgressiveTest):
def setUp(self):
super(TestColumnChangeManager, self).setUp()
self.scheduler = self.scheduler()
def test_columnchangemanager(self):
#pylint: disable=protected-access
column = Column('test_changemanager_column', None, data=np.array([ 1, 2, 3]))
s = self.scheduler
column.changes = TableChanges()
s._run_number = 1
last = s._run_number
slot = FakeSlot(column)
mid1 = 1
cm = ColumnChangeManager(slot, buffer_updated=True,buffer_deleted=True)
self.assertEqual(cm.last_update(), 0)
self.assertEqual(cm.created.length(), 0)
self.assertEqual(cm.updated.length(), 0)
self.assertEqual(cm.deleted.length(), 0)
mid2 = 2
cm2 = ColumnChangeManager(slot, buffer_updated=True,buffer_deleted=True)
self.assertEqual(cm2.last_update(), 0)
self.assertEqual(cm2.created.length(), 0)
self.assertEqual(cm2.updated.length(), 0)
self.assertEqual(cm2.deleted.length(), 0)
mid3 = 3
cm3 = ColumnChangeManager(slot, buffer_updated=True,buffer_deleted=True)
self.assertEqual(cm3.last_update(), 0)
self.assertEqual(cm3.created.length(), 0)
self.assertEqual(cm3.updated.length(), 0)
self.assertEqual(cm3.deleted.length(), 0)
cm.update(last, column, mid=mid1)
self.assertEqual(cm.last_update(), last)
self.assertEqual(cm.created.next(),slice(0, 3))
self.assertEqual(cm.updated.length(), 0)
self.assertEqual(cm.deleted.length(), 0)
s._run_number += 1
last = s._run_number
column.append(np.array([4]))
cm.update(last, column, mid=mid1)
self.assertEqual(cm.last_update(), last)
self.assertEqual(cm.created.next(), slice(3,4))
self.assertEqual(cm.updated.length(), 0)
self.assertEqual(cm.deleted.length(), 0)
s._run_number += 1
last = s._run_number
column.append(np.array([5]))
cm.update(last, column, mid=mid1)
self.assertEqual(cm.last_update(), last)
self.assertEqual(cm.created.next(),slice(4, 5))
self.assertEqual(cm.updated.length(), 0)
self.assertEqual(cm.deleted.length(), 0)
s._run_number += 1
column[3] = 42
column[4] = 52
last = s._run_number
cm.update(last, column, mid=mid1)
self.assertEqual(cm.last_update(), last)
self.assertEqual(cm.created.length(), 0)
self.assertEqual(cm.updated.next(), slice(3,5))
self.assertEqual(cm.deleted.length(), 0)
s._run_number += 1
last = s._run_number
cm.update(last, column, mid=mid1)
self.assertEqual(cm.last_update(), last)
self.assertEqual(cm.created.length(), 0)
self.assertEqual(cm.updated.length(), 0)
self.assertEqual(cm.deleted.length(), 0)
s._run_number += 1
last2 = 0
column[2] = 22
column[1] = 0.12
last2 = s._run_number
cm2.update(last2, column, mid=mid2)
self.assertEqual(cm2.last_update(), last2)
self.assertEqual(cm2.created.next(), slice(0, 5))
self.assertEqual(cm2.updated.length(), 0)
self.assertEqual(cm2.deleted.length(), 0)
s._run_number += 1
column[0] = 11
column[2] = 32
column.append(np.array([6]))
# tv = column.loc[1:2]
# last3 = s._run_number
# cm3.update(last3, tv, mid=mid3)
# self.assertEqual(cm3.created.next(), slice(1, 3)) # test ids, not indices
# self.assertEqual(cm2.updated.length(), 0)
# self.assertEqual(cm2.deleted.length(), 0)
s._run_number += 1
last = s._run_number
# with self.assertRaises(ValueError):
# cm.update(last+1, column, mid=mid1)
cm.update(last, column, mid=mid1)
self.assertEqual(cm.last_update(), last)
self.assertEqual(cm.created.next(), slice(5,6))
self.assertEqual(cm.updated.next(), slice(0,3))
self.assertEqual(cm.deleted.length(), 0)
s._run_number += 1
last2 = s._run_number
cm2.update(last2, column, mid=mid2)
self.assertEqual(cm2.last_update(), last2)
self.assertEqual(cm2.created.next(), slice(5,6))
self.assertEqual(list(cm2.updated.next()), [0,2])
self.assertEqual(cm2.deleted.length(), 0)
# s._run_number += 1
# column[0] = 1
# column[2] = 22
# last3 = s._run_number
# cm3.update(last3, tv, mid=mid3)
# self.assertEqual(cm3.last_update(), last3)
# self.assertEqual(cm3.created.length(), 0)
# self.assertEqual(cm3.updated.next(), slice(2,3))
# self.assertEqual(cm3.deleted.length(), 0)
# test deletes
s._run_number += 1
del column.loc[2]
last = s._run_number
cm.update(last, column, mid=mid1)
self.assertEqual(cm.last_update(), last)
self.assertEqual(cm.created.length(), 0)
self.assertEqual(cm.updated.length(), 0)
self.assertEqual(cm.deleted.next(), slice(2,3))
# self.assertTrue(np.all(column[:]==np.array([1,2,a.fillvalue,42,5,6])))
# self.assertTrue(np.all(b[:]==np.array([0.11,0.12,a.fillvalue,0.42,.52,0.6])))
s._run_number += 1
del column.loc[4]
column.append(np.array([7,8]))
column[5] = 55
last2 = s._run_number
cm2.update(last2, column, mid=mid2)
self.assertEqual(cm2.last_update(), last2)
self.assertEqual(cm2.created.next(), slice(6,8))
self.assertEqual(cm2.updated.next(), slice(5,6))
self.assertEqual(list(cm2.deleted.next()), [2,4])
#TODO test reset
cm.reset()
self.assertEqual(cm.last_update(), 0)
if __name__ == '__main__':
ProgressiveTest.main()
```
#### File: progressivis/tests/test_03_bisect.py
```python
from progressivis.table.table import Table
from progressivis.table.constant import Constant
from progressivis import Print
from progressivis.stats import RandomTable
from progressivis.table.bisectmod import Bisect
from progressivis.core.bitmap import bitmap
from progressivis.table.hist_index import HistogramIndex
from progressivis.core import aio
from progressivis.table.stirrer import Stirrer
from . import ProgressiveTest
class TestBisect(ProgressiveTest):
def test_bisect(self):
s = self.scheduler()
random = RandomTable(2, rows=1000_000, scheduler=s)
t = Table(name=None, dshape='{value: string}', data={'value':[0.5]})
min_value = Constant(table=t, scheduler=s)
hist_index = HistogramIndex(column='_1', scheduler=s)
hist_index.create_dependent_modules(random, 'result')
bisect_ = Bisect(column='_1', op='>', hist_index=hist_index, scheduler=s)
bisect_.input[0] = hist_index.output.result
#bisect_.input[0] = random.output.result
bisect_.input.limit = min_value.output.result
pr = Print(proc=self.terse, scheduler=s)
pr.input[0] = bisect_.output.result
aio.run(s.start())
#hist_index._impl.dump()
idx = random.result.eval('_1>0.5', result_object='index')
self.assertEqual(bisect_.result.index, bitmap(idx))
def test_bisect2(self):
s = self.scheduler()
random = RandomTable(2, rows=100_000, scheduler=s)
stirrer = Stirrer(update_column='_1', delete_rows=100,
#update_rows=5,
#fixed_step_size=100,
scheduler=s)
stirrer.input[0] = random.output.result
t = Table(name=None, dshape='{value: string}', data={'value':[0.5]})
min_value = Constant(table=t, scheduler=s)
hist_index = HistogramIndex(column='_1', scheduler=s)
hist_index.create_dependent_modules(stirrer, 'result')
bisect_ = Bisect(column='_1', op='>', hist_index=hist_index, scheduler=s)
bisect_.input[0] = hist_index.output.result
#bisect_.input[0] = random.output.result
bisect_.input.limit = min_value.output.result
pr = Print(proc=self.terse, scheduler=s)
pr.input[0] = bisect_.output.result
aio.run(s.start())
idx = stirrer.result.eval('_1>0.5', result_object='index')
self.assertEqual(bisect_.result.index, bitmap(idx))
def test_bisect3(self):
s = self.scheduler()
random = RandomTable(2, rows=100_000, scheduler=s)
stirrer = Stirrer(update_column='_1', update_rows=100,
fixed_step_size=100, scheduler=s)
stirrer.input[0] = random.output.result
t = Table(name=None, dshape='{value: string}', data={'value':[0.5]})
min_value = Constant(table=t, scheduler=s)
hist_index = HistogramIndex(column='_1', scheduler=s)
hist_index.create_dependent_modules(stirrer, 'result')
bisect_ = Bisect(column='_1', op='>', hist_index=hist_index, scheduler=s)
bisect_.input[0] = hist_index.output.result
#bisect_.input[0] = random.output.result
bisect_.input.limit = min_value.output.result
pr = Print(proc=self.terse, scheduler=s)
pr.input[0] = bisect_.output.result
aio.run(s.start())
idx = stirrer.result.eval('_1>0.5', result_object='index')
self.assertEqual(bisect_.result.index, bitmap(idx))
if __name__ == '__main__':
ProgressiveTest.main()
```
#### File: progressivis/tests/test_03_blobs_table.py
```python
from . import ProgressiveTest
from progressivis.core import aio
from progressivis import Every
from progressivis.stats.blobs_table import BlobsTable, MVBlobsTable
from progressivis.stats.random_table import RandomTable
from progressivis.linalg import Add
import numpy as np
from progressivis.io import CSVLoader
def print_len(x):
if x is not None:
print(len(x))
centers = [(0.1, 0.3), (0.7, 0.5), (-0.4, -0.3)]
class TestBlobsTable(ProgressiveTest):
def test_blobs_table(self):
s = self.scheduler()
module=BlobsTable(['a', 'b'], centers=centers, rows=10000, scheduler=s)
self.assertEqual(module.result.columns[0],'a')
self.assertEqual(module.result.columns[1],'b')
self.assertEqual(len(module.result.columns), 2)
prlen = Every(proc=self.terse, constant_time=True, scheduler=s)
prlen.input[0] = module.output.result
aio.run(s.start())
#s.join()
self.assertEqual(len(module.result), 10000)
def test_blobs_table2(self):
s = self.scheduler()
sz = 100000
centers = [(0.1, 0.3), (0.7, 0.5), (-0.4, -0.3)]
blob1=BlobsTable(['a', 'b'], centers=centers, cluster_std=0.2, rows=sz, scheduler=s)
blob1.default_step_size = 1500
blob2=BlobsTable(['a', 'b'], centers=centers, cluster_std=0.2, rows=sz, scheduler=s)
blob2.default_step_size = 200
add = Add(scheduler=s)
add.input.first = blob1.output.result
add.input.second = blob2.output.result
prlen = Every(proc=self.terse, constant_time=True, scheduler=s)
prlen.input[0] = add.output.result
aio.run(s.start())
#s.join()
self.assertEqual(len(blob1.result), sz)
self.assertEqual(len(blob2.result), sz)
arr1 = blob1.result.to_array()
arr2 = blob2.result.to_array()
self.assertTrue(np.allclose(arr1, arr2))
means = [0.1, 0.3], [0.7, 0.5], [-0.4, -0.3]
covs = [[0.01, 0], [0, 0.09]], [[0.04, 0], [0, 0.01]], [[0.09, 0.04], [0.04, 0.02]]
class TestMVBlobsTable(ProgressiveTest):
def test_mv_blobs_table(self):
s = self.scheduler()
module=MVBlobsTable(['a', 'b'], means=means, covs=covs, rows=10000, scheduler=s)
self.assertEqual(module.result.columns[0],'a')
self.assertEqual(module.result.columns[1],'b')
self.assertEqual(len(module.result.columns), 2)
prlen = Every(proc=self.terse, constant_time=True, scheduler=s)
prlen.input[0] = module.output.result
aio.run(s.start())
#s.join()
self.assertEqual(len(module.result), 10000)
def test_mv_blobs_table2(self):
s = self.scheduler()
sz = 100000
blob1=MVBlobsTable(['a', 'b'], means=means, covs=covs, rows=sz, scheduler=s)
blob1.default_step_size = 1500
blob2=MVBlobsTable(['a', 'b'], means=means, covs=covs, rows=sz, scheduler=s)
blob2.default_step_size = 200
add = Add(scheduler=s)
add.input.first = blob1.output.result
add.input.second = blob2.output.result
prlen = Every(proc=self.terse, constant_time=True, scheduler=s)
prlen.input[0] = add.output.result
aio.run(s.start())
#s.join()
self.assertEqual(len(blob1.result), sz)
self.assertEqual(len(blob2.result), sz)
arr1 = blob1.result.to_array()
arr2 = blob2.result.to_array()
self.assertTrue(np.allclose(arr1, arr2))
```
#### File: progressivis/tests/test_03_csv.py
```python
from . import ProgressiveTest
from progressivis.core import aio
from progressivis.io import CSVLoader
from progressivis.table.constant import Constant
from progressivis.table.table import Table
from progressivis.datasets import get_dataset
from progressivis.core.utils import RandomBytesIO
class TestProgressiveLoadCSV(ProgressiveTest):
# def setUpNO(self):
# self.logger=logging.getLogger('progressivis.core')
# self.saved=self.logger.getEffectiveLevel()
# self.logger.setLevel(logging.DEBUG)
# ch = logging.StreamHandler(stream=sys.stdout)
# self.logger.addHandler(ch)
# def tearDownNO(self):
# self.logger.setLevel(self.saved)
def runit(self, module):
module.run(1)
table = module.result
self.assertFalse(table is None)
l = len(table)
cnt = 2
while not module.is_zombie():
module.run(cnt)
cnt += 1
table = module.result
ln = len(table)
l = ln
s = module.trace_stats(max_runs=1)
return cnt
def test_read_csv(self):
s = self.scheduler()
module = CSVLoader(
get_dataset("bigfile"), index_col=False, header=None, scheduler=s
)
self.assertTrue(module.result is None)
aio.run(s.start())
self.assertEqual(len(module.result), 1000000)
def test_read_fake_csv(self):
s = self.scheduler()
module = CSVLoader(
RandomBytesIO(cols=30, rows=1000000),
index_col=False,
header=None,
scheduler=s,
)
self.assertTrue(module.result is None)
aio.run(s.start())
self.assertEqual(len(module.result), 1000000)
def test_read_multiple_csv(self):
s = self.scheduler()
filenames = Table(
name="file_names",
dshape="{filename: string}",
data={
"filename": [
get_dataset("smallfile"),
get_dataset("smallfile"),
]
},
)
cst = Constant(table=filenames, scheduler=s)
csv = CSVLoader(index_col=False, header=None, scheduler=s)
csv.input.filenames = cst.output.result
aio.run(csv.start())
self.assertEqual(len(csv.result), 60000)
def test_read_multiple_fake_csv(self):
s = self.scheduler()
filenames = Table(
name="file_names2",
dshape="{filename: string}",
data={
"filename": [
"buffer://fake1?cols=10&rows=30000",
"buffer://fake2?cols=10&rows=30000",
]
},
)
cst = Constant(table=filenames, scheduler=s)
csv = CSVLoader(index_col=False, header=None, scheduler=s)
csv.input.filenames = cst.output.result
aio.run(csv.start())
self.assertEqual(len(csv.result), 60000)
def test_as_array(self):
s = self.scheduler()
module = CSVLoader(
get_dataset("bigfile"),
index_col=False,
as_array="array",
header=None,
scheduler=s,
)
self.assertTrue(module.result is None)
aio.run(s.start())
table = module.result
self.assertEqual(len(table), 1000000)
self.assertEqual(table.columns, ["array"])
self.assertEqual(table["array"].shape, (1000000, 30))
def test_as_array2(self):
s = self.scheduler()
module = CSVLoader(
get_dataset("bigfile"),
index_col=False,
as_array={
"firsthalf": ["_" + str(r) for r in range(13)],
"secondhalf": ["_" + str(r) for r in range(13, 30)],
},
header=None,
scheduler=s,
)
self.assertTrue(module.result is None)
aio.run(s.start())
table = module.result
self.assertEqual(len(table), 1000000)
self.assertEqual(table.columns, ["firsthalf", "secondhalf"])
self.assertEqual(table["firsthalf"].shape, (1000000, 13))
self.assertEqual(table["secondhalf"].shape, (1000000, 17))
def test_as_array3(self):
s = self.scheduler()
module = CSVLoader(
get_dataset("mnist_784"),
index_col=False,
as_array=lambda cols: {"array": [c for c in cols if c != "class"]},
scheduler=s,
)
self.assertTrue(module.result is None)
aio.run(s.start())
table = module.result
self.assertEqual(len(table), 70000)
self.assertEqual(table.columns, ["array", "class"])
self.assertEqual(table["array"].shape, (70000, 784))
self.assertEqual(table["class"].shape, (70000,))
if __name__ == "__main__":
ProgressiveTest.main()
```
#### File: progressivis/tests/test_03_cxxmax.py
```python
from . import ProgressiveTest, skip, skipIf
from progressivis.core import aio
from progressivis import Print
from progressivis.stats import RandomTable
from progressivis.table.stirrer import Stirrer, StirrerView
from progressivis.stats.cxxmax import Max, CxxMax
import numpy as np
class TestCxxMax(ProgressiveTest):
def compare(self, res1, res2):
v1 = np.array(list(res1.values()))
v2 = np.array(list(res2.values()))
#print('v1 = ', v1)
#print('v2 = ', v2)
self.assertTrue(np.allclose(v1, v2))
@skipIf(CxxMax is None, "C++ module is missing")
def test_max(self):
s = self.scheduler()
random = RandomTable(10, rows=10_000, scheduler=s)
max_=Max(name='max_'+str(hash(random)), scheduler=s)
max_.input[0] = random.output.result
pr=Print(proc=self.terse, scheduler=s)
pr.input[0] = max_.output.result
aio.run(s.start())
res1 = random.result.max()
res2 = max_.cxx_module.get_output_table().last().to_dict(ordered=True)
self.compare(res1, res2)
@skipIf(CxxMax is None, "C++ module is missing")
def test_stirrer(self):
s = self.scheduler()
random = RandomTable(2, rows=100_000, scheduler=s)
stirrer = Stirrer(update_column='_1',
delete_rows=5,
update_rows=5,
fixed_step_size=100, scheduler=s)
stirrer.input[0] = random.output.result
max_=Max(name='max_'+str(hash(random)), scheduler=s)
max_.input[0] = stirrer.output.result
pr=Print(proc=self.terse, scheduler=s)
pr.input[0] = max_.output.result
aio.run(s.start())
res1 = random.result.max()
res2 = max_.cxx_module.get_output_table().last().to_dict(ordered=True)
self.compare(res1, res2)
@skipIf(CxxMax is None, "C++ module is missing")
def test_stirrer_view(self):
s = self.scheduler()
random = RandomTable(2, rows=100_000, scheduler=s)
stirrer = StirrerView(update_column='_1',
delete_rows=5,
update_rows=5,
fixed_step_size=100, scheduler=s)
stirrer.input[0] = random.output.result
max_=Max(name='max_'+str(hash(random)), scheduler=s)
max_.input[0] = stirrer.output.result
pr=Print(proc=self.terse, scheduler=s)
pr.input[0] = max_.output.result
aio.run(s.start())
res1 = random.result.max()
res2 = max_.cxx_module.get_output_table().last().to_dict(ordered=True)
self.compare(res1, res2)
if __name__ == '__main__':
ProgressiveTest.main()
```
#### File: progressivis/tests/test_03_histogram1d.py
```python
from . import ProgressiveTest
from progressivis.core import aio
from progressivis import Scheduler, Every
from progressivis.io import CSVLoader
from progressivis.stats import Histogram1D, Min, Max
from progressivis.datasets import get_dataset
from progressivis.table.stirrer import Stirrer
import pandas as pd
import numpy as np
import logging
logging.basicConfig(level=logging.WARNING)
class TestHistogram1D(ProgressiveTest):
#def tearDown(self):
#StorageManager.default.end()
def test_histogram1d(self):
s=self.scheduler()
csv = CSVLoader(get_dataset('bigfile'), index_col=False,header=None,scheduler=s)
min_ = Min(scheduler=s)
min_.input[0] = csv.output.result
max_ = Max(scheduler=s)
max_.input[0] = csv.output.result
        histogram1d = Histogram1D('_2', scheduler=s)  # CSV columns are named _1.._30
histogram1d.input[0] = csv.output.result
histogram1d.input.min = min_.output.result
histogram1d.input.max = max_.output.result
pr = Every(proc=self.terse, scheduler=s)
pr.input[0] = csv.output.result
aio.run(s.start())
s = histogram1d.trace_stats()
def test_histogram1d1(self):
s=self.scheduler()
csv = CSVLoader(get_dataset('bigfile'), index_col=False,header=None,scheduler=s)
min_ = Min(scheduler=s)
min_.input[0] = csv.output.result
max_ = Max(scheduler=s)
max_.input[0] = csv.output.result
        histogram1d = Histogram1D('_2', scheduler=s)  # CSV columns are named _1.._30
histogram1d.input[0] = csv.output.result
histogram1d.input.min = min_.output.result
histogram1d.input.max = max_.output.result
pr = Every(proc=self.terse, scheduler=s)
pr.input[0] = csv.output.result
aio.run(s.start())
s = histogram1d.trace_stats()
last = histogram1d.result.last().to_dict()
h1 = last['array']
bounds = (last['min'], last['max'])
df = pd.read_csv(get_dataset('bigfile'), header=None, usecols=[2])
v = df.to_numpy().reshape(-1)
h2, _ = np.histogram(v, bins=histogram1d.params.bins, density=False, range=bounds)
self.assertListEqual(h1.tolist(), h2.tolist())
def t_histogram1d_impl(self, **kw):
s=self.scheduler()
csv = CSVLoader(get_dataset('bigfile'), index_col=False,header=None,scheduler=s)
stirrer = Stirrer(update_column='_2',
fixed_step_size=1000, scheduler=s, **kw)
stirrer.input[0] = csv.output.result
min_ = Min(scheduler=s)
min_.input[0] = stirrer.output.result
max_ = Max(scheduler=s)
max_.input[0] = stirrer.output.result
        histogram1d = Histogram1D('_2', scheduler=s)  # CSV columns are named _1.._30
histogram1d.input[0] = stirrer.output.result
histogram1d.input.min = min_.output.result
histogram1d.input.max = max_.output.result
#pr = Print(scheduler=s)
pr = Every(proc=self.terse, scheduler=s)
pr.input[0] = stirrer.output.result
aio.run(s.start())
s = histogram1d.trace_stats()
last = histogram1d.result.last().to_dict()
h1 = last['array']
bounds = (last['min'], last['max'])
v = stirrer.result.loc[:, ['_2']].to_array().reshape(-1)
h2, _ = np.histogram(v, bins=histogram1d.params.bins, density=False, range=bounds)
self.assertEqual(np.sum(h1), np.sum(h2))
self.assertListEqual(h1.tolist(), h2.tolist())
def test_histogram1d2(self):
return self.t_histogram1d_impl(delete_rows=5)
def test_histogram1d3(self):
return self.t_histogram1d_impl(update_rows=5)
if __name__ == '__main__':
ProgressiveTest.main()
```
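The assertions in `test_histogram1d1` and `t_histogram1d_impl` boil down to a plain NumPy cross-check. A minimal standalone sketch of that reference computation, with random stand-ins for the column values and a placeholder bin count (`128` stands in for `histogram1d.params.bins`):

```python
import numpy as np

values = np.random.rand(10_000)           # stand-in for column '_2'
bounds = (values.min(), values.max())     # stand-in for last['min'], last['max']
hist, _ = np.histogram(values, bins=128, density=False, range=bounds)
assert hist.sum() == len(values)          # every value falls inside the range
```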
#### File: progressivis/tests/test_03_hist_percentiles.py
```python
"Test for Range Query"
from progressivis.core import aio
from progressivis.table.constant import Constant
from progressivis import Print
from progressivis.stats import RandomTable
from progressivis.table.hist_index import HistogramIndex
from progressivis.table.percentiles import Percentiles
import numpy as np
from . import ProgressiveTest, main
from progressivis.table.range_query import RangeQuery
from progressivis.utils.psdict import PsDict
from progressivis.table.stirrer import Stirrer, StirrerView
class TestPercentiles(ProgressiveTest):
"""Tests for HistIndex based percentiles
NB: another percentiles module exists in stats directory
which is based on T-digest
"""
def tearDown(self):
TestPercentiles.cleanup()
def _impl_tst_percentiles(self, accuracy):
"""
"""
s = self.scheduler()
with s:
random = RandomTable(2, rows=10000, scheduler=s)
hist_index = HistogramIndex(column='_1', scheduler=s)
hist_index.input[0] = random.output.result
t_percentiles = PsDict({'_25': 25.0,
'_50': 50.0,
'_75': 75.0})
which_percentiles = Constant(table=t_percentiles, scheduler=s)
percentiles = Percentiles(hist_index, accuracy=accuracy, scheduler=s)
percentiles.input[0] = random.output.result
percentiles.input.percentiles = which_percentiles.output.result
prt = Print(proc=self.terse, scheduler=s)
prt.input[0] = percentiles.output.result
aio.run(s.start())
pdict = percentiles.result.last().to_dict()
v = random.result['_1'].values
p25 = np.percentile(v, 25.0)
p50 = np.percentile(v, 50.0)
p75 = np.percentile(v, 75.0)
print("Table=> accuracy: ", accuracy,
" 25:", p25, pdict['_25'],
" 50:", p50, pdict['_50'],
" 75:", p75, pdict['_75'])
self.assertAlmostEqual(p25, pdict['_25'], delta=0.01)
self.assertAlmostEqual(p50, pdict['_50'], delta=0.01)
self.assertAlmostEqual(p75, pdict['_75'], delta=0.01)
def _impl_stirred_tst_percentiles(self, accuracy, **kw):
"""
"""
s = self.scheduler()
with s:
random = RandomTable(2, rows=10000, scheduler=s)
stirrer = Stirrer(update_column='_2',
fixed_step_size=1000, scheduler=s, **kw)
stirrer.input[0] = random.output.result
hist_index = HistogramIndex(column='_1', scheduler=s)
hist_index.input[0] = stirrer.output.result
t_percentiles = PsDict({'_25': 25.0,
'_50': 50.0,
'_75': 75.0})
which_percentiles = Constant(table=t_percentiles, scheduler=s)
percentiles = Percentiles(hist_index, accuracy=accuracy, scheduler=s)
percentiles.input[0] = stirrer.output.result
percentiles.input.percentiles = which_percentiles.output.result
prt = Print(proc=self.terse, scheduler=s)
prt.input[0] = percentiles.output.result
aio.run(s.start())
pdict = percentiles.result.last().to_dict()
#v = random.table()['_1'].values
#import pdb;pdb.set_trace()
v = stirrer.result.to_array(columns=['_1']).reshape(-1)
p25 = np.percentile(v, 25.0)
p50 = np.percentile(v, 50.0)
p75 = np.percentile(v, 75.0)
print("Table=> accuracy: ", accuracy,
" 25:", p25, pdict['_25'],
" 50:", p50, pdict['_50'],
" 75:", p75, pdict['_75'])
self.assertAlmostEqual(p25, pdict['_25'], delta=0.01)
self.assertAlmostEqual(p50, pdict['_50'], delta=0.01)
self.assertAlmostEqual(p75, pdict['_75'], delta=0.01)
def test_percentiles_fast(self):
"""test_percentiles: Simple test for HistIndex based percentiles
        low accuracy => faster mode
"""
return self._impl_tst_percentiles(2.0)
def test_percentiles_fast2(self):
"""test_percentiles: Simple test for HistIndex based percentiles
        low accuracy => faster mode
"""
return self._impl_stirred_tst_percentiles(2.0, delete_rows=5)
def test_percentiles_fast3(self):
"""test_percentiles: Simple test for HistIndex based percentiles
        low accuracy => faster mode
"""
return self._impl_stirred_tst_percentiles(2.0, update_rows=5)
def test_percentiles_accurate(self):
"""test_percentiles: Simple test for HistIndex based percentiles
        higher accuracy => slower mode
"""
return self._impl_tst_percentiles(0.2)
def test_percentiles_accurate2(self):
"""test_percentiles: Simple test for HistIndex based percentiles
        higher accuracy => slower mode
"""
return self._impl_stirred_tst_percentiles(0.2, delete_rows=5)
def test_percentiles_accurate3(self):
"""test_percentiles: Simple test for HistIndex based percentiles
        higher accuracy => slower mode
"""
return self._impl_stirred_tst_percentiles(0.2, update_rows=5)
def _impl_tst_percentiles_rq(self, accuracy):
"""
"""
s = self.scheduler()
with s:
random = RandomTable(2, rows=10000, scheduler=s)
t_min = PsDict({'_1': 0.3})
min_value = Constant(table=t_min, scheduler=s)
t_max = PsDict({'_1': 0.8})
max_value = Constant(table=t_max, scheduler=s)
range_qry = RangeQuery(column='_1', scheduler=s)
range_qry.create_dependent_modules(random, 'result',
min_value=min_value,
max_value=max_value)
hist_index = range_qry.hist_index
t_percentiles = PsDict({'_25': 25.0,
'_50': 50.0,
'_75': 75.0})
which_percentiles = Constant(table=t_percentiles, scheduler=s)
percentiles = Percentiles(hist_index, accuracy=accuracy, scheduler=s)
percentiles.input[0] = range_qry.output.result
percentiles.input.percentiles = which_percentiles.output.result
prt = Print(proc=self.terse, scheduler=s)
prt.input[0] = percentiles.output.result
aio.run(s.start())
pdict = percentiles.result.last().to_dict()
v = range_qry.result['_1'].values
p25 = np.percentile(v, 25.0)
p50 = np.percentile(v, 50.0)
p75 = np.percentile(v, 75.0)
print("TSV=> accuracy: ", accuracy,
" 25:", p25, pdict['_25'],
" 50:", p50, pdict['_50'],
" 75:", p75, pdict['_75'])
self.assertAlmostEqual(p25, pdict['_25'], delta=0.01)
self.assertAlmostEqual(p50, pdict['_50'], delta=0.01)
self.assertAlmostEqual(p75, pdict['_75'], delta=0.01)
def _impl_stirred_tst_percentiles_rq(self, accuracy, **kw):
"""
"""
s = self.scheduler()
with s:
random = RandomTable(2, rows=10000, scheduler=s)
stirrer = Stirrer(update_column='_2',
fixed_step_size=1000, scheduler=s, **kw)
stirrer.input[0] = random.output.result
t_min = PsDict({'_1': 0.3})
min_value = Constant(table=t_min, scheduler=s)
t_max = PsDict({'_1': 0.8})
max_value = Constant(table=t_max, scheduler=s)
range_qry = RangeQuery(column='_1', scheduler=s)
range_qry.create_dependent_modules(stirrer, 'result',
min_value=min_value,
max_value=max_value)
hist_index = range_qry.hist_index
t_percentiles = PsDict({'_25': 25.0,
'_50': 50.0,
'_75': 75.0})
which_percentiles = Constant(table=t_percentiles, scheduler=s)
percentiles = Percentiles(hist_index, accuracy=accuracy, scheduler=s)
percentiles.input[0] = range_qry.output.result
percentiles.input.percentiles = which_percentiles.output.result
prt = Print(proc=self.terse, scheduler=s)
prt.input[0] = percentiles.output.result
aio.run(s.start())
pdict = percentiles.result.last().to_dict()
v = range_qry.result['_1'].values
p25 = np.percentile(v, 25.0)
p50 = np.percentile(v, 50.0)
p75 = np.percentile(v, 75.0)
print("TSV=> accuracy: ", accuracy,
" 25:", p25, pdict['_25'],
" 50:", p50, pdict['_50'],
" 75:", p75, pdict['_75'])
self.assertAlmostEqual(p25, pdict['_25'], delta=0.01)
self.assertAlmostEqual(p50, pdict['_50'], delta=0.01)
self.assertAlmostEqual(p75, pdict['_75'], delta=0.01)
def test_percentiles_fast_rq(self):
"""test_percentiles: Simple test for HistIndex based percentiles
        low accuracy => faster mode
"""
return self._impl_tst_percentiles_rq(2.0)
def test_percentiles_fast_rq2(self):
"""test_percentiles: Simple test for HistIndex based percentiles
        low accuracy => faster mode
"""
return self._impl_stirred_tst_percentiles_rq(2.0, delete_rows=5)
def test_percentiles_fast_rq3(self):
"""test_percentiles: Simple test for HistIndex based percentiles
        low accuracy => faster mode
"""
return self._impl_stirred_tst_percentiles_rq(2.0, update_rows=5)
def test_percentiles_accurate_rq(self):
"""test_percentiles: Simple test for HistIndex based percentiles
        higher accuracy => slower mode
"""
return self._impl_tst_percentiles_rq(0.2)
def test_percentiles_accurate_rq2(self):
"""test_percentiles: Simple test for HistIndex based percentiles
        higher accuracy => slower mode
"""
return self._impl_stirred_tst_percentiles_rq(0.2, delete_rows=5)
def test_percentiles_accurate_rq3(self):
"""test_percentiles: Simple test for HistIndex based percentiles
        higher accuracy => slower mode
"""
return self._impl_stirred_tst_percentiles_rq(0.2, update_rows=5)
if __name__ == '__main__':
main()
```
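All of the percentile tests above use NumPy as ground truth and accept the progressive estimate within `delta=0.01`. A minimal sketch of that reference side, with random data standing in for column '_1':

```python
import numpy as np

values = np.random.rand(10_000)              # stand-in for column '_1'
p25, p50, p75 = np.percentile(values, [25.0, 50.0, 75.0])
print(p25, p50, p75)   # the values the HistIndex-based estimates are checked against
```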
#### File: progressivis/tests/test_03_join2.py
```python
import pandas as pd
from progressivis.core import aio
from progressivis import Print, Every
from progressivis.stats import Stats
from progressivis.io import CSVLoader
from progressivis.datasets import get_dataset
from progressivis.table.bin_join import BinJoin
from progressivis.table.constant import Constant
from progressivis.table.table import Table
from progressivis.table.reduce import Reduce
from . import ProgressiveTest, skip
def print_len(x):
if x is not None:
print(len(x))
class TestJoin2(ProgressiveTest):
@skip("Need fixing")
def test_join(self):
s = self.scheduler()
csv = CSVLoader(get_dataset('bigfile'), index_col=False, header=None,
scheduler=s)
stat1 = Stats(1, reset_index=True, scheduler=s)
stat1.input[0] = csv.output.result
stat2 = Stats(2, reset_index=True, scheduler=s)
stat2.input[0] = csv.output.result
# join=Join(scheduler=s)
# reduce_ = Reduce(BinJoin, "first", "second", "table", scheduler=s)
# reduce_.input[0] = stat1.output.stats
# reduce_.input[0] = stat2.output.stats
# join = reduce_.expand()
join = Reduce.expand(BinJoin, "first", "second", "table",
[stat1.output.stats, stat2.output.stats],
scheduler=s)
pr = Print(proc=self.terse, scheduler=s)
pr.input[0] = join.output.result
prlen = Every(proc=self.terse, constant_time=True, scheduler=s)
prlen.input[0] = csv.output.result
aio.run(s.start())
res = join.trace_stats(max_runs=1)
print(res)
def test_join_simple(self):
s = self.scheduler()
cst1 = Constant(Table(name='test_join_simple_cst1',
data=pd.DataFrame({'xmin': [1], 'xmax': [2]}),
create=True), scheduler=s)
cst2 = Constant(Table(name='test_join_simple_cst2',
data=pd.DataFrame({'ymin': [3], 'ymax': [4]}),
create=True), scheduler=s)
join = Reduce.expand(BinJoin, "first", "second", "table",
[cst1.output.result, cst2.output.result],
scheduler=s)
# reduce_ = Reduce(BinJoin, "first", "second", "table", scheduler=s)
# reduce_.input[0] = cst1.output.result
# reduce_.input[0] = cst2.output.result
# join = reduce_.expand()
# join = BinJoin(scheduler=s)
# join.input.first = cst1.output.result
# join.input.second = cst2.output.result
pr = Print(proc=self.terse, scheduler=s)
pr.input[0] = join.output.result
aio.run(s.start())
res = join.trace_stats(max_runs=1)
print(res)
df = join.result
last = df.loc[df.index[-1]]
self.assertTrue(last['xmin'] == 1 and last['xmax'] == 2 and
last['ymin'] == 3 and last['ymax'] == 4)
if __name__ == '__main__':
ProgressiveTest.main()
```
#### File: progressivis/tests/test_03_last_row.py
```python
from . import ProgressiveTest
from progressivis import Print, Every
from progressivis.table.last_row import LastRow
from progressivis.table.constant import Constant
from progressivis.io import CSVLoader
from progressivis.datasets import get_dataset
from progressivis.table.table import Table
from progressivis.table.join import Join
from progressivis.core.utils import get_random_name
from progressivis.core import aio
class TestLastRow(ProgressiveTest):
def test_last_row(self):
s = self.scheduler()
csv = CSVLoader(get_dataset('smallfile'),
index_col=False,
header=None,
scheduler=s)
lr1 = LastRow(scheduler=s)
lr1.input[0] = csv.output.result
prlen = Every(proc=self.terse, constant_time=True, scheduler=s)
prlen.input[0] = lr1.output.result
aio.run(s.start())
df = csv.result
last = df.last()
res = lr1.result
self.assertEqual(res.at[0, '_1'], last['_1'])
def test_last_row_simple(self):
s = self.scheduler()
t1 = Table(name=get_random_name("cst1"),
data={'xmin': [1], 'xmax': [2]})
t2 = Table(name=get_random_name("cst2"),
data={'ymin': [3], 'ymax': [4]})
cst1 = Constant(t1, scheduler=s)
cst2 = Constant(t2, scheduler=s)
join = Join(scheduler=s)
join.input[0] = cst1.output.result
join.input[0] = cst2.output.result
pr = Print(proc=self.terse, scheduler=s)
pr.input[0] = join.output.result
aio.run(s.start())
# res = join.trace_stats(max_runs=1)
# pd.set_option('display.expand_frame_repr', False)
# print(res)
df = join.result
last = df.last()
self.assertTrue(last['xmin'] == 1 and last['xmax'] == 2 and
last['ymin'] == 3 and last['ymax'] == 4)
if __name__ == '__main__':
ProgressiveTest.main()
```
#### File: progressivis/tests/test_03_linear_map.py
```python
from . import ProgressiveTest
from progressivis.core import aio
from progressivis import Print
from progressivis.linalg import LinearMap
import numpy as np
from progressivis.stats import RandomTable
class TestLinearMap(ProgressiveTest):
def test_linear_map(self):
s = self.scheduler()
vectors = RandomTable(3, rows=100000, scheduler=s)
transf = RandomTable(10, rows=3, scheduler=s)
module = LinearMap(scheduler=s)
module.input.vectors = vectors.output.result
module.input.transformation = transf.output.result
pr = Print(proc=self.terse, scheduler=s)
pr.input[0] = module.output.result
aio.run(s.start())
res1 = np.matmul(vectors.result.to_array(),
transf.result.to_array())
res2 = module.result.to_array()
self.assertTrue(np.allclose(res1, res2, equal_nan=True))
def test_linear_map2(self):
s = self.scheduler()
vectors = RandomTable(20, rows=100000, scheduler=s)
transf = RandomTable(20, rows=3, scheduler=s)
module = LinearMap(columns=['_3', '_4', '_5'], scheduler=s)
module.input.vectors = vectors.output.result
module.input.transformation = transf.output.result
pr = Print(proc=self.terse, scheduler=s)
pr.input[0] = module.output.result
aio.run(s.start())
res1 = np.matmul(vectors.result.to_array()[:, 2:5],
transf.result.to_array())
res2 = module.result.to_array()
self.assertTrue(np.allclose(res1, res2, equal_nan=True))
def test_linear_map3(self):
s = self.scheduler()
vectors = RandomTable(20, rows=100000, scheduler=s)
transf = RandomTable(20, rows=3, scheduler=s)
module = LinearMap(columns=['_3', '_4', '_5'],
transf_columns=['_4', '_5', '_6', '_7'],
scheduler=s)
module.input.vectors = vectors.output.result
module.input.transformation = transf.output.result
pr = Print(proc=self.terse, scheduler=s)
pr.input[0] = module.output.result
aio.run(s.start())
res1 = np.matmul(vectors.result.to_array()[:, 2:5],
transf.result.to_array()[:, 3:7])
res2 = module.result.to_array()
self.assertTrue(np.allclose(res1, res2, equal_nan=True))
```
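The three tests above only check matrix products over column slices. A small NumPy sketch of the expected result for `test_linear_map3`, with random arrays standing in for the two RandomTable modules:

```python
import numpy as np

vectors = np.random.rand(100_000, 20)   # stand-in for the 20-column 'vectors' table
transf = np.random.rand(3, 20)          # stand-in for the 3-row 'transformation' table
# columns '_3'..'_5' of vectors (0-based 2:5) times columns '_4'..'_7' of transf (3:7)
expected = np.matmul(vectors[:, 2:5], transf[:, 3:7])
assert expected.shape == (100_000, 4)
```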
#### File: progressivis/tests/test_03_pairwise.py
```python
from . import ProgressiveTest
from progressivis import Every
from progressivis.io import VECLoader, CSVLoader
#from progressivis.metrics import PairwiseDistances
from progressivis.datasets import get_dataset
from progressivis.core import aio
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
def print_len(x):
if x is not None:
print(len(x))
times = 0
async def ten_times(scheduler, run_number):
global times
times += 1
#import pdb;pdb.set_trace()
if times > 10:
scheduler.exit()
class TestPairwiseDistances(ProgressiveTest):
def NOtest_vec_distances(self):
s= self.scheduler()
vec=VECLoader(get_dataset('warlogs'),scheduler=s)
# dis=PairwiseDistances(metric='cosine',scheduler=s)
# dis.input[0] = vec.output.df
# dis.input.array = vec.output.array
cnt = Every(proc=self.terse,constant_time=True,scheduler=s)
# cnt.input[0] = dis.output.dist
cnt.input[0] = vec.output.result
global times
times = 0
s.start()
table = vec.result
#print(table)
# computed = dis.dist()
# self.assertEquals(computed.shape[0], len(df))
# truth = pairwise_distances(vec.toarray(), metric=dis._metric)
# self.assertTrue(np.allclose(truth, computed))
def test_csv_distances(self):
s = self.scheduler()
vec=CSVLoader(get_dataset('smallfile'),index_col=False,header=None,scheduler=s)
# dis=PairwiseDistances(metric='euclidean',scheduler=s)
# dis.input[0] = vec.output.df
cnt = Every(proc=self.terse,constant_time=True,scheduler=s)
# cnt.input[0] = dis.output.dist
cnt.input[0] = vec.output.result
global times
times = 0
aio.run(s.start(ten_times))
table = vec.result
#print(repr(table))
# computed = dis.dist()
#self.assertEquals(computed.shape[0], len(df))
# offset=0
# size=offset+5000
# truth = pairwise_distances(df.iloc[offset:size], metric=dis._metric)
# dist = computed[offset:size,offset:size]
# self.assertTrue(np.allclose(truth, dist,atol=1e-7)) # reduce tolerance
if __name__ == '__main__':
ProgressiveTest.main()
```
#### File: progressivis/tests/test_03_ppca.py
```python
from . import ProgressiveTest, skipIf
from progressivis import Scheduler, Every
from progressivis.core import aio
from progressivis.io import CSVLoader
from progressivis.stats.ppca import PPCA
from progressivis.datasets import get_dataset
from progressivis.table.module import TableModule
from progressivis.table.table import Table
from progressivis.utils.psdict import PsDict
from progressivis.core.slot import SlotDescriptor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils.random import sample_without_replacement
import pandas as pd
import os
def _print(x):
pass
TRAIN_SAMPLE_SIZE = 10000
PREDICT_SAMPLE_SIZE = 1000
SAMPLE_SIZE = TRAIN_SAMPLE_SIZE + PREDICT_SAMPLE_SIZE
RANDOM_STATE = 42
NNEIGHBOURS = 7
N_COMPONENTS = 154
TRACE = None # 'verbose'
LABELS = INDICES = KNN = None
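# Shared evaluation strategy: every test below reduces MNIST with PPCA, inverts the
# transform, then scores the reconstruction with a 7-nearest-neighbour classifier
# fitted once on the raw pixels; a higher score means less information was lost.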
def _array(tbl):
return tbl['array'].values
class MyResetter(TableModule):
inputs = [SlotDescriptor('table', type=Table, required=True)]
def __init__(self, threshold, **kwds):
super().__init__(**kwds)
self._threshold = threshold
self.result = PsDict({'reset': True})
def run_step(self, run_number, step_size, howlong):
input_slot = self.get_input_slot('table')
input_slot.clear_buffers()
data = input_slot.data()
if data and len(data) >= self._threshold:
self.result['reset'] = False
return self._return_run_step(
self.next_state(input_slot), steps_run=step_size
)
@skipIf(os.getenv('TRAVIS'), 'skipped because too expensive for the CI')
class TestPPCA(ProgressiveTest):
def _common(self, rtol, threshold=None, resetter=None, resetter_func=None,
scheduler=None):
global KNN, LABELS, INDICES
if scheduler is None:
s = Scheduler()
else:
s = scheduler
dataset = get_dataset('mnist_784')
data = CSVLoader(dataset, index_col=False, as_array='array',
usecols=lambda x: x != 'class', scheduler=s)
ppca = PPCA(scheduler=s)
ppca.input[0] = data.output.result
ppca.params.n_components = N_COMPONENTS
if resetter:
assert callable(resetter_func)
resetter.input[0] = ppca.output.result
ppca.create_dependent_modules(rtol=rtol, trace=TRACE,
threshold=threshold,
resetter=resetter,
resetter_func=resetter_func)
prn = Every(scheduler=s, proc=_print)
prn.input[0] = ppca.reduced.output.result
aio.run(s.start())
pca_ = ppca._transformer['inc_pca']
recovered = pca_.inverse_transform(_array(ppca.reduced.result))
if KNN is None:
print("Init KNN")
KNN = KNeighborsClassifier(NNEIGHBOURS)
arr = _array(data.result)
LABELS = pd.read_csv(dataset, usecols=['class']).values.reshape((-1,))
indices_t = sample_without_replacement(n_population=len(data.result),
n_samples=TRAIN_SAMPLE_SIZE,
random_state=RANDOM_STATE)
KNN.fit(arr[indices_t], LABELS[indices_t])
indices_p = sample_without_replacement(n_population=len(data.result),
n_samples=PREDICT_SAMPLE_SIZE,
random_state=RANDOM_STATE*2+1)
return KNN.score(recovered[indices_p], LABELS[indices_p])
def test_always_reset(self):
"""
test_always_reset()
"""
score = self._common(0.1)
print("always reset=>score", score)
self.assertGreater(score, 0.93) # 0.94?
def test_never_reset(self):
"""
test_never_reset()
"""
score = self._common(100.0)
print("never reset=>score", score)
self.assertGreater(score, 0.77)
def test_reset_threshold_30k(self):
"""
test_reset_threshold_30k ()
"""
score = self._common(0.1, threshold=30000)
print("reset when threshold 30K=>score", score)
self.assertGreater(score, 0.77)
def test_reset_threshold_40k(self):
"""
test_reset_threshold_40k()
"""
score = self._common(0.1, threshold=40000)
print("reset when threshold 40K=>score", score)
self.assertGreater(score, 0.77)
def test_reset_threshold_50k(self):
"""
test_reset_threshold_50k()
"""
score = self._common(0.1, threshold=50000)
print("reset when threshold 50K=>score", score)
self.assertGreater(score, 0.77)
def test_reset_threshold_60k(self):
"""
test_reset_threshold_60k()
"""
score = self._common(0.1, threshold=60000)
print("reset when threshold 60K=>score", score)
self.assertGreater(score, 0.77)
def test_resetter(self):
"""
test_resetter()
"""
s = Scheduler()
resetter = MyResetter(threshold=30000, scheduler=s)
def _func(slot):
return slot.data().get('reset')
score = self._common(0.1, resetter=resetter, resetter_func=_func, scheduler=s)
print("resetter 30K=>score", score)
self.assertGreater(score, 0.77)
if __name__ == '__main__':
    ProgressiveTest.main()
```
#### File: progressivis/tests/test_03_random_csv.py
```python
from progressivis.core.utils import RandomBytesIO
from . import ProgressiveTest
import tempfile, os, os.path, shutil
class TestRandomCsv(ProgressiveTest):
def setUp(self):
# fixed rows number
self.dtemp = tempfile.mkdtemp(prefix='p10s_')
fixed_rows_obj = RandomBytesIO(cols=10, rows=50)
self.fixed_rows_file = os.path.join(self.dtemp, 'fixed_rows.csv')
fixed_rows_obj.save(self.fixed_rows_file)
# fixed size
fixed_size_obj = RandomBytesIO(cols=10, size=7777)
self.fixed_size_file = os.path.join(self.dtemp, 'fixed_size.csv')
fixed_size_obj.save(self.fixed_size_file)
def tearDown(self):
shutil.rmtree(self.dtemp)
def test_size(self):
fixed_rows_obj = RandomBytesIO(cols=10, rows=50)
self.assertEqual(os.stat(self.fixed_rows_file).st_size, fixed_rows_obj.size())
fixed_size_obj = RandomBytesIO(cols=10, size=7777)
self.assertEqual(os.stat(self.fixed_size_file).st_size, fixed_size_obj.size())
def test_read(self):
fixed_rows_obj = RandomBytesIO(cols=10, rows=50)
with open(self.fixed_rows_file) as fd:
for n in [7, 77, 777, 7007]:
self.assertEqual(fixed_rows_obj.read(n), fd.read(n).encode('utf-8'))
self.assertEqual(fixed_rows_obj.tell(), fd.tell())
fixed_size_obj = RandomBytesIO(cols=10, size=7777)
with open(self.fixed_size_file) as fd:
for n in [7, 77, 777, 7007]:
self.assertEqual(fixed_size_obj.read(n), fd.read(n).encode('utf-8'))
self.assertEqual(fixed_size_obj.tell(), fd.tell())
def test_read_all(self):
fixed_rows_obj = RandomBytesIO(cols=10, rows=50)
with open(self.fixed_rows_file) as fd:
self.assertEqual(fixed_rows_obj.read(), fd.read().encode('utf-8'))
self.assertEqual(fixed_rows_obj.tell(), fd.tell())
fixed_size_obj = RandomBytesIO(cols=10, size=7777)
with open(self.fixed_size_file) as fd:
self.assertEqual(fixed_size_obj.read(), fd.read().encode('utf-8'))
self.assertEqual(fixed_size_obj.tell(), fd.tell())
def test_iter(self):
fixed_rows_obj = RandomBytesIO(cols=10, rows=50)
with open(self.fixed_rows_file) as fd:
for row in fixed_rows_obj:
self.assertEqual(row, fd.readline())
fixed_size_obj = RandomBytesIO(cols=10, size=7777)
with open(self.fixed_size_file) as fd:
for row in fixed_size_obj:
self.assertEqual(row, fd.readline())
def test_readline(self):
fixed_rows_obj = RandomBytesIO(cols=10, rows=50)
with open(self.fixed_rows_file) as fd:
for _ in range(50):
self.assertEqual(fixed_rows_obj.readline(), fd.readline())
fixed_size_obj = RandomBytesIO(cols=10, size=7777)
with open(self.fixed_size_file) as fd:
for _ in range(50):
self.assertEqual(fixed_size_obj.readline(), fd.readline())
def test_readlines(self):
fixed_rows_obj = RandomBytesIO(cols=10, rows=50)
with open(self.fixed_rows_file) as fd:
self.assertEqual(fixed_rows_obj.readlines(), fd.readlines())
fixed_size_obj = RandomBytesIO(cols=10, size=7777)
with open(self.fixed_size_file) as fd:
self.assertEqual(fixed_size_obj.readlines(), fd.readlines())
if __name__ == '__main__':
ProgressiveTest.main()
```
#### File: progressivis/tests/test_03_random_table.py
```python
from . import ProgressiveTest
from progressivis.core import aio
from progressivis import Every
from progressivis.stats.random_table import RandomTable
def print_len(x):
if x is not None:
print(len(x))
class TestRandomTable(ProgressiveTest):
def test_random_table(self):
s = self.scheduler()
module=RandomTable(['a', 'b'], rows=10000, scheduler=s)
self.assertEqual(module.result.columns[0],'a')
self.assertEqual(module.result.columns[1],'b')
self.assertEqual(len(module.result.columns), 2) # add the UPDATE_COLUMN
prlen = Every(proc=self.terse, constant_time=True, scheduler=s)
prlen.input[0] = module.output.result
aio.run(s.start())
#s.join()
self.assertEqual(len(module.result), 10000)
# self.assertFalse(module.result['a'].isnull().any())
# self.assertFalse(module.result['b'].isnull().any())
def test_random_table2(self):
s = self.scheduler()
# produces more than 4M rows per second on my laptop
module=RandomTable(10, rows=1000000, force_valid_ids=True, scheduler=s)
self.assertEqual(len(module.result.columns), 10) # add the UPDATE_COLUMN
self.assertEqual(module.result.columns[0],'_1')
self.assertEqual(module.result.columns[1],'_2')
prlen = Every(proc=self.terse, constant_time=True, scheduler=s)
prlen.input[0] = module.output.result
aio.run(s.start())
#s.join()
self.assertEqual(len(module.result), 1000000)
# self.assertFalse(module.result['_1'].isnull().any())
# self.assertFalse(module.result['_2'].isnull().any())
```
#### File: progressivis/tests/test_03_range_query.py
```python
"Test for Range Query"
from progressivis.table.constant import Constant
from progressivis.table.table import Table
from progressivis import Print
from progressivis.stats import RandomTable, Min, Max
from progressivis.core.bitmap import bitmap
from progressivis.table.range_query import RangeQuery
from progressivis.utils.psdict import PsDict
from progressivis.core import aio
from . import ProgressiveTest, main
class TestRangeQuery(ProgressiveTest):
"Test Suite for RangeQuery Module"
def tearDown(self):
TestRangeQuery.cleanup()
def test_range_query(self):
"Run tests of the RangeQuery module"
s = self.scheduler()
with s:
random = RandomTable(2, rows=1000, scheduler=s)
t_min = PsDict({'_1': 0.3})
min_value = Constant(table=t_min, scheduler=s)
t_max = PsDict({'_1': 0.8})
max_value = Constant(table=t_max, scheduler=s)
range_qry = RangeQuery(column='_1', scheduler=s)
range_qry.create_dependent_modules(random, 'result',
min_value=min_value,
max_value=max_value)
prt = Print(proc=self.terse, scheduler=s)
prt.input[0] = range_qry.output.result
aio.run(s.start())
idx = range_qry.input_module\
.output['result']\
.data().eval('(_1>0.3)&(_1<0.8)', result_object='index')
self.assertEqual(range_qry.result.index, bitmap(idx))
def test_hist_index_min_max(self):
"Test min_out and max_out on HistogramIndex"
s = self.scheduler()
with s:
random = RandomTable(2, rows=100000, scheduler=s)
t_min = PsDict({'_1': 0.3})
min_value = Constant(table=t_min, scheduler=s)
t_max = PsDict({'_1': 0.8})
max_value = Constant(table=t_max, scheduler=s)
range_qry = RangeQuery(column='_1', scheduler=s)
range_qry.create_dependent_modules(random, 'result',
min_value=min_value,
max_value=max_value)
prt = Print(proc=self.terse, scheduler=s)
prt.input[0] = range_qry.output.result
hist_index = range_qry.hist_index
min_=Min(name='min_'+str(hash(hist_index)), scheduler=s)
min_.input[0] = hist_index.output.min_out
prt2 = Print(proc=self.terse, scheduler=s)
prt2.input[0] = min_.output.result
max_=Max(name='max_'+str(hash(hist_index)), scheduler=s)
max_.input[0] = hist_index.output.max_out
pr3=Print(proc=self.terse, scheduler=s)
pr3.input[0] = max_.output.result
aio.run(s.start())
res1 = random.result.min()['_1']
res2 = min_.result['_1']
self.assertAlmostEqual(res1, res2)
res1 = random.result.max()['_1']
res2 = max_.result['_1']
self.assertAlmostEqual(res1, res2)
def _query_min_max_impl(self, random, t_min, t_max, s):
min_value = Constant(table=t_min, scheduler=s)
max_value = Constant(table=t_max, scheduler=s)
range_qry = RangeQuery(column='_1', scheduler=s)
range_qry.create_dependent_modules(random, 'result',
min_value=min_value,
max_value=max_value)
prt = Print(proc=self.terse, scheduler=s)
prt.input[0] = range_qry.output.result
prt2 = Print(proc=self.terse, scheduler=s)
prt2.input[0] = range_qry.output.min
pr3 = Print(proc=self.terse, scheduler=s)
pr3.input[0] = range_qry.output.max
return range_qry
def test_range_query_min_max(self):
"Test min and max on RangeQuery output"
s = self.scheduler()
with s:
random = RandomTable(2, rows=100000, scheduler=s)
t_min = PsDict({'_1': 0.3})
t_max = PsDict({'_1': 0.8})
range_qry = self._query_min_max_impl(random, t_min, t_max, s)
aio.run(s.start())
min_data = range_qry.output.min.data()
max_data = range_qry.output.max.data()
self.assertAlmostEqual(min_data['_1'], 0.3)
self.assertAlmostEqual(max_data['_1'], 0.8)
def test_range_query_min_max2(self):
"Test min and max on RangeQuery output"
s = self.scheduler()
with s:
random = RandomTable(2, rows=100000, scheduler=s)
t_min = PsDict({'_1': 0.0})
t_max = PsDict({'_1': float('nan')})
range_qry = self._query_min_max_impl(random, t_min, t_max, s)
aio.run(s.start())
min_data = range_qry.output.min.data()
max_data = range_qry.output.max.data()
min_rand = random.result.min()['_1']
self.assertAlmostEqual(min_data['_1'], min_rand, delta=0.0001)
self.assertAlmostEqual(max_data['_1'], 1.0, delta=0.0001)
def test_range_query_min_max3(self):
"Test min and max on RangeQuery output"
s = self.scheduler()
with s:
random = RandomTable(2, rows=100000, scheduler=s)
t_min = PsDict({'_1': 0.3})
t_max = PsDict({'_1': 15000.})
range_qry = self._query_min_max_impl(random, t_min, t_max, s)
aio.run(s.start())
min_data = range_qry.output.min.data()
max_data = range_qry.output.max.data()
max_rand = random.result.max()['_1']
self.assertAlmostEqual(min_data['_1'], 0.3)
self.assertAlmostEqual(max_data['_1'], max_rand)
if __name__ == '__main__':
main()
```
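The `eval('(_1>0.3)&(_1<0.8)', result_object='index')` ground truth used above is simply a boolean mask over one column. A minimal NumPy sketch of that filter, with random data standing in for column '_1':

```python
import numpy as np

col = np.random.rand(1000)                 # stand-in for column '_1'
mask = (col > 0.3) & (col < 0.8)
selected = np.nonzero(mask)[0]             # indices the bitmap comparison checks
```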
#### File: progressivis/tests/test_03_repair_min_max.py
```python
from . import ProgressiveTest, skip, skipIf
from progressivis import Print, Scheduler
from progressivis.table.module import TableModule
from progressivis.table.table import Table
from progressivis.core.slot import SlotDescriptor
from progressivis.stats import RandomTable, ScalarMax, ScalarMin
from progressivis.table.stirrer import Stirrer
from progressivis.core.bitmap import bitmap
from progressivis.core import aio
from progressivis.core.utils import indices_len, fix_loc
import numpy as np
ScalarMax._reset_calls_counter = 0
ScalarMax._orig_reset = ScalarMax.reset
def _reset_func(self_):
ScalarMax._reset_calls_counter += 1
return ScalarMax._orig_reset(self_)
ScalarMax.reset = _reset_func
ScalarMin._reset_calls_counter = 0
ScalarMin._orig_reset = ScalarMin.reset
def _reset_func(self_):
ScalarMin._reset_calls_counter += 1
return ScalarMin._orig_reset(self_)
ScalarMin.reset = _reset_func
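# reset() is wrapped above purely for instrumentation: each test asserts on the
# counter to check whether ScalarMin/ScalarMax had to rescan its input (counter 1)
# or could repair its result incrementally (counter 0) after the stirrer's edits.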
class MyStirrer(TableModule):
inputs = [SlotDescriptor('table', type=Table, required=True)]
def __init__(self, watched, proc_sensitive=True, mode='delete', value=9999.0, **kwds):
super().__init__(**kwds)
self.watched = watched
self.proc_sensitive = proc_sensitive
self.mode = mode
self.default_step_size = 100
self.value = value
self.done = False
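    # Copies its input table, then perturbs it exactly once: 'delete' mode drops
    # rows, 'update' mode overwrites column 0 with `value`; `proc_sensitive`
    # decides whether the touched ids are those the watched Min/Max module is
    # currently sensitive to, or up to ten other rows.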
def run_step(self, run_number, step_size, howlong):
input_slot = self.get_input_slot('table')
# input_slot.update(run_number)
steps = 0
if not input_slot.created.any():
return self._return_run_step(self.state_blocked, steps_run=0)
created = input_slot.created.next(step_size)
steps = indices_len(created)
input_table = input_slot.data()
if self.result is None:
self.result = Table(self.generate_table_name('stirrer'),
dshape=input_table.dshape, )
v = input_table.loc[fix_loc(created), :]
self.result.append(v)
if not self.done:
sensitive_ids = bitmap(self.scheduler().modules()[self.watched]._sensitive_ids.values())
if sensitive_ids:
if self.proc_sensitive:
if self.mode == 'delete':
#print('delete sensitive', sensitive_ids)
del self.result.loc[sensitive_ids]
else:
#print('update sensitive', sensitive_ids)
self.result.loc[sensitive_ids, 0] = self.value
self.done = True
else: # non sensitive
if len(self.result) > 10:
for i in range(10):
id_ = self.result.index[i]
if id_ not in sensitive_ids:
if self.mode == 'delete':
del self.result.loc[id_]
else:
self.result.loc[id_, 0] = self.value
self.done = True
return self._return_run_step(self.next_state(input_slot),
steps_run=steps)
#@skip
class TestRepairMax(ProgressiveTest):
def test_repair_max(self):
"""
test_repair_max()
max without deletes/updates
"""
s=Scheduler()
random = RandomTable(2, rows=100000, scheduler=s)
max_=ScalarMax(name='max_'+str(hash(random)), scheduler=s)
max_.input[0] = random.output.result
pr=Print(proc=self.terse, scheduler=s)
pr.input[0] = max_.output.result
aio.run(s.start())
res1 = random.result.max()
res2 = max_.result
self.compare(res1, res2)
def test_repair_max2(self):
"""
test_repair_max2()
runs with sensitive ids deletion
"""
s=Scheduler()
ScalarMax._reset_calls_counter = 0
random = RandomTable(2, rows=100000, scheduler=s)
max_=ScalarMax(name='max_repair_test2', scheduler=s)
stirrer = MyStirrer(watched='max_repair_test2', scheduler=s)
stirrer.input[0] = random.output.result
max_.input[0] = stirrer.output.result
pr=Print(proc=self.terse, scheduler=s)
pr.input[0] = max_.output.result
aio.run(s.start())
self.assertEqual(ScalarMax._reset_calls_counter, 1)
res1 = stirrer.result.max()
res2 = max_.result
self.compare(res1, res2)
def test_repair_max3(self):
"""
test_repair_max3()
runs with NON-sensitive ids deletion
"""
s=Scheduler()
ScalarMax._reset_calls_counter = 0
random = RandomTable(2, rows=100000, scheduler=s)
max_=ScalarMax(name='max_repair_test3', scheduler=s)
stirrer = MyStirrer(watched='max_repair_test3', proc_sensitive=False, scheduler=s)
stirrer.input[0] = random.output.result
max_.input[0] = stirrer.output.result
pr=Print(proc=self.terse, scheduler=s)
pr.input[0] = max_.output.result
aio.run(s.start())
self.assertEqual(ScalarMax._reset_calls_counter, 0)
res1 = stirrer.result.max()
res2 = max_.result
self.compare(res1, res2)
def test_repair_max4(self):
"""
test_repair_max4()
runs with sensitive ids update
"""
s=Scheduler()
ScalarMax._reset_calls_counter = 0
random = RandomTable(2, rows=100000, scheduler=s)
max_=ScalarMax(name='max_repair_test4', scheduler=s)
stirrer = MyStirrer(watched='max_repair_test4', mode='update', value=9999.0, scheduler=s)
stirrer.input[0] = random.output.result
max_.input[0] = stirrer.output.result
pr=Print(proc=self.terse, scheduler=s)
pr.input[0] = max_.output.result
aio.run(s.start())
self.assertEqual(ScalarMax._reset_calls_counter, 0)
res1 = stirrer.result.max()
res2 = max_.result
self.compare(res1, res2)
def test_repair_max5(self):
"""
test_repair_max5()
runs with sensitive ids update (critical)
"""
s=Scheduler()
ScalarMax._reset_calls_counter = 0
random = RandomTable(2, rows=100000, scheduler=s)
max_=ScalarMax(name='max_repair_test4', scheduler=s)
stirrer = MyStirrer(watched='max_repair_test4', mode='update', value=-9999.0, scheduler=s)
stirrer.input[0] = random.output.result
max_.input[0] = stirrer.output.result
pr=Print(proc=self.terse, scheduler=s)
pr.input[0] = max_.output.result
aio.run(s.start())
self.assertEqual(ScalarMax._reset_calls_counter, 1)
res1 = stirrer.result.max()
res2 = max_.result
self.compare(res1, res2)
def test_repair_max6(self):
"""
test_repair_max6()
runs with NON-sensitive ids updates
"""
s=Scheduler()
ScalarMax._reset_calls_counter = 0
random = RandomTable(2, rows=100000, scheduler=s)
max_=ScalarMax(name='max_repair_test5', scheduler=s)
stirrer = MyStirrer(watched='max_repair_test5', proc_sensitive=False,
mode='update', scheduler=s)
stirrer.input[0] = random.output.result
max_.input[0] = stirrer.output.result
pr=Print(proc=self.terse, scheduler=s)
pr.input[0] = max_.output.result
aio.run(s.start())
self.assertEqual(ScalarMax._reset_calls_counter, 0)
res1 = stirrer.result.max()
res2 = max_.result
self.compare(res1, res2)
def compare(self, res1, res2):
v1 = np.array(list(res1.values()))
v2 = np.array(list(res2.values()))
#print('v1 = ', v1, res1.keys())
#print('v2 = ', v2, res2.keys())
self.assertTrue(np.allclose(v1, v2))
class TestRepairMin(ProgressiveTest):
def test_repair_min(self):
"""
test_repair_min()
min without deletes/updates
"""
s=Scheduler()
random = RandomTable(2, rows=100000, scheduler=s)
min_=ScalarMin(name='min_'+str(hash(random)), scheduler=s)
min_.input[0] = random.output.result
pr=Print(proc=self.terse, scheduler=s)
pr.input[0] = min_.output.result
aio.run(s.start())
res1 = random.result.min()
res2 = min_.result
self.compare(res1, res2)
def test_repair_min2(self):
"""
test_repair_min2()
runs with sensitive ids deletion
"""
s=Scheduler()
ScalarMin._reset_calls_counter = 0
random = RandomTable(2, rows=100000, scheduler=s)
min_=ScalarMin(name='min_repair_test2', scheduler=s)
stirrer = MyStirrer(watched='min_repair_test2', scheduler=s)
stirrer.input[0] = random.output.result
min_.input[0] = stirrer.output.result
pr=Print(proc=self.terse, scheduler=s)
pr.input[0] = min_.output.result
aio.run(s.start())
self.assertEqual(ScalarMin._reset_calls_counter, 1)
res1 = stirrer.result.min()
res2 = min_.result
self.compare(res1, res2)
def test_repair_min3(self):
"""
test_repair_min3()
runs with NON-sensitive ids deletion
"""
s=Scheduler()
ScalarMin._reset_calls_counter = 0
random = RandomTable(2, rows=100000, scheduler=s)
min_=ScalarMin(name='min_repair_test3', scheduler=s)
stirrer = MyStirrer(watched='min_repair_test3', proc_sensitive=False, scheduler=s)
stirrer.input[0] = random.output.result
min_.input[0] = stirrer.output.result
pr=Print(proc=self.terse, scheduler=s)
pr.input[0] = min_.output.result
aio.run(s.start())
self.assertEqual(ScalarMin._reset_calls_counter, 0)
res1 = stirrer.result.min()
res2 = min_.result
self.compare(res1, res2)
def test_repair_min4(self):
"""
test_repair_min4()
runs with sensitive ids update
"""
s=Scheduler()
ScalarMin._reset_calls_counter = 0
random = RandomTable(2, rows=100000, scheduler=s)
min_=ScalarMin(name='min_repair_test4', scheduler=s)
stirrer = MyStirrer(watched='min_repair_test4', mode='update', value=-9999.0, scheduler=s)
stirrer.input[0] = random.output.result
min_.input[0] = stirrer.output.result
pr=Print(proc=self.terse, scheduler=s)
pr.input[0] = min_.output.result
aio.run(s.start())
self.assertEqual(ScalarMin._reset_calls_counter, 0)
res1 = stirrer.result.min()
res2 = min_.result
self.compare(res1, res2)
def test_repair_min5(self):
"""
test_repair_min5()
runs with sensitive ids update (critical)
"""
s=Scheduler()
ScalarMin._reset_calls_counter = 0
random = RandomTable(2, rows=100000, scheduler=s)
min_=ScalarMin(name='min_repair_test4', scheduler=s)
stirrer = MyStirrer(watched='min_repair_test4', mode='update', value=9999.0, scheduler=s)
stirrer.input[0] = random.output.result
min_.input[0] = stirrer.output.result
pr=Print(proc=self.terse, scheduler=s)
pr.input[0] = min_.output.result
aio.run(s.start())
self.assertEqual(ScalarMin._reset_calls_counter, 1)
res1 = stirrer.result.min()
res2 = min_.result
self.compare(res1, res2)
def test_repair_min6(self):
"""
test_repair_min6()
runs with NON-sensitive ids updates
"""
s=Scheduler()
ScalarMin._reset_calls_counter = 0
random = RandomTable(2, rows=100000, scheduler=s)
min_=ScalarMin(name='min_repair_test5', scheduler=s)
stirrer = MyStirrer(watched='min_repair_test5', proc_sensitive=False,
mode='update', scheduler=s)
stirrer.input[0] = random.output.result
min_.input[0] = stirrer.output.result
pr=Print(proc=self.terse, scheduler=s)
pr.input[0] = min_.output.result
aio.run(s.start())
self.assertEqual(ScalarMin._reset_calls_counter, 0)
res1 = stirrer.result.min()
res2 = min_.result
self.compare(res1, res2)
def compare(self, res1, res2):
v1 = np.array(list(res1.values()))
v2 = np.array(list(res2.values()))
#print('v1 = ', v1, res1.keys())
#print('v2 = ', v2, res2.keys())
self.assertTrue(np.allclose(v1, v2))
if __name__ == '__main__':
unittest.main()
```
#### File: progressivis/tests/test_04_mb_k_means.py
```python
from . import ProgressiveTest, skip
from progressivis import Print, Every #, log_level
from progressivis.cluster import MBKMeans
from progressivis.io import CSVLoader
from progressivis.datasets import get_dataset
from progressivis.core import aio
#from sklearn.cluster import MiniBatchKMeans
#from sklearn.utils.extmath import squared_norm
#import numpy as np
#import pandas as pd
# times = 0
# def stop_if_done(s, n):
# global times
# if s.run_queue_length()==3:
# if times==2:
# s.stop()
# times += 1
@skip
class TestMBKmeans(ProgressiveTest):
def test_mb_k_means(self):
#log_level()
s = self.scheduler()
n_clusters = 3
with s:
csv = CSVLoader(get_dataset('cluster:s3'),
sep=' ',
skipinitialspace=True,
header=None,
index_col=False,
scheduler=s)
km = MBKMeans(n_clusters=n_clusters, random_state=42,
is_input=False, is_greedy=False, scheduler=s)
#km.input.table = csv.output.result
km.create_dependent_modules(csv)
pr = Print(proc=self.terse, scheduler=s)
pr.input[0] = km.output.result
e = Every(proc=self.terse, scheduler=s)
e.input[0] = km.output.labels
aio.run(s.start())
# s.join()
self.assertEqual(len(csv.result), len(km.labels()))
# mbk = MiniBatchKMeans(n_clusters=n_clusters, random_state=42, verbose=True)
# X = csv.df()[km.columns]
# mbk.partial_fit(X)
# print mbk.cluster_centers_
# print km.mbk.cluster_centers_
# self.assertTrue(np.allclose(mbk.cluster_centers_, km.mbk.cluster_centers_))
if __name__ == '__main__':
ProgressiveTest.main()
```
#### File: progressivis/tests/test_04_mds.py
```python
from . import ProgressiveTest, skip
from progressivis import Every
from progressivis.io import CSVLoader
from progressivis.metrics import PairwiseDistances
from progressivis.datasets import get_dataset
from progressivis.core import aio
import logging
def print_len(x):
if x is not None:
print(x.shape)
times = 0
def ten_times(scheduler, run_number):
global times
times += 1
if times > 10:
scheduler.stop()
class TestMDS(ProgressiveTest):
# def test_MDS_vec(self):
# vec=VECLoader(get_dataset('warlogs'))
# dis=PairwiseDistances(metric='cosine')
# dis.input[0] = vec.output.df
# dis.input.array = vec.output.array
# cnt = Every(proc=print_len,constant_time=True)
# cnt.input[0] = dis.output.df
# vec.start()
@skip("Need to implement MDS on tables")
def test_MDS_csv(self):
s= self.scheduler()
vec=CSVLoader(get_dataset('smallfile'),index_col=False,header=None,scheduler=s)
dis=PairwiseDistances(metric='euclidean',scheduler=s)
dis.input[0] = vec.output.df
cnt = Every(proc=self.terse, constant_time=True,scheduler=s)
cnt.input[0] = dis.output.dist
global times
times = 0
aio.run(s.start(ten_times))
if __name__ == '__main__':
ProgressiveTest.main()
```
#### File: progressivis_nb_widgets/nbwidgets/module_wg.py
```python
import ipywidgets as ipw
from .utils import update_widget
from .slot_wg import SlotWg
from .json_html import JsonHTML
debug_console = None # ipw.Output()
"""
{"table": [{"output_name": "table", "output_module": "csv_loader_1", "input_name": "df", "input_module": "every_1"}, {"output_name": "table", "output_module": "csv_loader_1", "input_name": "table", "input_module": "histogram_index_1"}, {"output_name": "table", "output_module": "csv_loader_1", "input_name": "table", "input_module": "histogram_index_2"}, {"output_name": "table", "output_module": "csv_loader_1", "input_name": "table", "input_module": "histogram_index_3"}, {"output_name": "table", "output_module": "csv_loader_1", "input_name": "table", "input_module": "histogram_index_4"}], "_trace": null}
## range_query_2d
{"min": [{"output_name": "min", "output_module": "range_query2d_2", "input_name": "table.00.03", "input_module": "mc_histogram2_d_1"}, {"output_name": "min", "output_module": "range_query2d_2", "input_name": "table.00.03", "input_module": "mc_histogram2_d_2"}], "max": [{"output_name": "max", "output_module": "range_query2d_2", "input_name": "table.00.04", "input_module": "mc_histogram2_d_1"}, {"output_name": "max", "output_module": "range_query2d_2", "input_name": "table.00.04", "input_module": "mc_histogram2_d_2"}], "table": [{"output_name": "table", "output_module": "range_query2d_2", "input_name": "data", "input_module": "mc_histogram2_d_2"}, {"output_name": "table", "output_module": "range_query2d_2", "input_name": "table", "input_module": "sample_2"}], "_trace": null}
"""
class ModuleWg(ipw.Tab): # pylint: disable=too-many-ancestors
def __init__(self, board, dconsole=None):
global debug_console # pylint: disable=global-statement
debug_console = dconsole
self._index = board
self._main = JsonHTML()
self.module_name = None
self.selection_changed = False
self._output_slots = ipw.Tab()
super().__init__([self._main, self._output_slots])
self.set_title(0, 'Main')
self.set_title(1, 'Output slots')
async def refresh(self):
_idx = self._index
# pylint: disable=protected-access
json_ = _idx._cache_js
assert json_ is not None
module_json = None
m = None
for i, m in enumerate(json_['modules']):
if m['id'] == self.module_name:
module_json = m
break
assert module_json is not None
self.set_title(0, self.module_name)
await update_widget(self._main, 'data', m)
await update_widget(self._main, 'config',
dict(order=["classname",
"speed",
"debug",
"state",
"last_update",
"default_step_size",
"start_time",
"end_time",
"parameters",
"input_slots"],
sparkline=["speed"]))
_selected_index = 0
module = self._index.scheduler.modules()[self.module_name]
if module is None:
return
if self.selection_changed or not self._output_slots.children:
# first refresh
self.selection_changed = False
self.selected_index = 0
slots = [SlotWg(module, sl)
for sl in m["output_slots"].keys()]+[SlotWg(module,
'_params')]
await update_widget(self._output_slots, 'children', slots)
if module.name in self._index.vis_register:
for wg, label in self._index.vis_register[module.name]:
self.children = (wg,) + self.children
self.set_title(0, label)
self.set_title(1, 'Main')
self.set_title(2, 'Output slots')
elif len(self.children) > 2:
self.children = self.children[1:]
self.set_title(0, module.name)
self.set_title(1, 'Output slots')
else:
_selected_index = self._output_slots.selected_index
for i, k in enumerate(m["output_slots"].keys()):
self._output_slots.set_title(i, k)
await self._output_slots.children[i].refresh()
i += 1
self._output_slots.set_title(i, '_params')
await self._output_slots.children[i].refresh()
self._output_slots.selected_index = _selected_index
```
|
{
"source": "jdfoote/mesa",
"score": 2
}
|
#### File: virus_on_network/virus_on_network/server.py
```python
import math
from mesa.visualization.ModularVisualization import ModularServer
from mesa.visualization.UserParam import UserSettableParameter
from mesa.visualization.modules import ChartModule
from mesa.visualization.modules import NetworkModule
from mesa.visualization.modules import TextElement
from .model import VirusOnNetwork, State, number_infected
def network_portrayal(G):
# The model ensures there is always 1 agent per node
def node_color(agent):
return {State.INFECTED: "#FF0000", State.SUSCEPTIBLE: "#008000"}.get(
agent.state, "#808080"
)
def edge_color(agent1, agent2):
if State.RESISTANT in (agent1.state, agent2.state):
return "#000000"
return "#e8e8e8"
def edge_width(agent1, agent2):
if State.RESISTANT in (agent1.state, agent2.state):
return 3
return 2
def get_agents(source, target):
return G.nodes[source]["agent"][0], G.nodes[target]["agent"][0]
portrayal = dict()
portrayal["nodes"] = [
{
"size": 6,
"color": node_color(agents[0]),
"tooltip": f"id: {agents[0].unique_id}<br>state: {agents[0].state.name}",
}
for (_, agents) in G.nodes.data("agent")
]
portrayal["edges"] = [
{
"source": source,
"target": target,
"color": edge_color(*get_agents(source, target)),
"width": edge_width(*get_agents(source, target)),
}
for (source, target) in G.edges
]
return portrayal
network = NetworkModule(network_portrayal, 500, 500, library="d3")
chart = ChartModule(
[
{"Label": "Infected", "Color": "#FF0000"},
{"Label": "Susceptible", "Color": "#008000"},
{"Label": "Resistant", "Color": "#808080"},
]
)
class MyTextElement(TextElement):
def render(self, model):
ratio = model.resistant_susceptible_ratio()
ratio_text = "∞" if ratio is math.inf else f"{ratio:.2f}"
infected_text = str(number_infected(model))
return "Resistant/Susceptible Ratio: {}<br>Infected Remaining: {}".format(
ratio_text, infected_text
)
model_params = {
"num_nodes": UserSettableParameter(
"slider",
"Number of agents",
10,
10,
100,
1,
description="Choose how many agents to include in the model",
),
"avg_node_degree": UserSettableParameter(
"slider", "Avg Node Degree", 3, 3, 8, 1, description="Avg Node Degree"
),
"initial_outbreak_size": UserSettableParameter(
"slider",
"Initial Outbreak Size",
1,
1,
10,
1,
description="Initial Outbreak Size",
),
"virus_spread_chance": UserSettableParameter(
"slider",
"Virus Spread Chance",
0.4,
0.0,
1.0,
0.1,
description="Probability that susceptible neighbor will be infected",
),
"virus_check_frequency": UserSettableParameter(
"slider",
"Virus Check Frequency",
0.4,
0.0,
1.0,
0.1,
description="Frequency the nodes check whether they are infected by " "a virus",
),
"recovery_chance": UserSettableParameter(
"slider",
"Recovery Chance",
0.3,
0.0,
1.0,
0.1,
description="Probability that the virus will be removed",
),
"gain_resistance_chance": UserSettableParameter(
"slider",
"Gain Resistance Chance",
0.5,
0.0,
1.0,
0.1,
description="Probability that a recovered agent will become "
"resistant to this virus in the future",
),
}
server = ModularServer(
VirusOnNetwork, [network, MyTextElement(), chart], "Virus Model", model_params
)
server.port = 8521
```
|
{
"source": "jdfoster/boids",
"score": 2
}
|
#### File: boids/boids/controller.py
```python
from boids.builder import BuildBoids
from boids.viewer import ViewBoids
from boids.boid_exceptions import BoidExceptions
from matplotlib import animation
class ControlBoids(BoidExceptions):
def __init__(self, settings):
builder = BuildBoids()
builder.set_location_ranges(**settings.pop('location_range'))
builder.set_velocity_ranges(**settings.pop('velocity_range'))
builder.set_flock_parameters(**settings.pop('flock_parameters'))
builder.generate_boids()
self.boids = builder.finish()
self.view = ViewBoids(self.boids, **settings.pop('boundary_limits'))
self._set_animation_settings(**settings.pop('animation_settings'))
def animate_boid(frame_number):
self.boids.update_boids()
self.view.update_plt()
self.animator = animate_boid
@BoidExceptions._check_set_animation_settings
def _set_animation_settings(self, frames, interval):
self.anim_frames = frames
self.anim_interval = interval
def run_animation(self):
anim = animation.FuncAnimation(self.view.figure, self.animator,
frames=self.anim_frames,
interval=self.anim_interval,
repeat=0)
return anim
```
#### File: boids/boids/model.py
```python
import numpy as np
from boids.boid_exceptions import BoidExceptions
class Boid(BoidExceptions):
@BoidExceptions._check_boid_init
def __init__(self, x, y, xv, yv, host):
self.location = np.array([x, y])
self.velocity = np.array([xv, yv])
self.host = host
def shift_boid(self, other):
location_delta = np.array([0., 0.])
location_separation = other.location - self.location
sum_of_squares = np.sum(location_separation**2)
# Fly towards the middle
location_delta += (location_separation *
self.host.flock_attraction)
# Fly away from nearby boids
if sum_of_squares < self.host.avoid_radius:
location_delta -= location_separation
# Try to match speed with nearby boids
if sum_of_squares < self.host.flock_radius:
location_delta += ((other.velocity - self.velocity) *
self.host.velocity_matching)
return location_delta
class Boids(object):
def update_boids(self):
for protagonist in self.flock:
location_delta = np.array([0., 0.])
for antagonist in self.flock:
shift_values = protagonist.shift_boid(antagonist)
location_delta += shift_values
# Adjust velocities from interaction
protagonist.velocity += location_delta
# Move according to velocities
protagonist.location += protagonist.velocity
@property
def current_locations(self):
return np.vstack([boid.location for boid in self.flock])
@property
def current_velocities(self):
return np.vstack([boid.velocity for boid in self.flock])
```
#### File: boids/tests/test_builder.py
```python
from ..builder import BuildBoids
from ..model import Boids
from boids.tests.generate_fixtures import generate_broken_flock_param, \
generate_broken_limits, negative_fixture_check
from nose.tools import assert_equal, raises
from numpy import linspace
from numpy.testing import assert_array_less
def test_generate_boids():
builder = BuildBoids()
builder.set_flock_parameters(50, 0.01, 100, 10000, 0.125)
builder.set_velocity_ranges([0.0, 10.0], [-20.0, 20.0])
builder.set_location_ranges([-450.0, 50.0], [300.0, 600.0])
builder.generate_boids()
boids = builder.finish()
assert_equal(len(boids.flock), 50)
for bd in boids.flock:
assert_array_less(bd.location, [50, 600])
assert_array_less([-450, 300], bd.location)
assert_array_less(bd.velocity, [10, 20])
assert_array_less([0, -20], bd.velocity)
def test_generate_from_file():
test_data = [linspace(0, 3, 4)] * 4
builder = BuildBoids()
builder.generate_from_file(test_data)
assert(len(builder.model.flock) == 4)
assert(builder.model.current_locations.cumsum()[-1] == 12)
def test_validate():
builder = BuildBoids()
@raises(Exception)
def validate_fail():
builder.validate()
validate_fail()
builder.set_flock_parameters(50, 0.01, 100, 10000, 0.125)
builder.set_velocity_ranges([0.0, 10.0], [-20.0, 20.0])
builder.set_location_ranges([-450.0, 50.0], [300.0, 600.0])
validate_fail()
builder.generate_boids()
builder.validate()
def test_set_location():
builder = BuildBoids()
builder.set_location_ranges([2.1, 2.2], [4.1, 4.2])
assert(builder.location_x_limits == [2.1, 2.2])
assert(builder.location_y_limits == [4.1, 4.2])
def test_set_location_fail():
fixtures = generate_broken_limits()
builder = BuildBoids()
negative_fixture_check(builder.set_location_ranges,
fixtures, TypeError)
def test_set_velocity():
builder = BuildBoids()
builder.set_velocity_ranges([2.1, 2.2], [4.1, 4.2])
assert(builder.velocity_x_limits == [2.1, 2.2])
assert(builder.velocity_y_limits == [4.1, 4.2])
def test_set_velocity_fail():
fixtures = generate_broken_limits()
builder = BuildBoids()
negative_fixture_check(builder.set_velocity_ranges,
fixtures, TypeError)
def test_set_flock_parameters():
test_data = {'boid_count': 2, 'flock_attraction': 4.2,
'avoid_radius': 6, 'flock_radius': 8,
'velocity_matching': 10.2}
builder = BuildBoids()
builder.set_flock_parameters(**test_data)
assert(builder.model.boid_count == 2)
assert(builder.model.flock_attraction == 2.1)
assert(builder.model.avoid_radius == 6)
assert(builder.model.flock_radius == 8)
assert(builder.model.velocity_matching == 5.1)
def test_set_flock_parameters_fails():
fixtures = generate_broken_flock_param()
builder = BuildBoids()
negative_fixture_check(builder.set_flock_parameters,
fixtures, TypeError)
def test_finish():
builder = BuildBoids()
builder.set_flock_parameters(50, 0.01, 100, 10000, 0.125)
builder.set_velocity_ranges([0.0, 10.0], [-20.0, 20.0])
builder.set_location_ranges([-450.0, 50.0], [300.0, 600.0])
builder.generate_boids()
returned_value = builder.finish()
assert(isinstance(returned_value, Boids))
assert(hasattr(returned_value, 'boid_count'))
assert(hasattr(returned_value, 'flock_attraction'))
assert(hasattr(returned_value, 'avoid_radius'))
assert(hasattr(returned_value, 'flock_radius'))
assert(hasattr(returned_value, 'velocity_matching'))
assert(hasattr(returned_value, 'flock'))
```
#### File: boids/boids/viewer.py
```python
from boids.boid_exceptions import BoidExceptions
from matplotlib import pyplot as plt
class ViewBoids(BoidExceptions):
@BoidExceptions._check_xy_limits
def __init__(self, model, x_limits, y_limits):
self.model = model
self.figure = plt.figure()
axes = plt.axes(xlim=(x_limits), ylim=(y_limits))
axes.tick_params(axis='both', top='off', bottom='off',
left='off', right='off', labelbottom='off',
labelleft='off')
self.figure.tight_layout()
self.scatter = axes.scatter(self.model.current_locations[:, 0],
self.model.current_locations[:, 1])
def update_plt(self):
self.scatter.set_offsets(self.model.current_locations)
```
|
{
"source": "jdfr228/PSNDiscord",
"score": 2
}
|
#### File: jdfr228/PSNDiscord/PSNDiscord.py
```python
import discord
import asyncio
import time
import traceback
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException, NoSuchElementException
# Modify for your needs -------------------------------------------------------
url = "https://my.playstation.com/profile/YourName"
dataDirectory = "C:/PSNDiscord"
twitchUrl = "https://www.twitch.tv/YourName"
userToken = "<PASSWORD>"
hideChrome = False
noGameRefreshTime = 60 # WARNING - Setting below 15 seconds could violate Discord API ToS
inGameRefreshTime = 120 # WARNING
# NOTE - Sony servers only allow ~1.3 requests per minute regardless, so setting these
# lower than ~50 seconds will have limited practical effect
loadTime = 5
# -----------------------------------------------------------------------------
alreadyFetched = False
gameName = ""
oldGameName = ""
imageSrc = ""
gameUrl = ""
console = ""
driver = None
client = None
clientReady = False
refreshTime = 5
async def resetNowPlaying():
global gameName, oldGameName, imageSrc, gameUrl, console
gameName = ""
imageSrc = ""
gameUrl = ""
console = ""
if (oldGameName != gameName):
print("No longer playing a game")
await updatePresence()
oldGameName = ""
def chromeSetup():
global driver, dataDirectory, hideChrome
# Set up Chrome options
chrome_opts = Options()
chrome_opts.set_headless(headless=hideChrome) # Set up headless
chrome_opts.add_argument('no-sandbox') # --
chrome_opts.add_argument("user-data-dir=" + dataDirectory) # Set up session saving (cookies)
chrome_opts.add_argument("proxy-server=direct://") # Make headless not unbearably slow
chrome_opts.add_argument("proxy-bypass-list=*") # --
chrome_opts.add_argument("disable-extensions") # --
chrome_opts.add_argument("hide-scrollbars") # --
chrome_opts.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36")
chrome_opts.add_argument("log-level=3") # Hide console messages
print("Setting up web driver...")
driver = webdriver.Chrome(chrome_options=chrome_opts)
print("Web driver set up")
driver.get("http://google.com/")
async def refreshThread():
global refreshTime
while True:
await fetchPage()
await asyncio.sleep(refreshTime) # Wait between page fetches
async def fetchPage():
global gameName, oldGameName, imageSrc, gameUrl, console, alreadyFetched, refreshTime, noGameRefreshTime, inGameRefreshTime, loadTime
# Fetch page, or if it has already been fetched before, simply refresh
if (alreadyFetched):
driver.find_element_by_tag_name('body').send_keys(Keys.F5)
print("Page Refreshing " + time.strftime("%I:%M:%S", time.localtime()))
refreshTime = noGameRefreshTime
else:
driver.get(url)
print("Page Fetching " + time.strftime("%I:%M:%S", time.localtime()))
# Wait for page to load and fetch game info
try:
gameNameElem = WebDriverWait(driver, loadTime).until(EC.presence_of_element_located((By.CLASS_NAME, 'now-playing__details__line1__name'))) # Name
imageElem = WebDriverWait(driver, loadTime).until(EC.presence_of_element_located((By.CLASS_NAME, 'now-playing__thumbnail'))) # Image
gameUrlElem = WebDriverWait(driver, loadTime).until(EC.presence_of_element_located((By.CLASS_NAME, 'now-playing__details__store-link'))) # Url
consoleElem = WebDriverWait(driver, loadTime).until(EC.presence_of_element_located((By.CLASS_NAME, 'now-playing__details__line1'))) # Console
#if (gameNameElem is not None):
gameName = gameNameElem.text
# DOM differs between PS3/No Game and PS4 games
try:
imageSrc = imageElem.find_element_by_tag_name('img').get_attribute('src') # PS4
except:
try:
imageSrc = imageElem.find_element_by_tag_name('span').text # PS3 or no game
except:
print("Cannot resolve image")
gameUrl = gameUrlElem.find_element_by_tag_name('a').get_attribute('href')
console = consoleElem.find_element_by_tag_name('div').get_attribute('aria-label')
# If no game is being played reset now playing status
if (gameName == ""):
await resetNowPlaying()
# If all the try statements have executed and the game name is new, update Discord's status
elif (gameName != oldGameName):
print("Now playing a game")
if (alreadyFetched):
refreshTime = inGameRefreshTime # Wait longer between page refreshes when in-game
await updatePresence()
oldGameName = gameName
except TimeoutException:
print("Profile page timeout")
except AttributeError:
traceback.print_exc()
except NoSuchElementException:
print("Elements not found, or page has not loaded properly")
await resetNowPlaying()
alreadyFetched = True # Ensure the page simply refreshes the second time this is run
def runDiscord(client):
global userToken
@client.event
async def on_ready():
global clientReady
print('Logged into Discord as')
print(client.user.name)
print(client.user.id)
print('-----')
clientReady = True
client.run(userToken, bot=False)
# Called to update the "Now Playing" status in Discord
async def updatePresence():
global gameName, gameUrl, imageSrc, console, client, clientReady, twitchUrl
if (clientReady):
timestampsDict = {"start": int(time.time()) * 1000}
print("Updating Discord status...")
await client.change_presence(activity=discord.Activity(name=gameName,
url=twitchUrl,
type=discord.ActivityType.playing,
state="In-Game (" + console + ")",
timestamps=timestampsDict))
else:
print("Client not yet ready, cannot update status")
await resetNowPlaying()
def main():
global client
chromeSetup()
# Set up the thread for refreshing the webpage
loop = asyncio.get_event_loop()
loop.create_task(refreshThread())
# Set up Discord
client = discord.Client()
runDiscord(client)
main()
```
|
{
"source": "jdfreder/ipython-bootstrapbox",
"score": 3
}
|
#### File: ipython-bootstrapbox/bootstrapbox/bootstrapbox.py
```python
from IPython.html.widgets import Box
from IPython.utils.traitlets import Bool, Int, Unicode, CUnicode
class ResponsiveBox(Box):
_view_module = Unicode('nbextensions/bootstrapbox/bootstrapbox', sync=True)
visible_xs_block = Bool(False, sync=True)
visible_xs_inline = Bool(False, sync=True)
visible_xs_inline_block = Bool(False, sync=True)
visible_sm_block = Bool(False, sync=True)
visible_sm_inline = Bool(False, sync=True)
visible_sm_inline_block = Bool(False, sync=True)
visible_md_block = Bool(False, sync=True)
visible_md_inline = Bool(False, sync=True)
visible_md_inline_block = Bool(False, sync=True)
visible_lg_block = Bool(False, sync=True)
visible_lg_inline = Bool(False, sync=True)
visible_lg_inline_block = Bool(False, sync=True)
hidden_xs = Bool(False, sync=True)
hidden_sm = Bool(False, sync=True)
hidden_md = Bool(False, sync=True)
hidden_lg = Bool(False, sync=True)
visible_print_block = Bool(False, sync=True)
visible_print_inline = Bool(False, sync=True)
visible_print_inline_block = Bool(False, sync=True)
hidden_print = Bool(False, sync=True)
class BootstrapContainer(ResponsiveBox):
_view_name = Unicode('BootstrapContainerView', sync=True)
fluid = Bool(True, sync=True, help="Uses percents instead of pixels for column widths. Ensures proper proportions for key screen resolutions and devices.")
width = CUnicode('100%', sync=True)
class BootstrapRow(ResponsiveBox):
_view_name = Unicode('BootstrapRowView', sync=True)
class BootstrapCol(ResponsiveBox):
_view_name = Unicode('BootstrapColView', sync=True)
def _validate_change(self, name, old, new):
setattr(self, name, new if new is None else min(max(1, new), 12))
xs_width = Int(None, sync=True, allow_none=True)
sm_width = Int(None, sync=True, allow_none=True)
md_width = Int(None, sync=True, allow_none=True)
lg_width = Int(None, sync=True, allow_none=True)
_xs_width_changed = _validate_change
_sm_width_changed = _validate_change
_md_width_changed = _validate_change
_lg_width_changed = _validate_change
xs_offset = Int(None, sync=True, allow_none=True)
sm_offset = Int(None, sync=True, allow_none=True)
md_offset = Int(None, sync=True, allow_none=True)
lg_offset = Int(None, sync=True, allow_none=True)
_xs_offset_changed = _validate_change
_sm_offset_changed = _validate_change
_md_offset_changed = _validate_change
_lg_offset_changed = _validate_change
xs_push = Int(None, sync=True, allow_none=True)
sm_push = Int(None, sync=True, allow_none=True)
md_push = Int(None, sync=True, allow_none=True)
lg_push = Int(None, sync=True, allow_none=True)
_xs_push_changed = _validate_change
_sm_push_changed = _validate_change
_md_push_changed = _validate_change
_lg_push_changed = _validate_change
xs_pull = Int(None, sync=True, allow_none=True)
sm_pull = Int(None, sync=True, allow_none=True)
md_pull = Int(None, sync=True, allow_none=True)
lg_pull = Int(None, sync=True, allow_none=True)
_xs_pull_changed = _validate_change
_sm_pull_changed = _validate_change
_md_pull_changed = _validate_change
_lg_pull_changed = _validate_change
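# A hedged usage sketch (not part of the original module): it lays out two columns
# that stack on phones and sit side by side on desktops, using only the classes
# defined above plus the stock HTML widget.
def _example_layout():
    from IPython.html.widgets import HTML
    left = BootstrapCol(children=[HTML(value='left')], xs_width=12, md_width=8)
    right = BootstrapCol(children=[HTML(value='right')], xs_width=12, md_width=4)
    row = BootstrapRow(children=[left, right])
    return BootstrapContainer(children=[row], fluid=True)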
```
|
{
"source": "jdfreder/ipython-crossnotebook",
"score": 2
}
|
#### File: jdfreder/ipython-crossnotebook/setup.py
```python
try:
from setuptools import setup
from setuptools.command.install import install
except ImportError:
from distutils.core import setup
from distutils.core.command.install import install
class InstallCommand(install):
"""Install as noteboook extension"""
develop = False
def install_extension(self):
from os.path import dirname, abspath, join
from IPython.html.nbextensions import install_nbextension
from IPython.html.services.config import ConfigManager
print("Installing nbextension ...")
crossnotebook = join(dirname(abspath(__file__)), 'crossnotebook', 'js')
install_nbextension(crossnotebook, destination='crossnotebook', symlink=self.develop, user=True)
print("Enabling the extension ...")
cm = ConfigManager()
cm.update('notebook', {"load_extensions": {"crossnotebook/init": True}})
def run(self):
print "Installing Python module..."
install.run(self)
# Install Notebook extension
self.install_extension()
class DevelopCommand(InstallCommand):
"""Install as noteboook extension"""
develop = True
from glob import glob
setup(
name='crossnotebook',
version='0.1',
description='Plugin that enables multicell selection and copying and pasting cells between notebooks.',
author='<NAME>',
author_email='<EMAIL>',
license='New BSD License',
url='https://github.com/jdfreder/ipython-crossnotebook',
keywords='data visualization interactive interaction python ipython widgets widget',
classifiers=['Development Status :: 4 - Beta',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License'],
cmdclass={
'install': InstallCommand,
'develop': DevelopCommand,
}
)
```
|
{
"source": "jdfreder/ipython-d3networkx",
"score": 2
}
|
#### File: ipython-d3networkx/d3networkx/widget.py
```python
from IPython.html import widgets # Widget definitions
from IPython.utils.traitlets import Unicode, CInt, CFloat # Import the base Widget class and the traitlets Unicode class.
# Define our ForceDirectedGraph and its target model and default view.
class ForceDirectedGraph(widgets.DOMWidget):
_view_module = Unicode('nbextensions/d3networkx/widget', sync=True)
_view_name = Unicode('D3ForceDirectedGraphView', sync=True)
width = CInt(400, sync=True)
height = CInt(300, sync=True)
charge = CFloat(270., sync=True)
distance = CInt(30., sync=True)
strength = CInt(0.3, sync=True)
def __init__(self, eventful_graph, *pargs, **kwargs):
widgets.DOMWidget.__init__(self, *pargs, **kwargs)
self._eventful_graph = eventful_graph
self._send_dict_changes(eventful_graph.graph, 'graph')
self._send_dict_changes(eventful_graph.node, 'node')
self._send_dict_changes(eventful_graph.adj, 'adj')
def _ipython_display_(self, *pargs, **kwargs):
# Show the widget, then send the current state
widgets.DOMWidget._ipython_display_(self, *pargs, **kwargs)
for (key, value) in self._eventful_graph.graph.items():
self.send({'dict': 'graph', 'action': 'add', 'key': key, 'value': value})
for (key, value) in self._eventful_graph.node.items():
self.send({'dict': 'node', 'action': 'add', 'key': key, 'value': value})
for (key, value) in self._eventful_graph.adj.items():
self.send({'dict': 'adj', 'action': 'add', 'key': key, 'value': value})
def _send_dict_changes(self, eventful_dict, dict_name):
def key_add(key, value):
self.send({'dict': dict_name, 'action': 'add', 'key': key, 'value': value})
def key_set(key, value):
self.send({'dict': dict_name, 'action': 'set', 'key': key, 'value': value})
def key_del(key):
self.send({'dict': dict_name, 'action': 'del', 'key': key})
eventful_dict.on_add(key_add)
eventful_dict.on_set(key_set)
eventful_dict.on_del(key_del)
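# A hedged usage sketch (not part of the original module): it assumes the package's
# EventfulGraph companion class (a networkx-style graph whose graph/node/adj dicts
# emit the add/set/del events wired above) is importable as shown; adjust the import
# to wherever that class lives in your install.
def _example_widget():
    from d3networkx import EventfulGraph  # assumed import path
    g = EventfulGraph()
    widget = ForceDirectedGraph(g, width=600, height=400)
    g.add_node(1, fill='red')
    g.add_node(2, fill='blue')
    g.add_edge(1, 2)
    return widget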
```
|
{
"source": "jdfreder/notebook_cherry_picker",
"score": 3
}
|
#### File: jdfreder/notebook_cherry_picker/cherry_picking_preprocessor.py
```python
from copy import deepcopy
from IPython.nbconvert.preprocessors import Preprocessor
from IPython.utils.traitlets import Unicode
class CherryPickingPreprocessor(Preprocessor):
expression = Unicode('True', config=True, help="Cell tag expression.")
def preprocess(self, nb, resources):
# Loop through each cell, remove cells that dont match the query.
for worksheet in nb.worksheets:
remove_indicies = []
for index, cell in enumerate(worksheet.cells):
if not self.validate_cell_tags(cell):
remove_indicies.append(index)
for index in remove_indicies[::-1]:
del worksheet.cells[index]
resources['notebook_copy'] = deepcopy(nb)
return nb, resources
def validate_cell_tags(self, cell):
if 'cell_tags' in cell['metadata']:
return self.eval_tag_expression(cell['metadata']['cell_tags'], self.expression)
return False
def eval_tag_expression(self, tags, expression):
# Create the tags as True booleans. This allows us to use python
# expressions.
for tag in tags:
exec tag + " = True"
# Attempt to evaluate expression. If a variable is undefined, define
# the variable as false.
while True:
try:
return eval(expression)
except NameError as Error:
exec str(Error).split("'")[1] + " = False"
```
|
{
"source": "jdfr/SimpleRefactoring",
"score": 2
}
|
#### File: jdfr/SimpleRefactoring/grokscrap.py
```python
from lxml import etree
import requests
class GrokScraper:
__slots__ = ['url', 'domain', 'proj', 'path', 'nresults', 'errors', 'getRevisionText']
def __init__(self, **kwargs):
#different grok versions might place the revision data in a different part of the DOM relative to the line elements, so use this as a means to customize the extraction code
self.getRevisionText = lambda occurrence: occurrence.getnext().iterchildren().__next__()
for name, val in kwargs.iteritems():
if name in self.__slots__:
setattr(self, name, val)
else:
raise RuntimeError('Invalid attribute <%s> for class GrokScraper!' % str(name))
def getOcurrences(self, term):
table = dict()
if self.domain != '':
domain = '/' + self.domain
else:
domain = ''
url = "%s%s/search?q=%s&project=%s&defs=&path=%s&hist=&n=%d" % (self.url, domain, term, self.proj, self.path, self.nresults)
r = requests.get(url)
if r.ok:
root = etree.HTML(r.text.encode('ascii', errors='replace'))
xp = ".//tt/a/b[text()='%s']/.." % term
occs = root.xpath(xp)
for occ in occs:
encoded = occ.attrib['href']
split = encoded.split('#', 1)
path = split[0]
line = int(split[1])
if path in table:
table[path].add(line)
else:
table[path] = set([line])
else:
handleError(url, r, self.errors)
return table
def getRevisions(self, table):
revisions = dict()
for f, lines in table.iteritems():
            #This assumes that grok is responsive serving annotated sources for all affected files. This might not be the case for some files, and these should be filtered out here
url = "%s%s?a=true" %(self.url, f)
r = requests.get(url)
if r.ok:
root = etree.HTML(r.text.encode('ascii', errors='replace'))
for line in lines:
xp = ".//a[@class='l' and @name='%d']" % line
occs = root.xpath(xp)
if (len(occs)==0):
#this is cheaper than using a more complex boolean condition like ".//a[(@class='l' or @class='hl') and @name='%d']"
xp = ".//a[@class='hl' and @name='%d']" % line
occs = root.xpath(xp)
#I wouldn't need this contraption if I could express in xpath how to locate an element relative to an adjacent one, something like ".//a[@class='l' and @name='%d']/following::span/a"
occ = self.getRevisionText(occs[0])
rev = occ.text
comment = occ.attrib['title']
if not rev in revisions:
revisions[rev] = comment.encode('utf-8').replace('\x0a', ' ').replace('<br/>', '\n')
else:
handleError(url, r, self.errors)
return revisions
def handleError(url, r, errors):
if errors!='ignore':
msg = "Error retrieving grok data for <%s>: %d %s" % (url, r.status_code, r.reason)
if errors=='raise':
            raise RuntimeError(msg)
elif errors=='print':
print msg
def printOcurrences(table):
print "NUMBER OF FILES: %d\n" % len(table)
for f in sorted(table.keys()):
lines = list(table[f])
lines.sort()
print "occurences in file <%s> in lines:\n %s" % (f, str(lines))
def printRevisions(revisions):
print "NUMBER OF REVISIONS: %d\n" % len(revisions)
i = 0
for rev, comment in revisions.iteritems():
print "---------------------------"
print "REVISION %d: %s" % (i, rev)
print comment
print "\n"
i=i+1
print "---------------------------"
#########################################################
if __name__=='__main__':
def BXR_SU():
gs = GrokScraper(url='http://bxr.su', domain='', proj='FreeBSD', path='', nresults=100, errors='print')
table = gs.getOcurrences('rt2561s')
printOcurrences(table)
def OPENGROK_LIBREOFFICE_ORG():
gs = GrokScraper(url='https://opengrok.libreoffice.org', domain='', proj='cppunit', path='', nresults=100, errors='print')
table = gs.getOcurrences('write')
printOcurrences(table)
revisions = gs.getRevisions(table)
printRevisions(revisions)
grokExamples = [BXR_SU, OPENGROK_LIBREOFFICE_ORG]
i = 0
for example in grokExamples:
print "EXAMPLE NUMBER %d FOR GROK SCRAPER: %s" % (i, example.__name__)
print "\n"
example()
i=i+1
```
#### File: jdfr/SimpleRefactoring/refactor.py
```python
from lxml import etree
import grokscrap as gs
import os
import subprocess as subp
import re
#This class allows for (very handy) re-entrant lists of command-line calls. All you need is to call startStep() at the beginning and make sure to call endStep() at the end only if there was no problem and the list doesn't have to be replayed. And, of course, do not change the list across runs, at least in the parts already executed, or hard-to-debug problems will ensue
class ExecuteContext:
def __init__(self, execute=True, verbose=False, stampName=os.path.join(os.getcwd(), 'step.txt')):
self.execute = execute
self.verbose = verbose
self.resumeFrom = None
self.stampName = stampName
self.step = 0
def writeStep(self, n):
self.step = n
with open(self.stampName, 'w') as f:
f.write(str(n))
def startStep(self):
if os.path.isfile(self.stampName):
with open(self.stampName, 'r') as f:
self.resumeFrom = int(f.read())
else:
self.resumeFrom = None
self.writeStep(0)
def endStep(self):
if os.path.isfile(self.stampName):
os.remove(self.stampName)
def checkStep(self):
if (self.execute):
if self.resumeFrom==self.step:
self.resumeFrom=None
return self.execute and self.resumeFrom is None
def updateStep(self):
self.writeStep(self.step+1)
def actualDoCommand(self, command, **kwargs):
        #this is intended to actually do the subprocess call, as some esoteric special-needs tools might be so picky about how exactly they are invoked that you can't just assume that a straight subprocess.Popen/call will work
        #default implementation just uses a regular subprocess.call
return subp.call(command, **kwargs)
def doCommand(self, command, **kwargs):
if self.verbose:
print ' '.join(command)
if self.checkStep():
ret = self.actualDoCommand(command, **kwargs)
if ret!=0:
raise RuntimeError("Error in command <%s>" % ' '.join(command))
self.updateStep()
def doCd(self, dr):
if self.checkStep() or True: #as chdirs have critically important side-effects, they have to be replayed no matter what
os.chdir(dr)
self.updateStep()
if self.verbose:
print "cd "+dr
#simple but high-level driver for the refactor binary, it basically does housekeeping and high-level planning; the binary does the grunt work.
class ExternalRefactor:
def __init__(self,
context,
translatepath=lambda x: x,
compiler_args_base=[],
compiler_args=lambda x: ["-I."],
command='./simpleRefactor',
exedir=os.getcwd(),
cppextensions=('.cpp',),
hppextensions=('.hpp', '.h'),
getCppForHpp=None,
grokscraper=None,
isxmlfile=lambda x: x.endswith(('.xml',)),
xml_xpath=None,
execute=True,
verbose=True):
self.context = context
self.translatepath = translatepath
self.compiler_args_base = compiler_args_base
self.compiler_args = compiler_args
self.command = command
self.exedir = exedir
self.cppextensions = cppextensions
self.hppextensions = hppextensions
self.getCppForHpp = getCppForHpp
self.grokscraper = grokscraper
self.xml_xpath = xml_xpath
self.isxmlfile = isxmlfile
self.execute = execute
self.verbose = verbose
self.template = lambda term, value, filepath: [self.command, '--term=%s' % term, '--value=%s' % value, '--overwrite=true', filepath, '--']
#do not forget to call this one if you want to make sure to also refactor instances that only apper in header files!
def addCppFilesForHppFiles(self, table):
for filename in table:
if filename.endswith(self.hppextensions):
cpp = None
if self.getCppForHpp is not None:
cpp = self.getCppForHpp(filename) #paths returned here should be consistent with grok, rather than with the codebase
elif self.grokscraper is not None:
cpp = self.getCppForHppWithGrok(filename, table)
if cpp is None:
raise RuntimeError("Could not find a C++ source file including the header %s!!!!!" % filename)
if cpp in table:
table[cpp].add(filename)
else:
table[cpp] = set([filename])
def getCppForHppWithGrok(self, hppfile, table):
filename = hppfile.rsplit('/', 1)[1]
hpptable = self.grokscraper.getOcurrences("include "+filename)
#first check if the file is already in the table
for grokfilepath in hpptable:
if grokfilepath.endswith(self.cppextensions) and grokfilepath in table:
return grokfilepath
        #this is quite a dumb brute-force approach; it might be far better to avoid greedy strategies and compute a minimal set of cpps for all hpps with occurrences; however that might be inefficient for codebases with massively nested sets of header files
for grokfilepath in hpptable:
if grokfilepath.endswith(self.cppextensions):
return grokfilepath
for grokfilepath in hpptable:
if grokfilepath.endswith(self.hppextensions):
                ret = self.getCppForHppWithGrok(grokfilepath, table)
if ret is not None:
return ret
#there might be headers not included anywhere in the codebase (conceivably, they might be included by source files generated during the build process). If that's the case, here we should add some code to (a) use those generated sources (after compilation) or (b) generate some phony C++ source file that just includes the header and feed it to the binary tool
return None
def doCPPFile(self, term, value, filepath):
commandline = self.template(term, value, filepath)+self.compiler_args_base+self.compiler_args(filepath)
if self.verbose:
print 'ON %s EXECUTE %s' % (self.exedir, ' '.join(commandline))
if self.execute:
self.context.doCommand(commandline, cwd=self.exedir)
def doXMLFile(self, term, filepath):
if self.verbose:
            print 'ON %s REMOVE REFERENCES TO %s' % (filepath, term)
if self.execute:
if self.context.checkStep():
root = etree.parse(filepath)
res = root.xpath(self.xml_xpath % term)
if len(res)!=1:
print "Error locating config value <%s> in XML file <%s>!!!!!" % (term, filepath)
else:
toremove = res[0]
toremove.getparent().remove(toremove)
with open(filepath, 'w') as svp:
svp.write(etree.tostring(root))
self.context.updateStep()
#main function, does the refactoring
def doFilesFromTable(self, table, term, value):
if self.verbose:
print "PROCESSING FILES FROM TERMS FOUND WITH OPENGROK\n"
for grokfilepath in sorted(table.keys()):
lines = list(table[grokfilepath])
lines.sort()
filepath = self.translatepath(grokfilepath)
if grokfilepath.endswith(self.cppextensions):
if self.verbose:
print " TERM <%s> TO BE REFACTORED IN CPP FILE <%s> in line(s) %s" % (term, filepath, lines)
self.doCPPFile(term, value, filepath)
if grokfilepath.endswith(self.hppextensions):
if self.verbose:
print " TERM <%s> FOUND IN HEADER FILE <%s> in line(s) %s (refactored as part of a cpp file)" % (term, filepath, lines)
elif self.isxmlfile(filepath):
if self.verbose:
print " TERM <%s> TO BE REFACTORED IN XML FILE <%s> in line(s) %s" % (term, filepath, lines)
self.doXMLFile(term, filepath)
    #an example of how a high-level function to use GrokScraper and ExternalRefactor might look like
def doFilesFromGrok(self, term, value, printRevs=True):
table = grokscraper.getOcurrences(term)
self.addCppFilesForHppFiles(table)
self.context.startStep()
self.doFilesFromTable(table, term, value)
self.context.endStep()
if printRevs:
print ""
revisions = self.grokscraper.getRevisions(table)
self.grokscraper.printRevisions(revisions)
#helper function to be used as part of function compiler_args (one of the members of ExternalRefactor): for a .d file (the one generated by the compiler detailing ALL files #included into the compilation unit), heuristically generate a list of directives to include the relevant directories
def heuristicIncludeDirListFromDFile(dfilepath, rootDirKeyword=['include/'], hppextensions=(('.hpp', '.h')), toExclude=[], prefix=lambda x: ''):
with open(dfilepath, 'r') as fd:
text = fd.read()
dirs_unfiltered = set()
dirs_filtered = set()
for m in re.finditer('[_\./a-zA-Z0-9-]+', text):
path = m.group(0)
if path.endswith(hppextensions):
dr = path.rsplit('/', 1)[0]
if not dr.endswith(hppextensions) and not dr in dirs_unfiltered:
dirs_unfiltered.add(dr)
toAdd = True
for excl in toExclude:
if dr.startswith(excl):
toAdd = False
break
if toAdd:
processed = False
for key in rootDirKeyword:
pos = dr.find(key)
if pos!=-1:
processed = True
dr = dr[:pos+len(key)]
break
if dr[-1]=='/':
dr = dr[:-1]
dirs_filtered.add(dr)
return ['-I'+prefix(dr)+dr for dr in dirs_filtered]
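#A hedged usage sketch (not part of the original module): 'build/test.d' is a hypothetical dependency file produced by the compiler (e.g. with -MMD); system directories are excluded and every remaining path is truncated at 'include/'.
def _example_include_dirs():
    return heuristicIncludeDirListFromDFile('build/test.d',
                                            rootDirKeyword=['include/'],
                                            toExclude=['/usr/'],
                                            prefix=lambda dr: '')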
#########################################################
if __name__=='__main__':
translatepath = lambda x: os.path.join('examples', x)
context = ExecuteContext(verbose=True)
external = ExternalRefactor(context,
translatepath=translatepath,
compiler_args=lambda x: ["-I./examples/include"], #if we used a non-installed binary release of clang (i.e. a clang release we just unzipped somewhere where we happen to have write permissions) to compile the binary tool, we should put in this list an additionsl include pointing to the location where clang's stddef.h lives inside the clang directory tree
xml_xpath="/config/value[@name='%s']",
execute=True,
verbose=True)
table = {'test.cpp':[], 'config.xml':[]}
external.doFilesFromTable(table, "UseSpanishLanguage", "true")
```
|
{
"source": "jdfr/TopoReg",
"score": 2
}
|
#### File: jdfr/TopoReg/imreg.py
```python
from __future__ import division, print_function
import numpy
from numpy.fft import fft2, ifft2
from numpy import log
import scipy.ndimage.interpolation as ndii
import scipy.ndimage.filters as scifil
#__version__ = '2013.01.18'
#__docformat__ = 'restructuredtext en'
#__all__ = ['translationSimple', 'similarity']
import matplotlib.pyplot as plt
def showTwoImages(img1, shift1, img2, shift2, txt):
# fig = plt.figure(); plt.imshow(img1, origin='lower')
# fig = plt.figure(); plt.imshow(img2, origin='lower')
fig = plt.figure()
fig.suptitle(txt)
l1 = shift1[1]
r1 = img1.shape[1]+shift1[1]
b1 = shift1[0]
t1 = img1.shape[0]+shift1[0]
l2 = shift2[1]
r2 = img2.shape[1]+shift2[1]
b2 = shift2[0]
t2 = img2.shape[0]+shift2[0]
plt.imshow(img1, extent=(l1, r1, b1, t1), origin='lower')
plt.imshow(img2, extent=(l2, r2, b2, t2), origin='lower', alpha=0.7)
ax = fig.gca()
ax.set_xlim([min(l1, l2), max(r1, r2)])
ax.set_ylim([min(b1, b2), max(t1, t2)])
def zeropad2(x, shap):
m, n = x.shape
p, q = shap
assert p > m
assert q > n
    tb = numpy.zeros(((p - m) // 2, n))
    lr = numpy.zeros((p, (q - n) // 2))
x = numpy.append(tb, x, axis = 0)
x = numpy.append(x, tb, axis = 0)
x = numpy.append(lr, x, axis = 1)
x = numpy.append(x, lr, axis = 1)
return x
#using a function to find peaks with the same parameters as in <NAME>'s
#findPeaks() function in Stitching2D.java (stitching plugin for imageJ/Fiji,
# https://github.com/fiji/Stitching )
def findPeaks(matrix, numPeaks):
    #compute maxima over the 8-neighborhood, wrapping for edges (our matrices are Fourier transforms, so that's the thing to do)
maxbool = matrix==scifil.maximum_filter(matrix, size=(3,3), mode='wrap')
values = matrix[maxbool]
rows, cols = numpy.nonzero(maxbool)
#order the peaks
indexes = numpy.argsort(values)
# z=numpy.column_stack((rows[indexes], cols[indexes]))
#get the $numPeaks highest peaks
indexes = indexes[-min(numPeaks, values.size):]
#put the highest peaks in decreasing order
indexes = indexes[::-1]
rows = rows[indexes]
cols = cols[indexes]
values = values[indexes]
return rows, cols, values
#shift is applied to img2 w.r.t. img1
def getAlignedSubmatrices(img1, img2, shft):
if shft[0]>=0:
selrowinit1 = shft[0]
selrowinit2 = 0
selrowend1 = img1.shape[0]
selrowend2 = img2.shape[0]-shft[0]
else:
selrowinit1 = 0
selrowinit2 = -shft[0]
selrowend1 = img1.shape[0]+shft[0]
selrowend2 = img2.shape[0]
if shft[1]>=0:
selcolinit1=shft[1]
selcolinit2 = 0
selcolend1 = img1.shape[1]
selcolend2 = img2.shape[1]-shft[1]
else:
selcolinit1 = 0
selcolinit2 = -shft[1]
selcolend1 = img1.shape[1]+shft[1]
selcolend2 = img2.shape[1]
return img1[selrowinit1:selrowend1, selcolinit1:selcolend1], img2[selrowinit2:selrowend2, selcolinit2:selcolend2]
#adapted from openPIV: https://github.com/OpenPIV/openpiv-python/blob/master/openpiv/pyprocess.py
#but, instead of refining over the naive algorithm used in openPIV, use the position
#we have computed previously
def find_subpixel_peak_position( img, default_peak_position, subpixel_method = 'gaussian'):
# the peak locations
peak1_i, peak1_j = default_peak_position
try:
# the peak and its neighbours: left, right, down, up
# c = img[peak1_i, peak1_j]
# cl = img[peak1_i-1, peak1_j]
# cr = img[peak1_i+1, peak1_j]
# cd = img[peak1_i, peak1_j-1]
# cu = img[peak1_i, peak1_j+1]
c = img[peak1_i, peak1_j]
cl = img[(peak1_i-1)%img.shape[0], peak1_j]
cr = img[(peak1_i+1)%img.shape[0], peak1_j]
cd = img[peak1_i, (peak1_j-1)%img.shape[1]]
cu = img[peak1_i, (peak1_j+1)%img.shape[1]]
# gaussian fit
if numpy.any ( numpy.array([c,cl,cr,cd,cu]) < 0 ) and subpixel_method == 'gaussian':
subpixel_method = 'centroid'
try:
if subpixel_method == 'centroid':
subp_peak_position = (((peak1_i-1)*cl+peak1_i*c+(peak1_i+1)*cr)/(cl+c+cr),
((peak1_j-1)*cd+peak1_j*c+(peak1_j+1)*cu)/(cd+c+cu))
elif subpixel_method == 'gaussian':
subp_peak_position = (peak1_i + ( (log(cl)-log(cr) )/( 2*log(cl) - 4*log(c) + 2*log(cr) )),
peak1_j + ( (log(cd)-log(cu) )/( 2*log(cd) - 4*log(c) + 2*log(cu) )))
elif subpixel_method == 'parabolic':
subp_peak_position = (peak1_i + (cl-cr)/(2*cl-4*c+2*cr),
peak1_j + (cd-cu)/(2*cd-4*c+2*cu))
except:
subp_peak_position = default_peak_position
except IndexError:
subp_peak_position = default_peak_position
return subp_peak_position
#test the cross-correlation (adapted from testCrossCorrelation() in
#Stitching2D.java (stitching plugin for imageJ/Fiji, https://github.com/fiji/Stitching )
def testCrossCorrelation(img1, img2, shft, minratio):
sub1, sub2 = getAlignedSubmatrices(img1, img2, shft)
if sub1.size==0: #non-overlapping
return -numpy.inf
if sub1.size/float(img1.size)<minratio: #not enough overlap
return -numpy.inf
# if shft[1]<-200:
# showTwoImages(sub1, [0,0], sub2, [0,0], '')
dist1 = sub1-sub1.mean()
dist2 = sub2-sub2.mean()
covar = (dist1*dist2).mean()
std1 = numpy.sqrt((dist1**2).mean())
std2 = numpy.sqrt((dist2**2).mean())
if (std1 == 0) or (std2 == 0):
corrcoef = 0
#sqdiff = n.inf
else:
corrcoef = covar / (std1*std2)
# print ('testCrossCorrelation '+str(shft)+': '+str(corrcoef))
if numpy.isnan(corrcoef):
covar=covar
#sqdiff = ((sub1-sub2)**2).mean()
return corrcoef#, sqdiff
def bestShift(img1, img2, shifts, minratio):
corrcoefs = [testCrossCorrelation(img1, img2, shft, minratio) for shft in shifts]
# for s, c in zip(shifts, corrcoefs):
# if (s[1]<-450) and (c>0):#if c>0.6:
# showTwoImages(img1, [0,0], img2, s, str(s)+": "+str(c))
# x=numpy.column_stack((corrcoefs, numpy.array(shifts), shifts[:,1]<-2400))
# indexes = numpy.argsort(corrcoefs)
# indexes = indexes[::-1]
# xx=numpy.nonzero(numpy.logical_and(shifts[1]<-2500, shifts[1]>-2700))
if len(shifts)==0:
raise ValueError('Very strange, no peaks detected!')
if len(corrcoefs)==0:
raise ValueError('Very strange, no peaks detected (bis)!')
idx = numpy.argmax(corrcoefs)
return idx, corrcoefs
def translationSimple(im0, im1, subpixel=False):
"""Return translation vector to register images."""
shape = im0.shape
f0 = fft2(im0)
f1 = fft2(im1)
#ir = abs(ifft2((f0 * f1.conjugate()) / (abs(f0) * abs(f1))))
lens0 = abs(f0)
    lens1 = abs(f1)
ff0=f0/lens0
ff1=f1/lens1
ir = ifft2((ff0 * ff1.conjugate()))
    ir = abs(ir)
zz= (abs(ff0) * abs(ff1))
ir = ir / zz
t0, t1 = numpy.unravel_index(numpy.argmax(ir), shape)
if t0 > shape[0] // 2:
t0 -= shape[0]
if t1 > shape[1] // 2:
t1 -= shape[1]
result = (t0, t1)
if subpixel:
result = find_subpixel_peak_position(ir, result)
return numpy.array(result)
import register_images as imr
def translationTestPeaks(im0, im1, numPeaks=20, refinement=True,subpixel=False, scaleSubPixel=None, minratio=0.01):
"""Return translation vector to register images."""
# im0 = scifil.laplace(im0)
# im1 = scifil.laplace(im1)
shape = im0.shape
f0 = fft2(im0)
f1 = fft2(im1)
ir = abs(ifft2((f0 * f1.conjugate()) / (abs(f0) * abs(f1))))
# lens0 = abs(f0)
# lens1 = abs(f0)
# ff0=f0/lens0
# ff1=f1/lens1
# ir = ifft2((ff0 * ff1.conjugate()))
# ir = abs(ir)
## zz= (abs(ff0) * abs(ff1))
## ir = ir / zz
rows, cols, values = findPeaks(ir, numPeaks)
rows[rows>(shape[0] // 2)] -= shape[0]
cols[cols>(shape[1] // 2)] -= shape[1]
#each peak in fact is four peaks: the following is adapted from the first for loop
# of the function verifyWithCrossCorrelation() of PhaseCorrelation.java in
# http://trac.imagej.net/browser/ImgLib/imglib1/algorithms/src/main/java/mpicbg/imglib/algorithm/fft/PhaseCorrelation.java?rev=e010ba0694e985c69a4ade7d846bef615e4e8043
rows2 = rows.copy()
cols2 = cols.copy()
below0 = rows2<0
rows2[below0] += shape[0]
rows2[numpy.logical_not(below0)] -= shape[0]
below0 = cols2<0
cols2[below0] += shape[1]
cols2[numpy.logical_not(below0)] -= shape[1]
allshifts = numpy.column_stack((numpy.concatenate((rows, rows, rows2, rows2)),
numpy.concatenate((cols, cols2, cols, cols2))))
idx, corrcoefs = bestShift(im0, im1, allshifts, minratio)
corrcoef = corrcoefs[idx]
shft = numpy.array(allshifts[idx])
# print('raro: '+str(shft)+', '+str(corrcoef))
peak = values[idx % values.size]
# refinement = True
#
# if refinement:
# num=1
# dsp = numpy.arange(-num, num+1).reshape((1,-1))
# dspr = numpy.repeat(dsp, dsp.size, axis=1)
# dspc = numpy.repeat(dsp, dsp.size, axis=0)
# shifts = numpy.column_stack((dspr.ravel()+shft[0], dspc.ravel()+shft[1]))
# print('before refinement: '+str(shft)+', '+str(corrcoef))
# idx, corrcoefs = bestShift(im0, im1, shifts, minratio)
# corrcoef = corrcoefs[idx]
# shft = numpy.array(shifts[idx])
# print('after refinement: '+str(shft)+', '+str(corrcoef))
# print('neighbourhood: ')
# for k in xrange(shifts.shape[0]):
# print(str(shifts[k])+': '+str(corrcoefs[k]))
if subpixel:
if (scaleSubPixel is not None) and (scaleSubPixel>=2):
sub0, sub1 = getAlignedSubmatrices(im0, im1, shft)
finer = numpy.array(imr.dftregistration(sub0,sub1,usfac=scaleSubPixel))
shft = shft+finer
else:
shft = numpy.array(find_subpixel_peak_position(ir, shft))
# finershft = numpy.array(find_subpixel_peak_position(ir, shft))
# if (scaleSubPixel is not None) and (scaleSubPixel>=2):
# #work only with the matching submatrices, to remove spurious peaks
# sub0, sub1 = getAlignedSubmatrices(im0, im1, shft)
# finer = numpy.array(imr.dftregistration(sub0,sub1,usfac=scaleSubPixel))
# finershftIMR = shft+finer
# discreps = finershft-finershftIMR
## print('DISCREPANCIES A: '+str(finershft))
## print('DISCREPANCIES B: '+str(finershftIMR))
# if (numpy.abs(discreps)<0.5).all():
# #we only trust register_images if the expected shift is around the same
# #as the one computed from fitting a gaussian to the peak
# finershft = finershftIMR
# shft=finershft
return [shft, corrcoef, peak]
def translationUpsamplingTestPeaks(im0, im1, scale, numPeaks, subpixel=False, minratio=0.01):
#http://www.velocityreviews.com/threads/magic-kernel-for-image-zoom-resampling.426518/
#http://johncostella.webs.com/magic/
if scale>1:
im0 = ndii.zoom(im0, scale, mode='wrap')
im1 = ndii.zoom(im1, scale, mode='wrap')
shft = translationTestPeaks(im0, im1, numPeaks, subpixel)
if scale>1:
shft[0] /= scale
return shft
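#A hedged, self-contained sketch (not part of the original module) showing how the peak-testing registration above can be driven: crop two overlapping windows with a known offset out of one random image and recover that offset.
def _example_registration():
    rng = numpy.random.RandomState(0)
    base = rng.rand(256, 256)
    im0 = base[20:220, 30:230]
    im1 = base[25:225, 22:222]  #same content, 5 rows down and 8 columns left of im0
    shft, corrcoef, peak = translationTestPeaks(im0 - im0.mean(), im1 - im1.mean(),
                                                numPeaks=20, subpixel=False)
    #expected: shft close to [5, -8] and corrcoef close to 1.0 on the overlap
    return shft, corrcoef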
#import sys
#if sys.gettrace() is not None:
# print('debugging')
# import tifffile as tff
# img0 = tff.imread('/home/josedavid/3dprint/software/pypcl/corrected.0.tif')
# img1 = tff.imread('/home/josedavid/3dprint/software/pypcl/corrected.1.tif')
## import image_registration as ir
## result = ir.register_images(img0, img1, usfac=1)
# imA = img0-img0.mean()
# imB = img1-img1.mean()
# res = translationTestPeaks(imA, imB, numPeaks=100, subpixel=True, scaleSubPixel=1000, minratio=0.01)
# res=res
```
#### File: jdfr/TopoReg/rotations.py
```python
import numpy as n
import matplotlib.pyplot as plt
#def fitPlaneLSQR(XYZ):
# A = n.column_stack((XYZ[:,0:2], n.ones((XYZ.shape[0],))))
# coefs, residuals, rank, s = n.linalg.lstsq(A, XYZ[:,2])
# plane = n.array([coefs[1], coefs[2], -1, coefs[0]])
# return plane, coefs, residuals, rank, s, A, XYZ[:,2]
#fit plane to points. Points are rows in the matrix data
#taken from http://stackoverflow.com/questions/15959411/fit-points-to-a-plane-algorithms-how-to-iterpret-results
def fitPlaneSVD(xyz):
[rows,cols] = xyz.shape
# Set up constraint equations of the form AB = 0,
# where B is a column vector of the plane coefficients
# in the form b(1)*X + b(2)*Y +b(3)*Z + b(4) = 0.
p = (n.ones((rows,1)))
AB = n.hstack([xyz,p])
[u, d, v] = n.linalg.svd(AB,0)
B = v[3,:] # Solution is last column of v.
#showPointsAndPlane(XYZ[::100,:], B)
# nn = n.linalg.norm(B[0:3])
# B = B / nn
# return B[0:3] #return just the normal vector
return B #returns [A,B,C,D] where the plane is Ax+By+Cz+D=0
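#A hedged sketch (not part of the original module): fit a plane to noisy samples of z = 2x - y + 3 and check the residual distances with the helpers defined further below.
def _example_fit_plane():
    rng = n.random.RandomState(0)
    xy = rng.rand(500, 2) * 10
    z = 2 * xy[:, 0] - xy[:, 1] + 3 + rng.randn(500) * 0.01
    xyz = n.column_stack((xy, z))
    plane = fitPlaneSVD(xyz)                      #[A,B,C,D] with Ax+By+Cz+D=0
    return plane, errorPointsToPlane(xyz, plane)  #distances on the order of the noise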
from pylab import cm
import write3d as w
#plane is plane[0]*X+plane[1]*Y+plane[2]*Z+plane[3]=0
def showPointsAndPlane(xyz, fac, plane, values=None, vmin=None, vmax=None):
showF=True
if showF:
xx, yy = n.meshgrid([n.nanmin(xyz[:,0]),n.nanmax(xyz[:,0])], [n.nanmin(xyz[:,1]),n.nanmax(xyz[:,1])])
zz=-(xx*plane[0]+yy*plane[1]+plane[3])/plane[2]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
if values is None:
if showF:
ax.scatter(xyz[:,0], xyz[:,1], xyz[:,2])
else:
vals = values.copy()
vals-=vmin#vals.min()
vals/=vmax#vals.max()
colors = cm.jet(values)
if showF:
ax.scatter(xyz[::fac,0], xyz[::fac,1], xyz[::fac,2], c=colors[::fac])
w.writePLYFileWithColor('/home/josedavid/3dprint/software/pypcl/strawlab/cent/cosa.ply', xyz, colors*255)
if showF:
ax.plot_surface(xx, yy, zz, alpha=0.7, color=[0,1,0])
ax.set_xlim(n.nanmin(xyz[:,0]),n.nanmax(xyz[:,0]))
ax.set_ylim(n.nanmin(xyz[:,1]),n.nanmax(xyz[:,1]))
ax.set_zlim(n.nanmin(xyz[:,2]),n.nanmax(xyz[:,2]))
plt.show()
#return colors, vals
#points are rows in first matrix
#plane is specified by [A,B,C,D] where the plane is Ax+By+Cz+D=0
#taken from http://stackoverflow.com/questions/12299540/plane-fitting-to-4-or-more-xyz-points
def distancePointsToPlane(points, plane):
return ((points*(plane[0:3].reshape(1,3))).sum(axis=1) + plane[3]) / n.linalg.norm(plane[0:3])
def errorPointsToPlane(points, plane):
return n.abs(distancePointsToPlane(points, plane))
def planeEquationFrom3Points(points):
det = n.linalg.det(points)
ABCD = n.array([0.0,0.0,0.0,1.0]) #the following is parametric in D. we choose D=1.0
for k in xrange(3):
mat = points.copy()
mat[:,k] = 1
ABCD[k] = -n.linalg.det(mat)*ABCD[3]/det
return ABCD
#get rotation vector A onto onto vector B
#from http://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d/476311#476311
def rotateVectorToVector(A,B):
a = A.reshape((3,))/n.linalg.norm(A)
b = B.reshape((3,))/n.linalg.norm(B)
v = n.cross(a, b)
s = n.linalg.norm(v) #sin of angle
c = n.dot(a, b) #cos of angle
V = n.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]) #skew-symmetric cross-product matrix of v
R = n.identity(3)+V+n.dot(V, V)*((1-c)/(s*s))
return R
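# Minimal check sketch for rotateVectorToVector (not called anywhere): assumes A and B
# are non-zero and not (anti-)parallel, since s == 0 makes the formula above divide by zero.
def _example_rotate_vector_to_vector():
    a = n.array([1.0, 0.0, 0.0])
    b = n.array([0.0, 1.0, 0.0])
    R = rotateVectorToVector(a, b)
    # R applied to the source vector should reproduce the target vector
    assert n.allclose(n.dot(R, a), b)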
#return rigid transformation between two sets of points
#http://www.lfd.uci.edu/~gohlke/code/transformations.py.html
#see also http://robokitchen.tumblr.com/post/67060392720/finding-a-rotation-quaternion-from-two-pairs-of
#see also http://nghiaho.com/?page_id=671
#see also http://igl.ethz.ch/projects/ARAP/svd_rot.pdf
#see also http://en.wikipedia.org/wiki/Kabsch_algorithm
def findTransformation(v0, v1):
"""rigid transformation to convert v0 to v1. This allows any number of
vectors higher than v0.shape[1], doing a best fit"""
#local copy
v0 = n.array(v0, dtype=n.float64, copy=True)
v1 = n.array(v1, dtype=n.float64, copy=True)
ndims = v0.shape[0]
# move centroids to origin
t0 = -n.mean(v0, axis=1)
M0 = n.identity(ndims+1)
M0[:ndims, ndims] = t0
v0 += t0.reshape(ndims, 1)
t1 = -n.mean(v1, axis=1)
M1 = n.identity(ndims+1)
M1[:ndims, ndims] = t1
v1 += t1.reshape(ndims, 1)
# Rigid transformation via SVD of covariance matrix
u, s, vh = n.linalg.svd(n.dot(v1, v0.T))
# rotation matrix from SVD orthonormal bases
R = n.dot(u, vh)
if n.linalg.det(R) < 0.0:
# R does not constitute right handed system
R -= n.outer(u[:, ndims-1], vh[ndims-1, :]*2.0)
s[-1] *= -1.0
# homogeneous transformation matrix
M = n.identity(ndims+1)
M[:ndims, :ndims] = R
# move centroids back
M = n.dot(n.linalg.inv(M1), n.dot(M, M0))
M /= M[ndims, ndims]
return M
def inverseTransform(M):
P = M[0:3,0:3]
T = M[0:3,3]
P1 = n.linalg.inv(P)
M1 = n.vstack((n.column_stack((P1, n.dot(-P1, T))), n.array([0.0, 0.0, 0.0, 1])))
return M1
def inverseTransformT(M):
return inverseTransform(M.T).T
def doTransform(M, xyz):
"""transform in the usual way as done in textbooks (coordinate vectors are vertical)"""
xyz1 = n.vstack((xyz, n.ones((1, xyz.shape[1]))))
return n.dot(M, xyz1)[0:-1,:]
def doTransformT(xyzT, MT):
"""transform for coordinate vectors as used by here (coordinate vectors are rows)"""
xyz1 = n.column_stack((xyzT, n.ones((xyzT.shape[0],))))
return n.dot(xyz1, MT)[:,0:-1]
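# Round-trip sketch for findTransformation (not called anywhere): v0/v1 hold 3xN column
# vectors, and a pure translation should be recovered up to floating point error.
def _example_find_transformation():
    v0 = n.array([[0.0, 1.0, 0.0, 0.0],
                  [0.0, 0.0, 1.0, 0.0],
                  [0.0, 0.0, 0.0, 1.0]])
    v1 = v0 + n.array([[2.0], [3.0], [4.0]])  # translate by (2, 3, 4)
    M = findTransformation(v0, v1)
    assert n.allclose(doTransform(M, v0), v1)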
def findTransformation_RANSAC(data):
"""Calls findTransformation inside RANSAC (a transformation is a model)"""
s = data.shape[1]/2
# if data.shape[0]!=3:
# print 'Mira findTransformation_RANSAC: '+str(data.shape)
# return findTransformation(data[:,:s].T, data[:,s:].T)
return findTransformation(data[:,:s].T, data[:,s:].T).T
def get_error_RANSAC(data, model):
"""Gets error of a model (a transformation) inside RANSAC"""
s = data.shape[1]/2
# set1 = data[:,:s].T
# set2 = data[:,s:].T
# transf = doTransform(model, set1)
# err = n.sum((transf-set2)**2, axis=0)
set1 = data[:,:s]
set2 = data[:,s:]
transf = doTransformT(set1, model)
err = n.sum((transf-set2)**2, axis=1)
return err
def findTranslation_RANSAC(data):
"""Calls findTransformation inside RANSAC (a transformation is a model)"""
raise Exception("This is untested!!!")
s = data.shape[1]/2
rot = n.identity(4)
#rot[0:3,3] = -n.mean(data[:,s:]-data[:,:s], axis=0)
rot[3,0:3] = -n.mean(data[:,s:]-data[:,:s], axis=0)
return rot
#def get_errordisp_RANSAC(data, model):
# """Gets error of a model (a transformation) inside RANSAC"""
# s = data.shape[1]/2
## set1 = data[:,:s].T
## set2 = data[:,s:].T
## transf = doTransform(model, set1)
## err = n.sum((transf-set2)**2, axis=0)
# set1 = data[:,:s]
# set2 = data[:,s:]
# err = n.sum(((set1+model)-set2)**2, axis=1)
# return err
def testPointsInsideBoundingBox(points, BB):
"""points: coordinates in row format
BB: bounding box"""
#
# Both arguments have to be two dimensional matrices, vectors will break
# the code. Returns a boolean matrix whose element [i,j] is the test of
# the point points[j,:] inside the BB BBs[i,:]"""
# if len(points.shape)==1:
# points = n.reshape(points, (1, points.size))
# if len(BBs.shape)==1:
# BBs = n.reshape(BBs, (1, BBs.size))
# return reduce(n.logical_and, [points[:,0:1]>=BBs[:,0:1].T, points[:,0:1]<=BBs[:,2:3].T,
# points[:,1:2]>=BBs[:,1:2].T, points[:,1:2]<=BBs[:,3:4].T])
return reduce(n.logical_and, [points[:,0]>=BB[0], points[:,0]<=BB[2],
points[:,1]>=BB[1], points[:,1]<=BB[3]])
def getOverlapingBoundingBoxes(BBs, A, Bs):
"""returns a bool array mask to cover only bounding boxes in Bs that overlap with bounding box A"""
#bool DoBoxesIntersect(Box a, Box b) {
# return (abs(a.x - b.x) * 2 < (a.width + b.width)) &&
# (abs(a.y - b.y) * 2 < (a.height + b.height));
#}
minxA = BBs[A,0]
minyA = BBs[A,1]
widthA = BBs[A,2]-minxA
heightA = BBs[A,3]-minyA
minxBs = BBs[Bs,0]
minyBs = BBs[Bs,1]
widthBs = BBs[Bs,2]-minxBs
heightBs = BBs[Bs,3]-minyBs
overlaps = n.logical_and(((n.abs(minxA-minxBs)*2) < (widthA+widthBs)),
((n.abs(minyA-minyBs)*2) < (heightA+heightBs)))
return overlaps
def imageRectangle(img, imgstep):
"""Return the rectangle defining the bounding box of an (axis-aligned) heightmap"""
minx,miny,maxx,maxy = imageBB(img, imgstep)
rectangle = n.array([[minx,miny,0], [maxx,miny,0], [maxx,maxy,0], [minx,maxy,0]])
return rectangle
def heightmapRectangle(heightmap):
"""Return the rectangle defining the bounding box of an (axis-aligned) heightmap"""
minx,miny,maxx,maxy = heightmapBB(heightmap)
rectangle = n.array([[minx,miny,0], [maxx,miny,0], [maxx,maxy,0], [minx,maxy,0]])
return rectangle
def testPoinstInsidePolygon(points, pol):#, _PointInPolygon(pt, outPt):
"""adapted & vectorized from Clipper._PointInPolygon. It assumes that there
are way more points to test than polygon vertices"""
#def _PointInPolygon(pt, outPt):
# outPt2 = outPt
# result = False
# while True:
# if ((((outPt2.pt.y <= pt.y) and (pt.y < outPt2.prevOp.pt.y)) or \
# ((outPt2.prevOp.pt.y <= pt.y) and (pt.y < outPt2.pt.y))) and \
# (pt.x < (outPt2.prevOp.pt.x - outPt2.pt.x) * (pt.y - outPt2.pt.y) / \
# (outPt2.prevOp.pt.y - outPt2.pt.y) + outPt2.pt.x)): result = not result
# outPt2 = outPt2.nextOp
# if outPt2 == outPt: break
count = n.zeros((points.shape[0],), dtype=int) #shape as a vector instead as a column vector to avoid broadcast errors
for kthis in xrange(pol.shape[0]):
kprev = (kthis-1)%pol.shape[0]
count += n.logical_and( n.logical_or(
n.logical_and( pol[kthis,1] <= points[:,1], points[:,1] < pol[kprev,1]),
n.logical_and( pol[kprev,1] <= points[:,1], points[:,1] < pol[kthis,1]) ),
points[:,0] < ((pol[kprev,0]-pol[kthis,0])*(points[:,1]-pol[kthis,1])/(pol[kprev,1]-pol[kthis,1])+pol[kthis,0]) )
inside = (count%2)==1
return inside
def imageBB(img, imgstep):
"""Return the bounding box of an (axis-aligned) heightmap"""
return n.array([[0.0,0.0], [img.shape[1]*imgstep[0], img.shape[0]*imgstep[1]]]).flatten()
def heightmapBB(heightmap):
"""Return the bounding box of an (axis-aligned) heightmap"""
return n.array([[0.0,0.0], [heightmap.size[0]*heightmap.step[0], heightmap.size[1]*heightmap.step[1]]]).flatten()
def heightmap2XYZ(heightmap):
"""convert a heightmap to a point cloud, pcl-style (a npointsXndimensions matrix)"""
# #rebase heightmap to cancel z_0 away
# data -= axes_config['z_0']
#create XY grid
xs = n.linspace(0, heightmap.size[0]*heightmap.step[0], heightmap.size[0])
ys = n.linspace(0, heightmap.size[1]*heightmap.step[1], heightmap.size[1])
grid = n.meshgrid(xs, ys)
xyz = n.column_stack((grid[0].ravel(), grid[1].ravel(), heightmap.img.ravel()))
return xyz
def image2XYZ(img, step):
"""convert a heightmap to a point cloud, pcl-style (a npointsXndimensions matrix)"""
# #rebase heightmap to cancel z_0 away
# data -= axes_config['z_0']
#create XY grid
xs = n.linspace(0, img.shape[1]*step[0], img.shape[1])
ys = n.linspace(0, img.shape[0]*step[1], img.shape[0])
grid = n.meshgrid(xs, ys)
xyz = n.column_stack((grid[0].ravel(), grid[1].ravel(), img.ravel()))
return xyz
#from https://sites.google.com/site/dlampetest/python/calculating-normals-of-a-triangle-mesh-using-numpy
#see also this for some subtle details out of this simple procedure: http://stackoverflow.com/questions/6656358/calculating-normals-in-a-triangle-mesh
def normalsFromMesh(points, triangles):
#Create a zeroed array with the same type and shape as our vertices i.e., per vertex normal
norm = n.zeros( points.shape, dtype=points.dtype )
#Create an indexed view into the vertex array using the array of three indices for triangles
tris = points[triangles]
#Calculate the normal for all the triangles, by taking the cross product of the vectors v1-v0, and v2-v0 in each triangle
nrm = n.cross( tris[::,1 ] - tris[::,0] , tris[::,2 ] - tris[::,0] )
# n is now an array of normals per triangle. The length of each normal is dependent the vertices,
# we need to normalize these, so that our next step weights each normal equally.
normalize_v3(nrm)
# now we have a normalized array of normals, one per triangle, i.e., per triangle normals.
# But instead of one per triangle (i.e., flat shading), we add to each vertex in that triangle,
# the triangles' normal. Multiple triangles would then contribute to every vertex, so we need to normalize again afterwards.
# The cool part, we can actually add the normals through an indexed view of our (zeroed) per vertex normal array
norm[ triangles[:,0] ] += nrm
norm[ triangles[:,1] ] += nrm
norm[ triangles[:,2] ] += nrm
normalize_v3(norm)
return norm
def normalize_v3(arr):
''' Normalize a numpy array of 3 component vectors shape=(n,3) '''
lens = n.sqrt( arr[:,0]**2 + arr[:,1]**2 + arr[:,2]**2 )
arr[:,0] /= lens
arr[:,1] /= lens
arr[:,2] /= lens
return arr
def meshForGrid(shape):
"""create a mesh for a grid in C-style matrix order"""
idxs = n.arange(n.prod(shape)).reshape(shape)
#v1, v2, v3, v4: vertices of each square in the grid, clockwise
v1 = idxs[:-1,:-1]
v2 = idxs[:-1,1:]
v3 = idxs[1:,1:]
v4 = idxs[1:,:-1]
faces = n.vstack((
n.column_stack((v1.ravel(), v2.ravel(), v4.ravel())), #triangles type 1
n.column_stack((v2.ravel(), v3.ravel(), v4.ravel())) )) #triangles type 2
return faces
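# Worked example for meshForGrid: with shape (2, 2) the vertex indices are
#   [[0, 1],
#    [2, 3]]
# and the returned faces are the two triangles [0, 1, 2] and [1, 3, 2],
# which together tile the single grid cell.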
def normalsForMatrixXYZ(xyz, shape):
"""assuming that xyz comes from a grid, compute the normals"""
triangles = meshForGrid(shape)
return normalsFromMesh(xyz, triangles)
#############################################################################
#NOT USED
#adapted from the python port of the clipper library
def testPointInsidePolygon(point, polygonPoints):#, _PointInPolygon(pt, outPt):
"""adapted & vectorized from Clipper._PointInPolygon"""
this = polygonPoints
prev = n.roll(polygonPoints, 1, 0)
inside = n.zeros((point.shape[0],1), dtype=bool)
tests = n.logical_and( n.logical_or(
n.logical_and( this[:,1] <= point[1], point[1] < prev[:,1]),
n.logical_and( prev[:,1] <= point[1], point[1] < this[:,1]) ),
point[0] < ((prev[:,0]-this[:,0])*(point[1]-this[:,1])/(prev[:,1]-this[:,1])+this[:,0]))
inside = (n.sum(tests)%2)==1
return inside
#def _PointInPolygon(pt, outPt):
# outPt2 = outPt
# result = False
# while True:
# if ((((outPt2.pt.y <= pt.y) and (pt.y < outPt2.prevOp.pt.y)) or \
# ((outPt2.prevOp.pt.y <= pt.y) and (pt.y < outPt2.pt.y))) and \
# (pt.x < (outPt2.prevOp.pt.x - outPt2.pt.x) * (pt.y - outPt2.pt.y) / \
# (outPt2.prevOp.pt.y - outPt2.pt.y) + outPt2.pt.x)): result = not result
# outPt2 = outPt2.nextOp
# if outPt2 == outPt: break
```
|
{
"source": "jdfurlan/nsc-cloudproject-s22016-master",
"score": 3
}
|
#### File: api/verify_oauth & blob/blob.py
```python
from azure.storage.blob import BlobService
import datetime
import string
from verify_oauth import verify_oauth
accountName = 'jesse15'
accountKey = ''
blob_service = BlobService(accountName, accountKey)
uploaded = False
def uploadBlob(username, file, filename, token, secret):
global uploaded
returnList = []
verify_oauth_code = verify_oauth(token, secret)
if verify_oauth_code.status_code != 200:
returnList = ["Could not verify oAuth credentials"]
return returnList
blob_service.create_container(username, x_ms_blob_public_access='container')
#datetime gives the system's current datetime, I convert to string in order to .replace
#characters that wouldn't work in a URL like ':','.', and ' '
time = str(datetime.datetime.now())
timeReplaced = time.replace(':','').replace('.','').replace(' ','')
URLstring = "https://" + accountName + ".blob.core.windows.net/" + username + "/" + timeReplaced + "_" + filename
uploaded = False
    blob_service.put_block_blob_from_path(
        username,
        timeReplaced + "_" + filename,
        # NOTE: the source path below is hard-coded in the original code and the
        # `file` argument passed to uploadBlob is currently unused.
        '/Users/rjhunter/Desktop/bridge.jpg',
        x_ms_blob_content_type='image/png',
        progress_callback=progress_callback
    )
    #if upload is successful, return a list with the timestamp and the final URL
    #else return a list containing an error message
    if uploaded:
        return [time, URLstring]
    else:
        return ["Failure to upload to Azure Blob Storage"]
def deleteBlob(username, blobURL):
exploded = blobURL.split("/")
blob_service.delete_blob(username, exploded[len(exploded)-1])
def listBlobs(username):
blobs = []
marker = None
while True:
batch = blob_service.list_blobs(username, marker=marker)
blobs.extend(batch)
if not batch.next_marker:
break
marker = batch.next_marker
for blob in blobs:
print(blob.name)
def progress_callback(current, total):
global uploaded
print ("Current bytes uploaded: ", current)
print ("===============")
print ("Total bytes of file: ", total)
print ()
if(current==total):
uploaded = True
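# Usage sketch for uploadBlob; the account, credentials and file names below are
# placeholders, not values from this module.
#   token, secret = ...  # oAuth pair obtained elsewhere
#   result = uploadBlob('jesse', None, 'bridge.jpg', token, secret)
#   if len(result) == 2:
#       timestamp, url = result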
```
|
{
"source": "jdg1837/Spotlight",
"score": 2
}
|
#### File: Spotlight/src/main.py
```python
import line_detection as ld
import blob_detection as bd
def main():
inputfile = ld.detect_lines('../images/parkinglot.png')
bd.detect_blobs(inputfile)
if __name__ == '__main__':
main()
```
|
{
"source": "jdg2011/Roark-Creek",
"score": 4
}
|
#### File: jdg2011/Roark-Creek/lib_auditor.py
```python
import os
def list_duplicates(seq):
seen = set()
seen_add = seen.add
    #adds all elements it doesn't know yet to seen and all others to seen_twice
seen_twice = set( x for x in seq if x in seen or seen_add(x) )
#turn the set into a list
return list( seen_twice )
Z=1
while Z==1:
lib=str(input("Enter a library file name (no extension): "))
if bool(os.path.exists('libraries/'+lib+'.txt')) is False:
print("Library \""+lib+".txt\" does not exist! Try again.")
else:
library=open('libraries/'+lib+'.txt','r',encoding='utf8')
a = []
for x in library: a.append(x)
print(list_duplicates(a))
library.close()
continue
```
|
{
"source": "jdgalviss/jetbot-ros2",
"score": 2
}
|
#### File: ws_server/ws_server/ws_server_node.py
```python
import rclpy
from rclpy.node import Node
from geometry_msgs.msg import Twist
import asyncio
import websockets
import json
import os
HOST_IP = os.getenv('HOST_IP', "0.0.0.0")
# WS_SERVER_ADDRESS = "192.168.0.167"
#WS_SERVER_ADDRESS = "localhost"
class WSServerNode(Node):
def __init__(self, port=8765):
super().__init__('ws_server')
# Speed commands publisher (commands are received by WS server)
self.cmd_publisher_ = self.create_publisher(Twist, 'cmd_vel', 10)
# Start WS Server and loop
start_server = websockets.serve(
self.run_server, HOST_IP, port)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
async def run_server(self, websocket, path):
# callback when cmd msg is received through WS
received_msg = await websocket.recv()
# Unpack info from json
received_msg_json = json.loads(received_msg)
steering = received_msg_json["steering"]
throttle = received_msg_json["throttle"]
print(steering)
# Cmd values into msg and publish
cmd_msg = Twist()
cmd_msg.linear.x = float(throttle)
cmd_msg.angular.z = float(steering)
self.cmd_publisher_.publish(cmd_msg)
def main(args=None):
print('Hi from ws_server.')
rclpy.init(args=args)
ws_server = WSServerNode()
rclpy.spin(ws_server)
ws_server.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
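# Client-side sketch: the server above expects one JSON message per connection,
# e.g. {"steering": 0.2, "throttle": 0.5}, which a websockets client could send:
#   async with websockets.connect('ws://<HOST_IP>:8765') as ws:
#       await ws.send(json.dumps({"steering": 0.2, "throttle": 0.5}))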
```
|
{
"source": "jdgaravito/portfolio-backend",
"score": 2
}
|
#### File: app/api/portfolio_api.py
```python
import fastapi
from fastapi import Depends, status
from fastapi.exceptions import HTTPException
from sqlalchemy.future import select
from sqlalchemy.ext.asyncio import AsyncSession
from typing import Optional, List
from app.db import get_session
from app.models.portfolio_model import Project
router = fastapi.APIRouter()
@router.get('/portfolio', response_model=List[Project],
status_code=status.HTTP_200_OK)
async def get_all_projects(session: AsyncSession = Depends(get_session)):
    statement = select(Project)
    result = await session.execute(statement)
projects = result.scalars().all()
# return [Project(name=project.name,
# summary=project.summary,
# description=project.description,
# category=project.category,
# award=project.award,
# url=project.url,
# published=project.published,
# image=project.image,
# images=project.images,
# learning=project.learning,
# tech=project.tech,
# tools=project.tools)
# for project in projects]
return projects
@router.post('/portfolio', response_model=Project, status_code=status.HTTP_201_CREATED)
async def add_project(project: Project, session: AsyncSession = Depends(get_session)):
project = Project(name=project.name,
summary=project.summary,
description=project.description,
category=project.category,
award=project.award,
url=project.url,
published=project.published,
image=project.image,
images=project.images,
learning=project.learning,
tech=project.tech,
tools=project.tools)
session.add(project)
await session.commit()
await session.refresh(project)
return project
@router.get('/portfolio/{project_id}', response_model=Project)
async def get_a_project(project_id: int, session: AsyncSession = Depends(get_session)):
    statement = select(Project).where(Project.id == project_id)
    result = await session.execute(statement)
    project = result.scalars().first()
    if project is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
    return project
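# Wiring sketch (the app module below is an assumption, not defined in this file):
#   from fastapi import FastAPI
#   from app.api import portfolio_api
#   app = FastAPI()
#   app.include_router(portfolio_api.router)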
```
|
{
"source": "jdgarrett/geowave",
"score": 2
}
|
#### File: store/redis/options.py
```python
from pygw.config import geowave_pkg
from pygw.store import DataStoreOptions
class RedisOptions(DataStoreOptions):
"""
Redis data store options.
"""
def __init__(self):
super().__init__(geowave_pkg.datastore.redis.config.RedisOptions())
def set_address(self, address):
"""
Sets the address of the Redis data store.
Args:
address (str): The address of the Redis data store.
"""
self._java_ref.setAddress(address)
def get_address(self):
"""
Returns:
The address of the Redis data store.
"""
return self._java_ref.getAddress()
def set_compression(self, compression):
"""
        Sets the compression to use for the Redis data store. Valid options are `SNAPPY`,
        `LZ4`, or `NONE`.
        Args:
            compression (str): The compression to use.
"""
converter = geowave_pkg.datastore.redis.config.RedisOptions.CompressionConverter()
self._java_ref.setCompression(converter.convert(compression))
def get_compression(self):
"""
Returns:
The compression used by the data store.
"""
return self._java_ref.getCompression().name()
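# Usage sketch, limited to the options object defined above; connecting an actual
# data store with these options is outside the scope of this module.
#   options = RedisOptions()
#   options.set_address('redis://127.0.0.1:6379')
#   options.set_compression('SNAPPY')
#   options.get_compression()  # -> 'SNAPPY'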
```
|
{
"source": "jdgenes/client-data-check",
"score": 3
}
|
#### File: jdgenes/client-data-check/get-client-data.py
```python
from __future__ import print_function
import pickle
import os.path
import sqlite3
import json
import pprint
import re
import time
from copy import deepcopy
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
spreadsheets = [
'timesheet ID', # This is a timesheet other data is compared to
'google sheet IDs also go in this list'
]
attendanceRanges = [
    "name of green attendance sheet and range",  # for example: JAN'20_GR!A2:Y52
    "name of blue attendance sheet and range"    # a second entry is required: attendanceRanges[1] is read below
]
dataRanges = [
# below are the examples of sheet names and their ranges
'Jan!A1:AF5',
'Feb!A1:AF5',
'Mar!A1:AF5',
'Apr!A1:AF5',
'May!A1:AF5',
'June!A1:AF5',
'July!A1:AF5',
'Aug!A1:AF5',
'Sep!A1:AF5',
'Oct!A1:AF5',
'Nov!A1:AF5',
'Dec!A1:AF5'
]
sheetObj = []
def main(sheetArr, rangeArr):
creds = None
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server()
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
sheetData = service.spreadsheets().values().get(spreadsheetId=sheetArr,
range=rangeArr).execute()
rowData = sheetData.get('values', [])
sheetObject = service.spreadsheets().get(spreadsheetId=sheetArr).execute()
print('making a google sheets request')
if not rowData:
print('No data found.')
else:
return [sheetObject, rowData]
if __name__ == '__main__':
attenGreen = main(spreadsheets[0], attendanceRanges[0])
attenBlue = main(spreadsheets[0], attendanceRanges[1])
atten = deepcopy(attenBlue[1] + attenGreen[1])
weekdays = atten[0][2:]
ispObj = {
'weekdays': weekdays
}
for i in range(1, len(spreadsheets)):
j = 1
s = main(spreadsheets[i], dataRanges[0])
pattern = atten[j][0].lower()
searchIn = re.compile(r"("+s[0]['properties']['title'].lower()[0:len(atten[j][0])]+")")
        # below, an exception is written if a client's name is used inconsistently in different sheets
if s[0]['properties']['title'].lower()[0:len(atten[j][0])] in "<NAME>":
searchIn = re.compile(r"smith, john")
while atten[j][0] == "":
j += 1
while not searchIn.search(pattern):
j += 1
try:
print("notmatched ", j-1, atten[j-1][0].lower(), " and ", i, s[0]['properties']['title'].lower())
pattern = atten[j][0].lower()
searchIn = re.compile(r"("+s[0]['properties']['title'].lower()[0:len(atten[j][0])]+")")
if s[0]['properties']['title'].lower()[0:len(atten[j][0])] in "<NAME>":
searchIn = re.compile(r"smith, john")
except IndexError:
print(s[0]['properties']['title'], " not found in data set")
break
while atten[j][0] == "":
j += 1
try:
pattern = atten[j][0].lower()
searchIn = re.compile(r"("+s[0]['properties']['title'].lower()[0:len(atten[j][0])]+")")
if s[0]['properties']['title'].lower()[0:len(atten[j][0])] in "<NAME>":
searchIn = re.compile(r"smith, john")
except IndexError:
print(s[0]['properties']['title'], " not found in data set")
break
if searchIn.search(pattern):
try:
print('item', "i", i, "j", j)
print(s[0]['properties']['title'].lower())
ispObj.setdefault(s[0]['properties']['title'].lower(), {
'data-set': s[1],
'attendance': atten[j]
})
print('----END MATCH----')
for i in range(0, 2):
time.sleep(7)
print('pausing for Google API limit ', i+1, "of 2")
except IndexError:
print("Final check not found in data set")
pprint.pprint(atten)
    with open('client-data.json', 'w') as cliJSON:
        cliJSON.write(json.dumps(ispObj))
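    # Shape of the JSON written above:
    #   {
    #     "weekdays": [...],                       # header row of the attendance sheet
    #     "<client sheet title, lowercased>": {
    #         "data-set": [...],                   # rows read from that client's sheet
    #         "attendance": [...]                  # matching attendance row
    #     },
    #     ...
    #   }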
```
|
{
"source": "JDGiacomantonio-98/jobShop-scheduling-simulatedAnniealing",
"score": 4
}
|
#### File: JDGiacomantonio-98/jobShop-scheduling-simulatedAnniealing/search.py
```python
def basic_search(target, vector: list, use_sort=False, use_py_sort=False):
i=0
while i < len(vector):
if vector[i] == target:
return True
i += 1
return False
def bin_search(target, vector: list, use_sort=False, use_py_sort=False):
	# assumes `vector` is sorted in ascending order
	if not vector:
		return False
	i = (len(vector) - 1) // 2
	if vector[i] == target:
		return True
	if vector[i] > target:
		return bin_search(target, vector[:i])
	return bin_search(target, vector[i + 1:])
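# Small self-check; bin_search assumes ascending order, so unsorted input must be
# sorted first (the use_sort/use_py_sort flags above are accepted but unused).
if __name__ == '__main__':
	data = sorted([42, 7, 19, 3, 25])
	assert basic_search(19, data)
	assert bin_search(19, data)
	assert not bin_search(8, data)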
```
|
{
"source": "jdgillespie91/trackerSpend",
"score": 4
}
|
#### File: trackerSpend/configs/config.py
```python
import configparser
import os
class Config(object):
"""
When instantiated, the Config() class provides an object whose attributes are the properties defined in [section] of config.ini (where config.ini is a configuration file in the same directory as this module).
"""
def __init__(self, section):
""" Sets the properties of the config file as attributes of the class. """
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))
for key, value in config.items(section):
setattr(self, key, value)
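# Usage sketch: given a config.ini section such as
#   [expenditure_form]
#   url = https://example.invalid/form
# (section name taken from the scripts in this repository, value hypothetical),
# the properties become attributes:
#   conf = Config('expenditure_form')
#   conf.url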
```
#### File: data/expenditure/submit_automated_expenditure.py
```python
import datetime
import gspread
import json
import logging
import os
import requests
import smtplib
import sys
from configs import config
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from oauth2client.client import SignedJwtAssertionCredentials
class Script:
def __init__(self):
self.today = datetime.datetime.today()
self.directory = os.path.dirname(__file__)
self.filename = os.path.splitext(os.path.basename(__file__))[0]
self.path = os.path.join(self.directory, self.filename)
class Flag(Script):
def __init__(self, entry):
Script.__init__(self) # Change this to super if more pythonic.
self.today = self.today.strftime('%Y-%m-%d')
self.directory = os.path.join(self.directory, 'flags')
self.filename = '{0}_{1}.flag'.format(entry.category, self.today)
self.path = os.path.join(self.directory, self.filename)
def exists(self):
if os.path.isfile(self.path):
return True
else:
return False
def touch(self):
open(self.path, 'w').close()
def untouch(self):
os.remove(self.path)
class Entry:
def __init__(self, amount, category, peer_pressure, notes, frequency, due_date, active):
self.amount = amount
self.category = category
self.peer_pressure = peer_pressure
self.notes = notes
self.frequency = frequency
self.due_date = due_date
self.active = active
class Form:
def __init__(self, entry):
self.amount = entry.amount
self.category = entry.category
self.peer_pressure = entry.peer_pressure
self.notes = entry.notes
self.conf = config.Config('expenditure_form')
self.submission = {'entry.1788911046': self.amount,
'entry.22851461': '__other_option__',
'entry.22851461.other_option_response': self.category,
'entry.2106932303': self.peer_pressure,
'entry.1728679999': self.notes}
self.response_code = None
def submit(self):
response = requests.post(self.conf.url, self.submission)
self.response_code = response.status_code
def email(self, success):
# The following code is based on
# http://stackoverflow.com/questions/778202/smtplib-and-gmail-python-script-problems
# http://en.wikibooks.org/wiki/Python_Programming/Email
# I need to troubleshoot and test for errors.
message = MIMEMultipart()
message['From'] = self.conf.sender
message['To'] = self.conf.recipient
message['Subject'] = 'Expenditure Submission Update (Automated Email)'
if success:
body = 'The following entry has been submitted.\n\nAmount: {0}\nCategory: {1}\nPeer pressure: {2}\n' \
'Notes: {3}\n'.format(self.amount, self.category, self.peer_pressure, self.notes)
else:
body = 'The following entry failed submission.\n\nAmount: {0}\nCategory: {1}\nPeer pressure: {2}\n' \
'Notes: {3}\n'.format(self.amount, self.category, self.peer_pressure, self.notes)
message.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(self.conf.username, self.conf.password)
server.sendmail(self.conf.sender, self.conf.recipient, message.as_string())
server.close()
# Initialise the Entry class based on a list row.
def create_entry(row):
category = row[1]
peer_pressure = row[2]
notes = row[3]
frequency = row[4]
active = True if row[6] == 'Yes' else False
# We assign zero to both amount and due_date if either are invalid types. We do this silently because the email
# confirmation will contain the details of the submission and highlight any issues that need to be addressed.
try:
amount = float(row[0])
due_date = int(row[5])
except (TypeError, ValueError):
amount = 0
due_date = 0
entry = Entry(amount, category, peer_pressure, notes, frequency, due_date, active)
return entry
def create_logger(script):
today = script.today.strftime('%Y-%m-%d_%H:%M:%S')
directory = os.path.join(script.directory, 'logs')
filename = '{0}_{1}.log'.format(script.filename, today)
path = os.path.join(directory, filename)
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
# Add file handler to logger.
file_handler = logging.FileHandler(path)
formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s', '%Y-%m-%d %H:%M:%S')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.debug('Log file created: {0}\n'.format(path))
# Add smtp handler to logger.
# smtp_handler = logging.handlers.SMTPHandler(... # Complete this
# logger.debug('SMTP functionality configured.')
return logger
def parse_entries_sheet():
conf = config.Config('expenditure_entries')
json_key = json.load(open(conf.key))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], bytes(json_key['private_key'], 'UTF-8'), scope)
session = gspread.authorize(credentials)
workbook = session.open_by_key(conf.workbook)
worksheet = workbook.worksheet(conf.worksheet)
# Parse row-by-row until an empty row is encountered (data starts on second row).
row_index = 2
entries = []
while worksheet.row_values(row_index) and row_index <= worksheet.row_count:
row = worksheet.row_values(row_index)
entry = create_entry(row)
entries.append(entry)
row_index += 1
return entries
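# Row layout implied by create_entry above (columns A-G, data from row 2 onwards):
#   [amount, category, peer_pressure, notes, frequency, due_date, active('Yes'/'No')]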
if __name__ == '__main__':
script = Script()
logger = create_logger(script)
logger.info('Processing entries sheet.')
entries = parse_entries_sheet()
logger.info('Entries sheet processed.\n')
for entry in entries:
logger.info('Processing entry: {0}.'.format(entry.category))
if entry.active:
logger.info('Entry is active. Continuing...')
flag = Flag(entry)
if not flag.exists():
logger.info('The flag file does not exist. Touching...')
flag.touch()
if entry.frequency == 'Monthly':
if entry.due_date == script.today.day: # Think about introducing a "today" variable. I don't think it's logical to include "today" in the Script class.
logger.info('An entry is required. Submitting...')
form = Form(entry)
form.submit()
if form.response_code == requests.codes.ok: # Have this as try: form.submit() as opposed to if/else (will read better).
logger.info('The submission was accepted. Moving to next entry.\n')
form.email(success=True)
else:
logger.info('The submission was not accepted. '
'Removing flag file and moving to next entry.\n')
form.email(success=False)
flag.untouch()
else:
logger.info('A submission is not required today. '
'Removing flag file and moving to next entry.\n'.format(entry.frequency))
flag.untouch()
else:
logger.info('{0} spend is not yet implemented. '
'Removing flag file and moving to next entry.\n'.format(entry.frequency))
flag.untouch()
continue
else:
logger.info('The flag file exists. Moving to next entry.\n')
else:
logger.info('Entry is inactive. Moving to next entry.\n')
logger.info('End of script.')
sys.exit(0)
```
#### File: tests/utils/get_message_unit_tests.py
```python
import unittest
from utils import get_message
class GetMessageUnitTests(unittest.TestCase):
def test_get_message_is_callable(self):
self.assertTrue(callable(get_message))
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(GetMessageUnitTests)
unittest.TextTestRunner(verbosity=2).run(suite)
```
#### File: tests/utils/publish_message_unit_tests.py
```python
import unittest
from utils import publish_message
class PublishMessageUnitTests(unittest.TestCase):
def test_publish_message_is_callable(self):
self.assertTrue(callable(publish_message))
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(PublishMessageUnitTests)
unittest.TextTestRunner(verbosity=2).run(suite)
```
|
{
"source": "jdgillespie91/twitter-ads",
"score": 2
}
|
#### File: twitter-ads/tests/test_main.py
```python
from twads import Client
import pytest
import json
import requests_mock
@pytest.fixture()
def client():
return Client(
consumer_key='foo', consumer_secret='foo', access_key='foo', access_secret='foo'
)
@pytest.fixture(params=['single', 'multiple', 'bad_auth'])
def accounts(request):
with open('tests/responses/accounts_{}.json'.format(request.param), 'r') as f:
return json.load(f)
class TestGetAccounts:
def test_get_accounts(self, client, accounts):
try:
expected_response = accounts['data']
except KeyError:
expected_response = accounts['errors']
with requests_mock.mock() as m:
m.get('https://ads-api.twitter.com/0/accounts', json=accounts)
actual_response = client.get_accounts()
assert expected_response == actual_response
```
|
{
"source": "jdglaser/census-dw",
"score": 3
}
|
#### File: census-dw/python/Generate_Employee_Codes_Data.py
```python
__author__ = "<NAME>"
__last_updated__ = "2019-11-08"
import pandas as pd
from sqlalchemy import create_engine
values = {
"001": "All establishments",
"204": "Establishments with no paid employees",
"205": "Establishments with paid employees",
"207": "Establishments with less than 10 employees",
"209": "Establishments with less than 20 employees",
"210": "Establishments with less than 5 employees",
"211": "Establishments with less than 4 employees",
"212": "Establishments with 1 to 4 employees",
"213": "Establishments with 1 employee",
"214": "Establishments with 2 employees",
"215": "Establishments with 3 or 4 employees",
"219": "Establishments with 0 to 4 employees",
"220": "Establishments with 5 to 9 employees",
"221": "Establishments with 5 or 6 employees",
"222": "Establishments with 7 to 9 employees",
"223": "Establishments with 10 to 14 employees",
"230": "Establishments with 10 to 19 employees",
"231": "Establishments with 10 to 14 employees",
"232": "Establishments with 15 to 19 employees",
"235": "Establishments with 20 or more employees",
"240": "Establishments with 20 to 99 employees",
"241": "Establishments with 20 to 49 employees",
"242": "Establishments with 50 to 99 employees",
"243": "Establishments with 50 employees or more",
"249": "Establishments with 100 to 499 employees",
"250": "Establishments with 100 or more employees",
"251": "Establishments with 100 to 249 employees",
"252": "Establishments with 250 to 499 employees",
"253": "Establishments with 500 employees or more",
"254": "Establishments with 500 to 999 employees",
"260": "Establishments with 1,000 employees or more",
"261": "Establishments with 1,000 to 2,499 employees",
"262": "Establishments with 1,000 to 1,499 employees",
"263": "Establishments with 1,500 to 2,499 employees",
"270": "Establishments with 2,500 employees or more",
"271": "Establishments with 2,500 to 4,999 employees",
"272": "Establishments with 5,000 to 9,999 employees",
"273": "Establishments with 5,000 employees or more",
"280": "Establishments with 10,000 employees or more",
"281": "Establishments with 10,000 to 24,999 employees",
"282": "Establishments with 25,000 to 49,999 employees",
"283": "Establishments with 50,000 to 99,999 employees",
"290": "Establishments with 100,000 employees or more",
"298": "Covered by administrative records"
}
def load_employee_codes(engine):
df = pd.DataFrame({"CBP_Employee_Size_Code":list(values.keys()),"Long_Description":list(values.values())})
df["Short_Description"] = df["Long_Description"].str.replace("Establishments with ","").str.replace(" to ","-").str.title()
df.to_sql("DIM_CBP_Employee_Sizes",engine,if_exists="append",index=False)
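# Worked example of the transformation above: code "212" yields
#   Long_Description : "Establishments with 1 to 4 employees"
#   Short_Description: "1-4 Employees"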
if __name__ == "__main__":
    engine = create_engine('INSERT CONNECTION STRING', fast_executemany=True)
    load_employee_codes(engine)
    print("Done")
```
|
{
"source": "jdgoal512/Life",
"score": 4
}
|
#### File: jdgoal512/Life/life.py
```python
import time
# Rules
D = 0 # Die
G = 1 # Grow
S = 2 # Stay the same
class Life:
def __init__(self, width=10, height=10, rules=[D, D, S, G, D, D, D, D, D]):
self.generation = 0
self.width = width
self.height = height
self.rules = rules
self.grid = [[False for i in range(height)] for j in range(width)]
def get(self, x, y):
# No wrapping
if 0 <= x < self.width and 0 <= y < self.height:
return self.grid[x][y]
return 0
def get_neighbors(self, x, y):
neighbors = 0
for x_offset in [-1, 0, 1]:
for y_offset in [-1, 0, 1]:
# Don't count the offset 0, 0
if not x_offset == y_offset == 0:
if self.get(x+x_offset, y+y_offset):
neighbors += 1
return neighbors
def print_grid(self):
print(f'Generation {self.generation}')
print('+{}+'.format('-'*self.width))
for row in self.grid:
row_text = ''.join(['0' if x else ' ' for x in row])
print(f'|{row_text}|')
print('+{}+'.format('-'*self.width))
def next_generation(self):
next_grid = [y[:] for y in self.grid[:]]
for x in range(self.width):
for y in range(self.height):
neighbors = self.get_neighbors(x, y)
next_state = self.rules[neighbors]
if next_state == G:
next_grid[x][y] = True
elif next_state == D:
next_grid[x][y] = False
self.grid = next_grid
self.generation += 1
def add_figure(self, points, x=5, y=5):
for a, b in points:
self.grid[x+a][y+b] = True
RPENTOMINO = [[1, 0], [2, 0], [0, 1], [1, 1], [1, 2]]
BLOCK = [[0, 0], [1, 0], [0, 1], [1, 1]]
BLINKER = [[1, 0], [1, 1], [1, 2]]
BEACON = [[0, 0], [0, 1], [1, 0], [2, 3], [3, 2], [3, 3]]
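# A standard glider, usable with add_figure like the patterns above,
# e.g. game.add_figure(GLIDER, x=1, y=1).
GLIDER = [[1, 0], [2, 1], [0, 2], [1, 2], [2, 2]]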
if __name__ == '__main__':
game = Life()
# game.add_figure(BEACON, x=2, y=1)
game.add_figure(RPENTOMINO, x=4, y=4)
while True:
game.print_grid()
game.next_generation()
time.sleep(1)
```
|
{
"source": "jdgoal512/markov-passwords",
"score": 3
}
|
#### File: jdgoal512/markov-passwords/markov.py
```python
import argparse
import numpy
import random
unique_words = set()
#Read in the text
def getWords(filename):
global unique_words
unique_words.add("$TOP")
try:
with open(filename) as f:
content = f.readlines();
#Get list of words
for text in content:
words = text.split()
for word in words:
if (word != ""):
unique_words.add(word)
except FileNotFoundError as exception:
print("Error: " + filename + " doesn't exist, aborting")
exit()
return unique_words
def addProbability(filename):
with open(filename) as f:
content = f.readlines();
#Add probabilities
global stats
global all_words
for text in content:
if text != "":
text = text[:-1] + " $TOP"
words = text.split()
last_word = words[0]
for word in words[1:]:
if (word != ""):
stats[all_words.index(last_word )][all_words.index(word)] += 1
last_word = word
return stats
#Normalize the probabilities
def normalizeStats():
global stats
global all_words
for i in range(len(all_words)):
total = 0
for j in range(len(all_words)):
total += stats[i][j]
if total > 0:
for j in range(len(all_words)):
stats[i][j] = stats[i][j] / total
return stats
#Generate a phrase
def babble(max_length = 3):
global all_words
current_word = all_words[random.randint(0, len(all_words) - 1)]
text = current_word
current_count = 1
while current_word != "$TOP" and current_count < max_length:
pval = stats[all_words.index(current_word)]
next_index = numpy.nonzero(numpy.random.multinomial(n=1, pvals=pval, size=1))[1][0]
next_word = all_words[next_index]
text = text + next_word
current_word = next_word;
current_count += 1
#Remove $TOP
if text[-4:] == "$TOP":
text = text[:-4]
#Don't return blank lines
if text == "":
return babble(max_length)
return text
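# Example invocation using the argparse flags defined below (file names are
# placeholders): python markov.py -i words.txt -o candidates.txt -n 1000 -l 3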
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input-file', dest='input_file', type=str, default="", help='Name of file to read')
parser.add_argument('-o', '--output-file', dest='output_file', type=str, default="", help='File to write output to (defaults to stdout)')
parser.add_argument('-n', '--number-of-times', dest='number_of_times', type=int, default=1, help='Number of phrases to generate')
parser.add_argument('-l', '--max-length', dest='max_length', type=int, default=3, help='Max number of words to be in a phrase')
args = parser.parse_args()
#Get command line parameters
input_file = args.input_file
output_file = args.output_file
number_of_phrases = args.number_of_times
max_length = args.max_length
if input_file == "":
parser.print_help()
exit()
if output_file == "":
write_output = False
else:
write_output = True
#Get all the words from the input file
getWords((input_file))
all_words = list(unique_words)
#Create probability matrix
stats = [[0 for x in range(len(all_words))] for y in range(len(all_words))]
addProbability(input_file)
normalizeStats()
#Generate phrases
if (write_output):
output = open(output_file, 'w')
for i in range(number_of_phrases):
output.write(babble(max_length) + "\n")
output.close()
else:
for i in range(number_of_phrases):
print(babble(max_length))
```
|
{
"source": "jdgoettsch/aldryn-search",
"score": 2
}
|
#### File: aldryn-search/aldryn_search/receivers.py
```python
from django.dispatch.dispatcher import receiver
from cms.models.titlemodels import Title
from cms.signals import post_publish, post_unpublish
from .signals import add_to_index, remove_from_index
@receiver(post_publish, dispatch_uid='publish_cms_page')
def publish_cms_page(sender, instance, language, **kwargs):
title = instance.publisher_public.get_title_obj(language)
add_to_index.send(sender=Title, instance=title, object_action='publish')
@receiver(post_unpublish, dispatch_uid='unpublish_cms_page')
def unpublish_cms_page(sender, instance, language, **kwargs):
title = instance.publisher_public.get_title_obj(language)
remove_from_index.send(sender=Title, instance=title, object_action='unpublish')
```
|
{
"source": "jdgordon/includegrapher",
"score": 3
}
|
#### File: jdgordon/includegrapher/checker.py
```python
import os
import sys
import re
import glob
import pygraphviz as pgv
class Include(object):
def __init__(self, fullpath):
self._filename = fullpath
self._includes = []
@property
def includes(self):
return self._includes
@property
def filename(self):
return self._filename
def add_include(self, inc):
if inc not in self.includes:
self.includes.append(inc)
def __repr__(self):
return "Include(%s)" % (self.filename)
def find_include(request, include_paths):
print "Finding path for:", request, include_paths
for root in include_paths:
full = os.path.join(root, request)
if os.path.exists(full):
return full
def parse_file(filename, include_paths, parsed_files):
print "Parsing:", filename
include = Include(filename)
parsed_files[filename] = include
with open(filename, 'r') as fh:
for line in fh.readlines():
matches = re.match('#\s*include\s+["<](.*)[>"]', line)
if matches:
fname = find_include(matches.group(1), [os.path.dirname(filename)] + include_paths)
if fname is None:
fname = matches.group(1)
if fname in parsed_files:
inc = parsed_files[fname]
else:
inc = Include(fname)
else:
if fname not in parsed_files:
inc = parse_file(fname, include_paths, parsed_files)
else:
inc = parsed_files[fname]
include.add_include(inc)
return include
def safe_print(include):
print "--> ", include
visited = []
def recurse(child):
print child.filename
visited.append(child)
for x in child.includes:
if x not in visited:
recurse(x)
else:
print "**", x
recurse(include)
def create_graph(include):
visited = []
G = pgv.AGraph(strict=False, directed=True)
def recurse(child):
visited.append(child)
for x in child.includes:
if x not in visited:
G.add_edge(child.filename, x.filename)
recurse(x)
recurse(include)
return G
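# Example invocation matching the argparse options below (directory names are
# placeholders): python checker.py --source-dir src -R include third_party/include
# One <source-file>.png graph is written next to each parsed .c/.cpp file.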
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Check #includes')
parser.add_argument('include_dirs', metavar='INCLUDE_PATHS', type=str, nargs='+',
help='List of include dirs to use for search paths')
parser.add_argument('--source-dir', dest='sourcedir', action='store',
required=True, help='Directory to start parsing from')
parser.add_argument('-R', '--recursive', action='store_true', default=False,
help='Recursivly scan the source directory')
args = parser.parse_args()
include_paths = args.include_dirs
roots = []
includes = {}
cpp_files = {}
if args.recursive is True:
for root, dirnames, filenames in os.walk(args.sourcedir):
roots.append(root)
else:
roots = [args.sourcedir]
for root in roots:
for filename in glob.glob(os.path.join(root, '*.c')) + glob.glob(os.path.join(root, '*.cpp')):
print filename
cpp_files[filename] = parse_file(filename, include_paths, includes)
for key, x in cpp_files.iteritems():
G = create_graph(x)
G.layout('dot')
G.draw('%s.png' %(key))
```
|
{
"source": "jdgwartney/boundary-api-cli",
"score": 2
}
|
#### File: boundary-api-cli/boundary/alarm_list.py
```python
from boundary import ApiCli
from boundary.alarm_common import result_to_alarm
import json
import requests
class AlarmList(ApiCli):
def __init__(self):
ApiCli.__init__(self)
def get_description(self):
return "List alarm definitions associated with the {0} account".format(self.product_name)
def get_api_parameters(self):
self.path = "v2/alarms"
self.method = "GET"
def handle_key_word_args(self):
pass
def _handle_api_results(self):
# Only process if we get HTTP result of 200
if self._api_result.status_code == requests.codes.ok:
alarm_result = json.loads(self._api_result.text)
alarms = []
for alarm in alarm_result['result']:
alarms.append(result_to_alarm(alarm))
return alarms
else:
return None
```
#### File: boundary-api-cli/boundary/alarm_modify.py
```python
import json
import requests
from boundary import ApiCli
from boundary.alarm_common import result_to_alarm
class AlarmModify(ApiCli):
def __init__(self, update):
ApiCli.__init__(self)
self._update = update
# Mapping of alarm types to corresponding codes
self._alarm_types = {"threshold": 3, "host": 4, "api": 5}
self._aggregate = None
self._actions = None
self._alarm_name = None
self._alarm_id = None
self._host_group_id = None
self._interval = None
self._is_disabled = None
self._metric = None
self._note = None
self._operation = None
self._per_host_notify = None
self._threshold = None
self._type_id = None
self._notify_clear = None
self._notify_set = None
self._payload = {}
self._trigger_interval = None
self._timeout_interval = None
def add_arguments(self):
ApiCli.add_arguments(self)
self.parser.add_argument('-m', '--metric', dest='metric', action='store',
required=True,
metavar='metric_name', help='Name of the metric to alarm')
self.parser.add_argument('-g', '--trigger-aggregate', dest='aggregate', action='store',
required=(False if self._update else True),
choices=['sum', 'avg', 'max', 'min'], help='Metric aggregate to alarm upon')
self.parser.add_argument('-o', '--trigger-operation', dest='operation', action='store',
required=(False if self._update else True),
choices=['eq', 'gt', 'lt'], help='Trigger threshold comparison')
self.parser.add_argument('-v', '--trigger-threshold', dest='threshold', action='store',
required=(False if self._update else True),
metavar='value', help='Trigger threshold value')
self.parser.add_argument('-r', '--trigger-interval', dest='trigger_interval', action='store',
metavar='trigger_interval', required=False, type=int,
help='Interval of time in ms that the alarm state should be in fault ' +
'before triggering')
self.parser.add_argument('-j', '--timeout-interval', dest='timeout_interval', action='store',
metavar = 'timeout_interval', required=False, type=int,
help='The interval after which an individual host state should resolve by timeout. ' +
'Default: 259200000 milli-seconds (3 days)')
self.parser.add_argument('-u', '--host-group-id', dest='host_group_id', action='store', metavar='host_group_id',
type=int, help='Host group the alarm applies to')
self.parser.add_argument('-d', '--note', dest='note', action='store', metavar='note',
help='A description or resolution of the alarm')
self.parser.add_argument('-k', '--action', dest='actions', action='append', metavar='action-id', type=int,
help='An action to be performed when an alarm is triggered')
self.parser.add_argument('-c', '--notify-clear', dest='notify_clear', action='store', default=None,
choices=['true', 'false'],
help='Perform actions when all hosts')
self.parser.add_argument('-s', '--notify-set', dest='notify_set', action='store', default=None,
choices=['true', 'false'],
help='Perform actions when an alarm threshold and interval is breached.')
self.parser.add_argument('-p', '--per-host-notify', dest='per_host_notify', action='store',
default=None, choices=['true', 'false'],
help='An alarm by default will run the associated actions when \
any server in the host group violates the threshold, and then at the end when \
all servers are back within the threshold. If perHostNotify is set to true, \
the actions will run when ANY server in the group violates \
and falls back within the threshold.')
self.parser.add_argument('-x', '--is-disabled', dest='is_disabled', action='store', default=None,
choices=['true', 'false'], help='Enable or disable the alarm definition')
def get_arguments(self):
"""
Extracts the specific arguments of this CLI
"""
ApiCli.get_arguments(self)
self._actions = self.args.actions if self.args.actions is not None else None
self._alarm_name = self.args.alarm_name if self.args.alarm_name is not None else None
self._metric = self.args.metric if self.args.metric is not None else None
self._aggregate = self.args.aggregate if self.args.aggregate is not None else None
self._operation = self.args.operation if self.args.operation is not None else None
self._threshold = self.args.threshold if self.args.threshold is not None else None
self._trigger_interval = self.args.trigger_interval if self.args.trigger_interval is not None else None
self._host_group_id = self.args.host_group_id if self.args.host_group_id is not None else None
self._note = self.args.note if self.args.note is not None else None
self._per_host_notify = self.args.per_host_notify if self.args.per_host_notify is not None else None
self._is_disabled = self.args.is_disabled if self.args.is_disabled is not None else None
self._notify_clear = self.args.notify_clear if self.args.notify_clear is not None else None
self._notify_set = self.args.notify_set if self.args.notify_set is not None else None
self._timeout_interval = self.args.timeout_interval if self.args.timeout_interval is not None else None
def get_api_parameters(self):
# Create trigger predicate dictionary
predicate = {}
if self._aggregate is not None:
predicate['agg'] = self._aggregate
if self._operation is not None:
predicate['op'] = self._operation
if self._threshold is not None:
predicate['val'] = float(self._threshold)
if 'agg' in predicate or 'op' in predicate or 'val' in predicate:
self._payload['triggerPredicate'] = predicate
# Create payload dictionary
if self._alarm_name:
self._payload['name'] = self._alarm_name
if self._host_group_id is not None:
self._payload['hostgroupId'] = self._host_group_id
if self._interval is not None:
self._payload['triggerInterval'] = self._interval
if self._is_disabled is not None:
self._payload['isDisabled'] = self._is_disabled
if self._metric is not None:
self._payload['metric'] = self._metric
if self._note is not None:
self._payload['note'] = self._note
if self._actions is not None:
self._payload['actions'] = self._actions
if self._per_host_notify is not None:
self._payload['perHostNotify'] = True if self._per_host_notify == 'yes' else False
if self._is_disabled is not None:
self._payload['isDisabled'] = True if self._is_disabled == 'yes' else False
if self._notify_clear is not None:
self._payload['notifyClear'] = self._notify_clear
if self._notify_set is not None:
self._payload['notifySet'] = self._notify_set
if self._timeout_interval is not None:
self._payload['timeoutInterval'] = self._timeout_interval
if self._trigger_interval is not None:
self._payload['triggerInterval'] = self._trigger_interval
self.data = json.dumps(self._payload, sort_keys=True)
self.headers = {'Content-Type': 'application/json'}
self.path = 'v2/alarms'
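    # Illustrative payload produced above (values are hypothetical):
    #   {"actions": [123], "hostgroupId": 1, "metric": "CPU", "name": "high-cpu",
    #    "triggerInterval": 60000,
    #    "triggerPredicate": {"agg": "avg", "op": "gt", "val": 0.9}}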
def handle_key_word_args(self):
self._actions = self._kwargs['actions'] if 'actions' in self._kwargs else None
self._aggregate = self._kwargs['aggregate'] if 'aggregate' in self._kwargs else None
self._alarm_name = self._kwargs['name'] if 'name' in self._kwargs else None
self._alarm_id = self._kwargs['id'] if 'id' in self._kwargs else None
self._operation = self._kwargs['operation'] if 'operation' in self._kwargs else None
self._threshold = self._kwargs['threshold'] if 'threshold' in self._kwargs else None
self._host_group_id = self._kwargs['host_group_id'] if 'host_group_id' in self._kwargs else None
self._trigger_interval = self._kwargs['trigger_interval'] if 'trigger_interval' in self._kwargs else None
self._metric = self._kwargs['metric'] if 'metric' in self._kwargs else None
self._note = self._kwargs['note'] if 'note' in self._kwargs else None
self._actions = self._kwargs['actions'] if 'actions' in self._kwargs else None
self._timeout_interval = self._kwargs['timeout_interval'] if 'timeout_interval' in self._kwargs else None
self._notify_clear = self._kwargs['notifyClear'] if 'notifyClear' in self._kwargs else None
self._notify_set = self._kwargs['notifySet'] if 'notifySet' in self._kwargs else None
self.data = json.dumps(self._payload, sort_keys=True)
self.headers = {'Content-Type': 'application/json'}
self.path = 'v2/alarms'
def _handle_api_results(self):
# Only process if we get HTTP result of 201
if self._api_result.status_code == requests.codes.created:
alarm_result = json.loads(self._api_result.text)
return result_to_alarm(alarm_result)
else:
return None
```
#### File: boundary-api-cli/boundary/metric_modify.py
```python
import json
from boundary import MetricCommon
"""
Common Base class for defining and update metric definitions
"""
class MetricModify (MetricCommon):
def __init__(self, update):
"""
"""
MetricCommon.__init__(self)
self.update = update
self.metricName = None
self.displayName = None
self.displayNameShort = None
self.description = None
self.aggregate = None
self.unit = None
self.resolution = None
self.isDisabled = None
self.type = None
def add_arguments(self):
"""
Add the specific arguments of this CLI
"""
MetricCommon.add_arguments(self)
self.parser.add_argument('-n', '--metric-name', dest='metricName', action='store',
required=True, metavar='metric_name', help='Metric identifier')
self.parser.add_argument('-d', '--display-name', dest='displayName', action='store',
required=True, metavar='display_name', help='Metric display name')
self.parser.add_argument('-s', '--display-name-short', dest='displayNameShort', action='store',
required=True, metavar='display_short_name', help='Metric short display name')
self.parser.add_argument('-i', '--description', dest='description', action='store',
required=not self.update, metavar='description', help='Metric description')
self.parser.add_argument('-g', '--aggregate', dest='aggregate', action='store',
required=True, choices=['avg', 'max', 'min', 'sum'],
help='Metric default aggregate')
self.parser.add_argument('-u', '--unit', dest='unit', action='store',
required=False, choices=['percent', 'number', 'bytecount', 'duration'],
help='Metric unit')
self.parser.add_argument('-r', '--resolution', dest='resolution', action='store', metavar='resolution',
required=False, help='Metric default resolution')
self.parser.add_argument('-y', '--type', dest='type', action='store', default=None,
required=False, metavar='type', help='Sets the type metadata field')
self.parser.add_argument('-x', '--is-disabled', dest='isDisabled', action='store', default=None,
required=False,
choices=['true', 'false'], help='Enable or disable the metric definition')
def get_arguments(self):
"""
Extracts the specific arguments of this CLI
"""
MetricCommon.get_arguments(self)
if self.args.metricName is not None:
self.metricName = self.args.metricName
if self.args.displayName is not None:
self.displayName = self.args.displayName
if self.args.displayNameShort is not None:
self.displayNameShort = self.args.displayNameShort
if self.args.description is not None:
self.description = self.args.description
if self.args.aggregate is not None:
self.aggregate = self.args.aggregate
if self.args.unit is not None:
self.unit = self.args.unit
if self.args.resolution is not None:
self.resolution = self.args.resolution
if self.args.isDisabled is not None:
self.isDisabled = self.args.isDisabled
if self.args.type is not None:
self.type = self.args.type
data = {}
if self.metricName is not None:
data['name'] = self.metricName
if self.displayName is not None:
data['displayName'] = self.displayName
if self.displayNameShort is not None:
data['displayNameShort'] = self.displayNameShort
if self.description is not None:
data['description'] = self.description
if self.aggregate is not None:
data['defaultAggregate'] = self.aggregate
if self.unit is not None:
data['unit'] = self.unit
if self.resolution is not None:
data['defaultResolutionMS'] = self.resolution
if self.isDisabled is not None:
            data['isDisabled'] = True if self.isDisabled == 'true' else False
if self.type is not None:
data['type'] = self.type
self.path = "v1/metrics/{0}".format(self.metricName)
self.data = json.dumps(data, sort_keys=True)
self.headers = {'Content-Type': 'application/json', "Accept": "application/json"}
```
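For reference, this is roughly the JSON body that `get_arguments()` assembles before the request is issued (the HTTP verb itself is handled by the base class and is not shown in this snippet). The metric name and values below are made-up illustrations, not output captured from the CLI.

```python
import json

# Hypothetical payload for a metric named CPU_USAGE (values are illustrative)
example = {
    "name": "CPU_USAGE",
    "displayName": "CPU Usage",
    "displayNameShort": "CPU",
    "description": "Percent of CPU in use",
    "defaultAggregate": "avg",
    "unit": "percent",
    "defaultResolutionMS": 1000,
    "isDisabled": False,
}
print(json.dumps(example, sort_keys=True))
```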
#### File: boundary-api-cli/boundary/plugin_add.py
```python
from boundary import PluginBase
class PluginAdd (PluginBase):
def __init__(self):
PluginBase.__init__(self)
self.method = "PUT"
self.path = "v1/plugins/private"
self.pluginName = None
self.organizationName = None
self.repositoryName = None
def add_arguments(self):
PluginBase.add_arguments(self)
self.parser.add_argument('-o', '--organization-name', dest='organizationName', action='store',
required=True, metavar="organization_name",
help='Name of the GitHub user or organization')
self.parser.add_argument('-r', '--repository-name', dest='repositoryName', action='store',
required=True, metavar="repository_name",
help='Name of the GitHub repository')
def get_arguments(self):
"""
Extracts the specific arguments of this CLI
"""
PluginBase.get_arguments(self)
if self.args.organizationName is not None:
self.organizationName = self.args.organizationName
if self.args.repositoryName is not None:
self.repositoryName = self.args.repositoryName
self.path = "v1/plugins/private/{0}/{1}/{2}".format(self.pluginName, self.organizationName, self.repositoryName)
def get_description(self):
return 'Imports a meter plugin from a GitHub repository into a {0} account'.format(self.product_name)
```
#### File: boundary-api-cli/boundary/property_handler.py
```python
class PropertyHandler(object):
def __init__(self):
self._properties = None
def _process_properties(self, properties):
"""
Transforms the command line properties into python dictionary
:return:
"""
if properties is not None:
self._properties = {}
for p in properties:
                d = p.split('=', 1)
                self._properties[d[0]] = d[1]
def _add_property_argument(self, parser, help_text):
parser.add_argument('-p', '--property', dest='properties', action='append',
required=False, metavar='property=value', help=help_text)
```
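A quick sketch of how `PropertyHandler` behaves when wired to an argument parser; the parser setup here is illustrative and not part of the module above.

```python
import argparse

handler = PropertyHandler()
parser = argparse.ArgumentParser()
handler._add_property_argument(parser, 'Property in the form name=value')

# Each -p option is appended to args.properties and split into key/value pairs
args = parser.parse_args(['-p', 'color=red', '-p', 'size=10'])
handler._process_properties(args.properties)
print(handler._properties)  # {'color': 'red', 'size': '10'}
```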
#### File: boundary-api-cli/boundary/relay_get_config.py
```python
from boundary import ApiCli
class RelayGetConfig(ApiCli):
def __init__(self):
ApiCli.__init__(self)
self.meter = None
self.since = None
def add_arguments(self):
"""
"""
ApiCli.add_arguments(self)
self.parser.add_argument('-n', '--name', metavar='meter_name', dest='meter', action='store', required=True,
help='Name of the meter to set plugin configuration information')
self.parser.add_argument('-s', '--since', metavar='timestamp', dest='since', action='store', required=False,
help='Unix timestamp of when configuration was last checked. '
+ 'If configuration has not changed, null is returned.')
def get_arguments(self):
"""
"""
ApiCli.get_arguments(self)
if self.args.meter is not None:
self.meter = self.args.meter
if self.args.since is not None:
self.since = self.args.since
self.path = 'v1/relays/{0}/config'.format(self.meter)
if self.since is not None:
self.url_parameters = {"since": self.since}
def get_description(self):
return 'Returns relay configuration from a {0} account'.format(self.product_name)
```
#### File: unit/boundary/action_installed_test.py
```python
import json
from unittest import TestCase
from boundary import ActionInstalled
from cli_runner import CLIRunner
from cli_test import CLITest
class ActionInstalledTest(TestCase):
def setUp(self):
self.cli = ActionInstalled()
def test_cli_description(self):
CLITest.check_description(self, self.cli)
def test_cli_curl(self):
runner = CLIRunner(self.cli)
curl = runner.get_output(['-z'])
CLITest.check_curl(self, self.cli, curl)
def test_cli_help(self):
CLITest.check_cli_help(self, self.cli)
def test_get_actions_installed(self):
runner = CLIRunner(ActionInstalled())
result = runner.get_output([])
actions_result = json.loads(result)
actions = actions_result['result']
self.assertGreaterEqual(len(actions), 1)
```
#### File: unit/boundary/alarm_common_test.py
```python
from unittest import TestCase
from boundary import Alarm
class AlarmCommonTest(TestCase):
def setUp(self):
self.alarm = Alarm()
def test_alarm_defaults(self):
self.assertIsNone(self.alarm.actions)
self.assertIsNone(self.alarm.aggregate)
self.assertIsNone(self.alarm.host_group_id)
self.assertIsNone(self.alarm.id)
self.assertIsNone(self.alarm.is_disabled)
self.assertIsNone(self.alarm.metric)
self.assertIsNone(self.alarm.name)
self.assertIsNone(self.alarm.note)
self.assertIsNone(self.alarm.operation)
self.assertIsNone(self.alarm.per_host_notify)
self.assertIsNone(self.alarm.threshold)
self.assertIsNone(self.alarm.notify_clear)
self.assertIsNone(self.alarm.notify_set)
self.assertIsNone(self.alarm.timeout_interval)
self.assertIsNone(self.alarm.trigger_interval)
def test_alarm_init(self):
actions = [1, 2]
aggregate = 'avg'
host_group_id = 1000
alarm_id = 123456789
trigger_interval = 3600
is_disabled = True
metric = "TEST_METRIC"
name = 'My Alarm'
note = 'just a note'
operation = 'eq'
per_host_notify = True
threshold = 1000
alarm = Alarm(
actions=actions,
aggregate=aggregate,
host_group_id=host_group_id,
id=alarm_id,
trigger_interval=trigger_interval,
is_disabled=is_disabled,
metric=metric,
name=name,
note=note,
operation=operation,
per_host_notify=per_host_notify,
threshold=threshold
)
self.assertEqual(actions, alarm.actions)
self.assertEqual(aggregate, alarm.aggregate)
self.assertEqual(host_group_id, alarm.host_group_id)
self.assertEqual(alarm_id, alarm.id)
self.assertEqual(trigger_interval, alarm.trigger_interval)
self.assertEqual(is_disabled, alarm.is_disabled)
self.assertEqual(metric, alarm.metric)
self.assertEqual(name, alarm.name)
self.assertEqual(note, alarm.note)
self.assertEqual(operation, alarm.operation)
self.assertEqual(per_host_notify, alarm.per_host_notify)
self.assertEqual(threshold, alarm.threshold)
def test_set_actions(self):
self.alarm.actions = [1, 2, 3, 4]
self.assertEqual([1, 2, 3, 4], self.alarm.actions)
def test_set_aggregate(self):
self.alarm.aggregate = 'sum'
self.assertEqual('sum', self.alarm.aggregate)
def test_set_id(self):
self.alarm.id = 1076
self.assertEqual(1076, self.alarm.id)
def test_set_interval(self):
self.alarm.interval = 60
self.assertEqual(60, self.alarm.interval)
def test_set_is_disabled(self):
self.alarm.is_disabled = True
self.assertEqual(True, self.alarm.is_disabled)
def test_set_metric_name(self):
self.alarm.metric_name = 'toad'
self.assertEqual('toad', self.alarm.metric_name)
def test_set_name(self):
self.alarm.name = 'blue'
self.assertEqual('blue', self.alarm.name)
def test_set_note(self):
self.alarm.note = 'This is a note'
self.assertEqual('This is a note', self.alarm.note)
def test_set_operation(self):
self.alarm.operation = 'gt'
self.assertEqual('gt', self.alarm.operation)
def test_set_per_host_notify(self):
self.alarm.per_host_notify = True
self.assertEqual(True, self.alarm.per_host_notify)
def test_set_threshold(self):
self.alarm.threshold = 2000
self.assertEqual(2000, self.alarm.threshold)
def test_set_bad_aggregate(self):
with self.assertRaises(AttributeError, msg='Check bad aggregate'):
alarm = Alarm()
alarm.aggregate = 'foo'
def test_repr(self):
alarm = Alarm(
actions=[1, 2],
aggregate='avg',
host_group_id=1000,
alarm_id=123456789,
trigger_interval=900,
is_disabled=True,
metric='TEST_METRIC',
name='My Alarm',
note='just a note',
operation='eq',
per_host_notify=True,
threshold=1000
)
self.assertEqual('Alarm(aggregate="avg", actions=[1, 2], host_group_id=1000, id=None, interval=900, '
'is_disabled=True, metric="TEST_METRIC", name="My Alarm", '
'note="just a note", operation="eq", per_host_notify=True, threshold=1000',
alarm.__repr__())
```
#### File: unit/boundary/alarm_delete_test.py
```python
from unittest import TestCase
import json
from boundary import API
from boundary import AlarmCreate
from boundary import AlarmDelete
from cli_test import CLITest
from cli_runner import CLIRunner
class AlarmDeleteTest(TestCase):
def setUp(self):
self.cli = AlarmDelete()
self.api = API()
def test_cli_description(self):
CLITest.check_description(self, self.cli)
def test_cli_help(self):
CLITest.check_cli_help(self, self.cli)
def test_create_curl(self):
runner = CLIRunner(self.cli)
alarm_id = 1024
curl = runner.get_output(['-i', str(alarm_id),
'-z'])
CLITest.check_curl(self, self.cli, curl)
def test_api_call(self):
api = API()
name = 'ALARM_DELETE_API_TEST' + CLITest.random_string(6)
metric = 'CPU'
trigger_interval = 60000
aggregate = 'sum'
operation = 'gt'
threshold = '0.80'
note = CLITest.random_string(20)
alarm = api.alarm_create(name=name,
metric=metric,
trigger_interval=trigger_interval,
aggregate=aggregate,
operation=operation,
threshold=threshold,
note=note)
self.api.alarm_delete(id=alarm.id)
def test_delete_alarm(self):
name = 'ALARM_DELETE_TEST' + CLITest.random_string(6)
metric = 'CPU'
trigger_interval = 60000
aggregate = 'sum'
operation = 'gt'
threshold = '0.80'
note = CLITest.random_string(20)
runner_create = CLIRunner(AlarmCreate())
create = runner_create.get_output(['-n', name,
'-m', metric,
'-g', aggregate,
'-o', operation,
'-v', str(threshold),
'-r', str(trigger_interval),
'-d', note])
alarm = json.loads(create)
runner_delete = CLIRunner(AlarmDelete())
delete = runner_delete.get_output(['-i', str(alarm['id'])])
```
#### File: unit/boundary/cli_test.py
```python
import random
import string
import subprocess
import sys
import re
from cli_test_parameters import CLITestParameters
class CLITest:
def __init__(self):
pass
@staticmethod
def check_description(test_case, cli):
parameters = CLITestParameters()
test_case.assertEqual(parameters.get_value(cli.__class__.__name__, 'description'), cli.get_description())
@staticmethod
def check_curl(test_case, cli, output):
parameters = CLITestParameters()
p = re.compile(r'-u ".*?"\s')
a = p.findall(output)
output = output.replace(a[0], '')
test_case.assertEqual(parameters.get_value(cli.__class__.__name__, 'curl').encode('utf-8'), output.encode('utf-8'))
@staticmethod
def get_cli_name_from_class(i):
name = i.__class__.__name__
m = re.findall("([A-Z][a-z]+)", name)
m = [a.lower() for a in m]
cli_name = str.join('-', m)
return cli_name
@staticmethod
def check_cli_help(test_case, cli):
parameters = CLITestParameters()
name = cli.__class__.__name__
expected_output = parameters.get_cli_help(name)
m = re.findall("([A-Z][a-z]+)", name)
m = [a.lower() for a in m]
command = str.join('-', m)
try:
output = subprocess.check_output([command, '-h'])
test_case.assertEqual(expected_output, output)
except subprocess.CalledProcessError as e:
sys.stderr.write("{0}: {1}\n".format(e.output, e.returncode))
@staticmethod
def get_cli_output(cli, args):
output = None
try:
command = CLITest.get_cli_name_from_class(cli)
args.insert(0, command)
output = subprocess.check_output(args=args)
except subprocess.CalledProcessError as e:
sys.stderr.write("{0}: {1}\n".format(e.output, e.returncode))
return output
@staticmethod
def random_string(n):
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(n))
@staticmethod
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
```
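As an illustration (not part of the test suite), `get_cli_name_from_class` simply maps a CamelCase class name to the kebab-case command it expects to find on the PATH:

```python
from boundary import AlarmDelete

# AlarmDelete -> "alarm-delete"
print(CLITest.get_cli_name_from_class(AlarmDelete()))
```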
#### File: unit/boundary/plugin_manifest_test.py
```python
from unittest import TestCase
from boundary import PluginManifest
import os.path
class TestPluginManifest(TestCase):
def setUp(self):
self.filename = os.path.join(os.path.dirname(__file__), 'plugin.json')
self.pm = PluginManifest(self.filename)
self.pm.load()
def test_load(self):
pm = PluginManifest(self.filename)
pm.load()
def test_check_data_members(self):
self.assertEqual('Boundary README Test', self.pm.name,
'Check for name')
self.assertEqual('Example plugin.json for testing README.md generation', self.pm.description, 'Check for description')
self.assertEqual('2.0', self.pm.version)
self.assertEqual('meter', self.pm.tags)
self.assertEqual('icon.png', self.pm.icon)
self.assertEqual('node index.js', self.pm.command)
self.assertEqual('boundary-meter init.lua', self.pm.command_lua)
self.assertEqual('npm install', self.pm.post_extract)
self.assertEqual('', self.pm.post_extract_lua)
self.assertEqual('node_modules', self.pm.ignore)
self.assertEqual(['BOUNDARY_README_METRIC'], self.pm.metrics)
def test_check_for_param_array(self):
a = self.pm.param_array
self.assertTrue(a is not None)
def test_check_param_array(self):
pass
```
|
{
"source": "jdgwartney/boundary-plugin-rabbitmq",
"score": 2
}
|
#### File: jdgwartney/boundary-plugin-rabbitmq/rabbitmq_monitoring.py
```python
import json
from time import sleep
import collections
import sys
from os.path import basename
import urllib2
import requests
from base64 import b64encode
from string import replace
#
# Maps the API path names to Boundary Metric Identifiers
#
KEY_MAPPING = [
("object_totals_queues", "RABBITMQ_OBJECT_TOTALS_QUEUES"),
("object_totals_channels", "RABBITMQ_OBJECT_TOTALS_CHANNELS"),
("object_totals_exchanges", "RABBITMQ_OBJECT_TOTALS_EXCHANGES"),
("object_totals_consumers", "RABBITMQ_OBJECT_TOTALS_CONSUMERS"),
("object_totals_connections", "RABBITMQ_OBJECT_TOTALS_CONNECTIONS"),
("message_stats_deliver", "RABBITMQ_MESSAGE_STATS_DELIVER"),
("message_stats_deliver_details_rate", "RABBITMQ_MESSAGE_STATS_DELIVER_DETAILS_RATE"),
("message_stats_deliver_no_ack", "RABBITMQ_MESSAGE_STATS_DELIVER_NO_ACK"),
("message_stats_deliver_no_ack_details_rate", "RABBITMQ_MESSAGE_STATS_DELIVER_NO_ACK_DETAILS_RATE"),
("message_stats_deliver_get", "RABBITMQ_MESSAGE_STATS_DELIVER_GET"),
("message_stats_deliver_get_details_rate", "RABBITMQ_MESSAGE_STATS_DELIVER_GET_DETAILS_RATE"),
("message_stats_redeliver", "RABBITMQ_MESSAGE_STATS_REDELIVER"),
("message_stats_redeliver_details_rate", "RABBITMQ_MESSAGE_STATS_REDELIVER_DETAILS_RATE"),
("message_stats_publish", "RABBITMQ_MESSAGE_STATS_PUBLISH"),
("message_stats_publish_details_rate", "RABBITMQ_MESSAGE_STATS_PUBLISH_DETAILS_RATE"),
("queue_totals_messages", "RABBITMQ_QUEUE_TOTALS_MESSAGES"),
("queue_totals_messages_details_rate", "RABBITMQ_QUEUE_TOTALS_MESSAGES_DETAILS_RATE"),
("queue_totals_messages_ready", "RABBITMQ_QUEUE_TOTALS_MESSAGES_READY"),
("queue_totals_messages_ready_details_rate", "RABBITMQ_QUEUE_TOTALS_MESSAGES_READY_DETAILS_RATE"),
("queue_totals_messages_unacknowledged", "RABBITMQ_QUEUE_TOTALS_MESSAGES_UNACKNOWLEDGED"),
("queue_totals_messages_unacknowledged_details_rate","RABBITMQ_QUEUE_TOTALS_MESSAGES_UNACKNOWLEDGED_DETAILS_RATE"),
("mem_used","RABBITMQ_MEM_USED"),
("disk_free","RABBITMQ_DISK_FREE")
]
class RabitMQMonitoring():
def __init__(self):
self.pollInterval = None
self.hostname = None
self.port = None
self.user = None
self.password = None
self.url = None
def send_get(self,url):
response = requests.get(url, auth=(self.user, self.password))
return response.json()
def call_api(self, endpoint):
url = self.url + endpoint
auth = b64encode(self.user + ":" + self.password)
headers = {
"Accept": "application/json",
"Authorization": "Basic %s" % auth,
}
request = urllib2.Request(url,headers=headers)
try:
response = urllib2.urlopen(request)
        except urllib2.HTTPError as e:
            sys.stderr.write("Error getting data from the RabbitMQ management API: {0} ({1}), Error: {2}".format(
                getattr(e, "reason", "Unknown Reason"), e.code, e.read()))
            raise
        except urllib2.URLError as e:
            sys.stderr.write("Error connecting to host: {0} ({1}), Error: {2}".format(self.hostname, e.errno, e.message))
            raise
return json.load(response)
def print_dict(self, dic):
for (key, value) in KEY_MAPPING:
if dic.get(key,"-") != "-":
name = dic.get("name")
print("%s %s %s" % (value.upper(), dic.get(key, "-"), name))
sys.stdout.flush()
def get_details(self):
overview = self.call_api("overview")
nodes = self.call_api("nodes")
if nodes:
overview.update(nodes[0])
if overview:
data = self.flatten_dict(overview)
self.print_dict(data)
def flatten_dict(self, dic, parent_key='', sep='_'):
items = []
for k, v in dic.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(self.flatten_dict(v, new_key, sep).items())
else:
items.append((new_key, v))
return dict(items)
def extractMetrics(self):
self.get_details()
def get_configuration(self):
'''
1) Reads the param.json file that contains the configuration of the plugin.
2) Sets the values to member variables of the class instance.
'''
with open('param.json') as f:
parameters = json.loads(f.read())
self.hostname = parameters['hostname']
self.port = parameters['port']
self.pollInterval = float(parameters['pollInterval'])/1000.0
self.user = parameters['user']
self.password = parameters['password']
self.url = "http://" + self.hostname + ":" + self.port + "/api/"
def continuous_monitoring(self):
while True:
self.get_details()
sleep(float(self.pollInterval))
if __name__ == "__main__":
monitor = RabitMQMonitoring()
monitor.get_configuration()
monitor.continuous_monitoring()
```
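A standalone sketch of what `flatten_dict()` does to the nested JSON returned by the RabbitMQ management API; the input dictionary below is a made-up example, not a real API response.

```python
monitor = RabitMQMonitoring()
nested = {
    "object_totals": {"queues": 4, "channels": 2},
    "queue_totals": {"messages": {"details": {"rate": 0.5}}},
}
print(monitor.flatten_dict(nested))
# -> {'object_totals_queues': 4, 'object_totals_channels': 2,
#     'queue_totals_messages_details_rate': 0.5}  (key order may vary)
```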
|
{
"source": "jdgwartney/esdvd",
"score": 2
}
|
#### File: esdvd/esdvd/query.py
```python
from esdvd import ESCommon
from lru import lru_cache_function
import json
import random
import time
import os
class QueryData(ESCommon):
def __init__(self):
ESCommon.__init__(self)
@staticmethod
@lru_cache_function(max_size=1024*8, expiration=60)
def get_id(q, rindex):
path = q.get_file_path(rindex)
record_id = q.get_id_from_file(path)
return record_id
def get_description(self):
return "Queries Elasticsearch for random DVD documents"
def add_command_line_arguments(self):
ESCommon.add_command_line_arguments(self)
def get_command_line_arguments(self):
ESCommon.get_command_line_arguments(self)
def get_file_path(self, findex):
"""
Returns the full path to file using index into list of a directory
:return:
"""
file_name = self.files[findex]
file_path = os.path.join(self.extraction_directory,file_name)
return file_path
def get_id_from_file(self, path):
contents = self.get_file_contents(path)
doc = json.loads(contents)
record_id = doc[self.id_field_name]
return record_id
def query_record(self, record_id):
"""
Look up record in elastic search
:param record_id:
:return:
"""
res = self.es.get(index=self.index, doc_type=self.doc_type, id=record_id)
if not self.quiet:
print(res['_source'])
def increment_count(self):
self.count += 1
def done(self):
result = None
if self.limit is not None and self.count > self.limit:
result = True
else:
result = False
return result
def query_database(self):
self.files = self.get_files()
if not self.quiet:
print("number of files: {0}".format(len(self.files)))
while not self.done():
            rindex = random.randrange(len(self.files))
record_id = QueryData.get_id(self, rindex)
if not self.quiet:
print("+++++")
self.query_record(record_id)
if not self.quiet:
print("-----")
time.sleep(self.sleep)
self.increment_count()
def execute(self):
self.handle_arguments()
self.set_extraction_directory()
self.query_database()
def main():
l = QueryData()
l.execute()
if __name__ == '__main__':
main()
```
#### File: esdvd/esdvd/update.py
```python
from esdvd import Common
class UpdateData(Common):
def __init__(self):
pass
def execute(self):
pass
def main():
    l = UpdateData()
l.execute()
if __name__ == '__main__':
main()
```
|
{
"source": "jdgwartney/flask-blueprint",
"score": 2
}
|
#### File: application/users/views.py
```python
from flask import Blueprint
users = Blueprint('users', __name__)
@users.route('/me')
def me():
return "This is my page.", 200
```
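A minimal sketch (assumed wiring, not shown in this repo snippet) of how the `users` blueprint would typically be attached to the Flask app; the `url_prefix` is an example choice.

```python
from application import app
from application.users.views import users

# After registration, the '/me' view is served at /users/me
app.register_blueprint(users, url_prefix='/users')
```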
#### File: flask-blueprint/application/views.py
```python
from flask import request
from application import app
@app.route("/")
def hello():
return "Hello World!"
@app.route("/contact")
def contact():
    return ("You can contact me at 555-5555, or "
            "email me at <EMAIL>")
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
# Logic for handling login
return "login-post"
else:
# Display login form
return "login-get"
```
|
{
"source": "jdgwartney/graph-scaler",
"score": 3
}
|
#### File: jdgwartney/graph-scaler/scale.py
```python
import math
import logging
logger = logging.getLogger(__name__)
class ScaleAxis(object):
def __init__(self):
pass
def scale_axis(data):
up = max(data)
lo = min(data)
    logger.debug("lower: {0}, upper: {1}".format(lo, up))
r = up - lo
logger.debug("range: {0}".format(r))
tick_count = 8
unrounded_tick_size = r/(tick_count - 1)
logger.debug("tick_count: {0}, unrounded_tick_size: {1}".format(tick_count, unrounded_tick_size))
x = math.ceil(math.log10(unrounded_tick_size) - 1)
pow10x = math.pow(10, x)
logger.debug("x: {0}, pow10x: {1}".format(x, pow10x))
    rounded_tick_range = math.ceil(unrounded_tick_size / pow10x) * pow10x
logger.debug("rounded_tick_range: {0}".format(rounded_tick_range))
new_lower = rounded_tick_range * round(lo/rounded_tick_range)
new_upper = rounded_tick_range * round((1+up)/rounded_tick_range)
logger.debug("new_lower: {0}, new_upper: {1}".format(new_lower, new_upper))
return (new_lower, new_upper)
def step_size(r, target_steps):
# calculate an initial guess at step size
temp_step = r/target_steps
logger.debug("temp_step: {0}".format(temp_step))
# get the magnitude of the step size
    mag = float(math.floor(math.log10(temp_step)))
mag_pow = float(math.pow(10, mag))
# calculate most significant digit of the new step size
mag_msd = int((temp_step/mag_pow + 0.5))
# promote the MSD to either 1, 2, or 5
if mag_msd > 5.0:
mag_msd = 10.0
elif mag_msd > 2.0:
mag_msd = 5.0
elif mag_msd > 1.0:
        mag_msd = 2.0
    return mag_msd * mag_pow
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
data = [15, 234, 140, 65, 90]
lo, up = scale_axis(data)
size = step_size(up-lo, 5)
print("lower: {0}, upper: {1}, size: {2}".format(lo, up, size))
```
|
{
"source": "jdgwartney/measurement-debugging",
"score": 3
}
|
#### File: jdgwartney/measurement-debugging/send_measures.py
```python
from tspapi import API
from tspapi import Measurement
from random import randint
from datetime import datetime
from time import sleep
def create_batch():
delay = 30
api = API()
measurements = []
timestamp = int(datetime.now().strftime('%s'))
# skew timestamp by 5 seconds
timestamp = timestamp - delay
measurements.append(Measurement(metric='API_TEST_METRIC', value=randint(0, 99), source='red', timestamp=timestamp))
measurements.append(Measurement(metric='API_TEST_METRIC', value=randint(0, 99), source='green', timestamp=timestamp))
measurements.append(Measurement(metric='API_TEST_METRIC', value=randint(0, 99), source='blue', timestamp=timestamp))
# sleep(float(delay))
api.measurement_create_batch(measurements)
def create():
api = API()
metric_id = 'API_TEST_METRIC'
    value = randint(0, 99)
source = 'foo'
timestamp = datetime.now().strftime('%s')
api.measurement_create(metric_id, value, source, timestamp)
if __name__ == "__main__":
create_batch()
```
|
{
"source": "jdgwartney/mysql-to-pulse-api",
"score": 2
}
|
#### File: mysql-to-pulse-api/tspmysql/cli.py
```python
import argparse
from etl import ETL
class Cli(object):
def __init__(self):
pass
def get_arguments(self):
        parser = argparse.ArgumentParser(description="get args")
def run(self):
print("running")
etl = ETL()
etl.run()
def main():
cli = Cli()
cli.run()
```
|
{
"source": "jdgwartney/sdk",
"score": 3
}
|
#### File: plugins/framework/exec_proc.py
```python
from subprocess import Popen,PIPE
import shlex
import logging
from string import replace
class ExecProc:
def __init__(self):
self.command = None
self.debug = False
def setDebug(self,debug):
self.debug = debug
def setCommand(self,command):
if type(command) != str:
raise ValueError
self.command = command
def execute(self):
if self.command == None:
raise ValueError
args = shlex.split(self.command)
if self.debug == True:
logging.info("command=\"%s\"",args)
p = Popen(args,stdout=PIPE)
o,e = p.communicate()
if self.debug == True:
logging.info("before: " + ':'.join(x.encode('hex') for x in o))
# Remove carriage returns from output
o = replace(o,"\r","")
if self.debug == True:
logging.info("after: " + ':'.join(x.encode('hex') for x in o))
if self.debug == True:
logging.info("output=\"%s\"",o)
logging.info(':'.join(x.encode('hex') for x in o))
return o
```
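A minimal usage sketch for `ExecProc`; the command string here is an arbitrary example, not one used by the plugin framework.

```python
proc = ExecProc()
proc.setDebug(True)
proc.setCommand("uptime")
# execute() runs the command, strips carriage returns and returns stdout
print(proc.execute())
```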
|
{
"source": "jdgwartney/tsi",
"score": 2
}
|
#### File: jdgwartney/tsi/cleanup.py
```python
import json
import pycurl
from requests.auth import HTTPDigestAuth
import os
#-------------------------------------------------------
# define function to submit requests
#-------------------------------------------------------
def submitRequest( url ):
headers = ['Expect:', 'Content-Type: application/json' , 'X-API-KEY: ' + apikey]
c = pycurl.Curl()
c.setopt(pycurl.URL, url)
c.setopt(pycurl.HTTPHEADER,headers )
c.setopt(pycurl.CUSTOMREQUEST, "DELETE")
c.perform()
print ("status code:=" + str(c.getinfo(pycurl.HTTP_CODE)))
c.close()
return
#-------------------------------------------------------
# setup api key
#-------------------------------------------------------
apikey = os.environ["TSI_API_KEY"]
submitRequest("https://truesight.bmc.com/api/v1/entities/APPLICATION/online_auc")
submitRequest("https://truesight.bmc.com/api/v1/entities/DEVICE/oa-appserver-1")
submitRequest("https://truesight.bmc.com/api/v1/entities/TRANSACTION/oa-appserver-1.bid-tx")
submitRequest("https://truesight.bmc.com/api/v1/entities/TRANSACTION/oa-appserver-1.browse-catalog")
```
|
{
"source": "jdgwartney/tsi-lab",
"score": 3
}
|
#### File: labs/lab-5/log_utils.py
```python
import apachelog
import os
import sys
import time
def monitor_file(f):
"""
Reads a line from a file when available
:param f: open file
:return: a line from the file
"""
# Go to the end of the file
f.seek(0, 2)
# Loop waiting for lines to be written
while True:
log_line = f.readline()
# If there is nothing to read then wait a bit
# and try again
if not log_line:
time.sleep(0.1)
continue
# We have a line return the line
yield log_line
def parse_apache_line(parser, line):
"""
Uses the apachelog package to parse a line
of a Apache HTTP server log.
:param parser:
:param line:
:return:
"""
s = None
try:
s = parser.parse(line)
except apachelog.ApacheLogParserError:
sys.stderr.write("Unable to parse %s" % line)
return s
class LogfileParser(object):
def __init__(self, path=None):
"""
Constructs a Logfile parser instance given a path to a log file
:param path:
:return: None
"""
# Opened handle to our log file
self.log_file = open(path, "r")
# Contains the text from each line append to the file
self.line = None
# Set our application id from the environment variable
self.app_id = os.environ['TSI_APP_ID']
def monitor_file(self):
"""
Monitors a file for lines append to it then calls
a method to process the line
:return: None
"""
lines = monitor_file(self.log_file)
for line in lines:
self.line = line.strip()
self.parse_line()
def parse_line(self):
"""
Default callback for processing a line which prints
the line to standard out.
:return: None
"""
print(self.line)
if __name__ == '__main__':
if len(sys.argv) == 2:
        parser = LogfileParser(sys.argv[1])
parser.monitor_file()
else:
        sys.stderr.write("usage: {0} <path>\n".format(os.path.basename(sys.argv[0])))
```
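A hedged usage sketch of the `monitor_file()` generator tailing an Apache access log; the log path and the `apachelog` format key are assumptions for illustration.

```python
import apachelog

log_parser = apachelog.parser(apachelog.formats['extended'])
with open('/var/log/apache2/access.log', 'r') as log:
    # monitor_file() yields each new line as it is appended to the log
    for raw_line in monitor_file(log):
        entry = parse_apache_line(log_parser, raw_line.strip())
        if entry:
            print(entry)
```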
#### File: labs/lab-6/common.py
```python
import time
import sys
import os
from tspapi import API
class Common(object):
def __init__(self, ):
self.api = API()
self.usage_args = ""
# Set our application id from the environment variable
self.app_id = os.environ['TSI_APP_ID']
@staticmethod
def usage(args):
sys.stderr.write("usage: {0} {1}\n".format(os.path.basename(sys.argv[0]), args))
def send_measurements(self, measurements):
"""
Sends measurements using the Measurement API
:param measurements:
:return: None
"""
self.api.measurement_create_batch(measurements)
def run(self):
"""
Main loop
"""
while True:
print("Doing absolutely nothing")
time.sleep(self.interval)
```
#### File: labs/lab-6/ex6-1.stocks.py
```python
import ystockquote
import os
import sys
import time
from tspapi import API
from tspapi import Measurement
from common import Common
class Ticker(Common):
"""
Collects the current stock price and volume from ticker
"""
def __init__(self, interval=10, tickers=None):
"""
Construct a Ticker instance
:param interval: How often to collect stock price and volume
:return:
"""
super(Ticker, self).__init__()
self.interval = interval
self.tickers = tickers
def send_measurements(self, measurements):
"""
Sends measurements using the Measurement API
:param measurements:
:return: None
"""
self.api.measurement_create_batch(measurements)
def run(self):
"""
Main loop
"""
properties = {"app_id": self.app_id}
while True:
# Loop over the tickers and lookup the stock price and volume
for ticker in self.tickers:
measurements = []
price = ystockquote.get_price(ticker)
volume = ystockquote.get_volume(ticker)
timestamp = int(time.time())
if volume == 'N/A' or price == 'N/A':
sys.stderr.write('Could not find ticker \"{0}\", skipping'.format(ticker))
else:
print("ticker: {0}, price: {1}, volume: {2}".format(ticker, price, volume))
measurements.append(Measurement(metric='STOCK_PRICE',
value=price,
source=ticker,
properties=properties))
measurements.append(Measurement(metric='STOCK_VOLUME',
value=volume,
source=ticker,
properties=properties))
self.send_measurements(measurements)
time.sleep(self.interval)
if __name__ == "__main__":
if len(sys.argv) > 1:
first = True
tickers = []
for arg in sys.argv:
# Skip the first arguments which is the program name
if first:
first = False
continue
tickers.append(arg)
stocks = Ticker(interval=10, tickers=tickers)
stocks.run()
else:
sys.stderr.write("usage: {0} ticker [ticker [ticker]...]\n".format(os.path.basename(sys.argv[0])))
```
|
{
"source": "jdgwartney/tsi",
"score": 2
}
|
#### File: tsi/api/event_api.py
```python
from tsi.api_object import ApiObject
class EventApi(ApiObject):
def __init__(self, api_key=None):
pass
def create(self, **kwargs):
o = {}
return o
def delete(self, **kwargs):
o = {}
return o
def get(self, **kwargs):
o = {}
return o
def update(self, **kwargs):
o = {}
return o
```
#### File: tsi/tsi/Entity.py
```python
from tsi.api_object import ApiObject
class Entity(ApiObject):
def __init__(self,
cfg_attr_values=None,
entity_id=None,
entity_type_id=None,
name=None,
parent_entity_type_id=None,
parent_entity_id=None,
source_id=None
):
ApiObject.__init__(self)
self._cfg_attr_values = cfg_attr_values
self._entity_id = entity_id
self._entity_type_id = entity_type_id
self._name = name
self._parent_entity_type_id=parent_entity_type_id
self._parent_entity_id=parent_entity_id
self._source_id = source_id
self._tags = None
@property
def cfg_attr_values(self):
return self._cfg_attr_values
@property
def entity_type_id(self):
return self._entity_type_id
@property
def name(self):
return self._name
@property
def source_id(self):
return self._source_id
@property
def tags(self):
return self._tags
# newEntity = {
# "entity_type_id": "APPLICATION",
# "name": "Online Auction",
# "tags": [
# "app_id:online_auc"
# ],
# "cfg_attr_values": {},
# "entity_id": "online_auc",
# "source_id": "sample",
# "cfg_attr_values":
# {
# "kpis":[
# {"entity_type_id":"TRANSACTION",
# "entity_type_name":"Transaction",
# "entity_id":"oa-appserver-1.bid_tx",
# "title":"Number of Requests",
# "application_id":"online_auc",
# "application_name":"Online Auction",
# "metric_name":"Number of Requests",
# "metric_uom":"#",
# "metric_id":"number_of_requests"},
# {"entity_type_id":"TRANSACTION",
# "entity_type_name":"Transaction",
# "entity_id":"oa-appserver-1.bid_tx",
# "title":"Request Response Time",
# "application_id":"online_auc",
# "application_name":"Online Auction",
# "metric_name":"Request Response Time",
# "metric_uom":"Seconds",
# "metric_id":"request_response_time"}
# ]
# }
# }
```
|
{
"source": "jdgwartney/tsp-etl",
"score": 2
}
|
#### File: tsp-etl/tspetl/db_tool.py
```python
from tspetl import ETLTool
class DBTool(ETLTool):
def __init__(self):
self._name = None
self._password = None
self._database = None
self._query = None
@property
def name(self):
return 'db'
@property
def help(self):
return 'Import data from a relational database (Future Release)'
def add_parser(self, parser):
self._parser = parser.add_parser(self.name, help=self.help)
self._parser.add_argument('-u', '--user', metavar='name',
help="name of the user to connect to the database")
self._parser.add_argument('-p', '--password', metavar='password',
help="password of the user to connect to the database")
self._parser.add_argument('-d', '--database', metavar='db_name',
help="database to extract data from")
self._parser.add_argument('-q', '--query', metavar='sql_query',
help="SQL query to use to extract data")
def handle_arguments(self, args):
if args.name is not None:
self._name = args.name
pass
def run(self, sink):
print("Import CSV")
```
#### File: tsp-etl/tspetl/weather_tool.py
```python
from time import sleep
import string
from tspetl import ETLCollector
from tspetl import ETLTool
import pyowm
from tspapi import Measurement
class WeatherCollector(ETLCollector):
def __init__(self, sink, api_key=None, cities=None):
super(WeatherCollector, self).__init__(sink)
self._api_key = None
self._cities = None
if api_key is not None:
self._api_key = api_key
if cities is not None:
self._cities = cities
self._owm = pyowm.OWM(self._api_key)
def collect(self):
measurements = []
for city in self._cities:
observation = self._owm.weather_at_place(city)
weather = observation.get_weather()
city = string.replace(city, ',', '_')
city = string.replace(city, ' ', '_')
temperature = weather.get_temperature('celsius')['temp']
measurement = Measurement(metric='OWM_TEMPERATURE', source=city, value=temperature)
measurements.append(measurement)
self._sink.send_measurements(measurements=measurements)
class WeatherTool(ETLTool):
def __init__(self):
super(WeatherTool, self).__init__()
self._city_names = None
self._interval = 60
self._api_key = None
@property
def name(self):
return 'weather'
@property
def help(self):
return 'Collects weather measurements from a city and optional country code. (Future Release)'
def add_parser(self, sub_parser):
super(WeatherTool, self).add_parser(sub_parser)
self._parser.add_argument('-c', '--city-name', dest='city_names', action='append', metavar="city_name",
required=True,
help="Name of a city with an optional country code")
self._parser.add_argument('-i', '--interval', dest='interval', action='store', metavar="seconds",
required=False,
help="How often to collect in each API call in seconds. Defaults to 1 minute")
self._parser.add_argument('-k', '--api-key', dest='api_key', action='store', metavar="key", required=True,
help="Open Weather Map API Key")
def handle_arguments(self, args):
super(WeatherTool, self).handle_arguments(args)
if args.city_names is not None:
print(type(args.city_names))
self._city_names = args.city_names
if args.interval is not None:
self._interval = args.interval
if args.api_key is not None:
self._api_key = args.api_key
def run(self, sink):
collector = WeatherCollector(sink, cities=self._city_names, api_key=self._api_key)
while True:
collector.collect()
sleep(float(self._interval))
```
|
{
"source": "jdgwf/ISPWatcher",
"score": 2
}
|
#### File: jdgwf/ISPWatcher/ispwatcher.py
```python
VERSION = "3.0.1"
import datetime
import urllib
import urllib.request
import smtplib
import sys, os
import poplib
import imaplib
import getopt
import json
# Global Variables for command line option handling (see VERSION variable above)
SENDEMAILS = 1
CHATTY = 1
MAILSERVER = "smtp.gmail.com"
MAILSERVERPORT = 587
MAILSERVERUSERNAME = ""
MAILSERVERPASSWORD = ""
MAILSERVERSTARTTLS = 1
MAILFROM = ""
MAILSUBJECT = "ISPWatcher2 Failure"
EMAILS = {'':''}
def printversion():
"""Prints the version of ISPWatcher2
Returns nothing."""
print("* ISPWatcher Version " + VERSION)
def printusage():
"""Prints the usage of ISPWatcher2
Returns nothing."""
printversion()
print("\tispwatcher.py -h - Prints help screen")
    print("\tispwatcher.py -v - Prints Version information.")
print("\tispwatcher.py -t - Outputs only to standard output, sends no emails")
print("\tispwatcher.py -q - Checks servers quietly")
try:
opts, args = getopt.getopt(sys.argv[1:], "hvtq" )
except:
print(str(err) )
printusage()
sys.exit(2)
for o, a in opts:
if o == "-h":
printusage()
sys.exit()
if o == "-t":
SENDEMAILS = 0
if o == "-q":
CHATTY = 0
if o == "-v":
printversion()
sys.exit()
from xml.dom import minidom
#reload(sys)
#sys.setdefaultencoding("latin1")
def CheckServerJSON(settings):
    # print(settings)
    global MAILSERVER, MAILSERVERPORT, MAILSERVERUSERNAME, MAILSERVERPASSWORD
    global MAILSERVERSTARTTLS, MAILFROM, MAILSUBJECT
for key, value in settings["options"].items():
if key.lower() == "mailserver":
MAILSERVER = value;
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found JSON configuration mailserver: " + MAILSERVER)
if key.lower() == "mailserverport":
MAILSERVERPORT = value;
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found JSON configuration mailserverport: " + MAILSERVERPORT)
if key.lower() == "mailserverusername":
MAILSERVERUSERNAME = value;
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found JSON configuration mailserverusername: " + MAILSERVERUSERNAME)
if key.lower() == "mailserverstarttls":
MAILSERVERSTARTTLS = value;
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found JSON configuration mailserverstarttls: " + MAILSERVERSTARTTLS)
if key.lower() == "mailserverpassword":
MAILSERVERPASSWORD = value;
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found JSON configuration mailserverpassword: " + MAILSERVERPASSWORD)
if key.lower() == "mailfrom":
MAILFROM = value;
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found JSON configuration mailfrom: " + MAILFROM)
if key.lower() == "mailsubject":
MAILSUBJECT = value;
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found JSON configuration mailsubject: " + MAILSUBJECT)
for server in settings["servers"]:
sms = "0"
host = ""
recipients = []
watchfor = ""
warnif = ""
port = "0"
type = ""
active = "1"
timeoutalert = "0"
for key, value in server.items():
key = key.lower()
if key == "type":
type = value
if key == "host":
host = value
if key == "recipients":
recipients.append(value)
if key == "watchfor":
watchfor = value
if key == "warnif":
warnif = value
if key == "port":
port = value
if key == "timeoutalert":
timeoutalert = value
if key == "active":
active = value
if type == "http":
if port == "0":
port = "80"
if active == "1":
CheckHTTPServer(host, port, recipients, watchfor, warnif, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "smtp":
if port == "0":
port = "25"
if active == "1":
CheckSMTPServer(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "pop3":
if port == "0":
port = "110"
if active == "1":
CheckPOP3Server(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "imap" or type == "imap4":
if port == "0":
port = "143"
if active == "1":
CheckIMAPServer(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "pop3ssl" or type == "popssl":
if port == "0":
port = "995"
if active == "1":
CheckPOP3SSLServer(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "imapssl" or type == "imap4ssl":
if port == "0":
port = "993"
if active == "1":
CheckIMAPSSLServer(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
def CheckServerXML(oServer):
    """Parses through the XML server element and delegates to the appropriate check function based on server type
Returns nothing."""
global MAILSERVER
global MAILSERVERPORT
global MAILSERVERUSERNAME
global MAILSERVERPASSWORD
global MAILFROM
global MAILSERVERSTARTTLS
global MAILSUBJECT
type = ""
for oAttributes in oServer.childNodes:
if oAttributes.nodeType != minidom.Node.TEXT_NODE:
if oAttributes.nodeName.lower() == "type":
type = oAttributes.childNodes[0].nodeValue
if oAttributes.nodeName.lower() == "mailserver":
MAILSERVER = oAttributes.childNodes[0].nodeValue
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found XML configuration MailServer: " + MAILSERVER)
if oAttributes.nodeName.lower() == "mailserverport":
MAILSERVERPORT = oAttributes.childNodes[0].nodeValue
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found XML configuration MailServerPort: " + MAILSERVERPORT)
if oAttributes.nodeName.lower() == "mailserverusername":
MAILSERVERUSERNAME = oAttributes.childNodes[0].nodeValue
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found XML configuration MailServerUserName: " + MAILSERVERUSERNAME)
if oAttributes.nodeName.lower() == "mailserverstarttls":
MAILSERVERSTARTTLS = oAttributes.childNodes[0].nodeValue
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found XML configuration MailServerStartTLS: " + MAILSERVERSTARTTLS)
if oAttributes.nodeName.lower() == "mailserverpassword":
MAILSERVERPASSWORD = oAttributes.childNodes[0].nodeValue
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found XML configuration MailServerPassword: " + MAILSERVERPASSWORD)
if oAttributes.nodeName.lower() == "mailfrom":
MAILFROM = oAttributes.childNodes[0].nodeValue
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found XML configuration MailFrom: " + MAILFROM)
if oAttributes.nodeName.lower() == "mailsubject":
MAILSUBJECT = oAttributes.childNodes[0].nodeValue
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Found XML configuration MailSubject: " + MAILSUBJECT)
sms = "0"
host = ""
recipients = []
watchfor = ""
warnif = ""
port = "0"
active = "1"
timeoutalert = "0"
for oAttributes in oServer.childNodes:
if oAttributes.nodeType != minidom.Node.TEXT_NODE:
if oAttributes.nodeName.lower() == "host":
host = oAttributes.childNodes[0].nodeValue
if oAttributes.nodeName.lower() == "port":
port = oAttributes.childNodes[0].nodeValue
if oAttributes.nodeName.lower() == "recipients":
recipients.append(oAttributes.childNodes[0].nodeValue)
if oAttributes.nodeName.lower() == "warnif":
warnif = oAttributes.childNodes[0].nodeValue
if oAttributes.nodeName.lower() == "watchfor":
watchfor = oAttributes.childNodes[0].nodeValue
if oAttributes.nodeName.lower() == "sms":
sms = oAttributes.childNodes[0].nodeValue
if oAttributes.nodeName.lower() == "timeoutalert":
timeoutalert = oAttributes.childNodes[0].nodeValue
if oAttributes.nodeName.lower() == "active":
active = oAttributes.childNodes[0].nodeValue
if type == "http":
if port == "0":
port = "80"
if active == "1":
CheckHTTPServer(host, port, recipients, watchfor, warnif, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "smtp":
if port == "0":
port = "25"
if active == "1":
CheckSMTPServer(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "pop3" or type == "pop":
if port == "0":
port = "110"
if active == "1":
CheckPOP3Server(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "imap" or type == "imap4":
if port == "0":
port = "143"
if active == "1":
CheckIMAPServer(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "pop3ssl" or type == "popssl":
if port == "0":
port = "995"
if active == "1":
CheckPOP3SSLServer(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
if type == "imapssl" or type == "imap4ssl":
if port == "0":
port = "993"
if active == "1":
CheckIMAPSSLServer(host, port, recipients, watchfor, sms, timeoutalert)
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Ignoring inactive server " + type + "://" + host + ":" + port + " - edit config to reenable")
def CheckPOP3SSLServer(host, port, recipients, watchfor, sms, timeoutalert):
    """Checks a POP3 (SSL) server by opening a connection and queues an alert email on failure
    Returns nothing."""
try:
a = poplib.POP3_SSL(host, int(port))
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Connected successfully to POP3 (SSL) host '" + host + "' on port " + port)
except:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": ERROR can't connect to POP3 (SSL) host '" + host + "' on port " + port + " Error- " + str(sys.exc_info()[0]))
CreateEmailMessage(recipients, "POP3 (SSL) Error: Can't connect to '" + host + "' at " + now.strftime("%Y-%m-%d %H:%M") + " (" + str(sys.exc_info()[0]) + ")", "POP3", sms)
def CheckPOP3Server(host, port, recipients, watchfor, sms, timeoutalert):
    """Checks a POP3 server by opening a connection and queues an alert email on failure
    Returns nothing."""
try:
a = poplib.POP3(host, int(port), 15)
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Connected successfully to POP3 host '" + host + "' on port " + port)
except:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": ERROR can't connect to POP3 host '" + host + "' on port " + port + " Error- " + str(sys.exc_info()[0]))
CreateEmailMessage(recipients, "POP3 Error: Can't connect to '" + host + "' at " + now.strftime("%Y-%m-%d %H:%M") + " (" + str(sys.exc_info()[0]) + ")", "POP3", sms)
def CheckIMAPSSLServer(host, port, recipients, watchfor, sms, timeoutalert):
    """Checks an IMAP4 (SSL) server by opening a connection and queues an alert email on failure
    Returns nothing."""
try:
a = imaplib.IMAP4_SSL(host, int(port))
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Connected successfully to IMAP4 (SSL) host '" + host + "' on port " + port)
except:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": ERROR can't connect to IMAP4 (SSL) host '" + host + "' on port " + port + " Error- " + str(sys.exc_info()[0]))
CreateEmailMessage(recipients, "IMAP4 (SSL) Error: Can't connect to '" + host + "' at " + now.strftime("%Y-%m-%d %H:%M"), "IMAP4", sms)
def CheckIMAPServer(host, port, recipients, watchfor, sms, timeoutalert):
    """Checks an IMAP4 server by opening a connection and queues an alert email on failure
    Returns nothing."""
try:
a = imaplib.IMAP4(host, int(port))
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Connected successfully to IMAP4 host '" + host + "' on port " + port)
except:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": ERROR can't connect to IMAP4 host '" + host + "' on port " + port + " Error- " + str(sys.exc_info()[0]))
CreateEmailMessage(recipients, "IMAP4 Error: Can't connect to '" + host + "' at " + now.strftime("%Y-%m-%d %H:%M"), "IMAP4", sms)
def CheckSMTPServer(host, port, recipients, watchfor, sms, timeoutalert):
    """Checks an SMTP server by connecting and issuing EHLO, and queues an alert email on failure
    Returns nothing."""
try:
smtpserver = smtplib.SMTP(host, int(port), '', 30)
smtpserver.ehlo()
smtpserver.quit()
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Connected successfully to SMTP host '" + host + "' on port " + port)
except:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": ERROR can't connect to SMTP host '" + host + "'")
CreateEmailMessage(recipients, "SMTP Error: Can't connect to '" + host + "' at " + now.strftime("%Y-%m-%d %H:%M"), "SMTP", sms)
def CheckHTTPServer(host, port, recipients, watchfor, warnif, sms, timeoutalert):
    """Fetches the HTTP host and checks the page content for the watchfor/warnif strings, queuing alert emails on failure or match
    Returns nothing."""
try:
page = urllib.request.urlopen(host)
pagedata = str( page.read() )
pagedata = pagedata.lower()
sms = sms.lower()
watchfor = watchfor.lower()
        if sms not in ("true", "1", "yes"):
sms = "0"
if warnif != "":
            if pagedata.find(warnif.lower()) != -1:
                CreateEmailMessage(recipients, "HTTP Error: Found '" + warnif + "' in " + host + " at " + now.strftime("%Y-%m-%d %H:%M"), "HTTP", sms)
                MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": ERROR '" + warnif + "' was found in HTTP host " + host + "")
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": '" + warnif + "' was not found in HTTP host " + host + "")
if watchfor != "":
if pagedata.find( str(watchfor) ) == -1:
CreateEmailMessage(recipients, "HTTP Error: Can't find '" + watchfor + "' in " + host + " at " + now.strftime("%Y-%m-%d %H:%M"), "HTTP", sms)
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": ERROR '" + watchfor + "' was NOT found in HTTP host " + host + "")
else:
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": '" + watchfor + "' was found in HTTP host " + host + "")
    except Exception as error:
print("error", error)
CreateEmailMessage(recipients, "HTTP Error: Can't connect to " + host + " at " + now.strftime("%Y-%m-%d %H:%M"), "HTTP", sms)
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Can't connect to HTTP host " + host + "")
def CreateEmailMessage(recipients, message, type, sms):
"""Creates an email message and stacks them on the EMAILS global variable
Returns nothing."""
for emails in recipients:
for recipient in emails.split(","):
if recipient in EMAILS:
EMAILS[recipient] = EMAILS[recipient] + "\n" + message
else:
EMAILS[recipient] = message
def SendEmails():
"""Parses through EMAIL global variable and sends error emails via SMTP
Returns nothing."""
global MAILSERVER
global MAILSERVERPORT
global MAILSERVERSTARTTLS
global MAILSERVERUSERNAME
global MAILSERVERPASSWORD
global MAILFROM
global MAILSUBJECT
MAILSERVERPORT = int(MAILSERVERPORT)
day = now.strftime('%a')
date = now.strftime('%d %b %Y %X %z')
for recipients in EMAILS.keys():
message = EMAILS[recipients]
if message.lstrip().rstrip() != "" and recipients != "":
email = """\
From: %s
To: %s
Subject: %s
Date: %s
%s
""" % (MAILFROM, recipients, MAILSUBJECT, day + ', ' + date, message)
server = smtplib.SMTP(MAILSERVER,MAILSERVERPORT)
if int(MAILSERVERSTARTTLS) > 0:
server.starttls()
server.ehlo()
server.login(MAILSERVERUSERNAME,MAILSERVERPASSWORD)
MakeLog( now.strftime("%Y-%m-%d %H:%M") + ": Sending Email Message to '" + recipients + "'")
server.sendmail(MAILFROM,recipients, email)
server.quit()
def MakeLog(logstring):
if int(CHATTY) > 0:
print(logstring)
now = datetime.datetime.now()
if int(CHATTY) > 0:
print("Starting ISPWatcher2 at " + now.strftime("%Y-%m-%d %H:%M"))
# Found a JSON File
path = sys.path[0] + "/controlfile.json"
if os.path.isfile(path):
if int(CHATTY) > 0:
print("* Using config file " + path)
json_settings = open(path)
settings = json.load(json_settings)
json_settings.close()
    CheckServerJSON(settings)
    if int(SENDEMAILS) > 0:
        SendEmails()
    else:
        if int(CHATTY) > 0:
            print("* TEST MODE ENABLED - not sending emails")
else:
# Found an XML File
path = sys.path[0] + "/ControlFile.xml"
if os.path.isfile(path):
if int(CHATTY) > 0:
print("* Using config file " + path)
dom = minidom.parse(path)
for oDocument in dom.childNodes:
for oServer in oDocument.childNodes:
CheckServerXML(oServer)
if SENDEMAILS > 0:
SendEmails()
else:
if int(CHATTY) > 0:
print("* TEST MODE ENABLED - not sending emails")
else:
path = sys.path[0] + "/controlfile.xml"
if os.path.isfile(path):
if int(CHATTY) > 0:
print("* Using config file " + path)
dom = minidom.parse(path)
for oDocument in dom.childNodes:
for oServer in oDocument.childNodes:
CheckServerXML(oServer)
if int(SENDEMAILS) > 0:
SendEmails()
else:
if int(CHATTY) > 0:
print("* TEST MODE ENABLED - not sending emails")
else:
print("* No controlfile.json or controlfile.xml was found")
```
|
{
"source": "jdh4/tigergpu_visualization",
"score": 3
}
|
#### File: jdh4/tigergpu_visualization/alert_checkgpu.py
```python
import subprocess
def low_utilization():
cmd = "/home/jdh4/bin/checkgpu -d 2 -c 10 -g 24"
output = subprocess.run(cmd, shell=True, capture_output=True)
lines = output.stdout.decode("utf-8").split('\n')
if "No results were found" in lines[5]:
return []
else:
cases = []
skip = ['mcmuniz', 'dongdong']
lines = lines[8:-3]
for line in lines:
parts = line.split()
user = parts[0]
util = parts[1]
std = parts[2]
hours = parts[3]
if int(util) == 0 and int(std) == 0:
cases.append([user, util, std, hours])
elif user not in skip:
cases.append([user, util, std, hours])
else:
pass
return cases
if __name__ == '__main__':
cases = low_utilization()
if cases:
SERVER = "localhost"
FROM = "<EMAIL>"
TO = ["<EMAIL>"]
SUBJECT = "Alert: checkgpu"
TEXT = "\n".join([" ".join(case) for case in cases])
# prepare actual message
message = """\
From: %s
To: %s
Subject: %s
%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
import smtplib
server = smtplib.SMTP(SERVER)
server.sendmail(FROM, TO, message)
server.quit()
```
|
{
"source": "jdha/cmip6-object-store",
"score": 2
}
|
#### File: cmip6_object_store/cmip6_zarr/compare.py
```python
import glob
import random
import traceback
import warnings
import xarray as xr
from cmip6_object_store.cmip6_zarr.results_store import get_results_store, get_verification_store
from cmip6_object_store.cmip6_zarr.utils import (
get_archive_path,
get_var_id,
read_zarr,
)
def compare_zarrs_with_ncs(project, n_to_test=5, dataset_id=None):
"""
Randomly selects some datasets and checks that the contents
of a NetCDF files in the archive matches that in a Zarr file
in the Caringo object store.
This logs its outputs for use elsewhere.
If a dataset ID is passed in, this will check the specified single
dataset ID instead of a random sample (and n_to_test is ignored)
"""
tested = []
successes, failures = 0, 0
verification_store = get_verification_store(project)
    if dataset_id is None:
print(f"\nVerifying up to {n_to_test} datasets for: {project}...")
results_store = get_results_store(project)
dataset_ids = results_store.get_successful_runs()
force = False
else:
print(f"\nComparing single dataset {dataset_id} for: {project}...")
n_to_test = 1
dataset_ids = [dataset_id]
force = True
while len(tested) < n_to_test:
dataset_id = random.choice(dataset_ids)
if not force:
if dataset_id in tested or verification_store.ran_successfully(dataset_id):
continue
print(f"==========================\nVerifying: {dataset_id}")
try:
_compare_dataset(dataset_id, project)
verification_store.insert_success(dataset_id)
successes += 1
print(f"Comparison succeeded for: {dataset_id}")
except Exception as exc:
verification_store.insert_failure(dataset_id, f'failed: {exc}')
failures += 1
tb = traceback.format_exc()
print(f"FAILED comparison for {dataset_id}: traceback was\n\n: {tb}")
tested.append(dataset_id)
total = successes + failures
return (successes, total)
def _get_nc_file(dataset_id, project):
archive_dir = get_archive_path(dataset_id, project)
nc_files = glob.glob(f"{archive_dir}/*.nc")
if not nc_files:
return None
return nc_files[0]
def _compare_dataset(dataset_id, project):
nc_file = _get_nc_file(dataset_id, project)
if not nc_file:
return False
print(f"\nWorking on: {dataset_id}")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
nc_subset = xr.open_dataset(nc_file)
zarr_ds = read_zarr(dataset_id, project)
zarr_subset = zarr_ds.sel(time=slice(nc_subset.time[0], nc_subset.time[-1]))
result = nc_subset.identical(zarr_subset)
print(f"Testing: {dataset_id}")
print(f"\tResult: {result}")
for prop in ("data_vars", "coords"):
a, b = [
sorted(list(_.keys()))
for _ in (getattr(nc_subset, prop), getattr(zarr_subset, prop))
]
print(f'\nComparing "{prop}": {a} \n------------\n {b}')
assert a == b
a, b = nc_subset.time.values, zarr_subset.time.values
assert list(a) == list(b)
print("Times are identical")
var_id = get_var_id(dataset_id, project=project)
a_var, b_var = nc_subset[var_id], zarr_subset[var_id]
a_min, a_max = float(a_var.min()), float(a_var.max())
b_min, b_max = float(b_var.min()), float(b_var.max())
assert a_min == b_min
print("Minima are identical")
assert a_max == b_max
print("Maxima are identical")
for attr in ("units", "long_name"):
a, b = getattr(a_var, attr), getattr(b_var, attr)
print(f"{attr}: {a} VS {b}")
assert a == b
return result
```
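The core of `_compare_dataset()` boils down to slicing the Zarr store to the NetCDF file's time range and calling `xarray.Dataset.identical()`. A minimal standalone sketch, with placeholder paths:

```python
import xarray as xr

nc_ds = xr.open_dataset("sample.nc")    # placeholder path
zarr_ds = xr.open_zarr("sample.zarr")   # placeholder path

# Restrict the Zarr dataset to the NetCDF file's time span, then compare
subset = zarr_ds.sel(time=slice(nc_ds.time[0], nc_ds.time[-1]))
print(nc_ds.identical(subset))  # True when values, coords and attrs all match
```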
#### File: cmip6_object_store/cmip6_zarr/intake_cat.py
```python
import os
from functools import wraps
from time import time
import numpy as np
import pandas as pd
from cmip6_object_store import CONFIG, logging
from cmip6_object_store.cmip6_zarr.utils import (
get_archive_path,
get_zarr_url,
read_zarr,
)
from cmip6_object_store.cmip6_zarr.results_store import get_results_store
LOGGER = logging.getLogger(__file__)
def timer(f):
@wraps(f)
def wrap(*args, **kw):
ts = time()
result = f(*args, **kw)
te = time()
print(f"func: {f.__name__} args: [{args} {kw}] took: {(te-ts):2.4f} sec")
return result
return wrap
class IntakeCatalogue:
def __init__(self, project, limit=None):
self._iconf = CONFIG["intake"]
self._project = project
self._results_store = get_results_store(self._project)
self._limit = limit
def create(self):
self._create_json()
self._create_csv()
def _create_json(self):
template_file = self._iconf["json_template"]
with open(template_file) as reader:
content = reader.read()
description = self._iconf["description_template"].format(project=self._project)
cat_id = self._iconf["id_template"].format(project=self._project)
csv_catalog_url = self._iconf["csv_catalog_url"].format(project=self._project)
json_catalog = self._iconf["json_catalog"].format(project=self._project)
content = (
content.replace("__description__", description)
.replace("__id__", cat_id)
.replace("__cat_file__", csv_catalog_url)
)
with open(json_catalog, "w") as writer:
writer.write(content)
LOGGER.info(f"Wrote intake JSON catalog: {json_catalog}")
def _create_csv(self):
csv_catalog = self._iconf["csv_catalog"].format(project=self._project)
# if os.path.isfile(csv_catalog):
# raise FileExistsError(f'File already exists: {csv_catalog}')
# Read in Zarr catalogue
zarr_cat_as_df = self._get_zarr_df()
zarr_cat_as_df.to_csv(csv_catalog, index=False)
LOGGER.info(
f"Wrote {len(zarr_cat_as_df)} records to CSV catalog file:\n {csv_catalog}"
)
@timer
def _get_zarr_df(self):
# Read in Zarr results store and convert to DataFrame, and return
dataset_ids = self._results_store.get_successful_runs()
print(f"{len(dataset_ids)} datasets")
headers = [
"mip_era",
"activity_id",
"institution_id",
"source_id",
"experiment_id",
"member_id",
"table_id",
"variable_id",
"grid_label",
"version",
"dcpp_start_year",
"time_range",
"zarr_path",
"nc_path",
]
rows = []
for row, dataset_id in enumerate(dataset_ids):
if self._limit is not None and row == self._limit:
break
items = dataset_id.split(".")
dcpp_start_year = self._get_dcpp_start_year(dataset_id)
temporal_range = self._get_temporal_range(dataset_id)
zarr_url = get_zarr_url(dataset_id, self._project)
nc_path = get_archive_path(dataset_id, self._project) + "/*.nc"
items.extend([dcpp_start_year, temporal_range, zarr_url, nc_path])
rows.append(items[:])
return pd.DataFrame(rows, columns=headers)
def _get_dcpp_start_year(self, dataset_id):
member_id = dataset_id.split(".")[5]
        if "-" not in member_id or not member_id.startswith("s"):
return np.nan
return member_id.split("-")[0][1:]
def _get_temporal_range(self, dataset_id):
try:
nc_files = os.listdir(get_archive_path(dataset_id, self._project))
nc_files = [
fn for fn in nc_files if not fn.startswith(".") and fn.endswith(".nc")
]
time_ranges = [fn.split(".")[-2].split("_")[-1].split("-") for fn in nc_files]
start = f"{min(int(tr[0]) for tr in time_ranges)}"
if len(start) == 4:
start += "01"
end = f"{max(int(tr[1]) for tr in time_ranges)}"
if len(end) == 4:
end += "12"
time_range = f"{start}-{end}"
LOGGER.info(f"Found {time_range} for {dataset_id}")
except Exception as exc:
LOGGER.warning(f"FAILED TO GET TEMPORAL RANGE FOR: {dataset_id}: {exc}")
time_range = ""
# ds = read_zarr(dataset_id, self._project, use_cftime=True)
# time_var = ds.time.values
# time_range = "-".join(
# [tm.strftime("%Y%m") for tm in (time_var[0], time_var[-1])]
# )
# ds.close()
return time_range
def create_intake_catalogue(project, limit=None):
cat = IntakeCatalogue(project, limit=limit)
cat.create()
if __name__ == '__main__':
create_intake_catalogue(CONFIG["workflow"]["default_project"])
```
#### File: cmip6_object_store/cmip6_zarr/utils.py
```python
import json
import os
import uuid
import s3fs
import xarray as xr
from ..config import CONFIG, get_from_proj_or_workflow
def get_credentials(creds_file=None):
if not creds_file:
creds_file = CONFIG["store"]["credentials_file"]
with open(creds_file) as f:
creds = json.load(f)
return creds
def get_uuid():
_uuid = uuid.uuid4()
return _uuid
def get_var_id(dataset_id, project):
var_index = CONFIG[f"project:{project}"]["var_index"]
return dataset_id.split(".")[var_index]
def create_dir(dr):
if not os.path.isdir(dr):
os.makedirs(dr)
def split_string_at(s, sep, indx):
items = s.split(sep)
first, last = sep.join(items[:indx]), sep.join(items[indx:])
return first, last
def to_dataset_id(path, project):
if ("/") in path:
path = path.replace("/", ".")
prefix = get_from_proj_or_workflow("bucket_prefix", project)
if not path.startswith(prefix):
raise ValueError(f"path {path} does not start with expected prefix {prefix}")
path = path[len(prefix):]
items = path.split(".")
if items[-1].endswith(".nc") or items[-1] == "zarr":
items = items[:-1]
n_facets = CONFIG[f"project:{project}"]["n_facets"]
return ".".join(items[-n_facets:])
def get_zarr_url(dataset_id, project):
prefix = CONFIG["store"]["endpoint_url"]
zarr_path = get_zarr_path(dataset_id, project, join=True)
return f"{prefix}{zarr_path}"
def get_zarr_path(dataset_id, project, join=False):
split_at = get_from_proj_or_workflow("split_level", project)
prefix = get_from_proj_or_workflow("bucket_prefix", project)
parts = dataset_id.split(".")
bucket = prefix + ".".join(parts[:split_at])
zarr_file = ".".join(parts[split_at:]) + ".zarr"
if join:
return f"{bucket}/{zarr_file}"
else:
return (bucket, zarr_file)
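# Illustrative sketch only (the values below are assumptions, not taken from the
# project config): with split_level=4 and bucket_prefix="cmip6-zarr-", a
# hypothetical dataset ID
#   "CMIP6.CMIP.MIROC.MIROC6.amip.r6i1p1f1.Amon.tasmin.gn.v20181214"
# would split into
#   bucket    -> "cmip6-zarr-CMIP6.CMIP.MIROC.MIROC6"
#   zarr_file -> "amip.r6i1p1f1.Amon.tasmin.gn.v20181214.zarr"
# and get_zarr_path(..., join=True) would return "<bucket>/<zarr_file>".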
def read_zarr(path, project, **kwargs):
dataset_id = to_dataset_id(path, project)
zarr_path = get_zarr_path(dataset_id, project, join=True)
endpoint_url = CONFIG["store"]["endpoint_url"]
jasmin_s3 = s3fs.S3FileSystem(
anon=True, client_kwargs={"endpoint_url": endpoint_url}
)
s3_store = s3fs.S3Map(root=zarr_path, s3=jasmin_s3)
ds = xr.open_zarr(store=s3_store, consolidated=True, **kwargs)
return ds
def get_archive_path(path, project):
dataset_id = to_dataset_id(path, project)
archive_dir = CONFIG[f"project:{project}"]["archive_dir"]
return os.path.join(archive_dir, dataset_id.replace(".", "/"))
```
#### File: cmip6_object_store/misc/change_perms.py
```python
import json
import boto3
import botocore
from cmip6_object_store.config import CONFIG
from cmip6_object_store.cmip6_zarr.utils import get_zarr_path, get_credentials
project = 'cmip6'
"""
Changes permissions on all buckets, and writes output files "successes" and
"failures" with relevant lists. Note that (deliberately), "failures" only
apply to "Access Denied" - a failure for any other reason will abort this
script.
"""
class PolicyChanger:
def __init__(self):
creds = get_credentials()
s3_uri = CONFIG["store"]["endpoint_url"]
self._clnt = boto3.client('s3', endpoint_url=s3_uri,
aws_access_key_id=creds['token'],
aws_secret_access_key=creds['secret'])
policy = {
"Version": "2008-10-17",
"Id": "Read All Policy",
"Statement": [
{
"Sid": "Read-only and list bucket access for Everyone",
"Effect": "Allow",
"Principal": {
"anonymous": [
"*"
]
},
"Action": [
"GetObject",
"ListBucket"
],
"Resource": "*"
}
]
}
self._bucket_policy_s = json.dumps(policy)
def change_bucket_policy(self, bucket):
self._clnt.put_bucket_policy(Bucket=bucket, Policy=self._bucket_policy_s)
def get_buckets():
datasets_file = CONFIG["datasets"]["datasets_file"]
buckets = set()
with open(datasets_file) as f:
for line in f:
dataset_id = line.strip().split(',')[0]
bucket, obj_name = get_zarr_path(dataset_id, project)
buckets.add(bucket)
return sorted(buckets)
def main():
changer = PolicyChanger()
buckets = get_buckets()
n = len(buckets)
with open('successes', 'w') as succlog, open('failures', 'w') as faillog:
for i, bucket in enumerate(buckets):
print(f'{i}/{n} {bucket} ', end='')
try:
changer.change_bucket_policy(bucket)
except botocore.exceptions.ClientError as exc:
if 'Access Denied' in str(exc):
print('Access denied')
faillog.write(f'{bucket}\n')
else:
raise
else:
print('Success')
succlog.write(f'{bucket}\n')
main()
```
#### File: cmip6_object_store/test/writeit.py
```python
import os
import s3fs
import xarray as xr
import json
import time
import sys
from memory_profiler import profile
os.environ.update({
'OMP_NUM_THREADS': '1',
'MKL_NUM_THREADS': '1',
'OPENBLAS_NUM_THREADS': '1',
'VECLIB_MAXIMUM_THREADS': '1',
'NUMEXPR_NUM_THREADS': '1'})
opts = {
'bucket': '00alantest',
'endpoint_url': 'http://cmip6-zarr-o.s3.jc.rl.ac.uk/',
'creds_file': f'{os.environ["HOME"]}/.credentials/caringo-credentials.json',
# list of 2-tuples: (dataset-id, chunk length) None means don't chunk
'datasets': [('CMIP6.CMIP.MIROC.MIROC6.amip.r6i1p1f1.Amon.tasmin.gn.v20181214', None),
('CMIP6.ScenarioMIP.IPSL.IPSL-CM6A-LR.ssp126.r3i1p1f1.day.zg.gr.v20190410', 2)]
}
def get_input_files(ds_id):
dirname = os.path.join('/badc/cmip6/data',
ds_id.replace('.', '/'))
return [os.path.join(dirname, file)
for file in os.listdir(dirname)
if file.endswith('.nc')]
def make_bucket(fs, bucket, max_tries=3, sleep_time=3):
tries = 0
while not fs.exists(bucket):
try:
print("try to make bucket")
fs.mkdir(bucket)
except KeyboardInterrupt:
raise
except Exception as exc:
tries += 1
print(f"making bucket failed ({tries} tries): {exc}")
if tries == max_tries:
print("giving up")
raise
time.sleep(sleep_time)
print(f"bucket {bucket} now exists")
def get_store_map(zpath, fs):
return s3fs.S3Map(root=zpath, s3=fs)
def show_first_data_value(ds):
"show first data value of any 3d fields"
for name, var in ds.items():
if len(var.shape) == 3:
print(f"{name} {float(var[0,0,0].values)}")
def remove_path(zpath, fs):
if fs.exists(zpath):
print(f"removing existing {zpath}")
fs.rm(zpath, True)
tries = 0
while fs.exists(zpath):
tries += 1
if tries == 5:
raise Exception(f"could not remove {zpath}")
time.sleep(1)
# simplified version assuming CMIP6
def get_var_id(ds_id):
return ds_id.split('.')[-3]
# simplified version that takes chunk_length as input
def get_chunked_ds(ds_id, ds, chunk_length):
print(f'chunking with length={chunk_length}')
var_id = get_var_id(ds_id)
chunked_ds = ds.chunk({"time": chunk_length})
chunked_ds[var_id].unify_chunks()
return chunked_ds
@profile(precision=1)
def main(opts):
creds = json.load(open(opts['creds_file']))
endpoint_url = opts["endpoint_url"]
fs = s3fs.S3FileSystem(anon=False,
secret=creds['secret'],
key=creds['token'],
client_kwargs={'endpoint_url': endpoint_url},
config_kwargs={'max_pool_connections': 50})
make_bucket(fs, opts["bucket"])
for ds_id, chunk_length in opts["datasets"]:
print(f"DATASET: {ds_id}")
zarr_file = ds_id
zpath = f'{opts["bucket"]}/{zarr_file}'
zarr_url = f'{endpoint_url}{zpath}'
store_map = get_store_map(zpath, fs)
input_files = get_input_files(ds_id)
print("opening xarray dataset")
ds = xr.open_mfdataset(input_files, use_cftime=True, combine='by_coords')
show_first_data_value(ds)
remove_path(zpath, fs)
        ds_to_write = ds if chunk_length is None else get_chunked_ds(ds_id, ds, chunk_length)
print("writing data")
ds_to_write.to_zarr(store=store_map, mode='w', consolidated=True)
if fs.exists(zpath):
print(f"wrote: {zarr_url}")
else:
raise Exception(f"could not write {zarr_url}")
print()
main(opts)
```
|
{
"source": "jdhaines/MIT6.00",
"score": 4
}
|
#### File: MIT6.00/PS1/ps1.py
```python
import os, sys
# Problem Set 1
# Name: <NAME>
# Collaborators: None
# Time Spent: ~7hrs
# 3 Credit Card Debt Problems
# Problem 1: Paying the minimum. Calculate the credit card balance after
# one year if a person only pays the minimum monthly payment required by
# the credit card company each month.
#
# Problem 2: Calculating the minimum payment you can make and still pay
# off the loan in a year. Two loops will iterate through a year to see
# final balance, and if it isn't big enough increase the guessed payment
# amount and re-run the inner loop until you pay off the loan.
# Problem 3: Very similar to problem 2, except use a bisection search
# for the guessPayment to speed up the program.
def minimumPayment(currentBalance, percent):
"""Take in a current balance and minimum payment percent, then return the
minimum payment"""
currentBalance = float(currentBalance) #convert argument to float
percent = float(percent) #convert argument to float
    if percent >= 100:
        sys.exit("Please provide a minimum payment percentage value less" + \
                 " than 100 (eg. 2.5, 13, etc.).")
    percent = percent / 100
payment = currentBalance * percent
return payment
def interestPaidPerMonth(annualPercent,currentBalance):
    """Function that takes in the annual interest rate percentage and the
    current principal balance and calculates how much interest must be
    paid in that month. Returns that interest amount."""
annualPercent = float(annualPercent) #convert argument to float
currentBalance = float(currentBalance) #convert argument to float
annualPercent = annualPercent / 100
interestPaid = annualPercent / 12 * currentBalance
return interestPaid
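# Quick sanity check with made-up numbers (not part of the assignment): a 12%
# annual rate on a $1000 balance gives 0.12 / 12 * 1000 = $10.00 of interest
# for that month, i.e. interestPaidPerMonth(12, 1000) returns 10.0.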
def newBalance(oldBalance, monthlyRate, guessPayment):
"""Calculate the remaining balance at the end of 12 months"""
newBalance = (oldBalance * (1 + monthlyRate)) - guessPayment
return newBalance
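# Worked example with made-up numbers: at a 1.5% monthly rate and a $100 payment,
# a $1000 balance becomes newBalance(1000, 0.015, 100) = 1000*1.015 - 100 = 915.0.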
def balanceAfterYear(creditCardBalance,monthlyRate,guessPayment):
"""This will take in the balance, monthlyRate, and payment then calculate
how much balance will be left after a year paying on the loan."""
remainingBalance = creditCardBalance
    #single loop: iterate month by month to find the balance left after a
    #year (or until the balance is paid off) at the trial payment amount
monthsNeeded = 0
while monthsNeeded < 12 and remainingBalance > 0:
remainingBalance = newBalance(remainingBalance,monthlyRate,guessPayment)
monthsNeeded += 1
return remainingBalance, monthsNeeded
def problemThreeCalcs(annualPercent,creditCardBalance):
"""This is the entire calculations for the third problem. This is pulled
into a function in order to use return to break a value out of multiple
levels of loops."""
monthlyRate = (annualPercent / 100) / 12.0
paymentLowerBound = creditCardBalance / 12
paymentUpperBound = (creditCardBalance * ((1 + monthlyRate) ** 12)) / 12
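    # Bound rationale: with zero interest the debt could be cleared by paying
    # balance/12 each month (lower bound); with no payments at all the debt
    # would grow to balance*(1+monthlyRate)**12 over the year, so paying one
    # twelfth of that every month is more than enough (upper bound).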
    #bisection loop: repeatedly halve the payment interval until a payment
    #is found that leaves a balance of exactly zero after a year
n = 1 #n is just a counter
while n <= 1000:
midPoint = (paymentLowerBound + paymentUpperBound) / 2 #new midpoint
endingBalance, monthsNeeded = balanceAfterYear(creditCardBalance,monthlyRate,midPoint)
endingBalance = round(float(endingBalance),2)
if endingBalance == 0:
return endingBalance,midPoint, n, monthsNeeded
n += 1
if (endingBalance > 0 and paymentLowerBound > 0) or (endingBalance < 0 and paymentLowerBound < 0): #adjust the interval
paymentLowerBound = midPoint
else:
paymentUpperBound = midPoint
def problemOne():
"""Call this function to kick off the problem 1 solution"""
#get the account balance and validate
print "Please enter the current account balance."
currentBalance = raw_input(">> ")
currentBalance = float(currentBalance)
#get the annual interest rate
print "Please enter the annual interest rate."
annualPercent = raw_input(">> ")
annualPercent = float(annualPercent)
#get the min payment percent and validate
print "Please enter the minimum payment percentage."
minPercent = raw_input(">> ")
minPercent = float(minPercent)
#main problem loop
remainingBalance = round(float(currentBalance),2)
totalPaid = 0.0
for i in range(1,13):
minimumMonthlyPayment = \
round(float(minimumPayment(remainingBalance,minPercent)),2)
principlePaid = round(float(minimumMonthlyPayment - \
interestPaidPerMonth(annualPercent,remainingBalance)),2)
interestPaid = \
round(float(interestPaidPerMonth(annualPercent,remainingBalance)),2)
remainingBalance = round((remainingBalance - principlePaid),2)
print remainingBalance
print "Month: %d" % (i)
print "Minimum Monthly Payment: $%.2f" % (minimumMonthlyPayment)
print "Principle paid: $%.2f" % (principlePaid)
print "Remaining Balance: $%.2f" % (remainingBalance)
totalPaid = totalPaid + principlePaid + interestPaid
print "RESULT"
print "Total Amount Paid: $%.2f" % totalPaid
print "Remaining Balance: $%.2f" % remainingBalance
start() #recall the start function to start over
def problemTwo():
"""Call this function to kick off the problem 2 solution"""
#get the credit card balance
print "Please enter the outstanding credit card balance."
creditCardBalance = raw_input(">> ")
creditCardBalance = float(creditCardBalance)
#get the annual interest rate
print "Please enter the annual interest rate."
annualPercent = raw_input(">> ")
annualPercent = float(annualPercent)
print "RESULT"
guessPayment = 0.00
remainingBalance = creditCardBalance
monthlyRate = (annualPercent / 100) / 12.0
#main problem loops. outer loop iterates if the guess isn't big enough
#inner loop iterates to find the balance after a year of trial payments
while remainingBalance > 0:
remainingBalance = creditCardBalance
monthsNeeded = 0
guessPayment = guessPayment + .01
#remainingBalance = balanceAfterYear(creditCardBalance,annualPercent,guessPayment,monthsNeeded)
while monthsNeeded < 12 and remainingBalance > 0:
remainingBalance = newBalance(remainingBalance,monthlyRate,guessPayment)
monthsNeeded += 1
    print "Monthly payment to pay off debt in 1 year: %.2f" % guessPayment
print "Number of months needed: %d" % monthsNeeded
print "Balance: %.2f" % remainingBalance
start() #recall the start function to start over
def problemThree():
"""Call this function to kick off the problem 3 solution"""
#get the credit card balance
print "Please enter the outstanding credit card balance."
creditCardBalance = raw_input(">> ")
creditCardBalance = float(creditCardBalance)
#get the annual interest rate
print "Please enter the annual interest rate."
annualPercent = raw_input(">> ")
annualPercent = float(annualPercent)
print "RESULT"
endingBalance, midPoint, iterations, monthsNeeded = problemThreeCalcs(annualPercent,creditCardBalance,)
print "Monthly payment to pay off debt in 1 year: %.2f" % midPoint
print "Number of months needed: %d" % monthsNeeded ##fix this
print "Balance: %.2f" % endingBalance
print "It took %d iterations." % iterations
start() #recall the start function to start over
def start():
"""Function to kick things off."""
print "Welcome to the credit card processing center."
print "If you would like to calculate your credit card balance after" + \
" a year of only paying the minimum payment...please Press 1."
print "If you would like to calculate your minimum payment to pay off" + \
" your balance in 12 months...please Press 2."
    print "If you would like to calculate your minimum payment to pay off" + \
          " your balance in 12 months...please Press 3. (Bisection search solution)"
print "If you would like to exit...please press 4"
choice = raw_input(">> ")
choice = int(choice)
if isinstance( choice, int ):
if choice == 1:
problemOne()
elif choice == 2:
problemTwo()
elif choice == 3:
problemThree()
elif choice == 4:
sys.exit("exiting...")
else:
sys.exit("You are dumb, exiting...")
start();
#end
```
#### File: MIT6.00/PS3/ps3b.py
```python
from ps3a import *
# import time
from perm import *
#
# Problem #6A: Computer chooses a word
#
def comp_choose_word(hand, word_list):
"""
Given a hand and a word_dict, find the word that gives the maximum value
score, and return it. This word should be calculated by considering
all possible permutations of lengths 1 to HAND_SIZE.
hand: dictionary (string -> int)
word_list: list (string)
"""
from random import randint
n = 10
while n > 8:
n = randint(0, calculate_handlen(hand))
perms = get_perms(hand, n)
for i in perms:
if i in word_list:
return i
    # Rarely this returns None, make sure to check and
    # re-call this function in case that happens. It
    # usually only happens when the random number is very small.
# The timing also gets out of hand when the random
# number goes much over 8. The upper while loop should
# minimize these problems.
# Turns out they want None to be the symbol that the computer
# is done playing...
#
# Problem #6B: Computer plays a hand
#
def comp_play_hand(hand, word_list):
"""
Allows the computer to play the given hand, as follows:
* The hand is displayed.
* The computer chooses a word using comp_choose_words(hand, word_dict).
* After every valid word: the score for that word is displayed,
the remaining letters in the hand are displayed, and the computer
chooses another word.
* The sum of the word scores is displayed when the hand finishes.
* The hand finishes when the computer has exhausted its possible choices
(i.e. comp_play_hand returns None).
hand: dictionary (string -> int)
word_list: list (string)
"""
end_hand = 0
hand_total = 0
while calculate_handlen(hand) > 0 and end_hand == 0:
print()
print('Your Hand: {}'.format(display_hand(hand)))
print()
# loop until given a valid word
valid_word = 0
word_total = 0
while valid_word == 0:
print('Enter a valid word, or a "." to indicate you are finished.')
word = comp_choose_word(hand, word_list)
if word == '.':
print('Total Score: {}'.format(hand_total))
return hand_total
elif word is None:
print('No more plays, hand is over.')
return hand_total
elif is_valid_word(word, hand, word_list) is False:
print('That word is invalid.')
valid_word = 0
else:
word_total = get_word_score(word, calculate_handlen(hand))
hand_total += word_total
                print('"{}" earned {} points. Hand total: {} points.'.format(
word, word_total, hand_total))
valid_word = 1
hand = update_hand(hand, word)
#
# Problem #6C: Playing a game
#
def play_game(word_list):
"""Allow the user to play an arbitrary number of hands.
1) Asks the user to input 'n' or 'r' or 'e'.
* If the user inputs 'n', play a new (random) hand.
* If the user inputs 'r', play the last hand again.
* If the user inputs 'e', exit the game.
* If the user inputs anything else, ask them again.
2) Ask the user to input a 'u' or a 'c'.
* If the user inputs 'u', let the user play the game as before using
play_hand.
* If the user inputs 'c', let the computer play the game using
comp_play_hand (created above).
* If the user inputs anything else, ask them again.
3) After the computer or user has played the hand, repeat from step 1
word_list: list (string)
"""
old_hand = deal_hand(HAND_SIZE) # Make sure old hand isn't empty
# Opening Message
human_score = 0
pc_score = 0
play = 1
while play == 1:
new_hand = deal_hand(HAND_SIZE)
print('Welcome to the word game.')
print('Human score is: {}'.format(human_score))
print('PC Score is: {}'.format(pc_score))
print('If you would like to play a new (random) hand, press "n".')
print('If you would like to play the last hand again, press "r".')
print('If you would like to exit, press "e"')
choice = input('>>> ')
if choice == 'e':
print('Thanks for playing!')
exit()
print('If you would like to play the next hand, press "u".')
print('If you would like the computer to play the hand, press "c".')
print('If you would like to exit, press "e"')
who_plays = input('>>> ')
if who_plays == 'u': # User plays
if choice == 'n':
human_score += play_hand(new_hand, word_list)
old_hand = new_hand # Update the old hand with the new one
elif choice == 'r':
human_score += play_hand(old_hand, word_list)
else:
print('Your selection was not understood. Try again... ')
elif who_plays == 'c': # Computer Plays
if choice == 'n':
pc_score += comp_play_hand(new_hand, word_list)
old_hand = new_hand # Update the old hand with the new one
elif choice == 'r':
pc_score += comp_play_hand(old_hand, word_list)
else:
print('Your selection was not understood. Try again... ')
elif who_plays == 'e': # exit
print('Thanks for playing!')
exit()
else:
print('Your selection was not understood.')
print('Thanks for playing!')
exit()
#
# Build data structures used for entire session and play game
#
if __name__ == '__main__':
word_list = load_words()
play_game(word_list)
```
|
{
"source": "jdhaisne/kaggle-house-price",
"score": 3
}
|
#### File: jdhaisne/kaggle-house-price/main.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.ensemble import IsolationForest
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
def clean_data(data):
cols_proto = ['GrLivArea', 'LotArea', '1stFlrSF', 'TotalBsmtSF', 'BsmtUnfSF',
'YearBuilt', 'GarageArea', 'MoSold', 'YearRemodAdd', 'OverallQual']
data = data.fillna(data.std())
return data[cols_proto]
data_train = pd.read_csv('train.csv')
data_Y = data_train[['SalePrice']]
data_X = data_train.drop(['SalePrice'], axis=1)
data_X = clean_data(data_X)
rng = np.random.RandomState(42)
forest = IsolationForest(max_samples=100, random_state=rng)
line_reg = LinearRegression()
scores = cross_val_score(line_reg, data_X, data_Y, cv=10)
line_reg.fit(data_X, data_Y)
print(scores.mean())
X_predict = pd.read_csv('test.csv')
X_predict_clean = clean_data(X_predict)
print(X_predict.describe())
Y_pred = line_reg.predict(X_predict_clean)
data = pd.DataFrame({'Id':X_predict.Id, 'SalePrice': np.reshape(Y_pred, (-1))})
data.set_index('Id').to_csv('my_test.csv')
```
|
{
"source": "jdhalimi/awesomeness",
"score": 3
}
|
#### File: awesomeness/awesomeness/__main__.py
```python
from argparse import ArgumentParser
from awesomeness import __version__
def main():
"""
command line interface entry point
"""
parser = ArgumentParser('awesomeness',
description=f'awesomeness version {__version__}')
parser.add_argument('--version', action='version',
version=__version__)
parser.parse_args()
if __name__ == '__main__':
main()
```
|
{
"source": "jdhardy/dlr",
"score": 3
}
|
#### File: tests/regressions/test_args.py
```python
import unittest
class ArgsRegression(unittest.TestCase):
def test_exec_and_splat_args(self):
exec 'def f(*args): return repr(args)'
self.assertEqual('(1, 2, 3, 4)', f(1,2,3,4))
```
#### File: tests/regressions/test_autoaddref.py
```python
import unittest
class AutoAddRefRegression(unittest.TestCase):
def test_referencecount(self):
import clr
self.assert_(len(clr.References) >= 6)
```
#### File: tests/regressions/test_issubclass.py
```python
import unittest
import clr
import System
class IsSubClassRegression(unittest.TestCase):
def x(self):
return 2
def y(self):
return 'abc'
def test_same_class(self):
self.assertFalse(self.x().GetType().IsSubclassOf(System.Int32))
self.assertFalse(self.y().GetType().IsSubclassOf(System.String))
def test_nonparent_class(self):
self.assertFalse(self.y().GetType().IsSubclassOf(System.Array))
self.assertFalse(self.y().GetType().IsSubclassOf(System.Int32))
def test_ancestor(self):
self.assert_(self.y().GetType().IsSubclassOf(System.Object))
self.assert_(self.y().GetType().IsSubclassOf(System.Object))
```
#### File: tests/regressions/test_name.py
```python
import unittest
class NameRegression(unittest.TestCase):
def test___name__(self):
self.assertEqual(__name__, 'regressions.test_name')
def test___name__inmodule(self):
from fixtures.child1 import child1_name
self.assertNotEqual(child1_name(), '__main__')
```
#### File: tests/regressions/test_thread.py
```python
import sys
import thread
import unittest
class ThreadRegression(unittest.TestCase):
def runthread(self, function, *args):
import time
thread_info = [None, None]
def testthread(*args2):
thread_info[1] = function(*args2)
thread_info[0] = thread.get_ident()
thread.start_new_thread(testthread, args)
start = time.time()
end = start + 10 # seconds
while not thread_info[0] and time.time() < end:
pass
threadid = thread_info[0]
result = thread_info[1]
if not result or not threadid:
self.fail('thread timed out without returning a result')
# verify that we didn't execute on the UI thread
        self.assertNotEqual(thread.get_ident(), threadid)
return result
def raise_exception(self):
raise StandardError("this is an exception that should be swallowed")
def background_import(self):
if 'System' in sys.modules: sys.modules.pop('System')
from System import Math
return Math.Floor(4.4)
def test_backgroundthreadthrow(self):
self.runthread(self.raise_exception)
self.assert_(True)
def test_import(self):
        self.assertEqual(4.0, self.runthread(self.background_import))
```
|
{
"source": "jdhardy/moto",
"score": 2
}
|
#### File: moto/ssm/models.py
```python
from __future__ import unicode_literals
from moto.core import BaseBackend, BaseModel
from moto.ec2 import ec2_backends
class Parameter(BaseModel):
def __init__(self, name, value, type, description, keyid):
self.name = name
self.type = type
self.description = description
self.keyid = keyid
if self.type == 'SecureString':
self.value = self.encrypt(value)
else:
self.value = value
def encrypt(self, value):
return 'kms:{}:'.format(self.keyid or 'default') + value
def decrypt(self, value):
if self.type != 'SecureString':
return value
prefix = 'kms:{}:'.format(self.keyid or 'default')
if value.startswith(prefix):
return value[len(prefix):]
def response_object(self, decrypt=False):
return {
'Name': self.name,
'Type': self.type,
'Value': self.decrypt(self.value) if decrypt else self.value
}
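# Rough illustration of the SecureString round trip above (hypothetical values,
# not part of the moto code): a Parameter created with type='SecureString' and
# keyid=None stores the value 'kms:default:hunter2' internally for an input of
# 'hunter2', and response_object(decrypt=True) returns the original 'hunter2'.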
class SimpleSystemManagerBackend(BaseBackend):
def __init__(self):
self._parameters = {}
def delete_parameter(self, name):
try:
del self._parameters[name]
except KeyError:
pass
def get_parameters(self, names, with_decryption):
result = []
for name in names:
if name in self._parameters:
result.append(self._parameters[name])
return result
def put_parameter(self, name, description, value, type, keyid, overwrite):
if not overwrite and name in self._parameters:
return
self._parameters[name] = Parameter(
name, value, type, description, keyid)
ssm_backends = {}
for region, ec2_backend in ec2_backends.items():
ssm_backends[region] = SimpleSystemManagerBackend()
```
#### File: tests/test_rds/test_rds.py
```python
from __future__ import unicode_literals
import boto3
import boto.rds
import boto.vpc
from boto.exception import BotoServerError
import sure # noqa
from moto import mock_ec2_deprecated, mock_rds_deprecated, mock_rds
from tests.helpers import disable_on_py3
@disable_on_py3()
@mock_rds_deprecated
def test_create_database():
conn = boto.rds.connect_to_region("us-west-2")
database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
security_groups=["my_sg"])
database.status.should.equal('available')
database.id.should.equal("db-master-1")
database.allocated_storage.should.equal(10)
database.instance_class.should.equal("db.m1.small")
database.master_username.should.equal("root")
database.endpoint.should.equal(
('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306))
database.security_groups[0].name.should.equal('my_sg')
@disable_on_py3()
@mock_rds_deprecated
def test_get_databases():
conn = boto.rds.connect_to_region("us-west-2")
list(conn.get_all_dbinstances()).should.have.length_of(0)
conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
conn.create_dbinstance("db-master-2", 10, 'db.m1.small', 'root', 'hunter2')
list(conn.get_all_dbinstances()).should.have.length_of(2)
databases = conn.get_all_dbinstances("db-master-1")
list(databases).should.have.length_of(1)
databases[0].id.should.equal("db-master-1")
@disable_on_py3()
@mock_rds
def test_get_databases_paginated():
conn = boto3.client('rds', region_name="us-west-2")
for i in range(51):
conn.create_db_instance(AllocatedStorage=5,
Port=5432,
DBInstanceIdentifier='rds%d' % i,
DBInstanceClass='db.t1.micro',
Engine='postgres')
resp = conn.describe_db_instances()
resp["DBInstances"].should.have.length_of(50)
resp["Marker"].should.equal(resp["DBInstances"][-1]['DBInstanceIdentifier'])
resp2 = conn.describe_db_instances(Marker=resp["Marker"])
resp2["DBInstances"].should.have.length_of(1)
@mock_rds_deprecated
def test_describe_non_existant_database():
conn = boto.rds.connect_to_region("us-west-2")
conn.get_all_dbinstances.when.called_with(
"not-a-db").should.throw(BotoServerError)
@disable_on_py3()
@mock_rds_deprecated
def test_delete_database():
conn = boto.rds.connect_to_region("us-west-2")
list(conn.get_all_dbinstances()).should.have.length_of(0)
conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
list(conn.get_all_dbinstances()).should.have.length_of(1)
conn.delete_dbinstance("db-master-1")
list(conn.get_all_dbinstances()).should.have.length_of(0)
@mock_rds_deprecated
def test_delete_non_existant_database():
conn = boto.rds.connect_to_region("us-west-2")
conn.delete_dbinstance.when.called_with(
"not-a-db").should.throw(BotoServerError)
@mock_rds_deprecated
def test_create_database_security_group():
conn = boto.rds.connect_to_region("us-west-2")
security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group')
security_group.name.should.equal('db_sg')
security_group.description.should.equal("DB Security Group")
list(security_group.ip_ranges).should.equal([])
@mock_rds_deprecated
def test_get_security_groups():
conn = boto.rds.connect_to_region("us-west-2")
list(conn.get_all_dbsecurity_groups()).should.have.length_of(0)
conn.create_dbsecurity_group('db_sg1', 'DB Security Group')
conn.create_dbsecurity_group('db_sg2', 'DB Security Group')
list(conn.get_all_dbsecurity_groups()).should.have.length_of(2)
databases = conn.get_all_dbsecurity_groups("db_sg1")
list(databases).should.have.length_of(1)
databases[0].name.should.equal("db_sg1")
@mock_rds_deprecated
def test_get_non_existant_security_group():
conn = boto.rds.connect_to_region("us-west-2")
conn.get_all_dbsecurity_groups.when.called_with(
"not-a-sg").should.throw(BotoServerError)
@mock_rds_deprecated
def test_delete_database_security_group():
conn = boto.rds.connect_to_region("us-west-2")
conn.create_dbsecurity_group('db_sg', 'DB Security Group')
list(conn.get_all_dbsecurity_groups()).should.have.length_of(1)
conn.delete_dbsecurity_group("db_sg")
list(conn.get_all_dbsecurity_groups()).should.have.length_of(0)
@mock_rds_deprecated
def test_delete_non_existant_security_group():
conn = boto.rds.connect_to_region("us-west-2")
conn.delete_dbsecurity_group.when.called_with(
"not-a-db").should.throw(BotoServerError)
@disable_on_py3()
@mock_rds_deprecated
def test_security_group_authorize():
conn = boto.rds.connect_to_region("us-west-2")
security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group')
list(security_group.ip_ranges).should.equal([])
security_group.authorize(cidr_ip='10.3.2.45/32')
security_group = conn.get_all_dbsecurity_groups()[0]
list(security_group.ip_ranges).should.have.length_of(1)
security_group.ip_ranges[0].cidr_ip.should.equal('10.3.2.45/32')
@disable_on_py3()
@mock_rds_deprecated
def test_add_security_group_to_database():
conn = boto.rds.connect_to_region("us-west-2")
database = conn.create_dbinstance(
"db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group')
database.modify(security_groups=[security_group])
database = conn.get_all_dbinstances()[0]
list(database.security_groups).should.have.length_of(1)
database.security_groups[0].name.should.equal("db_sg")
@mock_ec2_deprecated
@mock_rds_deprecated
def test_add_database_subnet_group():
vpc_conn = boto.vpc.connect_to_region("us-west-2")
vpc = vpc_conn.create_vpc("10.0.0.0/16")
subnet1 = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
subnet2 = vpc_conn.create_subnet(vpc.id, "10.2.0.0/24")
subnet_ids = [subnet1.id, subnet2.id]
conn = boto.rds.connect_to_region("us-west-2")
subnet_group = conn.create_db_subnet_group(
"db_subnet", "my db subnet", subnet_ids)
subnet_group.name.should.equal('db_subnet')
subnet_group.description.should.equal("my db subnet")
list(subnet_group.subnet_ids).should.equal(subnet_ids)
@mock_ec2_deprecated
@mock_rds_deprecated
def test_describe_database_subnet_group():
vpc_conn = boto.vpc.connect_to_region("us-west-2")
vpc = vpc_conn.create_vpc("10.0.0.0/16")
subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
conn = boto.rds.connect_to_region("us-west-2")
conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
conn.create_db_subnet_group("db_subnet2", "my db subnet", [subnet.id])
list(conn.get_all_db_subnet_groups()).should.have.length_of(2)
list(conn.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1)
conn.get_all_db_subnet_groups.when.called_with(
"not-a-subnet").should.throw(BotoServerError)
@mock_ec2_deprecated
@mock_rds_deprecated
def test_delete_database_subnet_group():
vpc_conn = boto.vpc.connect_to_region("us-west-2")
vpc = vpc_conn.create_vpc("10.0.0.0/16")
subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
conn = boto.rds.connect_to_region("us-west-2")
conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
list(conn.get_all_db_subnet_groups()).should.have.length_of(1)
conn.delete_db_subnet_group("db_subnet1")
list(conn.get_all_db_subnet_groups()).should.have.length_of(0)
conn.delete_db_subnet_group.when.called_with(
"db_subnet1").should.throw(BotoServerError)
@disable_on_py3()
@mock_ec2_deprecated
@mock_rds_deprecated
def test_create_database_in_subnet_group():
vpc_conn = boto.vpc.connect_to_region("us-west-2")
vpc = vpc_conn.create_vpc("10.0.0.0/16")
subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
conn = boto.rds.connect_to_region("us-west-2")
conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small',
'root', 'hunter2', db_subnet_group_name="db_subnet1")
database = conn.get_all_dbinstances("db-master-1")[0]
database.subnet_group.name.should.equal("db_subnet1")
@disable_on_py3()
@mock_rds_deprecated
def test_create_database_replica():
conn = boto.rds.connect_to_region("us-west-2")
primary = conn.create_dbinstance(
"db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
replica = conn.create_dbinstance_read_replica(
"replica", "db-master-1", "db.m1.small")
replica.id.should.equal("replica")
replica.instance_class.should.equal("db.m1.small")
status_info = replica.status_infos[0]
status_info.normal.should.equal(True)
status_info.status_type.should.equal('read replication')
status_info.status.should.equal('replicating')
primary = conn.get_all_dbinstances("db-master-1")[0]
primary.read_replica_dbinstance_identifiers[0].should.equal("replica")
conn.delete_dbinstance("replica")
primary = conn.get_all_dbinstances("db-master-1")[0]
list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
@disable_on_py3()
@mock_rds_deprecated
def test_create_cross_region_database_replica():
west_1_conn = boto.rds.connect_to_region("us-west-1")
west_2_conn = boto.rds.connect_to_region("us-west-2")
primary = west_1_conn.create_dbinstance(
"db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1"
replica = west_2_conn.create_dbinstance_read_replica(
"replica",
primary_arn,
"db.m1.small",
)
primary = west_1_conn.get_all_dbinstances("db-master-1")[0]
primary.read_replica_dbinstance_identifiers[0].should.equal("replica")
replica = west_2_conn.get_all_dbinstances("replica")[0]
replica.instance_class.should.equal("db.m1.small")
west_2_conn.delete_dbinstance("replica")
primary = west_1_conn.get_all_dbinstances("db-master-1")[0]
list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
@disable_on_py3()
@mock_rds_deprecated
def test_connecting_to_us_east_1():
# boto does not use us-east-1 in the URL for RDS,
# and that broke moto in the past:
# https://github.com/boto/boto/blob/e271ff09364ea18d9d8b6f4d63d6b0ac6cbc9b75/boto/endpoints.json#L285
conn = boto.rds.connect_to_region("us-east-1")
database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
security_groups=["my_sg"])
database.status.should.equal('available')
database.id.should.equal("db-master-1")
database.allocated_storage.should.equal(10)
database.instance_class.should.equal("db.m1.small")
database.master_username.should.equal("root")
database.endpoint.should.equal(
('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306))
database.security_groups[0].name.should.equal('my_sg')
@disable_on_py3()
@mock_rds_deprecated
def test_create_database_with_iops():
conn = boto.rds.connect_to_region("us-west-2")
database = conn.create_dbinstance(
"db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000)
database.status.should.equal('available')
database.iops.should.equal(6000)
# boto>2.36.0 may change the following property name to `storage_type`
database.StorageType.should.equal('io1')
```
#### File: tests/test_sns/test_publishing.py
```python
from __future__ import unicode_literals
from six.moves.urllib.parse import parse_qs
import boto
from freezegun import freeze_time
import sure # noqa
from moto.packages.responses import responses
from moto import mock_sns_deprecated, mock_sqs_deprecated
@mock_sqs_deprecated
@mock_sns_deprecated
def test_publish_to_sqs():
conn = boto.connect_sns()
conn.create_topic("some-topic")
topics_json = conn.get_all_topics()
topic_arn = topics_json["ListTopicsResponse"][
"ListTopicsResult"]["Topics"][0]['TopicArn']
sqs_conn = boto.connect_sqs()
sqs_conn.create_queue("test-queue")
conn.subscribe(topic_arn, "sqs",
"arn:aws:sqs:us-east-1:123456789012:test-queue")
conn.publish(topic=topic_arn, message="my message")
queue = sqs_conn.get_queue("test-queue")
message = queue.read(1)
message.get_body().should.equal('my message')
@mock_sqs_deprecated
@mock_sns_deprecated
def test_publish_to_sqs_in_different_region():
conn = boto.sns.connect_to_region("us-west-1")
conn.create_topic("some-topic")
topics_json = conn.get_all_topics()
topic_arn = topics_json["ListTopicsResponse"][
"ListTopicsResult"]["Topics"][0]['TopicArn']
sqs_conn = boto.sqs.connect_to_region("us-west-2")
sqs_conn.create_queue("test-queue")
conn.subscribe(topic_arn, "sqs",
"arn:aws:sqs:us-west-2:123456789012:test-queue")
conn.publish(topic=topic_arn, message="my message")
queue = sqs_conn.get_queue("test-queue")
message = queue.read(1)
message.get_body().should.equal('my message')
```
|
{
"source": "jdhare/turbulence_tracing",
"score": 3
}
|
#### File: turbulence_tracing/gaussian_fields/mainGen.py
```python
import numpy as np
import turboGen as tg
import time
import matplotlib.pyplot as plt
import cmpspec
import matplotlib.cm
from mpl_toolkits.mplot3d import Axes3D
# ____ ____ ____ ___ ____ ____ _ _ _ _
# / ___)( _ \( __)/ __)(_ _)( _ \/ )( \( \/ )
# \___ \ ) __/ ) _)( (__ )( ) /) \/ (/ \/ \
# (____/(__) (____)\___) (__) (__\_)\____/\_)(_/
# this is the standard kolmogorov spectrum -5/3
#
class k41:
def evaluate(self, k):
espec = pow(k,-5.0/3.0)
return espec
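# A minimal sketch of how a different spectrum could be plugged in (purely
# illustrative, not part of the original script): anything exposing an
# evaluate(k) method works, e.g. a generic power law E(k) ~ k**(-p).
class powerlaw:
    def __init__(self, p):
        self.p = p
    def evaluate(self, k):
        return pow(k, -self.p)
# e.g. whichspect = powerlaw(5.0/3.0).evaluate reproduces the k41 case above.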
# __ ____ ____ __ ____ __ ____
# / \ ___( \ ( __)( )( __)( ) ( \
# (_/ /(___)) D ( ) _) )( ) _) / (_/\ ) D (
# (__) (____/ (__) (__)(____)\____/(____/
# First case. let's assume 1-D
# GRID RESOLUTION nx
nx = 64
# DOMAIN DEFINITION
lx = 1
# NUMBER OF MODES
nmodes = 100
# SPECIFY THE SPECTRUM THAT WE WANT
# right now only kolmogorov -5/3
inputspec = 'k41'
# PATH folder
pathfolder = './Output'
filename1 = inputspec + '_' + str(nx) + '_' + str(nmodes) + '_modes'
# CALL CLASS SPECTRUM
whichspect = k41().evaluate
# Defining the smallest wavenumber represented by this spectrum
wn1 = 2.0*np.pi/lx
# Summary of the user input
print("SUMMARY OF THE USER INPUTs:")
print("---------------------------")
print("Type of generator: 1D")
print("Spectrum: ", inputspec)
print("Domain size: ", lx)
print("Grid Resolution", nx)
print("Fourier accuracy (modes): ", nmodes)
#
# STARTING...
# Smallest step size
dx = lx/nx
t0 = time.time() # initial time
# --------------------------------------------------
# Run the function TurboGenerator
# --------------------------------------------------
r_x = tg.gaussian1Dcos(lx, nx, nmodes, wn1, whichspect)
#
t1 = time.time() # final time
computing_time = t1 - t0
#
print("It took me ", computing_time, "to generate the 1D turbulence.")
# COMPUTE THE POWER SPECTRUM OF THE 1-D FIELD
# verify that the generated velocities fit the spectrum
knyquist1D, wavenumbers1D, tkespec1D = cmpspec.compute1Dspectrum(r_x, lx, False)
# save the generated spectrum to a text file for later post processing
np.savetxt(pathfolder + '/1D_tkespec_' + filename1 + '.txt', np.transpose([wavenumbers1D, tkespec1D]))
# ____ ____ ____ __ ____ __ ____
# (___ \ ___( \ ( __)( )( __)( ) ( \
# / __/(___)) D ( ) _) )( ) _) / (_/\ ) D (
# (____) (____/ (__) (__)(____)\____/(____/
# First case. let's assume 2-D
# GRID RESOLUTION nx, ny
nx = 64
ny = 64
# DOMAIN DEFINITION
lx = 1
ly = 1
# NUMBER OF MODES
nmodes = 100
# SPECIFY THE SPECTRUM THAT WE WANT
# right now only kolmogorov -5/3
inputspec = 'k41'
# PATH folder
pathfolder = './Output'
filename2 = inputspec + '_' + str(nx) + '_' + str(ny) + '_' + str(nmodes) + '_modes'
# CALL CLASS SPECTRUM
whichspect = k41().evaluate
# Defining the smallest wavenumber represented by this spectrum
wn1 = min(2.0*np.pi/lx, 2.0*np.pi/ly)
# Summary of the user input
print("SUMMARY OF THE USER INPUTs:")
print("---------------------------")
print("Type of generator: 2D")
print("Spectrum: ", inputspec)
print("Domain size: ", lx, ly)
print("Grid Resolution", nx, ny)
print("Fourier accuracy (modes): ", nmodes)
#
# STARTING...
# Smallest step size
dx = lx/nx
dy = ly/ny
t0 = time.time() # initial time
# --------------------------------------------------
# Run the function TurboGenerator
# --------------------------------------------------
r_xy = tg.gaussian2Dcos(lx, ly, nx, ny, nmodes, wn1, whichspect)
t1 = time.time() # final time
computing_time = t1 - t0
print("It took me ", computing_time, "to generate the 2D turbulence.")
# COMPUTE THE POWER SPECTRUM OF THE 2-D FIELD
# verify that the generated velocities fit the spectrum
knyquist2D, wavenumbers2D, tkespec2D = cmpspec.compute2Dspectrum(r_xy, lx, ly, False)
# save the generated spectrum to a text file for later post processing
np.savetxt(pathfolder + '/2D_tkespec_' + filename2 + '.txt', np.transpose([wavenumbers2D, tkespec2D]))
# ____ ____ ____ __ ____ __ ____
# ( __ \ ___( \ ( __)( )( __)( ) ( \
# (__ ((___)) D ( ) _) )( ) _) / (_/\ ) D (
# (____/ (____/ (__) (__)(____)\____/(____/
# First case. let's assume 3-D
# GRID RESOLUTION nx, ny, nz
nx = 64
ny = 64
nz = 64
# DOMAIN DEFINITION
lx = 1
ly = 1
lz = 1
# NUMBER OF MODES
nmodes = 100
# SPECIFY THE SPECTRUM THAT WE WANT
# right now only kolmogorov -5/3
inputspec = 'k41'
# PATH folder
pathfolder = './Output'
filename3 = inputspec + '_' + str(nx) + '_' + str(ny) + '_' + str(nz) + '_' + str(nmodes) + '_modes'
# CALL CLASS SPECTRUM
whichspect = k41().evaluate
# Defining the smallest wavenumber represented by this spectrum
wn1 = min(2.0*np.pi/lx, 2.0*np.pi/ly, 2.0*np.pi/lz)
# Summary of the user input
print("SUMMARY OF THE USER INPUTs:")
print("---------------------------")
print("Type of generator: 3D")
print("Spectrum: ", inputspec)
print("Domain size: ", lx, ly, lz)
print("Grid Resolution", nx, ny, nz)
print("Fourier accuracy (modes): ", nmodes)
#
# STARTING...
# Smallest step size
dx = lx/nx
dy = ly/ny
dz = lz/nz
t0 = time.time() # initial time
# --------------------------------------------------
# Run the function TurboGenerator
# --------------------------------------------------
r_xyz = tg.gaussian3Dcos(lx, ly, lz, nx, ny, nz, nmodes, wn1, whichspect)
t1 = time.time() # final time
computing_time = t1 - t0
print("It took me ", computing_time, "to generate the 3D turbulence.")
# COMPUTE THE POWER SPECTRUM OF THE 2-D FIELD
# verify that the generated velocities fit the spectrum
knyquist3D, wavenumbers3D, tkespec3D = cmpspec.compute3Dspectrum(r_xyz, lx, ly, lz, False)
# save the generated spectrum to a text file for later post processing
np.savetxt(pathfolder + '/3D_tkespec_' + filename3 + '.txt', np.transpose([wavenumbers3D, tkespec3D]))
# ____ __ __ ____ ____ ____ ____ _ _ __ ____ ____
# ( _ \( ) / \(_ _) ( _ \( __)/ ___)/ )( \( ) (_ _)/ ___)
# ) __// (_/\( O ) )( ) / ) _) \___ \) \/ (/ (_/\ )( \___ \
# (__) \____/ \__/ (__) (__\_)(____)(____/\____/\____/(__) (____/
# PLOT THE 1D, 2D, 3D FIELD IN REAL DOMAIN AND RELATIVE POWER SPECTRUM
# ---------------------------------------------------------------------
# Plot 1D-FIELD
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
X = np.arange(0,lx,dx)
plt.plot(X,r_x, 'k-', label='computed')
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlabel('Meter [m]')
plt.ylabel(r'$ \rho(x) $')
plt.legend()
plt.grid()
fig.savefig(pathfolder + '/1D_field_' + filename1 + '.pdf')
# Plot 2D-FIELD
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
X, Y = np.meshgrid(np.arange(0,lx,dx),np.arange(0,ly,dy))
cp = plt.contourf(X, Y, r_xy, cmap = matplotlib.cm.get_cmap('plasma'))
cb = plt.colorbar(cp)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlabel('Meter [m]')
plt.ylabel('Meter [m]')
cb.set_label(r'$ \rho(x,y) $', rotation=270)
plt.grid()
fig.savefig(pathfolder + '/2D_field_' + filename2 + '.pdf')
plt.show()
# Plot 3D-FIELD
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
# X, Y, Z = np.meshgrid(np.arange(0,lx,dx),np.arange(0,ly,dy),np.arange(0,lz,dz))
X, Y = np.meshgrid(np.arange(0,lx,dx),np.arange(0,ly,dy))
cp = plt.contourf(X, Y, r_xyz[:,:,1], cmap = matplotlib.cm.get_cmap('plasma'))
cb = plt.colorbar(cp)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlabel('Meter [m]')
plt.ylabel('Meter [m]')
cb.set_label(r'$ \rho(x,y) $', rotation=270)
plt.grid()
fig.savefig(pathfolder + '/3D_field_slice_' + filename3 + '.pdf')
plt.show()
# --------------------------------------------------------------
# PLOT NUMERICAL AND THEORICAL POWER SPECTRUM
# Plot in log-log
# --------------------------------------------------------------
# PLOT 1-D FIELD SPECTRUM
# Range of wavenumbers from minimum wavenumber wn1 up to 2000
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
wnn = np.arange(wn1, 2000)
l1, = plt.loglog(wnn, whichspect(wnn), 'k-', label='input')
l2, = plt.loglog(wavenumbers1D[1:6], tkespec1D[1:6], 'bo--', markersize=3, markerfacecolor='w', markevery=1, label='computed')
plt.loglog(wavenumbers1D[5:], tkespec1D[5:], 'bo--', markersize=3, markerfacecolor='w', markevery=4)
plt.axis([3, 10000, 1e-7, 1e-1])
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.axvline(x=knyquist1D, linestyle='--', color='black')
plt.xlabel('$\kappa$ [1/m]')
plt.ylabel('$E(\kappa)$ [m$^3$/s$^2$]')
plt.grid()
plt.legend()
fig.savefig(pathfolder + '/1D_tkespec_' + filename1 + '.pdf')
plt.show()
# PLOT 2-D FIELD SPECTRUM
# Range of wavenumbers from minimum wavenumber wn1 up to 2000
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
wnn = np.arange(wn1, 2000)
l1, = plt.loglog(wnn, whichspect(wnn), 'k-', label='input')
l2, = plt.loglog(wavenumbers2D[1:6], tkespec2D[1:6], 'bo--', markersize=3, markerfacecolor='w', markevery=1, label='computed')
plt.loglog(wavenumbers2D[5:], tkespec2D[5:], 'bo--', markersize=3, markerfacecolor='w', markevery=4)
plt.axis([3, 10000, 1e-7, 1e-1])
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.axvline(x=knyquist2D, linestyle='--', color='black')
plt.xlabel('$\kappa$ [1/m]')
plt.ylabel('$E(\kappa)$ [m$^3$/s$^2$]')
plt.grid()
plt.legend()
fig.savefig(pathfolder + '/2D_tkespec_' + filename2 + '.pdf')
plt.show()
# PLOT 3-D FIELD SPECTRUM
# Range of wavenumbers from minimum wavenumber wn1 up to 2000
plt.rc("font", size=10, family='serif')
fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
wnn = np.arange(wn1, 2000)
l1, = plt.loglog(wnn, whichspect(wnn), 'k-', label='input')
l2, = plt.loglog(wavenumbers3D[1:6], tkespec3D[1:6], 'bo--', markersize=3, markerfacecolor='w', markevery=1, label='computed')
plt.loglog(wavenumbers3D[5:], tkespec3D[5:], 'bo--', markersize=3, markerfacecolor='w', markevery=4)
plt.axis([3, 10000, 1e-7, 1e-1])
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.axvline(x=knyquist3D, linestyle='--', color='black')
plt.xlabel('$\kappa$ [1/m]')
plt.ylabel('$E(\kappa)$ [m$^3$/s$^2$]')
plt.grid()
plt.legend()
fig.savefig(pathfolder + '/3D_tkespec_' + filename3 + '.pdf')
plt.show()
# ____ ____ ____ __ __ ____ ____ __ ____ __ ____
# ( __ \ ___( \ ( _ \( ) / \(_ _) ( __)( )( __)( ) ( \
# (__ ((___)) D ( ) __// (_/\( O ) )( ) _) )( ) _) / (_/\ ) D (
# (____/ (____/ (__) \____/ \__/ (__) (__) (__)(____)\____/(____/
# plt.rc("font", size=10, family='serif')
# fig = plt.figure(figsize=(3.5, 2.8), dpi=200, constrained_layout=True)
# ax = fig.gca(projection='3d')
# X, Y = np.meshgrid(np.arange(0,lx,dx),np.arange(0,ly,dy))
# cset = [[],[],[]]
# # this is the example that worked for you:
# Z = r_xyz[0,:,:]
# cset[0] = ax.contourf(Z, X, Y, zdir = 'x', offset = , cmap = matplotlib.cm.get_cmap('plasma'))
# # cset[0] = ax.contourf(X, Y, Z, zdir = 'y', offset = , levels=np.linspace(np.min(Z),np.max(Z),30), cmap = matplotlib.cm.get_cmap('plasma'))
# # now, for the x-constant face, assign the contour to the x-plot-variable:
# # cset[1] = ax.contourf(X, Y, r_xyz[:,:,31], levels=np.linspace(np.min(r_xyz[:,:,31]),np.max(r_xyz[:,:,31]),30), cmap = matplotlib.cm.get_cmap('plasma'))
# # likewise, for the y-constant face, assign the contour to the y-plot-variable:
# # cset[2] = ax.contourf(X, Y, r_xyz[:,:,63] , levels=np.linspace(np.min(r_xyz[:,:,63]),np.max(r_xyz[:,:,63]),30), cmap = matplotlib.cm.get_cmap('plasma'))
# # # setting 3D-axis-limits:
# # ax.set_xlim3d(0,nx)
# # ax.set_ylim3d(0,ny)
# # ax.set_zlim3d(0,nz)
# ax.set_xlabel('X')
# ax.set_ylabel('Y')
# ax.set_zlabel('Z')
# plt.grid()
# fig.savefig(pathfolder + '/3D_field_' + filename3 + '.pdf')
# plt.show()
```
#### File: turbulence_tracing/particle_tracking/example_kitchensink.py
```python
import numpy as np
import particle_tracker as pt
import matplotlib.pyplot as plt
import vtk
from vtk.util import numpy_support as vtk_np
def pvti_readin(filename):
'''
Reads in data from pvti with filename, use this to read in electron number density data
'''
reader = vtk.vtkXMLPImageDataReader()
reader.SetFileName(filename)
reader.Update()
data = reader.GetOutput()
dim = data.GetDimensions()
spacing = np.array(data.GetSpacing())
v = vtk_np.vtk_to_numpy(data.GetCellData().GetArray(0))
n_comp = data.GetCellData().GetArray(0).GetNumberOfComponents()
vec = [int(i-1) for i in dim]
if(n_comp > 1):
vec.append(n_comp)
if(n_comp > 2):
img = v.reshape(vec,order="F")[0:dim[0]-1,0:dim[1]-1,0:dim[2]-1,:]
else:
img = v.reshape(vec,order="F")[0:dim[0]-1,0:dim[1]-1,0:dim[2]-1]
dim = img.shape
return img,dim,spacing
# Kitchen sink test
Np = int(1e5)
## Load in data
print("Loading data...")
vti_file = "./data/x08_rnec-400.pvti"
rnec,dim,spacing = pvti_readin(vti_file)
#vti_file = "./data/x08_Te-300.pvti"
#Te,dim,spacing = pvti_readin(vti_file)
vti_file = "./data/x08_Bvec-400.pvti"
Bvec,dim,spacing = pvti_readin(vti_file)
# Probing direction is along y
M_V2 = dim[2]//2
ne_extent2 = 2*spacing[2]*((M_V2-1)/2)
ne_z = np.linspace(-ne_extent2,ne_extent2,M_V2)
M_V1 = 80
ne_extent1 = 2*spacing[0]*((M_V1-1)/2)
ne_x = np.linspace(-ne_extent1,ne_extent1,M_V1)
M_V = dim[1]//2
ne_extent = 2*spacing[1]*((M_V-1)/2)
ne_y = np.linspace(-ne_extent,ne_extent,M_V)
rnec = rnec[-2*M_V1::2,::2,::2]
#Te = Te[-2*M_V1::2,::2,::2]
Bvec = Bvec[-2*M_V1::2,::2,::2,:]
print("Data loaded...")
fig = plt.figure(dpi=200)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
test = pt.ElectronCube(ne_x,ne_y,ne_z,ne_extent,B_on=True,inv_brems=False,phaseshift=True, probing_direction = 'y')
test.external_ne(rnec)
#test.external_Te(Te)
#test.external_Z(1.0)
test.external_B(Bvec)
test.calc_dndr()
test.set_up_interps()
test.clear_memory()
rnec = None
Bvec = None
## Beam properties
beam_size = 10e-3 # 10 mm
divergence = 0.05e-3 #0.05 mrad, realistic
## Initialise laser beam
ss = pt.init_beam(Np = Np, beam_size=beam_size, divergence = divergence, ne_extent = ne_extent, probing_direction = 'y')
## Propagate rays through ne_cube
rf = test.solve(ss) # output of solve is the rays in (x, theta, y, phi) format
Jf = test.Jf
# Convert to mm, a nicer unit
rf[0:4:2,:] *= 1e3
rx = rf[0,:]
ry = rf[2,:]
amp = np.sqrt(np.abs(Jf[0,:]**2+Jf[1,:]**2))
aEy = np.arctan(np.real(Jf[0,:]/Jf[1,:]))
REy = np.cos(np.angle(Jf[1,:]))**2
fig = plt.figure(dpi=200)
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
bin_edges = 1e3*beam_size*np.linspace(-1.0,1.0,100)
amp_hist,xedges,yedges = np.histogram2d(rx,ry,bins=bin_edges,weights=amp)
im1 = ax1.imshow(amp_hist,extent=[bin_edges[0],bin_edges[-1],bin_edges[0],bin_edges[-1]])
ax1.set_xlim([bin_edges[0],bin_edges[-1]])
ax1.set_ylim([bin_edges[0],bin_edges[-1]])
ax1.set_aspect('equal')
ax1.set_title(r"$\sqrt{E_x^2+E_y^2}$")
fig.colorbar(im1,ax=ax1,orientation='horizontal')
aEy_hist,xedges,yedges = np.histogram2d(rx,ry,bins=bin_edges,weights=aEy)
# Normalise
aEy_hist /= amp_hist
im2 = ax2.imshow(aEy_hist,cmap='coolwarm',extent=[bin_edges[0],bin_edges[-1],bin_edges[0],bin_edges[-1]])
ax2.set_aspect('equal')
ax2.set_title(r"atan$\left(\frac{E_x}{E_y}\right)$")
fig.colorbar(im2,ax=ax2,orientation='horizontal')
REy_hist,xedges,yedges = np.histogram2d(rx,ry,bins=bin_edges,weights=REy)
# Normalise
REy_hist /= amp_hist
im3 = ax3.imshow(REy_hist,cmap='Greys',extent=[bin_edges[0],bin_edges[-1],bin_edges[0],bin_edges[-1]])
ax3.set_aspect('equal')
ax3.set_title(r"cos$^2$(arg$\left(E_y\right)$)")
fig.colorbar(im3,ax=ax3,orientation='horizontal')
fig.savefig("DannyExpRayTrace.png")
plt.show()
```
#### File: turbulence_tracing/particle_tracking/paraxial_solver.py
```python
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym
from scipy.interpolate import RectBivariateSpline
from ipywidgets import FloatProgress
from IPython.display import display  # display() is used to show the progress bar in GridTracer.solve
from turboGen import gaussian3D_FFT, gaussian3Dcos, gaussian2D_FFT, gaussian1D_FFT
def power_spectrum(k,a):
"""Simple function for power laws
Args:
k (float array): wave number
a (float): power law
Returns:
float array: k^-a (note minus sign is assumed!)
"""
return k**-a
def k41_3D(k):
"""Helper function to generate a kolmogorov spectrum in 3D
Args:
k (float array): wave number
Returns:
function: function of k
"""
return power_spectrum(k, 11/3)
def generate_collimated_beam(N, X):
"""Simple helper function to generate a collimate beam
Args:
N (float): number of rays
X (float): size of beam, will generate rays in -X/2 to X/2 in x and y.
Returns:
4xN float array: N rays, represented by x, theta, y, phi
"""
rr0=np.random.rand(4,int(N))
rr0[0,:]-=0.5
rr0[2,:]-=0.5
#x, θ, y, ϕ
scales=np.diag(np.array([X,0,X,0]))
r0=np.matmul(scales, rr0)
return r0
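# Usage sketch (not part of the original module): 1e5 rays filling a 10 unit wide beam,
# assuming the same length units as the density grid.
# r0 = generate_collimated_beam(N=1e5, X=10)   # r0.shape == (4, 100000)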
L = sym.symbols('L', real=True) # used for sympy
def transform(matrix, rays):
'''
Simple wrapper for matrix multiplication
'''
return np.matmul(matrix,rays)
def distance(d):
'''4x4 symbolic matrix for travelling a distance d
See: https://en.wikipedia.org/wiki/Ray_transfer_matrix_analysis
'''
d = sym.Matrix([[1, d],
[0, 1]])
L=sym.zeros(4,4)
L[:2,:2]=d
L[2:,2:]=d
return L
d1=distance(L) #symbolic matrix
Z1=sym.lambdify([L], d1, "numpy") #a function to return a numpy matrix
def gradient_interpolator(ne, x, y):
"""Deceptively simple. First we take the gradient of ne, use a second order centered differences approach
Then we create a bivariate spline to handle interpolation of this gradient field
Args:
ne (NxM float array): electron density
x (M float array): x coordinates
y (N float array): y coordinates
Returns:
RectBivariateSpline tuple: Two functions which take coordinates and return values of the gradient.
"""
grad_ney, grad_nex=np.gradient(ne, y, x)
gx=RectBivariateSpline(y,x,grad_nex)
gy=RectBivariateSpline(y,x,grad_ney)
return gx, gy
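# Usage sketch (assumed example, not from the original module): build gradient
# interpolators for a toy Gaussian density bump and query them at arbitrary points.
# x_demo = np.linspace(-1, 1, 101)
# y_demo = np.linspace(-1, 1, 101)
# ne_demo = np.exp(-(x_demo[None, :]**2 + y_demo[:, None]**2))   # shape (len(y), len(x))
# gx_demo, gy_demo = gradient_interpolator(ne_demo, x_demo, y_demo)
# gx_demo(0.1, 0.2, grid=False)   # d(ne)/dx evaluated at y=0.1, x=0.2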
def deflect_rays(rays, grad_nex,grad_ney, dz, n_cr=1.21e21):
"""Deflects rays at a slice based on the gradient of the electron density
Args:
rays (4xN float): array representing N rays
grad_nex (RectBivariateSpline): function which take coordinates and return values of the gradient
grad_ney ([type]): function which take coordinates and return values of the gradient
dz (float): distance in z which each slice covers. Use a consistent system
n_cr (float): critical density. Change this based on wavelength of laser and units system. Default is 1 um laser in cm^-3
Returns:
4xN float array: rays after deflection
"""
# n_cr defaults to 1.21e21 cm^-3 (1 um laser) and assumes ne has the same units;
# the keyword argument is honoured rather than hard-coding it here.
xs=rays[0,:]
ys=rays[2,:]
dangle=np.zeros_like(rays)
dangle[1,:]=grad_nex(ys, xs, grid=False)*dz/(2*n_cr)
dangle[3,:]=grad_ney(ys, xs, grid=False)*dz/(2*n_cr)
return rays+dangle
def histogram(rays, bin_scale=10, pix_x=1000, pix_y=1000, Lx=10,Ly=10):
"""Bin data into a histogram. Defaults are for a KAF-8300.
Outputs are H, the histogram, and xedges and yedges, the bin edges.
Args:
rays (4xN float): array representing N rays
bin_scale (int, optional): bin size, same in x and y. Defaults to 10.
pix_x (int, optional): number of x pixels in detector plane. Defaults to 1000.
pix_y (int, optional): number of y pixels in detector plane. Defaults to 1000.
Lx (int, optional): x detector size in consistent units. Defaults to 10.
Ly (int, optional): y detector size in consistent units. Defaults to 10.
Returns:
MxN array, M array, N array: binned histogram and bin edges.
"""
x=rays[0,:]
y=rays[2,:]
x=x[~np.isnan(x)]
y=y[~np.isnan(y)]
H, xedges, yedges = np.histogram2d(x, y,
bins=[pix_x//bin_scale, pix_y//bin_scale],
range=[[-Lx/2, Lx/2],[-Ly/2,Ly/2]])
H=H.T
return H, xedges, yedges
def plot_histogram(H, xedges, yedges, ax, clim=None, cmap=None):
"""[summary]
Args:
H (MxN float): histogram
xedges (N float): x bin edges
yedges (M float): y bin edges
ax (matplotlib axis): axis to plot to
clim (tuple, optional): Limits for imshow Defaults to None.
cmap (str, optional): matplotlib colourmap. Defaults to None.
"""
ax.imshow(H, interpolation='nearest', origin='lower', clim=clim, cmap=cmap,
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]], aspect=1)
class GridTracer:
"""
Provides the functions for examining and tracing rays through a grid of electron density.
Inherit from this and implement __init__ for different grid configurations.
"""
def plot_ne_slices(self):
"""Plot 9 slices from the density grid, for inspection
Returns:
fig, ax: matplotlib figure and axis.
"""
fig,ax=plt.subplots(3,3, figsize=(8,8), sharex=True, sharey=True)
ax=ax.flatten()
sc=self.scale/2
for i, a in enumerate(ax):
r=(2*self.N+1)*i//9
d=self.ne_grid[r,:,:]
a.imshow(d, cmap='bwr', extent=[-sc,sc,-sc, sc])
a.set_title("z="+str(round(self.z[r])))
return fig, ax
def solve(self, r0):
"""Trace rays through the turbulent grid
Args:
r0 (4xN float): array of N rays, in their initial configuration
"""
f = FloatProgress(min=0, max=self.ne_grid.shape[0], description='Progress:')
display(f)
self.r0 = r0 # keep the original
dz = self.z[1]-self.z[0]
DZ = Z1(dz) # matrix to push rays by dz
rt = r0.copy() # iterate to save memory, starting at r0
for i, ne_slice in enumerate(self.ne_grid):
f.value = i
gx, gy = gradient_interpolator(ne_slice, self.x, self.y)
rr1 = deflect_rays(rt, gx, gy, dz=dz)
rt = transform(DZ, rr1)
self.rt = rt
def plot_rays(self, clim=None):
H0, xedges0, yedges0 = histogram(self.r0, bin_scale=10, pix_x=1000, pix_y=1000, Lx=10,Ly=10)
Hf, xedgesf, yedgesf = histogram(self.rt, bin_scale=10, pix_x=1000, pix_y=1000, Lx=10,Ly=10)
fig,(ax1,ax2)=plt.subplots(1,2,figsize=(8,4))
plot_histogram(H0, xedges0,yedges0, ax1, clim=clim)
plot_histogram(Hf, xedgesf,yedgesf, ax2, clim=clim)
class TurbulentGrid(GridTracer):
"""Trace rays through a turbulent electron density defined on a grid
"""
def __init__(self, N, spectrum, n_e0, dn_e, scale):
"""generate a turbulent grid.
You can use cm^-3 for density and mm for scales.
This code assumes x,y, and z have the same grid spacing.
If this is so, the scale difference drops out because \nabla n_e/n_cr dz has units of
[mm^-1] [cm^-3]/[cm^-3] [mm] = dimless (radians).
If you are using this class as a guide on how to implement another GridTracer, be careful!
Safest would be to use cm for everything, but they are a bit large for our porpoises.
Args:
N (int): half size of cube, will be 2*N+1
spectrum (function of k): a spectrum, such as k**-11/3
n_e0 (float): mean electron density
dn_e (float): standard deviation of electron density
scale (float): length of a box side.
"""
self.N=N
self.scale=scale
self.x = np.linspace(-scale,scale,2*N+1)
self.y = self.x
self.z = self.x
s3 = gaussian3D_FFT(N, spectrum)
self.ne_grid = n_e0 + dn_e*s3/s3.std()
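if __name__ == "__main__":
    # Minimal end-to-end sketch (not part of the original module). It assumes turboGen
    # provides gaussian3D_FFT with the (N, spectrum) signature imported above, and the
    # density, fluctuation and scale values are illustrative only.
    grid = TurbulentGrid(N=32, spectrum=k41_3D, n_e0=1e18, dn_e=1e17, scale=5.0)
    grid.plot_ne_slices()                          # inspect slices of the generated cube
    r0 = generate_collimated_beam(N=1e4, X=5.0)    # 1e4 rays in a 5 unit wide beam
    grid.solve(r0)                                 # deflect and advance slice by slice
    grid.plot_rays()
    plt.show()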
```
#### File: turbulence_tracing/particle_tracking/particle_tracker.py
```python
import numpy as np
from scipy.integrate import odeint,solve_ivp
from scipy.interpolate import RegularGridInterpolator
from time import time
import scipy.constants as sc
c = sc.c # honestly, this could be 3e8 *shrugs*
class ElectronCube:
"""A class to hold and generate electron density cubes
"""
def __init__(self, x, y, z, extent, B_on = False, inv_brems = False, phaseshift = False, probing_direction = 'z'):
"""
Example:
N_V = 100
M_V = 2*N_V+1
ne_extent = 5.0e-3
ne_x = np.linspace(-ne_extent,ne_extent,M_V)
ne_y = np.linspace(-ne_extent,ne_extent,M_V)
ne_z = np.linspace(-ne_extent,ne_extent,M_V)
Args:
x (float array): x coordinates, m
y (float array): y coordinates, m
z (float array): z coordinates, m
extent (float): physical size, m
"""
self.z,self.y,self.x = z, y, x
self.dx = x[1]-x[0]
self.XX, self.YY, self.ZZ = np.meshgrid(x,y,z, indexing='ij')
self.extent = extent
self.probing_direction = probing_direction
# Logical switches
self.B_on = B_on
self.inv_brems = inv_brems
self.phaseshift = phaseshift
def test_null(self):
"""
Null test, an empty cube
"""
self.ne = np.zeros_like(self.XX)
def test_slab(self, s=1, n_e0=2e23):
"""A slab with a linear gradient in x:
n_e = n_e0 * (1 + s*x/extent)
Will cause a ray deflection in x
Args:
s (int, optional): scale factor. Defaults to 1.
n_e0 ([type], optional): mean density. Defaults to 2e23 m^-3.
"""
self.ne = n_e0*(1.0+s*self.XX/self.extent)
def test_linear_cos(self,s1=0.1,s2=0.1,n_e0=2e23,Ly=1):
"""Linearly growing sinusoidal perturbation
Args:
s1 (float, optional): scale of linear growth. Defaults to 0.1.
s2 (float, optional): amplitude of sinusoidal perturbation. Defaults to 0.1.
n_e0 ([type], optional): mean electron density. Defaults to 2e23 m^-3.
Ly (int, optional): spatial scale of sinusoidal perturbation. Defaults to 1.
"""
self.ne = n_e0*(1.0+s1*self.XX/self.extent)*(1+s2*np.cos(2*np.pi*self.YY/Ly))
def test_exponential_cos(self,n_e0=1e24,Ly=1e-3, s=2e-3):
"""Exponentially growing sinusoidal perturbation
Args:
n_e0 ([type], optional): mean electron density. Defaults to 2e23 m^-3.
Ly (int, optional): spatial scale of sinusoidal perturbation. Defaults to 1e-3 m.
s ([type], optional): scale of exponential growth. Defaults to 2e-3 m.
"""
self.ne = n_e0*10**(self.XX/s)*(1+np.cos(2*np.pi*self.YY/Ly))
def external_ne(self, ne):
"""Load externally generated grid
Args:
ne ([type]): MxMxM grid of density in m^-3
"""
self.ne = ne
def external_B(self, B):
"""Load externally generated grid
Args:
B ([type]): MxMxMx3 grid of B field in T
"""
self.B = B
def external_Te(self, Te, Te_min = 1.0):
"""Load externally generated grid
Args:
Te ([type]): MxMxM grid of electron temperature in eV
"""
self.Te = np.maximum(Te_min,Te)
def external_Z(self, Z):
"""Load externally generated grid
Args:
Z ([type]): MxMxM grid of ionisation
"""
self.Z = Z
def test_B(self, Bmax=1.0):
"""A Bz field with a linear gradient in x:
Bz = Bmax*x/extent
Args:
Bmax ([type], optional): maximum B field, default 1.0 T
"""
self.B = np.zeros(np.append(np.array(self.XX.shape),3))
self.B[:,:,:,2] = Bmax*self.XX/self.extent
def calc_dndr(self, lwl=1053e-9):
"""Generate interpolators for derivatives.
Args:
lwl (float, optional): laser wavelength. Defaults to 1053e-9 m.
"""
self.omega = 2*np.pi*(c/lwl)
nc = 3.14207787e-4*self.omega**2
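# 3.14207787e-4 is eps0*m_e/e**2 in SI units, so nc = eps0*m_e*omega**2/e**2 [m^-3];
# e.g. for a 1053 nm laser this gives nc ~ 1.0e27 m^-3.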
# Find Faraday rotation constant http://farside.ph.utexas.edu/teaching/em/lectures/node101.html
if (self.B_on):
self.VerdetConst = 2.62e-13*lwl**2 # rad m^2/T: multiply by ne [m^-3], B [T] and path length [m] to get rotation in radians
self.ne_nc = self.ne/nc #normalise to critical density
#More compact notation is possible here, but we are explicit
self.dndx = -0.5*c**2*np.gradient(self.ne_nc,self.x,axis=0)
self.dndy = -0.5*c**2*np.gradient(self.ne_nc,self.y,axis=1)
self.dndz = -0.5*c**2*np.gradient(self.ne_nc,self.z,axis=2)
self.dndx_interp = RegularGridInterpolator((self.x, self.y, self.z), self.dndx, bounds_error = False, fill_value = 0.0)
self.dndy_interp = RegularGridInterpolator((self.x, self.y, self.z), self.dndy, bounds_error = False, fill_value = 0.0)
self.dndz_interp = RegularGridInterpolator((self.x, self.y, self.z), self.dndz, bounds_error = False, fill_value = 0.0)
# NRL formulary inverse brems - cheers <NAME> for coding in Python
# Converted to rate coefficient by multiplying by group velocity in plasma
def kappa(self):
# Useful subroutines
def omega_pe(ne):
'''Calculate electron plasma freq. Output units are rad/sec. From nrl pp 28'''
return 5.64e4*np.sqrt(ne)
def v_the(Te):
'''Calculate electron thermal speed. Provide Te in eV. Returns result in m/s'''
return 4.19e5*np.sqrt(Te)
def V(ne, Te, Z, omega):
o_pe = omega_pe(ne)
o_max = np.copy(o_pe)
o_max[o_pe < omega] = omega
L_classical = Z*sc.e/Te
L_quantum = 2.760428269727312e-10/np.sqrt(Te) # sc.hbar/np.sqrt(sc.m_e*sc.e*Te)
L_max = np.maximum(L_classical, L_quantum)
return o_max*L_max
def coloumbLog(ne, Te, Z, omega):
return np.maximum(2.0,np.log(v_the(Te)/V(ne, Te, Z, omega)))
ne_cc = self.ne*1e-6
o_pe = omega_pe(ne_cc)
CL = coloumbLog(ne_cc, self.Te, self.Z, self.omega)
return 3.1e-5*self.Z*c*np.power(ne_cc/self.omega,2)*CL*np.power(self.Te, -1.5) # 1/s
# Plasma refractive index
def n_refrac(self):
def omega_pe(ne):
'''Calculate electron plasma freq. Output units are rad/sec. From nrl pp 28'''
return 5.64e4*np.sqrt(ne)
ne_cc = self.ne*1e-6
o_pe = omega_pe(ne_cc)
o_pe[o_pe > self.omega] = self.omega
return np.sqrt(1.0-(o_pe/self.omega)**2)
def set_up_interps(self):
# Electron density
self.ne_interp = RegularGridInterpolator((self.x, self.y, self.z), self.ne, bounds_error = False, fill_value = 0.0)
# Magnetic field
if(self.B_on):
self.Bx_interp = RegularGridInterpolator((self.x, self.y, self.z), self.B[:,:,:,0], bounds_error = False, fill_value = 0.0)
self.By_interp = RegularGridInterpolator((self.x, self.y, self.z), self.B[:,:,:,1], bounds_error = False, fill_value = 0.0)
self.Bz_interp = RegularGridInterpolator((self.x, self.y, self.z), self.B[:,:,:,2], bounds_error = False, fill_value = 0.0)
# Inverse Bremsstrahlung
if(self.inv_brems):
self.kappa_interp = RegularGridInterpolator((self.x, self.y, self.z), self.kappa(), bounds_error = False, fill_value = 0.0)
# Phase shift
if(self.phaseshift):
self.refractive_index_interp = RegularGridInterpolator((self.x, self.y, self.z), self.n_refrac(), bounds_error = False, fill_value = 1.0)
def plot_midline_gradients(self,ax,probing_direction):
"""I actually don't know what this does. Presumably plots the gradients half way through the box? Cool.
Args:
ax ([type]): [description]
probing_direction ([type]): [description]
"""
N_V = self.x.shape[0]//2
if(probing_direction == 'x'):
ax.plot(self.y,self.dndx[:,N_V,N_V])
ax.plot(self.y,self.dndy[:,N_V,N_V])
ax.plot(self.y,self.dndz[:,N_V,N_V])
elif(probing_direction == 'y'):
ax.plot(self.y,self.dndx[N_V,:,N_V])
ax.plot(self.y,self.dndy[N_V,:,N_V])
ax.plot(self.y,self.dndz[N_V,:,N_V])
elif(probing_direction == 'z'):
ax.plot(self.y,self.dndx[N_V,N_V,:])
ax.plot(self.y,self.dndy[N_V,N_V,:])
ax.plot(self.y,self.dndz[N_V,N_V,:])
else: # Default to y
ax.plot(self.y,self.dndx[N_V,:,N_V])
ax.plot(self.y,self.dndy[N_V,:,N_V])
ax.plot(self.y,self.dndz[N_V,:,N_V])
def dndr(self,x):
"""returns the gradient at the locations x
Args:
x (3xN float): N [x,y,z] locations
Returns:
3 x N float: N [dx,dy,dz] electron density gradients
"""
grad = np.zeros_like(x)
grad[0,:] = self.dndx_interp(x.T)
grad[1,:] = self.dndy_interp(x.T)
grad[2,:] = self.dndz_interp(x.T)
return grad
# Attenuation due to inverse bremsstrahlung
def atten(self,x):
if(self.inv_brems):
return -self.kappa_interp(x.T)
else:
return 0.0
# Phase shift introduced by refractive index
def phase(self,x):
if(self.phaseshift):
return self.omega*self.refractive_index_interp(x.T)
else:
return 0.0
def get_ne(self,x):
return self.ne_interp(x.T)
def get_B(self,x):
B = np.array([self.Bx_interp(x.T),self.By_interp(x.T),self.Bz_interp(x.T)])
return B
def neB(self,x,v):
"""returns the VerdetConst ne B.v
Args:
x (3xN float): N [x,y,z] locations
v (3xN float): N [vx,vy,vz] velocities
Returns:
N float: N values of ne B.v
"""
if(self.B_on):
ne_N = self.get_ne(x)
Bv_N = np.sum(self.get_B(x)*v,axis=0)
pol = self.VerdetConst*ne_N*Bv_N
else:
pol = 0.0
return pol
def solve(self, s0, method = 'RK45'):
# Need to make sure all rays have left volume
# Conservative estimate of diagonal across volume
# Then can backproject to surface of volume
t = np.linspace(0.0,np.sqrt(8.0)*self.extent/c,2)
s0 = s0.flatten() #odeint insists
start = time()
dsdt_ODE = lambda t, y: dsdt(t, y, self)
sol = solve_ivp(dsdt_ODE, [0,t[-1]], s0, t_eval=t, method = method)
finish = time()
print("Ray trace completed in:\t",finish-start,"s")
Np = s0.size//9
self.sf = sol.y[:,-1].reshape(9,Np)
# Fix amplitudes
self.sf[6,self.sf[6,:] < 0.0] = 0.0
self.rf,self.Jf = ray_to_Jonesvector(self.sf, self.extent, probing_direction = self.probing_direction)
return self.rf
def clear_memory(self):
"""
Clears variables not needed by solve method, saving memory
Can also use after calling solve to clear ray positions - important when running large number of rays
"""
self.dndx = None
self.dndy = None
self.dndz = None
self.ne = None
self.ne_nc = None
self.sf = None
self.rf = None
# ODEs of photon paths
def dsdt(t, s, ElectronCube):
"""Returns an array with the gradients and velocity per ray for ode_int
Args:
t (float array): I think this is a dummy variable for ode_int - our problem is time invariant
s (9N float array): flattened 9xN array of rays used by ode_int
ElectronCube (ElectronCube): an ElectronCube object which can calculate gradients
Returns:
9N float array: flattened array for ode_int
"""
Np = s.size//9
s = s.reshape(9,Np)
sprime = np.zeros_like(s)
# Velocity and position
v = s[3:6,:]
x = s[:3,:]
# Amplitude, phase and polarisation
a = s[6,:]
p = s[7,:]
r = s[8,:]
sprime[3:6,:] = ElectronCube.dndr(x)
sprime[:3,:] = v
sprime[6,:] = ElectronCube.atten(x)*a
sprime[6,a < 1e-5] = 0.0
sprime[7,:] = ElectronCube.phase(x)
sprime[8,:] = ElectronCube.neB(x,v)
return sprime.flatten()
def init_beam(Np, beam_size, divergence, ne_extent, probing_direction = 'z', coherent = False):
"""[summary]
Args:
Np (int): Number of photons
beam_size (float): beam radius, m
divergence (float): beam divergence, radians
ne_extent (float): size of electron density cube, m. Used to back propagate the rays to the start
probing_direction (str): direction of probing. I suggest 'z', the best tested
Returns:
s0, 9 x N float: N rays with (x, y, z, vx, vy, vz) in m, m/s and amplitude, phase and polarisation (a, p, r)
"""
s0 = np.zeros((9,Np))
# position, uniformly within a circle
t = 2*np.pi*np.random.rand(Np) #polar angle of position
u = np.random.rand(Np)+np.random.rand(Np) # radial coordinate of position
u[u > 1] = 2-u[u > 1]
# angle
ϕ = np.pi*np.random.rand(Np) #azimuthal angle of velocity
χ = divergence*np.random.randn(Np) #polar angle of velocity
if(probing_direction == 'x'):
# Initial velocity
s0[3,:] = c * np.cos(χ)
s0[4,:] = c * np.sin(χ) * np.cos(ϕ)
s0[5,:] = c * np.sin(χ) * np.sin(ϕ)
# Initial position
s0[0,:] = -ne_extent
s0[1,:] = beam_size*u*np.cos(t)
s0[2,:] = beam_size*u*np.sin(t)
elif(probing_direction == 'y'):
# Initial velocity
s0[4,:] = c * np.cos(χ)
s0[3,:] = c * np.sin(χ) * np.cos(ϕ)
s0[5,:] = c * np.sin(χ) * np.sin(ϕ)
# Initial position
s0[0,:] = beam_size*u*np.cos(t)
s0[1,:] = -ne_extent
s0[2,:] = beam_size*u*np.sin(t)
elif(probing_direction == 'z'):
# Initial velocity
s0[3,:] = c * np.sin(χ) * np.cos(ϕ)
s0[4,:] = c * np.sin(χ) * np.sin(ϕ)
s0[5,:] = c * np.cos(χ)
# Initial position
s0[0,:] = beam_size*u*np.cos(t)
s0[1,:] = beam_size*u*np.sin(t)
s0[2,:] = -ne_extent
else: # Default to y
print("Default to y")
# Initial velocity
s0[4,:] = c * np.cos(χ)
s0[3,:] = c * np.sin(χ) * np.cos(ϕ)
s0[5,:] = c * np.sin(χ) * np.sin(ϕ)
# Initial position
s0[0,:] = beam_size*u*np.cos(t)
s0[1,:] = -ne_extent
s0[2,:] = beam_size*u*np.sin(t)
# Initialise amplitude, phase and polarisation
if coherent is False:
#then it's incoherent, random phase
print('Incoherent')
phase = 2*np.pi*np.random.rand(Np)
if coherent is True:
print('Coherent')
phase = np.zeros(Np)
s0[6,:] = 1.0
s0[7,:] = phase
s0[8,:] = 0.0
return s0
# Need to backproject to ne volume, then find angles
def ray_to_Jonesvector(ode_sol, ne_extent, probing_direction = 'z'):
"""Takes the output from the 9D solver and returns 6D rays for ray-transfer matrix techniques.
Effectively finds how far the ray is from the end of the volume, returns it to the end of the volume.
Args:
ode_sol (9xN float): N rays in (x,y,z,vx,vy,vz,a,p,r) format - positions in m, velocities in m/s, then amplitude, phase and polarisation
ne_extent (float): edge length of cube, m
probing_direction (str): x, y or z.
Returns:
ray_p (4xN float), ray_J (2xN complex): ray positions/angles [x, theta, y, phi] and Jones vectors [E_x, E_y]
"""
Np = ode_sol.shape[1] # number of photons
ray_p = np.zeros((4,Np))
ray_J = np.zeros((2,Np),dtype=complex)  # np.complex was removed in modern NumPy; use the builtin
x, y, z, vx, vy, vz = ode_sol[0], ode_sol[1], ode_sol[2], ode_sol[3], ode_sol[4], ode_sol[5]
# Resolve distances and angles
# YZ plane
if(probing_direction == 'x'):
t_bp = (x-ne_extent)/vx
# Positions on plane
ray_p[0] = y-vy*t_bp
ray_p[2] = z-vz*t_bp
# Angles to plane
ray_p[1] = np.arctan(vy/vx)
ray_p[3] = np.arctan(vz/vx)
# XZ plane
elif(probing_direction == 'y'):
t_bp = (y-ne_extent)/vy
# Positions on plane
ray_p[0] = x-vx*t_bp
ray_p[2] = z-vz*t_bp
# Angles to plane
ray_p[1] = np.arctan(vx/vy)
ray_p[3] = np.arctan(vz/vy)
# XY plane
elif(probing_direction == 'z'):
t_bp = (z-ne_extent)/vz
# Positions on plane
ray_p[0] = x-vx*t_bp
ray_p[2] = y-vy*t_bp
# Angles to plane
ray_p[1] = np.arctan(vx/vz)
ray_p[3] = np.arctan(vy/vz)
# Resolve Jones vectors
amp,phase,pol = ode_sol[6], ode_sol[7], ode_sol[8]
# Assume initially polarised along y
E_x_init = np.zeros(Np)
E_y_init = np.ones(Np)
# Perform rotation for polarisation, multiplication for amplitude, and complex rotation for phase
ray_J[0] = amp*(np.cos(phase)+1.0j*np.sin(phase))*(np.cos(pol)*E_x_init-np.sin(pol)*E_y_init)
ray_J[1] = amp*(np.cos(phase)+1.0j*np.sin(phase))*(np.sin(pol)*E_x_init+np.cos(pol)*E_y_init)
# ray_p [x,phi,y,theta], ray_J [E_x,E_y]
return ray_p,ray_J
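if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): trace a small beam
    # through the built-in linear-slab test density. Values are illustrative only.
    N_V = 50
    M_V = 2 * N_V + 1
    ne_extent = 5.0e-3  # m
    ne_x = np.linspace(-ne_extent, ne_extent, M_V)
    ne_y = np.linspace(-ne_extent, ne_extent, M_V)
    ne_z = np.linspace(-ne_extent, ne_extent, M_V)
    cube = ElectronCube(ne_x, ne_y, ne_z, ne_extent)
    cube.test_slab(s=1, n_e0=2e23)   # linear density gradient in x
    cube.calc_dndr()
    cube.set_up_interps()
    s0 = init_beam(Np=1000, beam_size=2e-3, divergence=5e-5, ne_extent=ne_extent)
    rf = cube.solve(s0)   # rf is (x, theta, y, phi) at the exit plane; cube.Jf holds the Jones vectors
    print(rf.shape)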
```
#### File: turbulence_tracing/particle_tracking/polarisation_ray_transfer_matrix.py
```python
import numpy as np
import matplotlib.pyplot as plt
'''
Example:
###INITIALISE RAYS###
#Rays are a 6 vector of x, theta, y, phi, E_x, E_y, where E_x and E_y can be complex, and the rest are scalars.
#here we initialise 1e7 randomly distributed rays
rr0=np.random.rand(6,int(1e7))
rr0[0,:]-=0.5 #rand generates [0,1], so we recentre [-0.5,0.5]
rr0[2,:]-=0.5
rr0[4,:]-=0.5 #rand generates [0,1], so we recentre [-0.5,0.5]
rr0[5,:]-=0.5
#x, θ, y, ϕ
scales=np.diag(np.array([10,0,10,0,1,1j])) #set angles to 0, collimated beam. x, y in [-5,5]. Circularly polarised beam, E_x = iE_y
rr0=np.matmul(scales, rr0)
r0=circular_aperture(5, rr0) #cut out a circle
### Shadowgraphy, no polarisation
## focal_plane: determines where the focal plane is. If your object is 10 mm long, focal_plane = 5 will
## make the focal plane in the middle of the object.
s = Shadowgraphy(rr0, L = 400, R = 25, focal_plane=5)
s.solve()
s.histogram(bin_scale = 25)
fig, axs = plt.subplots(figsize=(6.67, 6))
cm='gray'
clim=[0,100]
s.plot(axs, clim=clim, cmap=cm)
### Faraday, with a polarisation β, in degrees, which puts the axis of extinction at beta degrees to the y direction.
### that is, beta = 0 extinguishes E_y, beta = 90 extinguishes E_x
### of course, in this example we have E_x = i E_y, so all the polariser will do is reduce the intensity.
## focal_plane: determines where the focal plane is. If your object is 10 mm long, focal_plane = 5 will
## make the focal plane in the middle of the object.
f = Faraday(rr0, L = 400, R = 25, focal_plane=5)
f.solve(β = 80)
f.histogram(bin_scale = 25)
fig, axs = plt.subplots(figsize=(6.67, 6))
cm='gray'
clim=[0,100]
f.plot(axs, clim=clim, cmap=cm)
'''
I = np.array([[1, 0],
[0, 1]])
def lens(f1,f2):
'''6x6 matrix for a thin lens, focal lengths f1 and f2 in orthogonal axes
See: https://en.wikipedia.org/wiki/Ray_transfer_matrix_analysis
'''
l1= np.array([[1, 0],
[-1/f1, 1]])
l2= np.array([[1, 0],
[-1/f2, 1]])
L=np.zeros((6,6))
L[:2,:2]=l1
L[2:4,2:4]=l2
L[4:,4:]=I
return L
def sym_lens(f):
'''
helper function to create an axisymmetric lens
'''
return lens(f,f)
def distance(d):
'''6x6 matrix for travelling a distance d
See: https://en.wikipedia.org/wiki/Ray_transfer_matrix_analysis
'''
d = np.array([[1, d],
[0, 1]])
L=np.zeros((6,6))
L[:2,:2]=d
L[2:4,2:4]=d
L[4:,4:]=I
return L
def polariser(β):
'''6x6 matrix for a polariser with the axis of extinction at β radians to the vertical
See: https://en.wikipedia.org/wiki/Ray_transfer_matrix_analysis
and https://en.wikipedia.org/wiki/Jones_calculus
'''
L=np.zeros((6,6))
L[:2,:2]=I
L[2:4,2:4]=I
L[4:,4:]=np.array([[np.cos(β)**2, np.cos(β)*np.sin(β)],
[np.cos(β)*np.sin(β), np.sin(β)**2]])
return L
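# Sanity-check note (added, not original): with the Jones block above,
# polariser(0) maps (E_x, E_y) -> (E_x, 0) and polariser(np.pi/2) maps it to (0, E_y),
# matching the "beta = 0 extinguishes E_y" convention in the module docstring.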
def circular_aperture(R, rays):
'''
Filters rays to find those inside a radius R
'''
filt = rays[0,:]**2+rays[2,:]**2 > R**2
rays[:,filt]=None
return rays
def rect_aperture(Lx, Ly, rays):
'''
Filters rays inside a rectangular aperture, total size 2*Lx x 2*Ly
'''
filt1 = (rays[0,:]**2 > Lx**2)
filt2 = (rays[2,:]**2 > Ly**2)
filt = filt1 | filt2  # a ray is blocked if it lies outside the aperture in either x or y
rays[:,filt]=None
return rays
def knife_edge(axis, rays, edge = 1e-1):
'''
Filters rays using a knife edge.
Default is a knife edge in y, can also do a knife edge in x.
'''
if axis == 'y':  # 'is' compares identity, not value
a=2
else:
a=0
filt = rays[a,:] < edge
rays[:,filt]=None
return rays
class Rays:
"""
Inheritable class for ray diagnostics.
"""
def __init__(self, r0, L=400, R=25, Lx=18, Ly=13.5, focal_plane = 0):
"""Initialise ray diagnostic.
Args:
r0 (6xN float array): N rays, [x, theta, y, phi, Ex, Ey]
L (int, optional): Length scale L. First lens is at L. Defaults to 400.
R (int, optional): Radius of lenses. Defaults to 25.
Lx (int, optional): Detector size in x. Defaults to 18.
Ly (float, optional): Detector size in y. Defaults to 13.5.
focal_plane (float, optional): Where the focus of the first optic should lie - 0 means at the near side of the density cube. Defaults to 0.
"""
self.r0, self.L, self.R, self.Lx, self.Ly, self.focal_plane = r0, L, R, Lx, Ly, focal_plane
def histogram(self, bin_scale=10, pix_x=3448, pix_y=2574, clear_mem=False):
"""Bin data into a histogram. Defaults are for a KAF-8300.
Outputs are H, the histogram, and xedges and yedges, the bin edges.
Args:
bin_scale (int, optional): bin size, same in x and y. Defaults to 10.
pix_x (int, optional): number of x pixels in detector plane. Defaults to 3448.
pix_y (int, optional): number of y pixels in detector plane. Defaults to 2574.
"""
x=self.rf[0,:]
y=self.rf[2,:]
nonans = ~np.isnan(x)
x=x[nonans]
y=y[nonans]
#treat the imaginary and real parts of E_x and E_y all separately.
E_x_real = np.real(self.rf[4,:])
E_x_imag = np.imag(self.rf[4,:])
E_y_real = np.real(self.rf[5,:])
E_y_imag = np.imag(self.rf[5,:])
E_x_real = E_x_real[nonans]
E_x_imag = E_x_imag[nonans]
E_y_real = E_y_real[nonans]
E_y_imag = E_y_imag[nonans]
## create four separate histograms for the real and imaginary parts of E_x and E-y
self.H_Ex_real, self.xedges, self.yedges = np.histogram2d(x, y,
bins=[pix_x//bin_scale, pix_y//bin_scale],
range=[[-self.Lx/2, self.Lx/2],[-self.Ly/2,self.Ly/2]],
density = False, weights = E_x_real)
self.H_Ex_imag, self.xedges, self.yedges = np.histogram2d(x, y,
bins=[pix_x//bin_scale, pix_y//bin_scale],
range=[[-self.Lx/2, self.Lx/2],[-self.Ly/2,self.Ly/2]],
density = False, weights = E_x_imag)
self.H_Ey_real, self.xedges, self.yedges = np.histogram2d(x, y,
bins=[pix_x//bin_scale, pix_y//bin_scale],
range=[[-self.Lx/2, self.Lx/2],[-self.Ly/2,self.Ly/2]],
density = False, weights = E_y_real)
self.H_Ey_imag, self.xedges, self.yedges = np.histogram2d(x, y,
bins=[pix_x//bin_scale, pix_y//bin_scale],
range=[[-self.Lx/2, self.Lx/2],[-self.Ly/2,self.Ly/2]],
density = False, weights = E_y_imag)
# Recontruct the complex valued E_x and E_y components
self.H_Ex = self.H_Ex_real+1j*self.H_Ex_imag
self.H_Ey = self.H_Ey_real+1j*self.H_Ey_imag
# Find the intensity using complex conjugates. Take the real value to remove very small (numerical error) imaginary components
self.H = np.real(self.H_Ex*np.conj(self.H_Ex) + self.H_Ey*np.conj(self.H_Ey))
self.H = self.H.T
# Optional - clear ray attributes to save memory
if(clear_mem):
self.clear_rays()
def plot(self, ax, clim=None, cmap=None):
ax.imshow(self.H, interpolation='nearest', origin='lower', clim=clim, cmap=cmap,
extent=[self.xedges[0], self.xedges[-1], self.yedges[0], self.yedges[-1]])
def clear_rays(self):
'''
Clears the r0 and rf variables to save memory
'''
self.r0 = None
self.rf = None
class Shadowgraphy(Rays):
def solve(self):
rl1=np.matmul(distance(self.L - self.focal_plane), self.r0) #displace rays to lens. Accounts for object with depth
rc1=circular_aperture(self.R, rl1) # cut off
r2=np.matmul(sym_lens(self.L/2), rc1) #lens 1
rl2=np.matmul(distance(3*self.L/2), r2) #displace rays to lens 2.
rc2=circular_aperture(self.R, rl2) # cut off
r3=np.matmul(sym_lens(self.L/3), rc2) #lens 2
rd3=np.matmul(distance(self.L), r3) #displace rays to detector
self.rf = rd3
class Faraday(Rays):
def solve(self, β = 3.0):
rl1=np.matmul(distance(self.L - self.focal_plane), self.r0) #displace rays to lens. Accounts for object with depth
rc1=circular_aperture(self.R, rl1) # cut off
r2=np.matmul(sym_lens(self.L/2), rc1) #lens 1
rl2=np.matmul(distance(3*self.L/2), r2) #displace rays to lens 2.
rc2=circular_aperture(self.R, rl2) # cut off
r3=np.matmul(sym_lens(self.L/3), rc2) #lens 2
rp3=np.matmul(distance(self.L), r3) #displace rays to polariser
β = β * np.pi/180
rd3=np.matmul(polariser(β), rp3) #pass polariser rays to detector
self.rf = rd3
```
|
{
"source": "jdhask/gcpy_extended",
"score": 3
}
|
#### File: jdhask/gcpy_extended/planeflight_io.py
```python
import pandas as pd
from astropy.io import ascii as asc # For reading ascii tables (only needed if reading in output files!)
from datetime import datetime
import utils as ut
import numpy as np
import sys
import os
def _get_optional_diags(print_diag_options: bool = False,) :
"""Function to list. get all optional Diagnostic Quantites you can get with Planeflight."""
# grabbed from: http://wiki.seas.harvard.edu/geos-chem/index.php/Planeflight_diagnostic
# and from inside planeflight_mod.f90
diag_dict= dict({"RO2": "Concentration of RO2 family",
"AN": "Concentration of AN family",
"NOy": "Concentration of NOy family",
"GMAO_TEMP":"Temperature",
"GMAO_ABSH":"Absolute humidity",
"GMAO_SURF":"Aerosol surface area",
"GMAO_PSFC":"Surface pressure",
"GMAO_UWND":"Zonal winds",
"GMAO_VWND":"Meridional winds",
"GMAO_IIEV":"GEOS-Chem grid box index, longitude",
"GMAO_JJEV":"GEOS-Chem grid box index, latitude",
"GMAO_LLEV":"GEOS-Chem grid box index, altitude",
"GMAO_RELH":"Relative humidity",
#"GMAO_PVRT":"Ertel's potential vorticity", # Currently disabled, State_Met%PV is not defined.
"GMAO_PSLV":"Sea level pressure",
"GMAO_AVGW":"Water vapor mixing ratio",
"GMAO_THTA":"Potential temperature",
"GMAO_PRES":"Pressure at center of grid box",
"AODC_SULF":"Column aerosol optical depth for sulfate",
"AODC_BLKC":"Column aerosol optical depth for black carbon",
"AODC_ORGC":"Column aerosol optical depth for organic carbon",
"AODC_SALA":"Column aerosol optical depth for accumulation mode sea salt",
"AODC_SALC":"Column aerosol optical depth for coarse mode sea salt",
"AODC_DUST":"Column aerosol optical depth for dust",
"AODB_SULF":"Column aerosol optical depth for sulfate below the aircraft",
"AODB_BLKC":"Column aerosol optical depth for black carbon below the aircraft",
"AODB_ORGC":"Column aerosol optical depth for organic carbon below the aircraft",
"AODB_SALA":"Column aerosol optical depth for accumulation mode sea salt below the aircraft",
"AODB_SALC":"Column aerosol optical depth for coarse mode sea salt below the aircraft",
"AODB_DUST":"Column aerosol optical depth for dust below the aircraft",
"HG2_FRACG":"Fraction of Hg(II) in the gas phase", "HG2_FRACP":"Fraction of Hg(II) in the particle phase",
"ISOR_HPLUS":"ISORROPIA H+",
"ISOR_PH":"ISORROPIA pH (non-ideal system, so pH can be negative)",
"ISOR_AH2O":"ISORROPIA aerosol water",
"ISOR_HSO4":"ISORROPIA bifulfate",
"TIME_LT":"Local time",
"GMAO_ICE00": "the fraction of each grid box that has 0% to +10% of sea ice coverage",
"GMAO_ICE10" :"the fraction of each grid box that has 10% to +20% of sea ice coverage",
"GMAO_ICE20" : "the fraction of each grid box that has 20% to +30% of sea ice coverage",
"GMAO_ICE30" : "the fraction of each grid box that has 30% to +40% of sea ice coverage",
"GMAO_ICE40" : "the fraction of each grid box that has 40% to +50% of sea ice coverage",
"GMAO_ICE50" : "the fraction of each grid box that has 50% to +60% of sea ice coverage",
"GMAO_ICE60": "the fraction of each grid box that has 60% to +70% of sea ice coverage",
"GMAO_ICE70" : "the fraction of each grid box that has 70% to +80% of sea ice coverage",
"GMAO_ICE80": "the fraction of each grid box that has 80% to +90% of sea ice coverage",
"GMAO_ICE90": "the fraction of each grid box that has 90% to +100% of sea ice coverage",
#"GAMM_EPOX":"Uptake coefficient for EPOX", # don't currently work with flexchem
#"GAMM_IMAE":"Uptake coefficient for IMAE",
#"GAMM_ISOPN":"Uptake coefficient for ISOPN",
#"GAMM_DHDN":"Uptake coefficient for DHDN",
#"GAMM_GLYX":"Uptake coefficient for GLYX",
"AQAER_RAD":"Aqueous aerosol radius",
"AQAER_SURF":"Aqueous aerosol surface area"})
# Print off the optional diagnostics...
if print_diag_options is True:
k=[*diag_dict]
print('Diagnositic options for planeflight.dat are as follows:')
for i in range(0, len(k)):
v=diag_dict.get(k[i])
print("{:<15} {:<100}".format(k[i], v))
return diag_dict
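# Usage sketch (not part of the original module):
# diag_dict = _get_optional_diags(print_diag_options=True)  # prints the table of available options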
def make_planeflightdat_files(outpath: str,
datetimes,
lat_arr,
lon_arr,
pres_arr= [],
alt_arr = [],
tracers: list = [],
input_file: str ='',
typestr: str= '',
username: str = 'user',
overwrite: bool = True,
drop_dupes: bool = False,
diags: list = ['all'],
diags_minus: list =[],
print_diag_options: bool = False):
"""Function to create planeflight.dat files in correct format for GEOS-Chem.
Args:
----
outpath: string of path to save the output planeflight.dat files at.
datetimes: Pandas series of datetimes where obs to be collected at.
lat_arr: Array of latitudes where observations take at (degrees N)
lon_arr: Array of longitudes where observations take at (degrees N)
# Planeflight can take either alt or pressure. Must specify one.
pres_arr: Array of pressures in hPa where observations take at
alt_arr: Array of altitudes where observations take at (meters)
# You can tell this function what tracers to sample with planeflight
# by either passing them as a list or by passing your input file.
# That option will use all advected species.Not passing either arg will
# make a file without any tracers.
input_file: String of path to your GEOS-Chem input file (to read in tracer names).
tracers: List of tracers you want to sample using planeflight.dat
Optional Arguements:
-------------------
typestr: String of the "campaign" or the "type" of aircraft obs collected on. This is printed in the file.
NOTE: TypeStr must NOT begin with "S" unless altitutes passed. Will cause GEOS-Chem erorrs.
username: String of user who created files. Optional. Arg in header of file.
overwrite: Boolean of whether to overwrite existing files at outpath with this name or not.
drop_dupes: Boolean of whether to drop duplicate rows
diags: List of additional diagnostics to include (beyond tracers). Default option is
to include ALL available additional diagnositcs.
diags_minus: List of diagnostics you don't want to include (e.g. if you use "all").
print_diag_options: Boolean of whether to print other available diagnostic options.
Output:
------
Files named planeflight.dat.YYYYMMDD written to outpath.
"""
if (len(pres_arr)==0) and (len(alt_arr)==0):
sys.exit('Please specify either an altitude or pressure array.')
if (len(pres_arr)>0) and (len(alt_arr)>0):
sys.exit('Please specify either an altitude or pressure array, not both.'+\
' Planeflight.dat converts altitudes to pressures.')
if len(typestr) > 7:
sys.exit('Please change the campaign string used as a type to be'+\
' less than 7 chars, which is the max allowed by GEOS-Chem.')
if typestr[0]=='S':
print('WARNING: GEOS-Chem will assume you are passing the model ' +\
'altitude values if you pass a typestr value that begins with ' +\
'the letter "S". If you are using pressures as input it is best '+\
'to pass a typestr that does not begin with "S" to avoid '+\
'the model erroneously interpreting your ' + \
'pressures as altitudes.')
input("Press Enter to continue or Cntrl+C to exit.")
if len(diags)>0: # If the has user asked to include specific diagnostics...
# Build the dictionary of optional diagnostics. Print if user asks.
diag_dict= _get_optional_diags(print_diag_options)
#If the user says to use all diagnostic quantities, then grab them from the dict.
if diags[0].lower() =='all':
optionals=[*diag_dict]
else: # otherwise just use their inputted list!
optionals=diags
if len(diags_minus):
optionals = [j for j in optionals if j not in diags_minus]
# Make list of all tracers from input file plus the optional diagnostic quantities you want
if input_file != '': #either by reading the input file
tracer_list= optionals+ ut._build_species_list_from_input(input_file)
else: # or by using the tracers they gave you.
tracer_list= optionals+ tracers
ntracers=str(int(len(tracer_list))) # count the number of quantities.
# Designate a few variables that we'll use to make the header lines of the files
today= str(datetime.now().strftime('%m-%d-%Y %H:%M:%S')) # Time stamp when you made this file.
spacer='-------------------------------------------------------------------------------'
title = ' Now give the times and locations of the flight'
# Parse the passed dates, get list of indivudal dates we have obs for:
all_dates= datetimes.dt.date
unq_dates=np.unique( datetimes.dt.date)
# Loop over each unique day during campaign to make a planeflight.dat file for:
for i in range(0, len(unq_dates)):
# Create a filename based on the date:
YYMMDD=unq_dates[i].strftime('%Y%m%d')
filename='Planeflight.dat.'+ YYMMDD
#==========================================================================
# Build arrays of all the data we need in Planeflight.dat for this date.
# Round to decimal places allowed in GEOS-Chem input for these vars.
#==========================================================================
# Get indexes in larger array of flight obs that took place on this date.
inds = np.where(all_dates== unq_dates[i])[0]
points= np.arange(1, len(inds)+1).astype(str)# Values for column "Points"
obs= np.full(len(inds),9999.000) ## Observation value from the flight campaign
typez= np.full(len(inds),'{typ: >6}'.format(typ=typestr)) #type is allowed 7 chars.
day= datetimes.dt.strftime('%d-%m-%Y')[inds] # Day, month, and year (GMT date) for each flight track point
tms= datetimes.dt.strftime('%H:%M')[inds] # Hour and minute (GMT time) for each flight track point
lats=np.around( lat_arr[inds], decimals=2) # Latitude (-90 to 90 degrees) for each flight track point
lons= np.around( lon_arr[inds], decimals=2) # Longitude (-180 to 180 degrees) for each flight track point
# Decide what array to use as vertical coordinate
if len(pres_arr)> 0: # Pressure in hPa for each flight track point.
if i==0: print('Using PRESSURE, not altitude.')
pres= np.around( pres_arr[inds], decimals=2) #GC doesn't allow more than 2 decimals
vert_header='PRESS ' # String for header
vert_arr=pres # Set the vertical array to pressure
else:
if len(alt_arr)> 0: # Altitude in m for each flight track point.
if i==0: print('Using ALTITUDE, not pressure.')  # print once, on the first file
alt = np.around(alt_arr[inds], decimals=2) #GC doesn't allow more than 2 decimals
vert_header='ALT ' # String for header
vert_arr=alt # Set the vertical array to altitude
# Pack all this into a dataframe, because pandas writes tab delimited files nicely!
df= pd.DataFrame({'POINT': points, 'TYPE': typez, 'DD-MM-YYYY': day,
'HH:MM':tms, 'LAT':lats, 'LON':lons, vert_header.strip(): vert_arr,
'OBS':obs})
# Append a line at the bottom of the DF that says it's the end!
end_row = pd.DataFrame([{'POINT': 99999, 'TYPE': '{typ: >6}'.format(typ='END'), 'DD-MM-YYYY': '00-00-0000',
'HH:MM': '00:00', 'LAT': 0.00, 'LON': 0.00, vert_header.strip(): 0.00,
'OBS': 0.000}])
df = pd.concat([df, end_row], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
#==========================================================================
# Perform checks on data used in this file. Check for out-of-bounds values, dupes, NaNs.
#==========================================================================
# Check to make sure passed values are valid/ in range of correct units.
if any(abs(lats)> 90):
sys.exit('| Latitudes| are > 90.')
if any(abs(lons)> 180):
sys.exit('|Longitudes| are > 180 ')
if len(alt_arr)> 0:
if (any(alt< 0)) or (all(alt< 15)):
sys.exit('Altitudes are either < 0 or in Kilometers, not meters.')
if len(pres_arr)> 0:
if (any(pres< 0)) or (any(pres >1100)): # Check that
sys.exit('|Pressures| are < 0 or > 1100 hPa. Check units.')
# Check for & drop any rows that have NaNs/ Inf values/ tell people about them!
pd.options.mode.use_inf_as_na = True # Look for Inf values too.
if df.isna().values.any() == True:
print('WARNING: Found NaNs in data. Dropping the following rows from the data set:')
print(df[df.isna().any(axis=1)])
df.dropna(axis=0, how='any', inplace=True)
# Check for any duplicate rows that drop them/ tell people.
if df.duplicated().values.any() == True:
print('WARNING: Found duplicate rows in data.'+
' If you wish to drop them, please set keyword drop_dupes=True.')
print(df[df.duplicated()])
if drop_dupes== True:
df=df.drop_duplicates(ignore_index=True)
#==========================================================================
# Format the numerical strings so that they're in GEOS-Chem's expected format!
#==========================================================================
df.POINT=df.POINT.map('{: >5}'.format) # points are 5-char strings only
df.LAT=df.LAT.map('{:7.2f}'.format) # Got these vals used for each var from
df.LON=df.LON.map('{:7.2f}'.format) # planeflight_mod.f90 i/o checks.
df.OBS=df.OBS.map('{:10.3f}'.format)
if len(pres_arr)> 0:
df.PRESS=df.PRESS.map('{:7.2f}'.format)
elif len(alt_arr)> 0:
df.ALT=df.ALT.map('{:7.2f}'.format)
if overwrite==True: #Delete existing file with this name. Otherwise it appends...
if os.path.isfile(outpath+filename):
os.remove(outpath+filename)
# Write the header lines that GEOS Chem expects to the file, considering format.
header= '{strr: >6}'.format(strr='POINT ')+ \
'{strr: >7}'.format(strr='TYPE ')+ \
'{strr: >11}'.format(strr='DD-MM-YYYY ')+ \
'{strr: >6}'.format(strr='HH:MM ')+\
'{strr: >8}'.format(strr='LAT ')+\
'{strr: >8}'.format(strr='LON ')+\
'{strr: >8}'.format(strr=vert_header) +\
'{strr: >10}'.format(strr='OBS')
#======================================================================
# Begin writing the planeflight.dat text file
#=====================================================================
# This is just a list containing our header lines in the right order...
textList = [filename, username, today, spacer, ntracers, spacer] + \
tracer_list + [spacer, title, spacer, header]
# Open the output file and write headers line by line.
outF = open(outpath+filename, "w")
for line in textList:
outF.write(line)
outF.write("\n")
# Write the lat/lon/time/pres data to a temp file, using ASCII encoding!
# Pandas default in Python 3 uses UTF-8 encoding, which GEOS-Chem can't read.
df.to_csv(outpath+filename+'_0', header=False, index=None, sep=' ', mode='a',
encoding='ascii')
# Annoyingly, if we use a space as a delimiter we get weird quotation
# marks around strings, so we'll open the temp file, read it line
# by line, take out the quotation marks, and write that to the
# actual output file.
fin = open(outpath+filename+'_0', 'r')
Lines = fin.readlines()
for line in Lines:
outF.write(line.replace('"','' ))
outF.close() # Close the output file.
fin.close() # Close the temporary file
os.remove(outpath+filename+'_0') # And delete the temp file.
print('Output saved at: '+ outpath + filename) # tell where output is saved.
return
def read_planelog(filename: str):
"""Function to read a single planelog ouput files into a pandas dataframe."""
try:
# If the header isn't too long it can be read in like this
# (e.g. if you're not saving too many things from GEOS-Chem!)
df= asc.read(filename, delimiter="\s", guess=False).to_pandas()
except:
# Otherwise we need to read "every other line" because the header
# is super weird and splits the data like this. We'll read line
# by line, write the odd lines to a file, even lines to a file,
# read those in, and then concatenate them, and delete the temp files.
out1 = open(filename+'_pt1', "w") # File that will jsut contan odd lines
out2 = open(filename+'_pt2', "w") # File that will just contain even lines
count=0
fin = open(filename, 'r') # open original file
Lines = fin.readlines()
for line in Lines: # read line by line
if (count% 2) == 0:
out1.write(line) # write evens
else:
out2.write(line) # write odds
count=count+1
fin.close() # Close all files.
out1.close()
out2.close()
# Read in columns from odd lines and columns from even lines
df1 = asc.read(filename+'_pt1', delimiter="\s", guess=False).to_pandas()
df2 = asc.read(filename+'_pt2', delimiter="\s", guess=False).to_pandas()
# Concatenate into a single dataframe
df = pd.concat([df1, df2], axis=1)
# Replace NaNs
df= df.replace(-1000, np.NaN)
# Convert YMDHM to a pandas datetime object
df['time']=pd.to_datetime(df.YYYYMMDD.astype(str) + df.HHMM.astype(str).str.zfill(4), format='%Y%m%d%H%M')  # zero-pad HHMM so e.g. 00:05 parses correctly
# Delete the temp files with even/odd lines of data
os.remove(filename+'_pt1')
os.remove(filename+'_pt2')
return df
def planelog_read_and_concat(path_to_dir: str, outdir: str = None,
outfile: str = None):
"""
Concatonate output plane.log files into a single file from a directory.
# If you only want to open a single file, try read_planelog().
Args
----
path_to_dir = String with filepath to folder containing GEOS Chem output
plane.log files
outdir (optional) = String path to where the concatenated file will be saved
outfilename (optional) = string name of output file, no extension needed.
"""
# If outdir not set, then set it to the same as the input file path.
outdir = path_to_dir if outdir is None else outdir
# If outfile not set, then set it to 'planelog_concat'
outfile = 'planelog_concat' if outfile is None else outfile
# Look for all the planelog files in the directory given
file_list = ut._find_files_in_dir(path_to_dir, ['plane.log'])
for i in range(0, len(file_list)): # Loop over files, open each.
df_i= read_planelog(file_list[i])
df_i['flight']= np.full(len(df_i['time']), i+1) # Label with flight #.
if i==0 : # Begin concatenating all flights:
df_all= df_i
else:
# For all subsequent loops append the new df UNDER the old df
df_all = pd.concat([df_all, df_i], ignore_index=True)
df_all.to_pickle(outdir+outfile) # Save the concatenated data
print('Concatenated planelog data saved at: '+ outdir + outfile )
return df_all
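if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): write a Planeflight.dat
    # file for a synthetic one-hour flight track. The output path, tracer names and
    # diagnostics below are placeholders, not recommendations.
    times = pd.Series(pd.date_range('2019-07-01 12:00', periods=60, freq='min'))
    lats = np.linspace(35.0, 36.0, 60)
    lons = np.linspace(-100.0, -99.0, 60)
    pres = np.linspace(900.0, 500.0, 60)  # hPa
    make_planeflightdat_files('./', times, lats, lons, pres_arr=pres,
                              tracers=['NO', 'O3', 'CO'], typestr='DEMO',
                              diags=['GMAO_TEMP', 'GMAO_PRES'])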
```
|
{
"source": "jdhask/gcpy",
"score": 3
}
|
#### File: gcpy/gcpy/regrid.py
```python
import os
import xesmf as xe
from .grid import make_grid_LL, make_grid_CS, make_grid_SG, get_input_res, call_make_grid, \
get_grid_extents, get_vert_grid
import hashlib
import numpy as np
import xarray as xr
import pandas as pd
import scipy.sparse
import warnings
def make_regridder_L2L(
llres_in, llres_out, weightsdir='.', reuse_weights=False,
in_extent=[-180, 180, -90, 90],
out_extent=[-180, 180, -90, 90]):
"""
Create an xESMF regridder between two lat/lon grids
Args:
llres_in: str
Resolution of input grid in format 'latxlon', e.g. '4x5'
llres_out: str
Resolution of output grid in format 'latxlon', e.g. '4x5'
Keyword Args (optional):
weightsdir: str
Directory in which to create xESMF regridder NetCDF files
Default value: '.'
reuse_weights: bool
Set this flag to True to reuse existing xESMF regridder NetCDF files
Default value: False
in_extent: list[float, float, float, float]
Describes minimum and maximum latitude and longitude of input grid
in the format [minlon, maxlon, minlat, maxlat]
Default value: [-180, 180, -90, 90]
out_extent: list[float, float, float, float]
Desired minimum and maximum latitude and longitude of output grid
in the format [minlon, maxlon, minlat, maxlat]
Default value: [-180, 180, -90, 90]
Returns:
regridder: xESMF regridder
regridder object between the two specified grids
"""
llgrid_in = make_grid_LL(llres_in, in_extent, out_extent)
llgrid_out = make_grid_LL(llres_out, out_extent)
if in_extent == [-180, 180, -90,
90] and out_extent == [-180, 180, -90, 90]:
weightsfile = os.path.join(
weightsdir, 'conservative_{}_{}.nc'.format(
llres_in, llres_out))
else:
in_extent_str = str(in_extent).replace(
'[', '').replace(
']', '').replace(
', ', 'x')
out_extent_str = str(out_extent).replace(
'[', '').replace(
']', '').replace(
', ', 'x')
weightsfile = os.path.join(
weightsdir, 'conservative_{}_{}_{}_{}.nc'.format(
llres_in, llres_out, in_extent_str, out_extent_str))
if not os.path.isfile(weightsfile) and reuse_weights:
#prevent error with more recent versions of xesmf
reuse_weights=False
try:
regridder = xe.Regridder(
llgrid_in,
llgrid_out,
method='conservative',
filename=weightsfile,
reuse_weights=reuse_weights)
except BaseException:
regridder = xe.Regridder(
llgrid_in,
llgrid_out,
method='conservative',
filename=weightsfile,
reuse_weights=reuse_weights)
return regridder
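# Usage sketch (illustrative only, not from the gcpy docs): build a 4x5 -> 2x2.5
# conservative regridder and apply it to a DataArray that lives on the 4x5 grid.
# regridder = make_regridder_L2L('4x5', '2x2.5', weightsdir='/tmp', reuse_weights=True)
# da_out = regridder(da_in)   # da_in: xarray DataArray with lat/lon dims on the 4x5 grid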
def make_regridder_C2L(csres_in, llres_out, weightsdir='.',
reuse_weights=True, sg_params=[1, 170, -90]):
"""
Create an xESMF regridder from a cubed-sphere to lat/lon grid
Args:
csres_in: int
Cubed-sphere resolution of input grid
llres_out: str
Resolution of output grid in format 'latxlon', e.g. '4x5'
Keyword Args (optional):
weightsdir: str
Directory in which to create xESMF regridder NetCDF files
Default value: '.'
reuse_weights: bool
Set this flag to True to reuse existing xESMF regridder NetCDF files
Default value: True
sg_params: list[float, float, float] (stretch_factor, target_longitude, target_latitude)
Input grid stretched-grid parameters in the format
[stretch_factor, target_longitude, target_latitude].
Will trigger stretched-grid creation if not default values.
Default value: [1, 170, -90] (no stretching)
Returns:
regridder_list: list[6 xESMF regridders]
list of regridder objects (one per cubed-sphere face) between the two specified grids
"""
[sf_in, tlon_in, tlat_in] = sg_params
if sg_params == [1, 170, -90]:
_, csgrid_list = make_grid_CS(csres_in)
else:
_, csgrid_list = make_grid_SG(
csres_in, stretch_factor=sg_params[0],
target_lon=sg_params[1],
target_lat=sg_params[2])
llgrid = make_grid_LL(llres_out)
regridder_list = []
for i in range(6):
if sg_params == [1, 170, -90]:
weightsfile = os.path.join(
weightsdir, 'conservative_c{}_{}_{}.nc'.format(
str(csres_in), llres_out, str(i)))
else:
weights_fname = f'conservative_sg{sg_hash(csres_in, sf_in, tlat_in, tlon_in)}_ll{llres_out}_F{i}.nc'
weightsfile = os.path.join(weightsdir, weights_fname)
if not os.path.isfile(weightsfile) and reuse_weights:
#prevent error with more recent versions of xesmf
reuse_weights=False
try:
regridder = xe.Regridder(
csgrid_list[i],
llgrid,
method='conservative',
filename=weightsfile,
reuse_weights=reuse_weights)
except BaseException:
regridder = xe.Regridder(
csgrid_list[i],
llgrid,
method='conservative',
filename=weightsfile,
reuse_weights=reuse_weights)
regridder_list.append(regridder)
return regridder_list
def make_regridder_S2S(
csres_in,
csres_out,
sf_in=1,
tlon_in=170,
tlat_in=-90,
sf_out=1,
tlon_out=170,
tlat_out=-90,
weightsdir='.',
verbose=True):
"""
Create an xESMF regridder from a cubed-sphere / stretched-grid grid
to another cubed-sphere / stretched-grid grid.
Stretched-grid params of 1, 170, -90 indicate no stretching.
Args:
csres_in: int
Cubed-sphere resolution of input grid
csres_out: int
Cubed-sphere resolution of output grid
Keyword Args (optional):
sf_in: float
Stretched-grid factor of input grid
Default value: 1
tlon_in: float
Target longitude for stretching in input grid
Default value: 170
tlat_in: float
Target latitude for stretching in input grid
Default value: -90
sf_out: float
Stretched-grid factor of output grid
Default value: 1
tlon_out: float
Target longitude for stretching in output grid
Default value: 170
tlat_out: float
Target latitude for stretching in output grid
Default value: -90
weightsdir: str
Directory in which to create xESMF regridder NetCDF files
Default value: '.'
verbose: bool
Set this flag to True to enable printing when output faces do not
intersect input faces when regridding
Default value: True
Returns:
regridder_list: list[6 xESMF regridders]
list of regridder objects (one per cubed-sphere face) between the two specified grids
"""
_, igrid_list = make_grid_SG(
csres_in, stretch_factor=sf_in, target_lat=tlat_in, target_lon=tlon_in)
_, ogrid_list = make_grid_SG(
csres_out, stretch_factor=sf_out, target_lat=tlat_out,
target_lon=tlon_out)
regridder_list = []
for o_face in range(6):
regridder_list.append({})
for i_face in range(6):
weights_fname = f'conservative_sg{sg_hash(csres_in, sf_in, tlat_in, tlon_in)}_F{i_face}_sg{sg_hash(csres_out, sf_out, tlat_out, tlon_out)}_F{o_face}.nc'
weightsfile = os.path.join(weightsdir, weights_fname)
reuse_weights = os.path.exists(weightsfile)
if not os.path.isfile(weightsfile) and reuse_weights:
#prevent error with more recent versions of xesmf
reuse_weights=False
try:
regridder = xe.Regridder(igrid_list[i_face],
ogrid_list[o_face],
method='conservative',
filename=weightsfile,
reuse_weights=reuse_weights)
regridder_list[-1][i_face] = regridder
except ValueError:
if verbose:
print(f"iface {i_face} doesn't intersect oface {o_face}")
return regridder_list
def make_regridder_L2S(llres_in, csres_out, weightsdir='.',
reuse_weights=True, sg_params=[1, 170, -90]):
"""
Create an xESMF regridder from a lat/lon to a cubed-sphere grid
Args:
llres_in: str
Resolution of input grid in format 'latxlon', e.g. '4x5'
csres_out: int
Cubed-sphere resolution of output grid
Keyword Args (optional):
weightsdir: str
Directory in which to create xESMF regridder NetCDF files
Default value: '.'
reuse_weights: bool
Set this flag to True to reuse existing xESMF regridder NetCDF files
Default value: True
sg_params: list[float, float, float] (stretch_factor, target_longitude, target_latitude)
Output grid stretched-grid parameters in the format
[stretch_factor, target_longitude, target_latitude].
Will trigger stretched-grid creation if not default values.
Default value: [1, 170, -90] (no stretching)
Returns:
regridder_list: list[6 xESMF regridders]
list of regridder objects (one per cubed-sphere face) between the two specified grids
"""
llgrid = make_grid_LL(llres_in)
if sg_params == [1, 170, -90]:
_, csgrid_list = make_grid_CS(csres_out)
else:
_, csgrid_list = make_grid_SG(
csres_out, stretch_factor=sg_params[0],
target_lon=sg_params[1],
target_lat=sg_params[2])
regridder_list = []
for i in range(6):
if sg_params == [1, 170, -90]:
weightsfile = os.path.join(
weightsdir, 'conservative_{}_c{}_{}.nc'.format(
llres_in, str(csres_out), str(i)))
else:
weights_fname = f'conservative_ll{llres_in}_sg{sg_hash(csres_out, *sg_params)}_F{i}.nc'
weightsfile = os.path.join(weightsdir, weights_fname)
if not os.path.isfile(weightsfile) and reuse_weights:
#prevent error with more recent versions of xesmf
reuse_weights=False
try:
regridder = xe.Regridder(
llgrid,
csgrid_list[i],
method='conservative',
filename=weightsfile,
reuse_weights=reuse_weights)
except BaseException:
regridder = xe.Regridder(
llgrid,
csgrid_list[i],
method='conservative',
filename=weightsfile,
reuse_weights=reuse_weights)
regridder_list.append(regridder)
return regridder_list
def create_regridders(
refds, devds, weightsdir='.', reuse_weights=True, cmpres=None,
zm=False, sg_ref_params=[1, 170, -90],
sg_dev_params=[1, 170, -90]):
"""
Internal function used for creating regridders between two datasets.
Follows decision logic needed for plotting functions.
Originally code from compare_single_level and compare_zonal_mean.
Args:
refds: xarray Dataset
Input dataset
devds: xarray Dataset
Output dataset
Keyword Args (optional):
weightsdir: str
Directory in which to create xESMF regridder NetCDF files
Default value: '.'
reuse_weights: bool
Set this flag to True to reuse existing xESMF regridder NetCDF files
Default value: True
cmpres: int or str
Specific target resolution for comparison grid used in difference and ratio plots
Default value: None (will follow logic chain below)
zm: bool
Set this flag to True if regridders will be used in zonal mean plotting
Default value: False
sg_ref_params: list[float, float, float] (stretch_factor, target_longitude, target_latitude)
Ref grid stretched-grid parameters in the format
[stretch_factor, target_longitude, target_latitude].
Default value: [1, 170, -90] (no stretching)
sg_dev_params: list[float, float, float] (stretch_factor, target_longitude, target_latitude)
Dev grid stretched-grid parameters in the format
[stretch_factor, target_longitude, target_latitude].
Default value: [1, 170, -90] (no stretching)
Returns:
list of many different quantities needed for regridding in plotting functions
refres, devres, cmpres: str or int
Resolution of a dataset grid
refgridtype, devgridtype, cmpgridtype: str
Gridtype of a dataset ('ll' or 'cs')
regridref, regriddev, regridany: bool
Whether to regrid a dataset
refgrid, devgrid, cmpgrid: dict
Grid definition of a dataset
refregridder, devregridder: xESMF regridder
Regridder object between refgrid or devgrid and cmpgrid
(will be None if input grid is not lat/lon)
refregridder_list, devregridder_list: list[6 xESMF regridders]
List of regridder objects for each face between refgrid or devgrid and cmpgrid
(will be None if input grid is not cubed-sphere)
"""
# Take two lat/lon or cubed-sphere xarray datasets and regrid them if
# needed
refres, refgridtype = get_input_res(refds)
devres, devgridtype = get_input_res(devds)
refminlon, refmaxlon, refminlat, refmaxlat = get_grid_extents(refds)
devminlon, devmaxlon, devminlat, devmaxlat = get_grid_extents(devds)
# choose smallest overlapping area for comparison
cmpminlon = max(x for x in [refminlon, devminlon] if x is not None)
cmpmaxlon = min(x for x in [refmaxlon, devmaxlon] if x is not None)
cmpminlat = max(x for x in [refminlat, devminlat] if x is not None)
cmpmaxlat = min(x for x in [refmaxlat, devmaxlat] if x is not None)
ref_extent = [refminlon, refmaxlon, refminlat, refmaxlat]
cmp_extent = [cmpminlon, cmpmaxlon, cmpminlat, cmpmaxlat]
dev_extent = [devminlon, devmaxlon, devminlat, devmaxlat]
# ==================================================================
# Determine comparison grid resolution and type (if not passed)
# ==================================================================
# If no cmpres is passed then choose highest resolution between ref and dev.
# If one dataset is lat-lon and the other is cubed-sphere, and no comparison
# grid resolution is passed, then default to 1x1.25. If both cubed-sphere and
# plotting zonal mean, override to 1x1.25 lat-lon with a warning
sg_cmp_params = [1, 170, -90]
if cmpres is None:
if refres == devres and refgridtype == "ll":
cmpres = refres
cmpgridtype = refgridtype
elif refgridtype == "ll" and devgridtype == "ll":
cmpres = min([refres, devres])
cmpgridtype = refgridtype
elif refgridtype == "cs" and devgridtype == "cs":
if zm:
print(
"Warning: zonal mean comparison must be lat-lon. Defaulting to 1x1.25")
cmpres = '1x1.25'
cmpgridtype = "ll"
elif sg_ref_params != [] or sg_dev_params != []:
# pick ref grid when a stretched-grid and non-stretched-grid
# are passed
cmpres = refres
cmpgridtype = "cs"
sg_cmp_params = sg_ref_params
else:
# pick higher resolution CS grid out of two standard
# cubed-sphere grids
cmpres = max([refres, devres])
cmpgridtype = "cs"
elif refgridtype == "ll" and float(refres.split('x')[0]) < 1 and float(refres.split('x')[1]) < 1.25:
cmpres = refres
cmpgridtype = "ll"
elif devgridtype == "ll" and float(devres.split('x')[0]) < 1 and float(devres.split('x')[1]) < 1.25:
cmpres = devres
cmpgridtype = "ll"
else:
# default to 1x1.25 lat/lon grid for mixed CS and LL grids
cmpres = "1x1.25"
cmpgridtype = "ll"
elif "x" in cmpres:
cmpgridtype = "ll"
elif zm:
print("Warning: zonal mean comparison must be lat-lon. Defaulting to 1x1.25")
cmpres = '1x1.25'
cmpgridtype = "ll"
else:
# cubed-sphere cmpres
if isinstance(cmpres, list):
# stretched-grid resolution
# first element is cubed-sphere resolution, rest are sg params
sg_cmp_params = cmpres[1:]
cmpres = int(cmpres[0])
else:
cmpres = int(cmpres) # must cast to integer for cubed-sphere
cmpgridtype = "cs"
# Determine what, if any, need regridding.
regridref = refres != cmpres or sg_ref_params != sg_cmp_params
regriddev = devres != cmpres or sg_dev_params != sg_cmp_params
regridany = regridref or regriddev
# ==================================================================
# Make grids (ref, dev, and comparison)
# ==================================================================
[refgrid, _] = call_make_grid(
refres, refgridtype, ref_extent, cmp_extent, sg_ref_params)
[devgrid, _] = call_make_grid(
devres, devgridtype, dev_extent, cmp_extent, sg_dev_params)
[cmpgrid, _] = call_make_grid(
cmpres, cmpgridtype, cmp_extent, cmp_extent, sg_cmp_params)
# =================================================================
# Make regridders, if applicable
# =================================================================
refregridder = None
refregridder_list = None
devregridder = None
devregridder_list = None
if regridref:
if refgridtype == "ll":
if cmpgridtype == "cs":
refregridder_list = make_regridder_L2S(
refres,
cmpres,
weightsdir=weightsdir,
reuse_weights=reuse_weights,
sg_params=sg_cmp_params)
else:
refregridder = make_regridder_L2L(
refres,
cmpres,
weightsdir=weightsdir,
reuse_weights=reuse_weights,
in_extent=ref_extent,
out_extent=cmp_extent)
else:
if cmpgridtype == "cs":
refregridder_list = make_regridder_S2S(
refres,
cmpres,
*sg_ref_params,
*sg_cmp_params,
weightsdir=weightsdir,
verbose=False)
else:
refregridder_list = make_regridder_C2L(
refres,
cmpres,
weightsdir=weightsdir,
reuse_weights=reuse_weights,
sg_params=sg_ref_params)
if regriddev:
if devgridtype == "ll":
if cmpgridtype == "cs":
devregridder_list = make_regridder_L2S(
devres,
cmpres,
weightsdir=weightsdir,
reuse_weights=reuse_weights,
sg_params=sg_cmp_params)
else:
devregridder = make_regridder_L2L(
devres,
cmpres,
weightsdir=weightsdir,
reuse_weights=reuse_weights,
in_extent=dev_extent,
out_extent=cmp_extent)
else:
if cmpgridtype == "cs":
devregridder_list = make_regridder_S2S(
devres,
cmpres,
*sg_dev_params,
*sg_cmp_params,
weightsdir=weightsdir,
verbose=False)
else:
devregridder_list = make_regridder_C2L(
devres,
cmpres,
weightsdir=weightsdir,
reuse_weights=reuse_weights,
sg_params=sg_dev_params)
return [
refres,
refgridtype,
devres,
devgridtype,
cmpres,
cmpgridtype,
regridref,
regriddev,
regridany,
refgrid,
devgrid,
cmpgrid,
refregridder,
devregridder,
refregridder_list,
devregridder_list]
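# Illustrative unpacking of the 16-element return value (comment only; the
# variable names simply mirror the Returns section of the docstring above):
#   [refres, refgridtype, devres, devgridtype, cmpres, cmpgridtype,
#    regridref, regriddev, regridany, refgrid, devgrid, cmpgrid,
#    refregridder, devregridder, refregridder_list,
#    devregridder_list] = create_regridders(refds, devds)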
def regrid_comparison_data(
data,
res,
regrid,
regridder,
regridder_list,
global_cmp_grid,
gridtype,
cmpgridtype,
cmpminlat_ind=0,
cmpmaxlat_ind=-2,
cmpminlon_ind=0,
cmpmaxlon_ind=-2,
nlev=1):
"""
Regrid comparison datasets to cubed-sphere (including stretched-grid) or lat/lon format.
Args:
data: xarray DataArray
DataArray containing a GEOS-Chem data variable
res: int
Cubed-sphere resolution for comparison grid
regrid: bool
Set to true to regrid dataset
regridder: xESMF regridder
Regridder between the original data grid and the comparison grid
regridder_list: list(xESMF regridder)
List of regridders for cubed-sphere data
global_cmp_grid: xarray DataArray
Comparison grid
gridtype: str
Type of input data grid (either 'll' or 'cs')
cmpgridtype: str
Type of comparison grid (either 'll' or 'cs')
Keyword Args (optional):
cmpminlat_ind: int
Index of minimum latitude extent for comparison grid
Default value: 0
cmpmaxlat_ind: int
Index (minus 1) of maximum latitude extent for comparison grid
Default value: -2
cmpminlon_ind: int
Index of minimum longitude extent for comparison grid
Default value: 0
cmpmaxlon_ind: int
Index (minus 1) of maximum longitude extent for comparison grid
Default value: -2
nlev: int
Number of levels of input grid and comparison grid
Default value: 1
Returns:
data: xarray DataArray
Original DataArray regridded to comparison grid (including resolution and extent changes)
"""
if regrid:
if gridtype == "ll":
if cmpgridtype == "ll":
# regrid ll to ll
return regridder(data)
elif cmpgridtype == "cs":
# ll to CS
new_data = np.zeros([nlev, 6, res, res]).squeeze()
for j in range(6):
new_data[j, ...] = regridder_list[j](data)
return new_data
elif cmpgridtype == "ll":
# CS to ll
if nlev == 1:
new_data = np.zeros([global_cmp_grid['lat'].size,
global_cmp_grid['lon'].size])
data_reshaped = data.data.reshape(6, res, res)
else:
new_data = np.zeros([nlev, global_cmp_grid['lat'].size,
global_cmp_grid['lon'].size])
data_reshaped = data.data.reshape(
nlev, 6, res, res).swapaxes(0, 1)
for j in range(6):
regridder = regridder_list[j]
new_data += regridder(data_reshaped[j])
if nlev == 1:
# limit to extent of cmpgrid
return new_data[cmpminlat_ind:cmpmaxlat_ind +
1, cmpminlon_ind:cmpmaxlon_ind + 1].squeeze()
else:
return new_data
elif cmpgridtype == "cs":
# CS to CS
# Reformat dimensions to T, Z, F, Y, X
if 'Xdim' in data.dims:
data_format = 'diagnostic'
else:
data_format = 'checkpoint'
new_data = reformat_dims(
data, format=data_format, towards_common=True)
# Transpose to T, Z, F, Y, X
if len(new_data.dims) == 5:
new_data = new_data.transpose('T', 'Z', 'F', 'Y', 'X')
elif len(new_data.dims) == 4:
# no time
new_data = new_data.transpose('Z', 'F', 'Y', 'X')
elif len(new_data.dims) == 3:
# no time or vertical
new_data = new_data.transpose('F', 'Y', 'X')
# For each output face, sum regridded input faces
oface_datasets = []
for oface in range(6):
oface_regridded = []
for iface, regridder in regridder_list[oface].items():
ds_iface = new_data.isel(F=iface)
if 'F' in ds_iface.coords:
ds_iface = ds_iface.drop('F')
oface_regridded.append(
regridder(ds_iface, keep_attrs=True))
oface_regridded = xr.concat(
oface_regridded, dim='intersecting_ifaces').sum(
'intersecting_ifaces', keep_attrs=True)
oface_datasets.append(oface_regridded)
new_data = xr.concat(oface_datasets, dim='F')
new_data = new_data.rename({
'y': 'Y',
'x': 'X',
})
# lat, lon are from xESMF which we don't want
new_data = new_data.drop(['lat', 'lon'])
# reformat dimensions to previous format
new_data = reformat_dims(
new_data, format=data_format, towards_common=False)
return new_data
else:
return data
def reformat_dims(ds, format, towards_common):
"""
Reformat dimensions of a cubed-sphere / stretched-grid grid between different GCHP formats
Args:
ds: xarray Dataset
Dataset to be reformatted
format: str
Format from or to which to reformat ('checkpoint' or 'diagnostic')
towards_common: bool
Set this flag to True to move towards a common dimension format
Returns:
ds: xarray Dataset
Original dataset with reformatted dimensions
"""
def unravel_checkpoint_lat(ds_in):
if isinstance(ds_in, xr.Dataset):
cs_res = ds_in.dims['lon']
assert cs_res == ds_in.dims['lat'] // 6
else:
cs_res = ds_in['lon'].size
assert cs_res == ds_in['lat'].size // 6
mi = pd.MultiIndex.from_product([
np.linspace(1, 6, 6),
np.linspace(1, cs_res, cs_res)
])
ds_in = ds_in.assign_coords({'lat': mi})
ds_in = ds_in.unstack('lat')
return ds_in
def ravel_checkpoint_lat(ds_out):
if isinstance(ds_out, xr.Dataset):
cs_res = ds_out.dims['lon']
else:
cs_res = ds_out['lon'].size
ds_out = ds_out.stack(lat=['lat_level_0', 'lat_level_1'])
ds_out = ds_out.assign_coords({
'lat': np.linspace(1, 6 * cs_res, 6 * cs_res)
})
return ds_out
dim_formats = {
'checkpoint': {
'unravel': [unravel_checkpoint_lat],
'ravel': [ravel_checkpoint_lat],
'rename': {
'lon': 'X',
'lat_level_0': 'F',
'lat_level_1': 'Y',
'time': 'T',
'lev': 'Z',
},
'transpose': ('time', 'lev', 'lat', 'lon')
},
'diagnostic': {
'rename': {
'nf': 'F',
'lev': 'Z',
'Xdim': 'X',
'Ydim': 'Y',
'time': 'T',
},
'transpose': ('time', 'lev', 'nf', 'Ydim', 'Xdim')
}
}
if towards_common:
# Unravel dimensions
for unravel_callback in dim_formats[format].get('unravel', []):
ds = unravel_callback(ds)
# Rename dimensions
ds = ds.rename(dim_formats[format].get('rename', {}))
return ds
else:
# Reverse rename
ds = ds.rename(
{v: k for k, v in dim_formats[format].get('rename', {}).items()})
# Ravel dimensions
for ravel_callback in dim_formats[format].get('ravel', []):
ds = ravel_callback(ds)
# Transpose
if len(ds.dims) == 5 or (len(ds.dims) == 4 and 'lev' in list(
ds.dims) and 'time' in list(ds.dims)):
# full dim dataset
ds = ds.transpose(*dim_formats[format].get('transpose', []))
elif len(ds.dims) == 4:
# single time
ds = ds.transpose(*dim_formats[format].get('transpose', [])[1:])
elif len(ds.dims) == 3:
# single level / time
ds = ds.transpose(*dim_formats[format].get('transpose', [])[2:])
return ds
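# Note (illustrative summary of the two formats handled above): 'checkpoint'
# files stack the six cubed-sphere faces along lat (lat = 6 * cs_res,
# lon = cs_res), which unravel_checkpoint_lat splits into separate face and Y
# levels; 'diagnostic' files already carry an explicit face dimension ('nf').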
def sg_hash(
cs_res,
stretch_factor: float,
target_lat: float,
target_lon: float):
return hashlib.sha1(
'cs={cs_res},sf={stretch_factor:.5f},tx={target_lon:.5f},ty={target_lat:.5f}'.format(
stretch_factor=stretch_factor,
target_lat=target_lat,
target_lon=target_lon,
cs_res=cs_res).encode()).hexdigest()[:7]
def regrid_vertical_datasets(ref, dev, target_grid_choice='ref', ref_vert_params=[[],[]],
dev_vert_params=[[],[]], target_vert_params=[[],[]]):
"""
Perform complete vertical regridding of GEOS-Chem datasets to
the vertical grid of one of the datasets or an entirely different
vertical grid.
Args:
ref: xarray.Dataset
First dataset
dev: xarray.Dataset
Second dataset
target_grid_choice (optional): str
Will regrid to the chosen dataset among the two datasets
unless target_vert_params is provided
Default value: 'ref'
ref_vert_params (optional): list(list, list) of list-like types
Hybrid grid parameter A in hPa and B (unitless) in [AP, BP] format.
Needed if ref grid is not 47 or 72 levels
Default value: [[], []]
dev_vert_params (optional): list(list, list) of list-like types
Hybrid grid parameter A in hPa and B (unitless) in [AP, BP] format.
Needed if dev grid is not 47 or 72 levels
Default value: [[], []]
target_vert_params (optional): list(list, list) of list-like types
Hybrid grid parameter A in hPa and B (unitless) in [AP, BP] format.
Will override target_grid_choice as target grid
Default value: [[], []]
Returns:
new_ref: xarray.Dataset
First dataset, possibly regridded to a new vertical grid
new_dev: xarray.Dataset
Second dataset, possibly regridded to a new vertical grid
"""
# Get mid-point pressure and edge pressures for this grid
ref_pedge, ref_pmid, _ = get_vert_grid(ref, *ref_vert_params)
dev_pedge, dev_pmid, _ = get_vert_grid(dev, *dev_vert_params)
new_ref, new_dev = ref, dev
if len(ref_pedge) != len(dev_pedge) or target_vert_params != [[],[]]:
if target_vert_params != [[],[]]:
#use a specific target grid for regridding if passed
target_grid = vert_grid(*target_vert_params)
target_pedge, target_pmid = target_grid.p_edge(), target_grid.p_mid()
elif target_grid_choice == 'ref':
target_pedge, target_pmid = ref_pedge, ref_pmid
else:
target_pedge, target_pmid = dev_pedge, dev_pmid
def regrid_one_vertical_dataset(ds, ds_pedge, target_pedge, target_pmid):
new_ds = ds
if len(ds_pedge) != len(target_pedge):
#regrid all 3D (plus possible time dimension) variables
xmat_ds = gen_xmat(ds_pedge, target_pedge)
regrid_variables = [v for v in ds.data_vars if (("lat" in ds[v].dims or "Xdim" in ds[v].dims)
and ("lon" in ds[v].dims or "Ydim" in ds[v].dims)
and ("lev" in ds[v].dims))]
new_ds = xr.Dataset()
#currently drop data vars that have lev but don't also have x and y coordinates
for v in (set(ds.data_vars)-set(regrid_variables)):
if 'lev' not in ds[v].dims:
new_ds[v] = ds[v]
new_ds.attrs = ds.attrs
for v in regrid_variables:
if "time" in ds[v].dims:
new_ds_temp = []
for time in range(len(ds[v].time)):
new_ds_v = regrid_vertical(ds[v].isel(time=time), xmat_ds, target_pmid)
new_ds_temp.append(new_ds_v.expand_dims("time"))
new_ds[v] = xr.concat(new_ds_temp, "time")
else:
new_ds[v] = regrid_vertical(ds[v], xmat_ds, target_pmid)
return new_ds
new_ref = regrid_one_vertical_dataset(ref, ref_pedge, target_pedge, target_pmid)
new_dev = regrid_one_vertical_dataset(dev, dev_pedge, target_pedge, target_pmid)
return new_ref, new_dev
def regrid_vertical(src_data_3D, xmat_regrid, target_levs=[]):
"""
Performs vertical regridding using a sparse regridding matrix
This function was originally written by <NAME> and included
in package gcgridobj: https://github.com/sdeastham/gcgridobj
Args:
src_data_3D: xarray DataArray or numpy array
Data to be regridded
xmat_regrid: sparse scipy coordinate matrix
Regridding matrix from input data grid to target grid
target_levs (optional): list
Values for Z coordinate of returned data (if returned data is of type xr.DataArray)
Default value: []
Returns:
out_data: xarray DataArray or numpy array
Data regridded to target grid
"""
# Assumes that the FIRST dimension of the input data is vertical
nlev_in = src_data_3D.shape[0]
if xmat_regrid.shape[1] == nlev_in:
# Current regridding matrix is for the reverse regrid
# Rescale matrix to get the contributions right
# Warning: this assumes that the same vertical range is covered
warnings.warn(
'Using inverted regridding matrix. This may cause incorrect extrapolation')
xmat_renorm = xmat_regrid.transpose().toarray()
for ilev in range(xmat_renorm.shape[1]):
norm_fac = np.sum(xmat_renorm[:, ilev])
if np.abs(norm_fac) < 1.0e-20:
norm_fac = 1.0
xmat_renorm[:, ilev] /= norm_fac
xmat_renorm = scipy.sparse.coo_matrix(xmat_renorm)
elif xmat_regrid.shape[0] == nlev_in:
# Matrix correctly dimensioned
xmat_renorm = xmat_regrid.copy()
else:
print(src_data_3D, xmat_regrid.shape)
raise ValueError('Regridding matrix not correctly sized')
nlev_out = xmat_renorm.shape[1]
out_shape = [nlev_out] + list(src_data_3D.shape[1:])
n_other = np.product(src_data_3D.shape[1:])
temp_data = np.zeros((nlev_out, n_other))
in_data = np.reshape(np.array(src_data_3D), (nlev_in, n_other))
for ix in range(n_other):
in_data_vec = np.matrix(in_data[:, ix])
temp_data[:, ix] = in_data_vec * xmat_renorm
out_data = np.reshape(temp_data, out_shape)
# Transfer over old / create new coordinates for xarray DataArrays
if isinstance(src_data_3D, xr.DataArray):
new_coords = {
coord: src_data_3D.coords[coord].data
for coord in src_data_3D.coords if coord != 'lev'}
if target_levs == []:
# default to one level index per output level (1..nlev_out)
new_coords['lev'] = np.arange(1, out_data.shape[0] + 1)
else:
new_coords['lev'] = target_levs
# GCHP-specific
if 'lats' in src_data_3D.coords:
new_coords['lats'] = (
('lat', 'lon'), src_data_3D.coords['lats'].data)
if 'lons' in src_data_3D.coords:
new_coords['lons'] = (
('lat', 'lon'), src_data_3D.coords['lons'].data)
out_data = xr.DataArray(out_data,
dims=tuple([dim for dim in src_data_3D.dims]),
coords=new_coords,
attrs=src_data_3D.attrs)
return out_data
def gen_xmat(p_edge_from, p_edge_to):
"""
Generates regridding matrix from one vertical grid to another.
This function was originally written by <NAME> and included
in package gcgridobj: https://github.com/sdeastham/gcgridobj
Args:
p_edge_from: numpy array
Edge pressures of the input grid
p_edge_to: numpy array
Edge pressures of the target grid
Returns:
xmat: sparse scipy coordinate matrix
Regridding matrix from input grid to target grid
"""
n_from = len(p_edge_from) - 1
n_to = len(p_edge_to) - 1
# Guess - max number of entries?
n_max = max(n_to, n_from) * 5
# Index being mapped from
xmat_i = np.zeros(n_max)
# Index being mapped to
xmat_j = np.zeros(n_max)
# Weights
xmat_s = np.zeros(n_max)
# Find the first output box which has any commonality with the input box
first_from = 0
i_to = 0
if p_edge_from[0] > p_edge_to[0]:
# "From" grid starts at lower altitude (higher pressure)
while p_edge_to[0] < p_edge_from[first_from + 1]:
first_from += 1
else:
# "To" grid starts at lower altitude (higher pressure)
while p_edge_to[i_to + 1] > p_edge_from[0]:
i_to += 1
frac_to_total = 0.0
i_weight = 0
for i_from in range(first_from, n_from):
p_base_from = p_edge_from[i_from]
p_top_from = p_edge_from[i_from + 1]
# Climb the "to" pressures until you intersect with this box
while i_to < n_to and p_base_from <= p_edge_to[i_to + 1]:
i_to += 1
frac_to_total = 0.0
# Now, loop over output layers as long as there is any overlap,
# i.e. as long as the base of the "to" layer is below the
# top of the "from" layer
last_box = False
while p_edge_to[i_to] >= p_top_from and not last_box and not i_to >= n_to:
p_base_common = min(p_base_from, p_edge_to[i_to])
p_top_common = max(p_top_from, p_edge_to[i_to + 1])
# Fraction of target box
frac_to = (p_base_common - p_top_common) / \
(p_edge_to[i_to] - p_edge_to[i_to + 1])
xmat_i[i_weight] = i_from
xmat_j[i_weight] = i_to
xmat_s[i_weight] = frac_to
i_weight += 1
last_box = p_edge_to[i_to + 1] <= p_top_from
if not last_box:
i_to += 1
return scipy.sparse.coo_matrix(
(xmat_s[: i_weight],
(xmat_i[: i_weight],
xmat_j[: i_weight])),
shape=(n_from, n_to))
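# ----------------------------------------------------------------------------
# Illustrative, self-contained sketch (not part of the original module): build
# a vertical regridding matrix with gen_xmat and apply it with regrid_vertical.
# The pressure edges below are made-up values in hPa, chosen only for the demo.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    _p_edge_from = np.array([1000., 800., 600., 400., 200., 0.])  # 5 layers
    _p_edge_to = np.array([1000., 700., 400., 0.])                # 3 layers
    _xmat = gen_xmat(_p_edge_from, _p_edge_to)     # sparse (5, 3) weight matrix
    _profile = np.ones((5, 2, 2))                  # vertical dim must be first
    print(regrid_vertical(_profile, _xmat).shape)  # -> (3, 2, 2)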
```
|
{
"source": "jdhayhurst/sumstat_harmoniser",
"score": 3
}
|
#### File: sumstat_harmoniser/lib/SumStatRecord.py
```python
from lib.Seq import Seq
import sys
class SumStatRecord:
""" Class to hold a summary statistic record.
"""
def __init__(self, chrom, pos, other_al, effect_al, beta, oddsr,
oddsr_lower, oddsr_upper, eaf, data):
# Set raw info
self.chrom = chrom
self.pos = pos
self.other_al = other_al
self.effect_al = effect_al
self.data = data
self.beta = float(beta) if beta else None
self.oddsr = safe_float(oddsr) if oddsr else None
self.oddsr_lower = safe_float(oddsr_lower) if oddsr_lower else None
self.oddsr_upper = safe_float(oddsr_upper) if oddsr_upper else None
# Effect allele frequency is not required if we assume +ve strand
if eaf:
self.eaf = float(eaf)
assert 0<= self.eaf <= 1
else:
self.eaf = None
# Set harmonised values
self.hm_rsid = None
self.hm_chrom = None
self.hm_pos = None
self.hm_other_al = None
self.hm_effect_al = None
self.is_harmonised = False
self.hm_code = None
def validate_ssrec(self):
''' Ensures that chrom, pos, other_al, effect_al are of correct type
Return code which will either be:
- None if successful,
- 14 if unsuccessful
'''
# Coerce types
self.chrom = str(self.chrom)
self.other_al = Seq(self.other_al)
self.effect_al = Seq(self.effect_al)
try:
self.pos = int(self.pos)
except (ValueError, TypeError) as e:
return 14
# Assert that other and effect alleles are different
if self.other_al.str() == self.effect_al.str():
return 14
# Assert that unknown nucleotides don't exist
if 'N' in self.other_al.str() or 'N' in self.effect_al.str():
return 14
return None
def revcomp_alleles(self):
""" Reverse complement both the other and effect alleles.
"""
self.effect_al = self.effect_al.revcomp()
self.other_al = self.other_al.revcomp()
def flip_beta(self):
""" Flip the beta, alleles and eaf. Set flipped to True.
Args:
revcomp (Bool): If true, will take reverse complement in addition
to flipping.
"""
# Flip beta
if self.beta:
self.beta = self.beta * -1
# Flip OR
if self.oddsr:
self.oddsr = self.oddsr ** -1
if self.oddsr_lower and self.oddsr_upper:
unharmonised_lower = self.oddsr_lower
unharmonised_upper = self.oddsr_upper
self.oddsr_lower = unharmonised_upper ** -1
self.oddsr_upper = unharmonised_lower ** -1
# Switch alleles
new_effect = self.other_al
new_other = self.effect_al
self.other_al = new_other
self.effect_al = new_effect
# Flip eaf
if self.eaf:
self.eaf = 1 - self.eaf
def alleles(self):
"""
Returns:
Tuple of (other, effect) alleles
"""
return (self.other_al, self.effect_al)
def __repr__(self):
return "\n".join(["Sum stat record",
" chrom : " + self.chrom,
" pos : " + str(self.pos),
" other allele : " + str(self.other_al),
" effect allele: " + str(self.effect_al),
" beta : " + str(self.beta),
" odds ratio : " + str(self.oddsr),
" EAF : " + str(self.eaf)
])
def safe_float(value):
''' Convert to float, clamping to sys.float_info.max on overflow (Inf) and
to sys.float_info.min when the value underflows to 0. Only to be used on
values > 0.
Args:
value (float)
Returns:
float
'''
value = float(value)
if value == 0.0:
value = sys.float_info.min
elif value == float('Inf'):
value = sys.float_info.max
return value
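# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); the values are
# made up purely to show what flip_beta() does to beta, OR, EAF and alleles.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    rec = SumStatRecord(chrom='1', pos=12345, other_al='A', effect_al='G',
                        beta=0.12, oddsr=1.20, oddsr_lower=1.05,
                        oddsr_upper=1.37, eaf=0.30, data={})
    rec.validate_ssrec()  # coerces types; returns None on success, 14 on failure
    rec.flip_beta()       # beta -> -0.12, OR -> 1/1.20, EAF -> 0.70, alleles swapped
    print(rec)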
```
#### File: sumstat_harmoniser/sumstat_harmoniser/main.py
```python
import os
import sys
import gzip
import argparse
from copy import deepcopy
from subprocess import Popen, PIPE
from collections import OrderedDict, Counter
from lib.SumStatRecord import SumStatRecord
from lib.VCFRecord import VCFRecord
def main():
""" Implements main logic.
"""
# Get args
global args
args = parse_args()
# Intitate handles and counters
header_written = False
strand_counter = Counter()
code_counter = Counter()
if args.hm_sumstats:
out_handle = open_gzip(args.hm_sumstats, "wb")
# Process each row in summary statistics
for counter, ss_rec in enumerate(yield_sum_stat_records(args.sumstats,
args.in_sep)):
# If set to only process 1 chrom, skip none matching chroms
if args.only_chrom and not args.only_chrom == ss_rec.chrom:
continue
# Validate summary stat record
ret_code = ss_rec.validate_ssrec()
if ret_code:
ss_rec.hm_code = ret_code
strand_counter['Invalid variant for harmonisation'] += 1
# # DEBUG print progress
# if counter % 1000 == 0:
# print(counter + 1)
#
# Load and filter VCF records ------------------------------------------
#
# Skip rows that have code 14 (fail validation)
if not ss_rec.hm_code:
# Get VCF reference variants for this record
vcf_recs = get_vcf_records(
args.vcf.replace("#", ss_rec.chrom),
ss_rec.chrom,
ss_rec.pos)
# Extract the VCF record that matches the summary stat record
vcf_rec, ret_code = exract_matching_record_from_vcf_records(
ss_rec, vcf_recs)
# Set return code when vcf_rec was not found
if ret_code:
ss_rec.hm_code = ret_code
# If vcf record was found, extract some required values
if vcf_rec:
# Get alt allele
vcf_alt = vcf_rec.alt_als[0]
# Set variant information from VCF file
ss_rec.hm_rsid = vcf_rec.id
ss_rec.hm_chrom = vcf_rec.chrom
ss_rec.hm_pos = vcf_rec.pos
ss_rec.hm_other_al = vcf_rec.ref_al
ss_rec.hm_effect_al = vcf_alt
else:
vcf_rec = None
#
# Harmonise variants ---------------------------------------------------
#
# Skip if harmonisation code exists (no VCF record exists or code 14)
if ss_rec.hm_code:
strand_counter['No VCF record found'] += 1
# Harmonise palindromic alleles
elif is_palindromic(ss_rec.other_al, ss_rec.effect_al):
strand_counter['Palindromic variant'] += 1
if args.hm_sumstats:
ss_rec = harmonise_palindromic(ss_rec, vcf_rec)
# Harmonise opposite strand alleles
elif compatible_alleles_reverse_strand(ss_rec.other_al,
ss_rec.effect_al,
vcf_rec.ref_al,
vcf_alt):
strand_counter['Reverse strand variant'] += 1
if args.hm_sumstats:
ss_rec = harmonise_reverse_strand(ss_rec, vcf_rec)
# Harmonise same forward alleles
elif compatible_alleles_forward_strand(ss_rec.other_al,
ss_rec.effect_al,
vcf_rec.ref_al,
vcf_alt):
strand_counter['Forward strand variant'] += 1
if args.hm_sumstats:
ss_rec = harmonise_forward_strand(ss_rec, vcf_rec)
# Should never reach this 'else' statement
else:
sys.exit("Error: Alleles were not palindromic, opposite strand, or "
"same strand!")
# Add harmonisation code to counter
code_counter[ss_rec.hm_code] += 1
#
# Write ssrec to output ------------------------------------------------
#
if args.hm_sumstats:
# Add harmonised other allele, effect allele, eaf, beta, or to output
out_row = OrderedDict()
out_row["hm_varid"] = vcf_rec.hgvs()[0] if vcf_rec and ss_rec.is_harmonised else args.na_rep_out
out_row["hm_rsid"] = ss_rec.hm_rsid if vcf_rec and ss_rec.is_harmonised else args.na_rep_out
out_row["hm_chrom"] = ss_rec.hm_chrom if vcf_rec and ss_rec.is_harmonised else args.na_rep_out
out_row["hm_pos"] = ss_rec.hm_pos if vcf_rec and ss_rec.is_harmonised else args.na_rep_out
out_row["hm_other_allele"] = ss_rec.hm_other_al.str() if vcf_rec and ss_rec.is_harmonised else args.na_rep_out
out_row["hm_effect_allele"] = ss_rec.hm_effect_al.str() if vcf_rec and ss_rec.is_harmonised else args.na_rep_out
out_row["hm_beta"] = ss_rec.beta if ss_rec.beta and ss_rec.is_harmonised else args.na_rep_out
out_row["hm_OR"] = ss_rec.oddsr if ss_rec.oddsr and ss_rec.is_harmonised else args.na_rep_out
out_row["hm_OR_lowerCI"] = ss_rec.oddsr_lower if ss_rec.oddsr_lower and ss_rec.is_harmonised else args.na_rep_out
out_row["hm_OR_upperCI"] = ss_rec.oddsr_upper if ss_rec.oddsr_upper and ss_rec.is_harmonised else args.na_rep_out
out_row["hm_eaf"] = ss_rec.eaf if ss_rec.eaf and ss_rec.is_harmonised else args.na_rep_out
out_row["hm_code"] = ss_rec.hm_code
# Add other data from summary stat file
for key in ss_rec.data:
value = ss_rec.data[key] if ss_rec.data[key] else args.na_rep_out
out_row[key] = str(value)
# Write header
if not header_written:
outline = args.out_sep.join([str(x) for x in out_row.keys()]) + "\n"
out_handle.write(outline.encode("utf-8"))
header_written = True
# Write row
outline = args.out_sep.join([str(x) for x in out_row.values()]) + "\n"
out_handle.write(outline.encode("utf-8"))
# Close output handle
if args.hm_sumstats:
out_handle.close()
# Write strand_count stats to file
if args.strand_counts:
with open_gzip(args.strand_counts, rw='wb') as out_h:
for key in sorted(strand_counter.keys()):
out_h.write('{0}\t{1}\n'.format(key, strand_counter[key]).encode('utf-8'))
# Write outcome code stats to file
code_table = {
1: 'Palindromic; Infer strand; Forward strand; Correct orientation; Already harmonised',
2: 'Palindromic; Infer strand; Forward strand; Flipped orientation; Requires harmonisation',
3: 'Palindromic; Infer strand; Reverse strand; Correct orientation; Already harmonised',
4: 'Palindromic; Infer strand; Reverse strand; Flipped orientation; Requires harmonisation',
5: 'Palindromic; Assume forward strand; Correct orientation; Already harmonised',
6: 'Palindromic; Assume forward strand; Flipped orientation; Requires harmonisation',
7: 'Palindromic; Assume reverse strand; Correct orientation; Already harmonised',
8: 'Palindromic; Assume reverse strand; Flipped orientation; Requires harmonisation',
9: 'Palindromic; Drop palindromic; Will not harmonise',
10: 'Forward strand; Correct orientation; Already harmonised',
11: 'Forward strand; Flipped orientation; Requires harmonisation',
12: 'Reverse strand; Correct orientation; Already harmonised',
13: 'Reverse strand; Flipped orientation; Requires harmonisation',
14: 'Required fields are not known; Cannot harmonise',
15: 'No matching variants in reference VCF; Cannot harmonise',
16: 'Multiple matching variants in reference VCF (ambiguous); Cannot harmonise',
17: 'Palindromic; Infer strand; EAF or reference VCF AF not known; Cannot harmonise',
18: 'Palindromic; Infer strand; EAF < --maf_palin_threshold; Will not harmonise' }
if args.hm_statfile:
with open_gzip(args.hm_statfile, 'wb') as out_h:
out_h.write('hm_code\tcount\tdescription\n'.encode('utf-8'))
for key in sorted(code_counter.keys()):
out_h.write('{0}\t{1}\t{2}\n'.format(key,
code_counter[key],
code_table[key]).encode('utf-8') )
print("Done!")
return 0
def parse_args():
""" Parse command line args using argparse.
"""
parser = argparse.ArgumentParser(description="Summary statistic harmoniser")
# Input file args
infile_group = parser.add_argument_group(title='Input files')
infile_group.add_argument('--sumstats', metavar="<file>",
help=('GWAS summary statistics file'), type=str,
required=True)
infile_group.add_argument('--vcf', metavar="<file>",
help=('Reference VCF file. Use # as chromosome wildcard.'), type=str, required=True)
# Output file args
outfile_group = parser.add_argument_group(title='Output files')
outfile_group.add_argument('--hm_sumstats', metavar="<file>",
help=("Harmonised sumstat output file (use 'gz' extension to gzip)"), type=str)
outfile_group.add_argument('--hm_statfile', metavar="<file>",
help=("Statistics from harmonisation process output file. Should only be used in conjunction with --hm_sumstats."), type=str)
outfile_group.add_argument('--strand_counts', metavar="<file>",
help=("Output file showing number of variants that are forward/reverse/palindromic"), type=str)
# Harmonisation mode
mode_group = parser.add_argument_group(title='Harmonisation mode')
mode_group.add_argument('--palin_mode', metavar="[infer|forward|reverse|drop]",
help=('Mode to use for palindromic variants:\n'
'(a) infer strand from effect-allele freq, '
'(b) assume forward strand, '
'(c) assume reverse strand, '
'(d) drop palindromic variants'),
choices=['infer', 'forward', 'reverse', 'drop'],
type=str)
# Infer strand specific options
infer_group = parser.add_argument_group(title='Strand inference options',
description='Options that are specific to strand inference (--palin_mode infer)')
infer_group.add_argument('--af_vcf_field', metavar="<str>",
help=('VCF info field containing alt allele freq (default: AF_NFE)'),
type=str, default="AF_NFE")
infer_group.add_argument('--infer_maf_threshold', metavar="<float>",
help=('Max MAF that will be used to infer palindrome strand (default: 0.42)'),
type=float, default=0.42)
# Global column args
incols_group = parser.add_argument_group(title='Input column names')
incols_group.add_argument('--chrom_col', metavar="<str>",
help=('Chromosome column'), type=str, required=True)
incols_group.add_argument('--pos_col', metavar="<str>",
help=('Position column'), type=str, required=True)
incols_group.add_argument('--effAl_col', metavar="<str>",
help=('Effect allele column'), type=str, required=True)
incols_group.add_argument('--otherAl_col', metavar="<str>",
help=('Other allele column'), type=str, required=True)
incols_group.add_argument('--beta_col', metavar="<str>",
help=('beta column'), type=str)
incols_group.add_argument('--or_col', metavar="<str>",
help=('Odds ratio column'), type=str)
incols_group.add_argument('--or_col_lower', metavar="<str>",
help=('Odds ratio lower CI column'), type=str)
incols_group.add_argument('--or_col_upper', metavar="<str>",
help=('Odds ratio upper CI column'), type=str)
incols_group.add_argument('--eaf_col', metavar="<str>",
help=('Effect allele frequency column'), type=str)
# Global other args
other_group = parser.add_argument_group(title='Other args')
other_group.add_argument('--only_chrom', metavar="<str>",
help=('Only process this chromosome'), type=str)
other_group.add_argument('--in_sep', metavar="<str>",
help=('Input file column separator [tab|space|comma|other] (default: tab)'),
type=str, default='tab')
other_group.add_argument('--out_sep', metavar="<str>",
help=('Output file column separator [tab|space|comma|other] (default: tab)'),
type=str, default='tab')
other_group.add_argument('--na_rep_in', metavar="<str>",
help=('How NA are represented in the input file (default: "")'),
type=str, default="")
other_group.add_argument('--na_rep_out', metavar="<str>",
help=('How to represent NA values in output (default: "")'),
type=str, default="")
other_group.add_argument('--chrom_map', metavar="<str>",
help=('Map summary stat chromosome names, e.g. `--chrom_map 23=X 24=Y`'),
type=str, nargs='+')
# Parse arguments
args = parser.parse_args()
# Convert input/output separators
args.in_sep = convert_arg_separator(args.in_sep)
args.out_sep = convert_arg_separator(args.out_sep)
# Assert that at least one of --hm_sumstats, --strand_counts is selected
assert any([args.hm_sumstats, args.strand_counts]), \
"Error: at least 1 of --hm_sumstats, --strand_counts must be selected"
# Assert that --hm_statfile is only ever used in conjunction with --hm_sumstats
if args.hm_statfile:
assert args.hm_sumstats, \
"Error: --hm_statfile must only be used in conjunction with --hm_sumstats"
# Assert that mode is selected if doing harmonisation
if args.hm_sumstats:
assert args.palin_mode, \
"Error: '--palin_mode' must be used with '--hm_sumstats'"
# Assert that inference specific options are supplied
if args.palin_mode == 'infer':
assert all([args.af_vcf_field, args.infer_maf_threshold, args.eaf_col]), \
"Error: '--af_vcf_field', '--infer_maf_threshold' and '--eaf_col' must be used with '--palin_mode infer'"
# Assert that OR_lower and OR_upper are both present if either is given
if any([args.or_col_lower, args.or_col_upper]):
assert all([args.or_col_lower, args.or_col_upper]), \
"Error: '--or_col_lower' and '--or_col_upper' must be used together"
# Parse chrom_map
if args.chrom_map:
try:
chrom_map_d = dict([pair.split('=') for pair in args.chrom_map])
args.chrom_map = chrom_map_d
except ValueError:
assert False, \
'Error: --chrom_map must be in the format `--chrom_map 23=X 24=Y`'
return args
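# Example invocation (illustrative; the file names and column names below are
# placeholders, not values required by the tool):
#   python main.py \
#     --sumstats gwas.tsv.gz --vcf reference.chr#.vcf.gz \
#     --hm_sumstats harmonised.tsv.gz --hm_statfile hm_codes.tsv \
#     --chrom_col chromosome --pos_col base_pair_location \
#     --effAl_col effect_allele --otherAl_col other_allele \
#     --beta_col beta --eaf_col effect_allele_frequency \
#     --palin_mode infer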
def convert_arg_separator(s):
''' Converts [tab|space|comma|other] to a variable
'''
if s == 'tab':
return '\t'
elif s == 'space':
return ' '
elif s == 'comma':
return ','
else:
return s
def exract_matching_record_from_vcf_records(ss_rec, vcf_recs):
''' Extracts the vcf record that matches the summary stat record.
Args:
ss_rec (SumStatRecord): object containing summary statistic record
vcf_recs (list of VCFRecords): list containing vcf records
Returns:
tuple(
a single VCFRecord or None,
output code
)
'''
# Discard if there are no records
if len(vcf_recs) == 0:
return (None, 15)
# Remove alt alleles that don't match sum stat alleles
for i in range(len(vcf_recs)):
non_matching_alts = find_non_matching_alleles(ss_rec, vcf_recs[i])
for alt in non_matching_alts:
vcf_recs[i] = vcf_recs[i].remove_alt_al(alt)
# Remove records that don't have any matching alt alleles
vcf_recs = [vcf_rec for vcf_rec in vcf_recs if vcf_rec.n_alts() > 0]
# Discard ss_rec if there are no valid vcf_recs
if len(vcf_recs) == 0:
return (None, 15)
# Discard ss_rec if there are multiple records
if len(vcf_recs) > 1:
return (None, 16)
# Given that there is now 1 record, use that
vcf_rec = vcf_recs[0]
# Discard ssrec if there are multiple matching alleles
if vcf_rec.n_alts() > 1:
return (None, 16)
return (vcf_rec, None)
def harmonise_palindromic(ss_rec, vcf_rec):
''' Harmonises palindromic variant
Args:
ss_rec (SumStatRecord): object containing summary statistic record
vcf_rec (VCFRecord): matching vcf record
Returns:
harmonised ss_rec
'''
# Mode: Infer strand mode
if args.palin_mode == 'infer':
# Extract allele frequency if argument is provided
if args.af_vcf_field and args.af_vcf_field in vcf_rec.info:
vcf_alt_af = float(vcf_rec.info[args.af_vcf_field][0])
else:
ss_rec.hm_code = 17
return ss_rec
# Discard if either MAF is greater than threshold
if ss_rec.eaf:
if ( af_to_maf(ss_rec.eaf) > args.infer_maf_threshold or
af_to_maf(vcf_alt_af) > args.infer_maf_threshold ):
ss_rec.hm_code = 18
return ss_rec
else:
ss_rec.hm_code = 17
return ss_rec
# If EAF and alt AF are concordant, then alleles are on forward strand
if afs_concordant(ss_rec.eaf, vcf_alt_af):
# If alleles flipped orientation
if ss_rec.effect_al.str() == vcf_rec.ref_al.str():
ss_rec.flip_beta()
ss_rec.is_harmonised = True
ss_rec.hm_code = 2
return ss_rec
# If alleles in correct orientation
else:
ss_rec.is_harmonised = True
ss_rec.hm_code = 1
return ss_rec
# Else alleles are on the reverse strand
else:
# Take reverse complement of ssrec alleles
ss_rec.revcomp_alleles()
# If alleles flipped orientation
if ss_rec.effect_al.str() == vcf_rec.ref_al.str():
ss_rec.flip_beta()
ss_rec.is_harmonised = True
ss_rec.hm_code = 4
return ss_rec
# If alleles in correct orientation
else:
ss_rec.is_harmonised = True
ss_rec.hm_code = 3
return ss_rec
# Mode: Assume palindromic variants are on the forward strand
elif args.palin_mode == 'forward':
# If alleles flipped orientation
if ss_rec.effect_al.str() == vcf_rec.ref_al.str():
ss_rec.flip_beta()
ss_rec.is_harmonised = True
ss_rec.hm_code = 6
return ss_rec
# If alleles in correct orientation
else:
ss_rec.is_harmonised = True
ss_rec.hm_code = 5
return ss_rec
# Mode: Assume palindromic variants are on the reverse strand
elif args.palin_mode == 'reverse':
# Take reverse complement of ssrec alleles
ss_rec.revcomp_alleles()
# If alleles flipped orientation
if ss_rec.effect_al.str() == vcf_rec.ref_al.str():
ss_rec.flip_beta()
ss_rec.is_harmonised = True
ss_rec.hm_code = 8
return ss_rec
# If alleles in correct orientation
else:
ss_rec.is_harmonised = True
ss_rec.hm_code = 7
return ss_rec
# Mode: Drop palindromic variants
elif args.palin_mode == 'drop':
ss_rec.hm_code = 9
return ss_rec
def harmonise_reverse_strand(ss_rec, vcf_rec):
''' Harmonises reverse strand variant
Args:
ss_rec (SumStatRecord): object containing summary statistic record
vcf_rec (VCFRecord): matching vcf record
Returns:
harmonised ss_rec
'''
# Take reverse complement of ssrec alleles
ss_rec.revcomp_alleles()
# If alleles flipped orientation
if ss_rec.effect_al.str() == vcf_rec.ref_al.str():
ss_rec.flip_beta()
ss_rec.is_harmonised = True
ss_rec.hm_code = 13
return ss_rec
# If alleles in correct orientation
else:
ss_rec.is_harmonised = True
ss_rec.hm_code = 12
return ss_rec
def harmonise_forward_strand(ss_rec, vcf_rec):
''' Harmonises forward strand variant
Args:
ss_rec (SumStatRecord): object containing summary statistic record
vcf_rec (VCFRecord): matching vcf record
Returns:
harmonised ss_rec
'''
# If alleles flipped orientation
if ss_rec.effect_al.str() == vcf_rec.ref_al.str():
ss_rec.flip_beta()
ss_rec.is_harmonised = True
ss_rec.hm_code = 11
return ss_rec
# If alleles in correct orientation
else:
ss_rec.is_harmonised = True
ss_rec.hm_code = 10
return ss_rec
def afs_concordant(af1, af2):
""" Checks whether the allele frequencies of two palindromic variants are
concordant. Concordant if both are >= 0.5 or both are < 0.5.
Args:
af1, af2 (float): Allele frequencies from two datasets
Returns:
Bool: True if concordant
"""
assert isinstance(af1, float) and isinstance(af2, float)
if (af1 >= 0.5 and af2 >= 0.5) or (af1 < 0.5 and af2 < 0.5):
return True
else:
return False
def is_palindromic(A1, A2):
""" Checks if two alleles are palindromic.
Args:
A1, A2 (Seq): Alleles (i.e. other and effect alleles)
"""
return A1.str() == A2.revcomp().str()
def find_non_matching_alleles(sumstat_rec, vcf_rec):
""" For each vcfrec ref-alt pair check whether it matches either the
forward or reverse complement of the sumstat alleles.
Args:
sumstat_rec (SumStatRecord)
vcf_rec (VCFRecord)
Returns:
list of alt alleles to remove
"""
alts_to_remove = []
for ref, alt in vcf_rec.yeild_alleles():
if not compatible_alleles_either_strand(sumstat_rec.other_al,
sumstat_rec.effect_al,
ref,
alt):
alts_to_remove.append(alt)
return alts_to_remove
def compatible_alleles_either_strand(A1, A2, B1, B2):
""" Checks whether alleles are compatible, either on the forward or reverse
strand
Args:
A1, A2 (Seq): Alleles from one source
B1, B2 (Seq): Alleles from another source
Returns:
Boolean
"""
return (compatible_alleles_forward_strand(A1, A2, B1, B2) or
compatible_alleles_reverse_strand(A1, A2, B1, B2))
def compatible_alleles_forward_strand(A1, A2, B1, B2):
""" Checks whether alleles are compatible on the forward strand
Args:
A1, A2 (Seq): Alleles from one source
B1, B2 (Seq): Alleles from another source
Returns:
Boolean
"""
return set([A1.str(), A2.str()]) == set([B1.str(), B2.str()])
def compatible_alleles_reverse_strand(A1, A2, B1, B2):
""" Checks whether alleles are compatible on the forward strand
Args:
A1, A2 (Seq): Alleles from one source
B1, B2 (Seq): Alleles from another source
Returns:
Boolean
"""
return set([A1.str(), A2.str()]) == set([B1.revcomp().str(), B2.revcomp().str()])
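# Worked illustration (comment only): for summary-stat alleles A1=C, A2=T and
# reference alleles B1=G, B2=A, the forward-strand check fails ({C,T} != {G,A}),
# but this reverse-strand check passes, since revcomp(G) = C and revcomp(A) = T.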
def af_to_maf(af):
""" Converts an allele frequency to a minor allele frequency
Args:
af (float or str)
Returns:
float
"""
# Sometimes AF == ".", in these cases, set to 0
try:
af = float(af)
except ValueError:
af = 0.0
if af <= 0.5:
return af
else:
return 1 - af
def get_vcf_records(in_vcf, chrom, pos):
""" Uses tabix to query VCF file. Parses info from record.
Args:
in_vcf (str): vcf file
chrom (str): chromosome
pos (int): base pair position
Returns:
list of VCFRecords
"""
response = list(tabix_query(in_vcf, chrom, pos, pos))
return [VCFRecord(line) for line in response]
def yield_sum_stat_records(inf, sep):
""" Load lines from summary stat file and convert to SumStatRecord class.
Args:
inf (str): input file
sep (str): column separator
Returns:
SumStatRecord
"""
for row in parse_sum_stats(inf, sep):
# Replace chrom with --chrom_map value
chrom = row[args.chrom_col]
if args.chrom_map:
chrom = args.chrom_map.get(chrom, chrom)
# Make sumstat class instance
ss_record = SumStatRecord(chrom,
row[args.pos_col],
row[args.otherAl_col],
row[args.effAl_col],
row.get(args.beta_col, None),
row.get(args.or_col, None),
row.get(args.or_col_lower, None),
row.get(args.or_col_upper, None),
row.get(args.eaf_col, None),
row)
yield ss_record
def parse_sum_stats(inf, sep):
""" Yields a line at a time from the summary statistics file.
Args:
inf (str): input file
sep (str): column separator
Returns:
OrderedDict: {column: value}
"""
with open_gzip(inf, "rb") as in_handle:
# Get header
header = in_handle.readline().decode("utf-8").rstrip().split(sep)
# Assert that all column arguments are contained in header
for arg, value in args.__dict__.items():
if '_col' in arg and value:
assert value in header, \
'Error: --{0} {1} not found in input header'.format(arg, value)
# Iterate over lines
for line in in_handle:
values = line.decode("utf-8").rstrip().split(sep)
# Replace any na_rep_in values with None
values = [value if value != args.na_rep_in else None
for value in values]
# Check we have the correct number of elements
assert len(values) == len(header), 'Error: column length ({0}) does not match header length ({1})'.format(len(values), len(header))
yield OrderedDict(zip(header, values))
def open_gzip(inf, rw="rb"):
""" Returns handle using gzip if gz file extension.
"""
if inf.split(".")[-1] == "gz":
return gzip.open(inf, rw)
else:
return open(inf, rw)
def tabix_query(filename, chrom, start, end):
"""Call tabix and generate an array of strings for each line it returns.
Author: https://github.com/slowkow/pytabix
"""
query = '{}:{}-{}'.format(chrom, start, end)
process = Popen(['tabix', '-f', filename, query], stdout=PIPE)
for line in process.stdout:
yield [s.decode("utf-8") for s in line.strip().split()]
def str2bool(v):
""" Parses argpare boolean input
"""
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == '__main__':
main()
```
|
{
"source": "JDHDEV/recipe-app-api",
"score": 2
}
|
#### File: traveler/tests/test_graphql.py
```python
from graphene_django.utils.testing import GraphQLTestCase
from graphene.test import Client
from app.schema import schema
from django.contrib.auth import get_user_model
from django.test import RequestFactory
from rest_framework import status
from core.models import Spot
def sample_spot(user, **params):
"""Create and return a sample spot"""
defaults = {
'name': 'Sample spot',
'time_minutes': 60,
'price': 20.00
}
defaults.update(params)
return Spot.objects.create(user=user, **defaults)
class PublicGraphQLApiTests(GraphQLTestCase):
"""Test unauthenticated graphql API access"""
GRAPHQL_SCHEMA = schema
def test_auth_required(self):
response = self.query(
'''
query {
}
'''
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class privateGraphQLApiTests(GraphQLTestCase):
"""Test authenticated graphql API access"""
GRAPHQL_SCHEMA = schema
def setUp(self):
self.factory = RequestFactory()
self.user = get_user_model().objects.create_user(
'<EMAIL>',
'<PASSWORD>'
)
def test_allspots_graphql(self):
"""Test retrieving spots with graphql"""
sample_spot(user=self.user)
sample_spot(user=self.user)
request = self.factory.get('graphql/')
# Recall that middleware are not supported. You can simulate a
# logged-in user by setting request.user manually.
request.user = self.user
client = Client(schema)
executed = client.execute(
'''{ allSpots { name } } ''', context=request
)
self.assertIn('data', executed)
data = executed.get('data')
self.assertIn('allSpots', data)
allSpots = data.get('allSpots')
self.assertEqual(allSpots[0].get('name'), "Sample spot")
self.assertEqual(allSpots[1].get('name'), "Sample spot")
```
|
{
"source": "jdheinz/project-ordo_ab_chao",
"score": 2
}
|
#### File: django_website/blog/views.py
```python
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404
from .models import BlogPost
from .forms import BlogPostModelForm
# render initial HTML page listing published blog posts
def blog_post_list_view(request):
qs = BlogPost.objects.all().published() # queryset -> list of python objects
if request.user.is_authenticated:
my_qs = BlogPost.objects.filter(user=request.user)
qs = (qs | my_qs).distinct()
context = {'object_list':qs}
return render(request, 'blog/list.html', context)
# create new blog post
@login_required
def blog_post_create_view(request):
form = BlogPostModelForm(request.POST or None, request.FILES or None)
if form.is_valid():
obj = form.save(commit=False)
obj.user = request.user
obj.save()
form = BlogPostModelForm()
context = {'form':form}
return render(request, 'blog/form.html', context)
# click 'view' on the blog list page to see the details of a single blog post
def blog_post_detail_view(request, slug):
obj = get_object_or_404(BlogPost, slug=slug)
context = {'object':obj}
return render(request, 'blog/detail.html', context)
# blog author can update/edit the blog post they created
@login_required
def blog_post_update_view(request, slug):
obj = get_object_or_404(BlogPost, slug=slug)
form = BlogPostModelForm(request.POST or None, instance=obj)
if form.is_valid():
form.save()
context = {
"form":form,
"title":f"Update {obj.title}",
}
return render(request, 'blog/update.html', context)
# blog author can delete the blog post they created
@login_required
def blog_post_delete_view(request, slug):
obj = get_object_or_404(BlogPost, slug=slug)
if request.method == "POST":
obj.delete()
context = {'object':obj}
return render(request, 'blog/delete.html', context)
```
#### File: django_website/contact_us/views.py
```python
from django.shortcuts import render
# render 'contact us' html page
def contact_page(request):
return render(request, 'contact_us/contact.html')
```
#### File: django_website/display_graphs/neuralNetwork.py
```python
import pandas as pd
import numpy as np
from tensorflow import keras
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
# instance of the neural network to predict future prices
class Neural_Network:
def neural_network(self, n_df):
df = n_df.copy()
df = df.replace('^\s*$', np.nan, regex=True)
#df['itemId'] = df['itemId'].astype(int)
df['listingType'] = pd.get_dummies(df['listingType'])
df['endPrice'] = df['endPrice'].astype(np.float)
df['shippingServiceCost'] = df['shippingServiceCost'].astype(np.float)
#df['shippingServiceCost'] = df['shippingServiceCost'].interpolate()
df['shippingServiceCost'] = df['shippingServiceCost'].fillna(df['shippingServiceCost'].mean())
df['bidCount'] = df['bidCount'].astype(np.float)
#df['bidCount'] = df['bidCount'].interpolate()
df['bidCount'] = df['bidCount'].fillna(df['bidCount'].mean())
df['watchCount'] = df['watchCount'].astype(np.float)
#df['watchCount'] = df['watchCount'].interpolate()
df['watchCount'] = df['watchCount'].fillna(df['watchCount'].mean())
df['returnsAccepted'] = pd.get_dummies(df['returnsAccepted'])
df['handlingTime'] = df['handlingTime'].astype(np.float)
df['sellerUserName'] = pd.get_dummies(df['sellerUserName'])
df['feedbackScore'] = df['feedbackScore'].astype(np.float)
df['positiveFeedbackPercent'] = df['positiveFeedbackPercent'].astype(np.float)
df['topRatedSeller'] = pd.get_dummies(df['topRatedSeller'])
df['endDate'] = pd.get_dummies(df['endDate'])
#print('\nnull values in dataframe are:\n', df.isnull().any())
features_df = df.drop(['itemId','title','endPrice','location','endTime','startTime','endTimeOfDay'], axis=1)
corr = features_df.corr()
#print('\ncorr:\n', corr)
num_of_cols = len(features_df.columns)
#print('\nnumber of feature columns:\n', num_of_cols)
features = features_df.values
target = df.endPrice.values
#print('\ntarget values:\n', target)
#print('\nfeatures values:\n', features)
#print('\ntarget shape:\n', target.shape)
#print('\nfeatures shape:\n', features.shape)
X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.3, random_state=124)
#print('\nTRAIN TEST SPLIT EXECUTED\n')
X_train = MinMaxScaler(feature_range=(-1,1)).fit_transform(X_train)
X_test = MinMaxScaler(feature_range=(-1,1)).fit_transform(X_test)
#print('\nX_train and X_test scaled\n')
y_train = y_train.reshape(-1,1)
y_test = y_test.reshape(-1,1)
y_train = MinMaxScaler(feature_range=(-1,1)).fit_transform(y_train)
y_test = MinMaxScaler(feature_range=(-1,1)).fit_transform(y_test)
y_train = y_train.reshape(-1)
y_test = y_test.reshape(-1)
#print('\nshape of X_train:\n', X_train.shape)
#print('\nshape of X_test:\n', X_test.shape)
#print('\nshape of y_train:\n', y_train.shape)
#print('\nshape of y_test:\n', y_test.shape)
model = keras.Sequential()
'''
input_layer = keras.layers.Dense(16, input_dim=num_of_cols, activation='sigmoid')
model.add(input_layer)
hidden_layer = keras.layers.Dense(num_of_cols, input_dim=16, activation='sigmoid')
model.add(hidden_layer)
output_layer = keras.layers.Dense(1, input_dim=num_of_cols, activation='softmax')
model.add(output_layer)
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_split=0.2, batch_size=32, epochs=100, shuffle=True)
'''
input_layer = keras.layers.Dense(units=16, kernel_initializer='uniform', input_dim=num_of_cols, activation='relu')
model.add(input_layer)
hidden_layer1 = keras.layers.Dense(units=18, kernel_initializer='uniform', activation='relu')
model.add(hidden_layer1)
model.add(keras.layers.Dropout(rate=0.25))
hidden_layer2 = keras.layers.Dense(20, kernel_initializer='uniform', activation='relu')
model.add(hidden_layer2)
hidden_layer3 = keras.layers.Dense(24, kernel_initializer='uniform', activation='relu')
model.add(hidden_layer3)
output_layer = keras.layers.Dense(1, kernel_initializer='uniform', activation='sigmoid')
model.add(output_layer)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_split=0.2, batch_size=15, epochs=10, shuffle=2)
predictions = model.predict(X_test, verbose=1, steps=1)
#print('\npredictions shape:\n', predictions.shape)
#print('\npredictions values:\n', predictions)
pred_nn_df = pd.DataFrame({'predictions':pd.Series(predictions.reshape(-1)),'actual_sell_prices':pd.Series(y_test)})
return pred_nn_df, history
```
#### File: django_website/display_graphs/outOfSample.py
```python
import pandas as pd
# instance of the 'out of sample'/step-forward analysis model to predict future prices
class Oos:
def out_of_sample(self, c_df):
df = c_df.copy()
df['endPrice'] = df['endPrice'].astype(float)
pivot_df = df.pivot_table(index='endDate', columns='listingType', values='endPrice')
pivot_df = pivot_df.interpolate(method='linear', axis=0).ffill().bfill()
#print('\npivot_df:\n', pivot_df)
delta_values = [7,14,21,28]
delta_dict = {}
for offset in delta_values:
delta_dict['delta_{}'.format(offset)] = pivot_df/pivot_df.shift(offset) - 1.0
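# Each delta_<n> frame holds the fractional change in mean end price per
# listing type relative to the value <n> rows (end dates) earlier, e.g.
# delta_7 = price / price.shift(7) - 1.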
#print('delta_7 \'date\' check for null dates:', delta_dict['delta_7'].index.isnull().sum())
#print('delta_14 \'date\' check for null dates:', delta_dict['delta_14'].index.isnull().sum())
#print('delta_21 \'date\' check for null dates:', delta_dict['delta_21'].index.isnull().sum())
#print('delta_28 \'date\' check for null dates:', delta_dict['delta_28'].index.isnull().sum())
melted_dfs = []
for key, delta_df in delta_dict.items():
melted_dfs.append(delta_df.reset_index().melt(id_vars=['endDate'], value_name=key))
'''
for i in melted_dfs:
print(i,'melted_dfs:\n', i.tail())
'''
target_variable = pd.merge(melted_dfs[0], melted_dfs[1], on=['endDate','listingType'])
target_variable = pd.merge(target_variable, melted_dfs[2], on=['endDate','listingType'])
target_variable = pd.merge(target_variable, melted_dfs[3], on=['endDate', 'listingType'])
#print('\ntarget_variable:\n', target_variable)
return target_variable
```
#### File: django_website/display_graphs/views.py
```python
from django.shortcuts import render
from ebaysdk.finding import Connection as finding
import xmltodict
from json import loads, dumps
import pandas as pd
import numpy as np
import datetime
from . import outOfSample, neuralNetwork, mLinearRegression
# create empty dataframe within scope of entire file
content_df = pd.DataFrame()
# do ebay search by keywords and pass to graphs.html, and get predictions
def display_the_graphs(request):
keywords = request.POST.get('search')
api = finding(appid='JohnHein-homepage-PRD-392e94856-07aba7fe', config_file=None, siteid='EBAY-US')
api_request = {'keywords':keywords, 'itemFilter':[{'name':'SoldItemsOnly', 'value':True},],'outputSelector':['SellerInfo']}
response = api.execute('findCompletedItems', api_request)
content = response.content
xml_dict = xmltodict.parse(content)
content_dict = to_dict(xml_dict)
count = content_dict['findCompletedItemsResponse']['searchResult']['@count']
item_dict = content_dict['findCompletedItemsResponse']['searchResult']['item']
print('count:', count)
#print('\nitem_dict:\n', item_dict)
content_df = extract_values(item_dict)
content_df_copy = content_df.copy()
y_values = content_df_copy['endPrice'].tolist()
y_values = [float(i) for i in y_values]
x_values_b = content_df_copy['endTime'].tolist()
x_values = convert_datetime(x_values_b)
#print('\nx_values: ', x_values,'\n')
#print('\ny_values: ', y_values,'\n')
#print('\nx_values count:', len(x_values),'\n')
#print('\ny_values count:', len(y_values),'\n')
#print('\nx_values type:', type(x_values[-1]),'\n')
#print('\ny_values type:', type(y_values[-1]),'\n')
chart1_data = [list(i) for i in zip(x_values, y_values)]
oos = outOfSample.Oos()
df2 = oos.out_of_sample(content_df)
nn = neuralNetwork.Neural_Network()
df3, history = nn.neural_network(content_df)
mlr = mLinearRegression.MultivariateLinearRegression()
df4 = mlr.regression(content_df)
nn_x_values = df3['predictions'].tolist()
nn_y_values = df3['actual_sell_prices'].tolist()
chart2_data = [list(i) for i in zip(nn_x_values, nn_y_values)]
mlr_x_values = df4['predictions'].tolist()
mlr_y_values = df4['actual_sell_prices'].tolist()
chart4_data = [list(i) for i in zip(mlr_x_values, mlr_y_values)]
#print('chart1 data:', chart1_data)
context = {
'response': content_df.to_html(),
'chart1': chart1_data,
'chart4': chart4_data,
'chart2': chart2_data,
'oos_df': df2.to_html(),
'nn_df': df3.to_html(),
'mlr_df': df4.to_html()
}
return render(request, 'display_graphs/graphs.html', context)
# convert ordered dictionary to regular dictionary
def to_dict(input_ordered_dict):
return loads(dumps(input_ordered_dict))
# take ebay response data and put into dataframe
def extract_values(temp_dict):
df = pd.DataFrame(columns=['itemId','title','listingType','endPrice','shippingServiceCost','bidCount','watchCount','returnsAccepted','location','endTime','startTime','handlingTime','sellerUserName','feedbackScore','positiveFeedbackPercent','topRatedSeller'])
a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p = [],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]
#print('\ntype of data:\n', type(temp_dict))
length = len(temp_dict)
#print('\nlength:\n', length)
for index in range(length):
for key, value in temp_dict[index].items():
#print('temp_dict[index][key]:', key)
if key == 'itemId':
a.append(value)
if key == 'title':
b.append(value)
if key == 'sellingStatus':
c.append(temp_dict[index]['sellingStatus']['convertedCurrentPrice']['#text'])
try:
d.append(temp_dict[index]['sellingStatus']['bidCount'])
except KeyError:
d.append(np.nan)
if key == 'shippingInfo':
e.append(temp_dict[index]['shippingInfo']['handlingTime'])
try:
m.append(temp_dict[index]['shippingInfo']['shippingServiceCost']['#text'])
except KeyError:
m.append(np.nan)
if key == 'sellerInfo':
f.append(temp_dict[index]['sellerInfo']['sellerUserName'])
g.append(temp_dict[index]['sellerInfo']['feedbackScore'])
h.append(temp_dict[index]['sellerInfo']['positiveFeedbackPercent'])
n.append(temp_dict[index]['sellerInfo']['topRatedSeller'])
if key == 'location':
i.append(value)
if key == 'listingInfo':
j.append(temp_dict[index]['listingInfo']['endTime'])
l.append(temp_dict[index]['listingInfo']['startTime'])
p.append(temp_dict[index]['listingInfo']['listingType'])
try:
k.append(temp_dict[index]['listingInfo']['watchCount'])
except KeyError:
k.append(np.nan)
if key == 'returnsAccepted':
o.append(value)
df = pd.DataFrame({'itemId':pd.Series(a),'title':pd.Series(b),'listingType':pd.Series(p),'endPrice':pd.Series(c),'shippingServiceCost':pd.Series(m),
'bidCount':pd.Series(d),'watchCount':pd.Series(k),'returnsAccepted':pd.Series(o),
'location':pd.Series(i),'endTime':pd.Series(j),'startTime':pd.Series(l),'handlingTime':pd.Series(e),
'sellerUserName':pd.Series(f),'feedbackScore':pd.Series(g),'positiveFeedbackPercent':pd.Series(h),
'topRatedSeller':pd.Series(n)})
#print('\ndf:\n', df)
#print('\narray a:\n', a)
#print('\narray b:\n', b)
#print('\narray c:\n', c)
#print('\narray d:\n', d)
#print('\narray f:\n', f)
df['endTime'] = pd.to_datetime(df['endTime']) # datetime ISO 8601 format ---> YYYY-MM-DD HH:MM:SS +HH:MM (NOTE: '+HH:MM' is UTC offset)
df['endTimeOfDay'],df['endDate'] = df['endTime'].apply(lambda x:x.time()),df['endTime'].apply(lambda x:x.date())
return df
# convert the datetime for that column in the dataframe
def convert_datetime(arr):
arr2 = []
for i in arr:
dateobj = str(i)
dateobj = dateobj[:19]
arr2.append(int(datetime.datetime.strptime(dateobj, "%Y-%m-%d %H:%M:%S").timestamp())*1000)
#print('convert_datetime ',arr2[-1])
#print('dateobj:', dateobj)
return arr2
```
#### File: django_website/tests/test_views.py
```python
from django.test import TransactionTestCase
from django.test import TestCase
from django.urls import reverse
from home_page.models import Search
from ebaysdk.finding import Connection as finding
class PageTest(TransactionTestCase):
def test_home_page_status_code_1(self):
response = self.client.get('/')
self.assertEquals(response.status_code, 200)
def test_home_page_status_code_2(self):
response = self.client.get('/home/')
self.assertEquals(response.status_code, 200)
def test_home_page_view_url_by_name(self):
response = self.client.get(reverse('home'))
self.assertEquals(response.status_code, 200)
def test_home_page_contains_correct_html(self):
response = self.client.get('/')
self.assertContains(response, 'Please enter key search words below:')
def test_home_page_does_not_contain_incorrect_html(self):
response = self.client.get('/')
self.assertNotContains(
response, 'Hi there! I should not be on the page.')
def test_graphs_page_status_code(self):
response = self.client.get('/graphs/')
self.assertEquals(response.status_code, 200)
def test_graphs_page_view_url_by_name(self):
response = self.client.get(reverse('graphs'))
self.assertEquals(response.status_code, 200)
def test_contact_page_status_code(self):
response = self.client.get('/contact/')
self.assertEquals(response.status_code, 200)
def test_contact_page_view_url_by_name(self):
response = self.client.get(reverse('contact'))
self.assertEquals(response.status_code, 200)
def test_about_page_status_code(self):
response = self.client.get('/about/')
self.assertEquals(response.status_code, 200)
def test_about_page_view_url_by_name(self):
response = self.client.get(reverse('about'))
self.assertEquals(response.status_code, 200)
def test_about_page_contains_correct_html(self):
response = self.client.get('/about/')
self.assertContains(response, 'ordo_ab_chao team members:')
def test_about_page_does_not_contain_incorrect_html(self):
response = self.client.get('/about/')
self.assertNotContains(
response, 'Hi there! I should not be on the page.')
def test_blog_page_status_code(self):
response = self.client.get('/blog/')
self.assertEquals(response.status_code, 200)
def test_blog_page_view_url_by_name(self):
response = self.client.get(reverse('blog'))
self.assertEquals(response.status_code, 200)
def test_aboutWebsite_page_status_code(self):
response = self.client.get('/about website/')
self.assertEquals(response.status_code, 200)
def test_aboutWebsite_page_view_url_by_name(self):
response = self.client.get(reverse('about website'))
self.assertEquals(response.status_code, 200)
def test_directions_page_status_code(self):
response = self.client.get('/directions/')
self.assertEquals(response.status_code, 200)
def test_directions_page_view_url_by_name(self):
response = self.client.get(reverse('directions'))
self.assertEquals(response.status_code, 200)
class SearchModelTest(TestCase):
    def test_keywords_representation(self):
keywords1 = Search(search="1986 Fleer Jordan")
keywords2 = Search(search=1986)
self.assertEquals(str(keywords1), keywords1.search)
self.assertNotEquals(keywords2, keywords2.search)
class TestEbayAPI(TestCase):
def test_ebay_api_request_status_code(self):
api = finding(appid='JohnHein-homepage-PRD-392e94856-07aba7fe', config_file=None, siteid='EBAY-US')
keywords = Search(search="1986 Fleer Jordan PSA 10")
api_request = {'keywords':keywords, 'itemFilter':[{'name':'SoldItemsOnly', 'value':True},]}
response = api.execute('findCompletedItems', api_request)
self.assertEqual(response.status_code, 200)
```
#### File: django_website/home_page/views.py
```python
from django.shortcuts import render
# render 'home page' html
def homepage(request):
return render(request, 'home_page/homepage.html')
```
|
{
"source": "jdhenke/sovereignpy",
"score": 3
}
|
#### File: jdhenke/sovereignpy/main.py
```python
import os, sys, subprocess
from http.server import HTTPServer, BaseHTTPRequestHandler
class Server(BaseHTTPRequestHandler):
# respond to all get requests with the content of this file
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(open("main.py").read().encode("utf8"))
# treat all post requests as an attempt to patch this server,
# using the body of the request as the patch
def do_POST(self):
try:
content_len = int(self.headers.get('Content-Length'))
patch = self.rfile.read(content_len)
self.try_patch(patch)
except Exception as e:
self.send_response(400)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write((str(e)+"\n").encode("utf8"))
return
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write("OK\n".encode("utf8"))
# restart server in place after the response is written
print("Restarting server...")
os.execl(sys.executable, sys.executable, *sys.argv)
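    # A patch can be proposed with something like (hypothetical one-liner, assuming a
    # local clone with one new commit and the server running on localhost:8080):
    #   git format-patch -1 --stdout | curl --data-binary @- http://localhost:8080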
# applies the patch if it passes verification
def try_patch(self, patch):
self.verify(patch)
self.apply(patch)
# raises an exception if the patch should not be applied
def verify(self, patch):
pass # always accept
# applies the patch to the source code in this directory
def apply(self, patch):
process = subprocess.Popen(["git", "am"], stdin=subprocess.PIPE)
process.communicate(patch)
process.wait()
if process.returncode != 0:
raise Exception("git am: return code: %i" % (process.returncode, ))
if __name__ == "__main__":
print("Starting server...")
HTTPServer(("localhost", 8080), Server).serve_forever()
```
|
{
"source": "jdhenke/uap",
"score": 2
}
|
#### File: uap/src/parse.py
```python
from transform import createSparseMatrix
# modify me to return a list or be a generator of your assertions
def getAssertions():
yield "joe", "in", "school"
yield "school", "in", "ma"
createSparseMatrix(getAssertions(), "kb.pickle")
```
#### File: uap/src/server.py
```python
import os, sys, graph, cherrypy
import simplejson as json
'''USAGE: python src/server.py <kb-file> <num-axes> <concepts|assertions> <www-path> <port>'''
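# e.g. (hypothetical arguments): python src/server.py kb.pickle 2,5,10 concepts www 8080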
class Server(object):
def __init__(self, matrix_path, dim_list, node_type):
self.graph = graph.create_graph(matrix_path, dim_list, node_type)
@cherrypy.expose
@cherrypy.tools.json_out()
def get_nodes(self):
        return self.graph.get_nodes()
@cherrypy.expose
@cherrypy.tools.json_out()
def get_edges(self, node, otherNodes):
return self.graph.get_edges(json.loads(node), json.loads(otherNodes))
@cherrypy.expose
@cherrypy.tools.json_out()
def get_related_nodes(self, nodes, numNodes):
return self.graph.get_related_nodes(json.loads(nodes), int(numNodes))
@cherrypy.expose
@cherrypy.tools.json_out()
def get_dimensionality_bounds(self):
return self.graph.get_dimensionality_bounds()
@cherrypy.expose
@cherrypy.tools.json_out()
def get_truth(self, node):
node = json.loads(node)
return self.graph.get_truth(node["concept1"], node["concept2"], node["relation"])
@cherrypy.expose
@cherrypy.tools.json_out()
def get_concepts(self):
return self.graph.get_concepts()
@cherrypy.expose
@cherrypy.tools.json_out()
def get_relations(self):
return self.graph.get_relations()
if __name__ == '__main__':
# parse command line arguments
sm_path, dim_list_str, node_type, www_path, port_str = sys.argv[1:]
dim_list = [int(dim_str) for dim_str in dim_list_str.split(",")]
port = int(port_str)
# configure cherrypy to properly accept requests
cherrypy.config.update({'server.socket_host': '0.0.0.0',
'server.socket_port': port})
Server._cp_config = {
'tools.staticdir.on' : True,
'tools.staticdir.dir' : os.path.abspath(www_path),
'tools.staticdir.index' :\
'index.html' if node_type == 'concepts' else 'index-assertions.html',
}
# start server
cherrypy.quickstart(Server(sm_path, dim_list, node_type))
```
|
{
"source": "jdhenshaw/casa_modules",
"score": 2
}
|
#### File: jdhenshaw/casa_modules/cleaning.py
```python
def run_makedirtycube(vis, imagename, imsize, pixelsize,
phasecenter='', restfreq='', specmode = 'cube',
nchan=-1, width='', start=0,
datacolumn='data', outframe='LSRK', gridder='mosaic',
deconvolver='multiscale', scales=[0,7,21,63],
parallel=False):
"""
Creates a dirty cube
Parameters
----------
vis : casa ms visibility file
input visibilities
imagename : string w/o extension
output file name for the dirty cube. Will be appended with _dirty
imsize : array
array or x,y list for the size of the output image
pixelsize : number
size of the pixels. Hard coded for arcseconds
remaining parameters are those sent to tclean
"""
import os
import masking
import numpy as np
from tasks import tclean, imhead, imstat
#Makes dirty image
dirtyimage = '%s_dirty' %imagename
print '[INFO] Making dirty image: %s' %dirtyimage
tclean(vis = vis,
datacolumn = datacolumn,
imagename = dirtyimage,
imsize = imsize,
cell = str(pixelsize)+'arcsec',
phasecenter = phasecenter,
specmode = specmode,
nchan = nchan,
start = start,
width = width,
outframe = outframe,
restfreq = restfreq,
gridder = gridder,
deconvolver = deconvolver,
scales = scales,
niter = 0,
interactive = False,
parallel = parallel)
#Cleaning up the dir
#print '[INFO] Cleaning output dir.'
#os.system('rm -rf %s.weight' %dirtyimage)
#os.system('rm -rf %s.model' %dirtyimage)
#os.system('rm -rf %s.psf' %dirtyimage)
#os.system('rm -rf %s.sumwt' %dirtyimage)
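# Example call (a sketch only -- the visibility/image names and cube parameters below are
# hypothetical and would need to match your own data):
# run_makedirtycube(vis='calibrated.ms', imagename='mycube', imsize=[512, 512],
#                   pixelsize=0.5, restfreq='230.538GHz', nchan=100, start='50km/s',
#                   width='1km/s')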
def run_makecleancube(vis, imagename, imsize, pixelsize,
phasecenter='', restfreq='', specmode = 'cube',
nchan=-1, width='', start=0,
datacolumn='data', outframe='LSRK', gridder='mosaic',
deconvolver='multiscale', scales=[0,7,21,63],
niter=100000, tp_model = '',usetpmodel=False,
n_cycles=5, nsigma_max = 10, nsigma_min=1,
parallel=False):
"""
Code for staggered non-interactive CLEANing in casa.
This code employs an automasking technique to identify data above a given
threshold. CLEANing commences and stops when this threshold is hit. In
the next iteration the previous CLEAN is used as a model for the next cycle.
The number of steps in the CLEANing process can be finetuned to avoid
divergence.
Note that this code requires a dirty cube to be located in the same
directory as "imagename". This can be produced using run_makedirtycube.
Parameters
----------
vis : casa ms visibility file
input visibilities
imagename : string w/o extension
output file name for the dirty cube. Will be appended with _dirty
imsize : array
array or x,y list for the size of the output image
pixelsize : number
size of the pixels. Hard coded for arcseconds
tp_model : CASA image
single dish model to use for the CLEANing. Must already be tweaked into
a useable format
usetpmodel : bool
Do you want to use this as a model for the CLEANing? default=no - Will
use the previous CLEAN image as an input model
n_cycles : number
number of cycles for the CLEANing
nsigma_max : number
starting threshold for mask creation. Given as an integer multiple of
the rms
nsigma_min : number
end threshold for mask creation. i.e. CLEAN down to nsigma_min * rms
remaining parameters are those sent to tclean
"""
import os
import masking
import numpy as np
from tasks import tclean, imhead, imstat
# define thresholds, from 10 to 1
threshs = np.linspace(nsigma_max, nsigma_min, n_cycles)
dirtyimage = '%s_dirty' %imagename
#Makes mask and cleans
for cycle in range(n_cycles):
print ''
if usetpmodel:
previmage = '%s_cycle%i_tpmodel' %(imagename, cycle-1)
outimage = '%s_cycle%i_tpmodel' %(imagename, cycle)
else:
previmage = '%s_cycle%i' %(imagename, cycle-1)
outimage = '%s_cycle%i' %(imagename, cycle)
print '[INFO] Cleaning cycle %i' %cycle
print '[INFO] Making image: %s' %outimage
print ''
header = imhead(imagename=dirtyimage + '.image', mode='list')
major = header['perplanebeams']['median area beam']['major']['value']
minor = header['perplanebeams']['median area beam']['minor']['value']
beam_area = major*minor
pixel_area = pixelsize**2
beam_pixel_ratio = beam_area/pixel_area
thresh = threshs[cycle]
print ''
print '[INFO] Cycle thresh: %0.2f rms' %thresh
print ''
if cycle == 0:
dirtyimage_ = '%s.image' %dirtyimage
stats = imstat(imagename = dirtyimage_ )
mad = stats['medabsdevmed'][0]
print ''
print '[INFO] Cycle rms: %g Jy/beam' %mad
print ''
mask = masking.make_mask_3d(imagename = dirtyimage,
thresh = thresh*mad,
fl = False,
useimage = True,
pixelmin = beam_pixel_ratio*3,
major = major,
minor = minor,
pixelsize = pixelsize,
line = True,
overwrite_old = False)
if usetpmodel:
startmodel = [tp_model]
else:
startmodel = ''
print '[INFO] No model - okay?'
else:
print ''
previmage_ = '%s.image' %previmage
stats = imstat(imagename=previmage_)
mad = stats['medabsdevmed'][0]
print ''
print '[INFO] Cycle rms: %g Jy/beam' %mad
print ''
mask = masking.make_mask_3d(imagename = previmage,
thresh = thresh*mad,
fl = True,
useimage = False,
pixelmin = beam_pixel_ratio*3,
major = major,
minor = minor,
pixelsize = pixelsize,
line = True,
overwrite_old = False)
if usetpmodel:
startmodel = [tp_model]
else:
startmodel = ['%s.model' %previmage]
print ''
print '[INFO] Using model: %s' %startmodel
print ''
tclean(vis = vis,
datacolumn = datacolumn,
imagename = outimage,
imsize = imsize,
cell = str(pixelsize)+'arcsec',
phasecenter = phasecenter,
specmode = specmode,
nchan = nchan,
start = start,
width = width,
outframe = outframe,
restfreq = restfreq,
gridder = gridder,
deconvolver = deconvolver,
scales = scales,
niter = niter,
threshold = thresh*mad,
interactive = False,
mask = mask,
startmodel = startmodel,
parallel = parallel)
#os.system('rm -rf %s.weight' %outimage)
#os.system('rm -rf %s.model' %outimage)
#os.system('rm -rf %s.psf' %outimage)
#os.system('rm -rf %s.sumwt' %outimage)
#os.system('rm -rf %s.threshmask' %previmage)
#os.system('rm -rf %s.fullmask' %previmage)
#os.system('rm -rf %s.fullmask.nopb' %previmage)
return
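# Example staged-clean call (hypothetical names/values; assumes a matching dirty cube
# named 'mycube_dirty.image' already exists in the working directory):
# run_makecleancube(vis='calibrated.ms', imagename='mycube', imsize=[512, 512],
#                   pixelsize=0.5, restfreq='230.538GHz', n_cycles=5,
#                   nsigma_max=10, nsigma_min=1)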
```
#### File: jdhenshaw/casa_modules/image_analysis.py
```python
from tasks import imstat
def computerms(imagename, lower=0, upper=-1):
"""
Computes the rms over a given range of channels
"""
return imstat(imagename,axes=[0,1])['rms'][lower:upper].mean()
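# e.g. (hypothetical file name): computerms('mycube.image', lower=0, upper=20) averages
# the per-channel rms over the first 20 channels.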
def calc_beam_area_sd(header):
import numpy as np
# beam major FWHM
bmaj = header['beammajor']['value']
# beam minor FWHM
bmin = header['beamminor']['value']
# beam pa
pa = header['beampa']['value']
return (np.pi/(4.*np.log(2))) * bmaj * bmin
def calc_beam_area_perplane(header):
import numpy as np
# beam major FWHM
bmaj = header['perplanebeams']['median area beam']['major']['value']
# beam minor FWHM
bmin = header['perplanebeams']['median area beam']['minor']['value']
return (np.pi/(4.*np.log(2))) * bmaj * bmin
def sdtomodel(tp, dirtycube, convtojypix=True, pbcorr=True):
"""
    prepares a single-dish image for conversion to a model for imaging. Assumes the
    image is already a CASA image file. Also requires a dirty cube of the data you
    intend to merge with.
"""
print ''
print tp
print dirtycube
print ''
# First convert to jy per pix if not already done so
if convtojypix:
jypiximage = imtojypix(tp)
else:
jypiximage = tp
print jypiximage
print ''
print ''
# regrid SD image to same axes as the dirty cube
jypiximage_regrid = regridsd(jypiximage, dirtycube)
if pbcorr:
attenuate(jypiximage_regrid, dirtycube)
def attenuate(jypiximage_regrid, dirtycube):
"""
Attenuate the tp image using the primary beam response of the 12m+7m mosaic.
"""
from tasks import immath
_pbresponse = '%s.pb' %dirtycube
jypiximage_regrid_pb = '%s.pb' %jypiximage_regrid
immath(imagename=[jypiximage_regrid+'.image', _pbresponse],
outfile=jypiximage_regrid_pb+'.image',
expr='IM0*IM1')
def regridsd(jypiximage, dirtycube):
"""
regrids sd to axes of dirty cube
"""
from tasks import imregrid
_dirtycube = '%s.image' %dirtycube
_jypiximage = '%s.image' %jypiximage
jypiximage_regrid = '%s.regrid' %jypiximage
imregrid(imagename=_jypiximage, template=_dirtycube,
output=jypiximage_regrid+'.image')
return jypiximage_regrid
def imtojypix(tp):
"""
Converts a single dish image either in k or in jy/beam to jy/pix using
header information
"""
from tasks import imhead, immath
import numpy as np
_tp = '%s.image' %tp
# First check the units of the SD image
header = imhead(_tp, mode='list')
unit = header['bunit']
restfreq = header['restfreq']
restfreq_ghz = restfreq/1.e9 # to GHz
# beam major FWHM
bmaj = header['beammajor']['value']
# beam minor FWHM
bmin = header['beamminor']['value']
# get pixel size and convert to arcseconds
pixelsize = np.abs(header['cdelt1']) * (60.*60.*360.)/(2.*np.pi)
beamarea = calc_beam_area_sd(header)
pixelarea = pixelsize**2
pixperbeam = beamarea/pixelarea
if (unit == 'K') or (unit=='kelvin') or (unit=='k'):
# convert to jy/beam
jybeamimage = '%s.jybeam' %tp
immath(
imagename=_tp,
expr='8.18249739e-7*{0:.6f}*{0:.6f}*IM0*{1:.6f}*{2:.6f}'.format(
restfreq_ghz, bmaj, bmin),
outfile=jybeamimage+'.image',
)
imhead(jybeamimage+'.image', mode='put', hdkey='bunit', hdvalue='Jy/beam')
else:
jybeamimage = tp
jypiximage = '%s.jypix' %tp
immath(imagename=jybeamimage+'.image',expr='IM0/{0:.6f}'.format(pixperbeam),
outfile=jypiximage+'.image',)
imhead(jypiximage+'.image', mode='put', hdkey='bunit', hdvalue='Jy/pixel')
return jypiximage
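# Typical use of the above (hypothetical names): convert and regrid a total-power cube so
# it can be passed to tclean via startmodel, e.g.
#   sdtomodel(tp='tp_cube', dirtycube='mycube_dirty', convtojypix=True, pbcorr=True)
# which expects 'tp_cube.image' and 'mycube_dirty.image' / 'mycube_dirty.pb' to exist.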
```
|
{
"source": "jdhenshaw/leodis",
"score": 3
}
|
#### File: leodis/acorns/tree_definition.py
```python
import numpy as np
class Tree(object):
def __init__(self, _antecessor, idx=None, acorns=None):
"""
Description
-----------
This is how individual trees are defined and characterised. Output can
be plotted as a dendrogram
Parameters
----------
Examples
--------
Notes
-----
"""
self._acorns = acorns
self._tree_idx = idx
self._number_of_tree_members = None
self._tree_members_idx = None
self._tree_members = None
self._trunk = None
self._branches = None
self._leaves = None
self._sorted_leaves = None
self._crown_width = None
self._cluster_vertices = [[], []]
self._horizontals = [[], []]
self = _get_members(self, _antecessor)
self = _sort_members(self)
self = _dendrogram_positions(self)
@property
def tree_idx(self):
"""
Returns tree index
"""
return self._tree_idx
@property
def tree_members_idx(self):
"""
Returns the indices of the tree members
"""
return self._tree_members_idx
@property
def tree_members(self):
"""
Returns tree members
"""
return self._tree_members
@property
def number_of_tree_members(self):
"""
Returns the number of tree members
"""
return int(self._number_of_tree_members)
@property
def trunk(self):
"""
Returns trunk
"""
return self._trunk
@property
def branches(self):
"""
Returns branches
"""
return self._branches
@property
def leaves(self):
"""
Returns leaves
"""
return self._leaves
@property
def crown_width(self):
"""
Returns maximum width of dendrogram crown
"""
return len(self._leaves)
@property
def cluster_vertices(self):
"""
Returns cluster vertices for plotting
"""
return self._cluster_vertices
@property
def horizontals(self):
"""
Returns cluster vertices for plotting
"""
return self._horizontals
def __repr__(self):
"""
Return a nice printable format for the object. This format will indicate
if the current structure is a leaf_cluster or a branch_cluster, and give
the cluster index.
"""
if self.trunk.leaf_cluster:
return "<< acorns tree; type=leaf_cluster; tree_index={0}; number_of_tree_members={1} >>".format(self.tree_idx, self.number_of_tree_members)
else:
return "<< acorns tree; type=branch_cluster; tree_index={0}; number_of_tree_members={1} >>".format(self.tree_idx, self.number_of_tree_members)
def _get_members(self, _antecessor):
"""
Returns information on the tree members
"""
self._trunk = _antecessor
self._branches = []
self._leaves = []
# Create a temporary list of descendants that will be updated
new_descendants = _antecessor.descendants
self._number_of_tree_members = 1.0
self._number_of_leaves = 0.0
self._number_of_branches = 0.0
self._tree_members = [_antecessor]
self._tree_members_idx = [_antecessor.cluster_idx]
# Cycle through descendants looking for new descendants
while (len(new_descendants) !=0 ):
descendant_list = []
# Loop over descendants
for _descendant in new_descendants:
self._number_of_tree_members+=1.0
self._tree_members_idx.append(_descendant.cluster_idx)
if _descendant.leaf_cluster:
self._number_of_leaves += 1.0
self._tree_members.append(_descendant)
self._leaves.append(_descendant)
else:
self._number_of_branches += 1.0
self._tree_members.append(_descendant)
self._branches.append(_descendant)
# Check to see if the current descendant has any descendants
if (len(_descendant.descendants) !=0 ):
# If there are, add these to the descendant_list
descendant_list.extend(_descendant.descendants)
# Once search for descendants has finished begin a new search based
# on the descendant_list
new_descendants = descendant_list
if (_antecessor.leaf_cluster==True):
self._number_of_leaves = 1.0
self._leaves.append(_antecessor)
return self
def _sort_members(self):
"""
Sorts the tree members.
Notes
-----
Here we want to sort the leaf clusters such that we can plot them as a
dendrogram. The method starts with the brightest leaf in a tree and then
descends the hierarchy checking for leaf siblings along the way.
"""
# Initial sorting
leaf_flux = [leaf.statistics[0][1] for leaf in self.leaves]
sort_idx = np.argsort(np.asarray(leaf_flux))
_leaves = np.array(self.leaves)
_sorted_peak_leaves = list(_leaves[sort_idx])
number_of_leaves = len(self.leaves)
self._sorted_leaves = []
while len(_sorted_peak_leaves) != 0.0:
# Start with the brightest
cluster = _sorted_peak_leaves.pop()
self._sorted_leaves.append(cluster)
# Now descend
while cluster.antecedent is not None:
siblings = cluster.siblings
idx = [(sibling.leaf_cluster) for sibling in siblings]
sortedidxs = np.argsort(idx)[::-1]
siblings = list(np.array(siblings)[sortedidxs])
for sibling in siblings:
# If the sibling is a leaf add it to the sorted list
if sibling.leaf_cluster:
found_sibling = (np.asarray(_sorted_peak_leaves) == sibling)
if np.any(found_sibling):
idx = np.squeeze(np.where(found_sibling == True))
if np.size(idx) == 1:
self._sorted_leaves.append(_sorted_peak_leaves[idx])
_sorted_peak_leaves.pop(idx)
else:
for j in range(np.size(idx)):
self._sorted_leaves.append(_sorted_peak_leaves[idx[j]])
_sorted_peak_leaves.pop(idx[j])
# If however, the sibling is a branch we need to ascend that
# branch to get the order correct
else:
_branch_sibling = sibling
_leaf_list = []
_search_list = _branch_sibling.descendants
idx = [(_descendant.leaf_cluster) for _descendant in _search_list]
sortedidxs = np.argsort(idx)[::-1]
_search_list = [list(np.array(_search_list)[sortedidxs])]
num_descendants = len(_search_list)
branch_found = 1
while branch_found != 0:
branch_found = 0
num_searches = np.shape(np.asarray(_search_list))[0]
for i in range(num_searches):
num_clusters = np.size(np.asarray(_search_list[i]))
for j in range(num_clusters):
if _search_list[i][j].branch_cluster == True:
_branch_descendants = _search_list[i][j].descendants
idx = [(_branch_descendant.leaf_cluster) for _branch_descendant in _branch_descendants]
sortedidxs = np.argsort(idx)[::-1]
_branch_descendants = list(np.array(_branch_descendants)[sortedidxs])
_search_list[i][j] = [descendant for descendant in _branch_descendants]
branch_found += 1
else:
_search_list[i][j] = [_search_list[i][j]]
_search_list[i] = [item for sublist in _search_list[i] for item in sublist]
_leaf_list = [item for sublist in _search_list for item in sublist]
if len(_leaf_list) != 0:
for _cluster in _leaf_list:
found_leaf = (np.asarray(_sorted_peak_leaves) == _cluster)
if np.any(found_leaf):
idx = np.squeeze(np.where(found_leaf == True))
self._sorted_leaves.append(_sorted_peak_leaves[idx])
_sorted_peak_leaves.pop(idx)
cluster = cluster.antecedent
return self
def _dendrogram_positions(self):
# Make lines for plotting a dendrogram
# x locations of the leaf clusters
x_loc = -1.*np.ones(len(self.tree_members))
x_loc = list(x_loc)
for j in range(len(self.tree_members)):
idx = np.where(np.asarray(self._sorted_leaves) == self.tree_members[j])
if np.size(idx) != 0.0:
x_loc[j] = idx[0][0]+1.0
# Find x_locations of clusters
while np.any(np.asarray(x_loc) == -1.0):
for i in range(len(x_loc)):
if x_loc[i] != -1.0:
_sibling_pos = [x_loc[i]]
if self.tree_members[i].siblings is not None:
for sibling in self.tree_members[i].siblings:
found_sibling = (np.asarray(self.tree_members) == sibling)
idx = np.where(found_sibling == True)
for j in range(np.size(idx)):
_sibling_pos.append(x_loc[idx[0][j]])
if not np.any(np.asarray(_sibling_pos) == -1.0):
x_loc_add = np.mean(_sibling_pos)
idx = np.squeeze(np.where(np.asarray(self.tree_members) == self.tree_members[i].antecedent))
x_loc[idx] = x_loc_add
# Generate lines for dendrogram
for i in range(len(self.tree_members)):
self._cluster_vertices[0].append(np.array([x_loc[i], x_loc[i]]))
if self.tree_members[i] == self.trunk:
if len(self.trunk.descendants) != 0.0:
self._cluster_vertices[1].append(np.array([self.trunk.statistics[0][0], self.trunk.descendants[0].merge_level]))
# find the descendants positions in x_loc
x_loc_descendants = []
for descendant in self.tree_members[i].descendants:
found_descendant = (np.asarray(self.tree_members) == descendant)
idx = np.where(found_descendant == True)
for j in range(np.size(idx)):
x_loc_descendants.append(x_loc[idx[0][j]])
range_x = np.ptp(x_loc_descendants)
self._horizontals[0].append(np.array([np.min(np.asarray(x_loc_descendants)) ,np.min(np.asarray(x_loc_descendants))+range_x]))
self._horizontals[1].append(np.array([self.trunk.descendants[0].merge_level, self.trunk.descendants[0].merge_level]))
else:
self._cluster_vertices[1].append(np.array([self.trunk.statistics[0][0], self.trunk.statistics[0][1]]))
self._horizontals[0].append(np.array([0.0,0.0]))
self._horizontals[1].append(np.array([0.0,0.0]))
elif self.tree_members[i].leaf_cluster == True:
self._cluster_vertices[1].append(np.array([self.tree_members[i].merge_level, self.tree_members[i].statistics[0][1]]))
self._horizontals[0].append(np.array([0.0,0.0]))
self._horizontals[1].append(np.array([0.0,0.0]))
else:
self._cluster_vertices[1].append(np.array([self.tree_members[i].merge_level, self.tree_members[i].descendants[0].merge_level]))
# find the descendants positions in x_loc
x_loc_descendants = []
for descendant in self.tree_members[i].descendants:
found_descendant = (np.asarray(self.tree_members) == descendant)
idx = np.where(found_descendant == True)
for j in range(np.size(idx)):
x_loc_descendants.append(x_loc[idx[0][j]])
range_x = np.ptp(x_loc_descendants)
self._horizontals[0].append(np.array([np.min(np.asarray(x_loc_descendants)) ,np.min(np.asarray(x_loc_descendants))+range_x]))
self._horizontals[1].append(np.array([self.tree_members[i].descendants[0].merge_level, self.tree_members[i].descendants[0].merge_level]))
return self
```
|
{
"source": "jdhenshaw/SCOUSEpy",
"score": 3
}
|
#### File: SCOUSEpy/scousepy/model_housing.py
```python
import numpy as np
class saa(object):
"""
Stores all the information regarding individual spectral averaging areas
(SAA)
Parameters
----------
coordinates : array
The coordinates of the SAA in pixel units. In (x,y).
spectrum : array
The spectrum
index : number
The index of the spectral averaging area (used as a key for saa_dict)
to_be_fit : bool
Indicating whether or not the SAA is to be fit or not
scouseobject : instance of the scouse class
Attributes
----------
index : number
The index of the SAA
coordinates : array
The coordinates of the SAA in pixel units
spectrum : array
The spectrum
rms : number
An estimate of the rms
indices : array
The indices of individual pixels located within the SAA
indices_flat : array
The same indices but flattened according to the shape of the cube
to_be_fit : bool
Indicating whether or not the SAA is to be fit or not
individual_spectra : dictionary
This is a dictionary which will contain a number of instances of the
"individual_spectrum" class. individual spectra are initially housed
in the saa object. After the fitting has completed these will be
moved into a single dictionary and this dictionary will be removed
"""
def __init__(self, coordinates, spectrum, index=None, to_be_fit=False, scouseobject=None):
self.index=index
self.coordinates=coordinates
self.spectrum=spectrum
self.rms=get_rms(self,scouseobject)
self.indices=None
self.indices_flat=None
self.to_be_fit=to_be_fit
self.model=None
self.individual_spectra=None
def __repr__(self):
"""
Return a nice printable format for the object.
"""
return "< scousepy SAA; index={0} >".format(self.index)
def add_indices(self, indices, shape):
"""
Adds indices contained within the SAA
Parameters
----------
indices : ndarray
An array containing the indices that are to be added to the SAA
shape : ndarray
X, Y shape of the data cube. Used to flatten the indices
"""
self.indices=np.array(indices, dtype='int')
self.indices_flat=np.ravel_multi_index(indices.T, shape)
def add_saamodel(self, model):
"""
Adds model solution as described by saa model to the SAA
Parameters
----------
model : instance of the saamodel class
"""
self.model=model
class individual_spectrum(object):
"""
Stores all the information regarding individual spectra
Parameters
----------
coordinates : array
The coordinates of the spectrum in pixel units. In (x,y).
spectrum : array
The spectrum
index : number
The flattened index of the spectrum
scouseobject : instance of the scouse class
saa_dict_index : number
Index of the saa_dict. This will be used along with saaindex to find the
parent model solution to provide initial guesses for the fitter
saaindex : number
Index of the SAA. Used to locate a given spectrum's parent SAA
Attributes
----------
template : instance of pyspeckit's Spectrum class
A template spectrum updated during fitting
model : instance of the indivmodel class
The final best-fitting model solution as determined in stage 4
model_from_parent : instance of the indivmodel class
The best-fitting solution as determined from using the SAA model as
input guesses
model_from_dspec : instance of the indivmodel class
The best-fitting model solution derived from derivative spectroscopy
model_from_spatial : instance of the indivmodel class
The best-fitting model solution derived from spatial fitting
model_from_manual : instance of the indivmodel class
The best-fitting model solution as fit manually during stage 6 of the
process
decision : string
        The decision made during stage 4 of the process, i.e. whether or not the
        spectrum was refit.
"""
def __init__(self, coordinates, spectrum, index=None, scouseobject=None,
saa_dict_index=None, saaindex=None):
self.index=index
self.coordinates=coordinates
self.spectrum=spectrum
self.rms=get_rms(self, scouseobject)
self.saa_dict_index=saa_dict_index
self.saaindex=saaindex
self.template=None
self.guesses_from_parent=None
self.guesses_updated=None
self.model=None
self.model_from_parent=None
self.model_from_dspec=None
self.model_from_spatial=None
self.model_from_manual=None
self.decision=None
def __repr__(self):
"""
Return a nice printable format for the object.
"""
return "<< scousepy individual spectrum; index={0} >>".format(self.index)
def add_model(self, model):
"""
Adds model solution
Parameters
----------
model : instance of the indivmodel class
"""
if model.method=='parent':
self.model_from_parent=model
elif model.method=='dspec':
self.model_from_dspec=model
elif model.method=='spatial':
self.model_from_spatial=[model]
elif model.method=='manual':
self.model_from_manual=model
else:
pass # error here?
def get_rms(self, scouseobject):
"""
Calculates rms value. Used by both saa and individual_spectrum classes
Parameters
----------
scouseobject : instance of the scouse class
"""
from scousepy.noisy import getnoise
noisy=getnoise(scouseobject.x, self.spectrum)
if np.isfinite(noisy.rms):
rms = noisy.rms
else:
# if the spectrum violates these conditions then simply set the rms to
# the value measured over the entire cube
rms = scouseobject.rms_approx
return rms
class basemodel(object):
"""
Base model for scouse. These properties are shared by both SAA model
solutions and individual spectra solutions
Attributes
----------
fittype : string
Model used during fitting (e.g. Gaussian)
parnames : list
A list containing the parameter names in the model (corresponds to those
used in pyspeckit)
ncomps : Number
Number of components in the model solution
params : list
The parameter estimates
errors : list
The uncertainties on each measured parameter
rms : Number
The measured rms value
residstd : Number
The standard deviation of the residuals
chisq : Number
The chi squared value
dof : Number
The number of degrees of freedom
redchisq : Number
The reduced chi squared value
AIC : Number
The akaike information criterion
fitconverge : bool
Indicates whether or not the fit has converged
"""
def __init__(self):
self.fittype=None
self.parnames=None
self.ncomps=None
self.params=None
self.errors=None
self.rms=None
self.residstd=None
self.chisq=None
self.dof=None
self.redchisq=None
self.AIC=None
self.fitconverge=None
class saamodel(basemodel):
"""
This houses the model information for spectral averaging areas. It uses
the base model but includes some parameters that are unique to SAAs.
Parameters
----------
modeldict : dictionary
This is a dictionary containing the model parameters that we want to
add to the SAA. This is output from scousefitter.
Attributes
----------
SNR : number
This is the signal-to-noise ratio set during the fitting process in
scousefitter
kernelsize : number
This is the size of the kernel used for the derivative spectroscopy
method
manual : bool
This indicates whether a manual fit was performed
"""
def __init__(self, modeldict):
        super(saamodel, self).__init__()  # initialise the shared basemodel attributes
self.SNR=None
self.alpha=None
self.set_attributes(modeldict)
def __repr__(self):
"""
Return a nice printable format for the object.
"""
return "< scousepy saamodel_solution; fittype={0}; ncomps={1} >".format(self.fittype, self.ncomps)
def set_attributes(self, modeldict):
"""
Sets the attributes of the SAA model
"""
for parameter, value in modeldict.items():
setattr(self, parameter, value)
class indivmodel(basemodel):
"""
This houses the model information for individual spectra. It uses the base
model and includes some parameters that are unique to individual spectra.
Parameters
----------
modeldict : dictionary
This is a dictionary containing the model parameters that we want to
add to the individual spectrum.
"""
def __init__(self, modeldict):
        super(indivmodel, self).__init__()  # initialise the shared basemodel attributes
self.method=None
self.set_attributes(modeldict)
def __repr__(self):
"""
Return a nice printable format for the object.
"""
return "< scousepy model_solution; fittype={0}; ncomps={1} >".format(self.fittype, self.ncomps)
def set_attributes(self, modeldict):
"""
Sets the attributes of the SAA model
"""
for parameter, value in modeldict.items():
setattr(self, parameter, value)
```
|
{
"source": "jdherg/emojificate",
"score": 3
}
|
#### File: emojificate/emojificate/__main__.py
```python
import sys
from .filter import emojificate
def display_help():
print("emojificate.py -- turns text with emoji into text with accessible emoji")
if __name__ == "__main__":
line = " ".join(sys.argv[1:])
if line:
print(emojificate(line))
else:
display_help()
sys.exit(1)
```
#### File: emojificate/templatetags/emojificate.py
```python
from django.template import Library, Node
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape
from ..filter import emojificate
register = Library()
@register.filter("emojificate", needs_autoescape=True)
def emojificate_filter(content, autoescape=True):
"Convert any emoji in a string into accessible content."
# return mark_safe(emojificate(content))
if autoescape:
esc = conditional_escape
else:
esc = lambda x: x
return mark_safe(emojificate(esc(content)))
@register.tag("emojified")
def do_emojified(parser, token):
nodelist = parser.parse(("endemojified",))
parser.delete_first_token()
return EmojifiedNode(nodelist)
class EmojifiedNode(Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
output = self.nodelist.render(context)
return emojificate(output)
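# Template usage sketch (assumes the app providing this templatetags module is listed in
# INSTALLED_APPS; 'post.body' is a placeholder variable):
#   {% load emojificate %}
#   {{ post.body|emojificate }}
#   {% emojified %}{{ post.body }}{% endemojified %}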
```
|
{
"source": "jdherg/octopus-holdings",
"score": 3
}
|
#### File: octopus-holdings/emoji/emoji_catalog.py
```python
from collections import Counter, defaultdict, namedtuple
import re
from typing import Sequence, Union
from emoji.config import EMOJI_CONFIG
Emoji = namedtuple(
"Emoji",
["literal", "name", "code_points", "status", "group", "subgroup", "version"],
)
EMOJI_LINE = re.compile(
r"^(?P<code_points>([0-9A-Fa-f]+ )+) *; (?P<status>\S+) +# (?P<emoji>\S+) E(?P<version>\d+\.\d+) (?P<name>.*)$",
)
GROUP_LINE = re.compile(
r"^# group: (?P<group_name>.+)$",
)
GROUP_COUNT_LINE = re.compile(
r"^# (?P<group_name>.+) subtotal:\s+(?P<group_count>\d+)$",
)
STATUS_COUNT_LINE = re.compile(
r"^# (?P<status_name>(fully-qualified|minimally-qualified|unqualified|component)) : (?P<status_count>\d+)$",
)
SUBGROUP_LINE = re.compile(
r"^# subgroup: (?P<subgroup_name>.+)$",
)
def parse_unicode_test_file(
lines: list[str],
) -> tuple[
dict[str, Emoji],
dict[str, int],
dict[str, int],
dict[str, list[str]],
dict[str, list[str]],
]:
emoji: dict[str, Emoji] = {}
group_counts: dict[str, int] = {}
status_counts: dict[str, int] = {}
group = ""
subgroup = ""
names: dict[str, list[str]] = defaultdict(list)
variants: dict[str, list[str]] = defaultdict(list)
for line in [line.strip() for line in lines if line.strip() != ""]:
if line[0] == "#":
if match := GROUP_LINE.match(line):
group = match.group("group_name")
elif match := GROUP_COUNT_LINE.match(line):
assert group == match.group("group_name")
group_counts[group] = int(match.group("group_count"))
elif match := STATUS_COUNT_LINE.match(line):
status_counts[match.group("status_name")] = int(
match.group("status_count")
)
elif match := SUBGROUP_LINE.match(line):
subgroup = match.group("subgroup_name")
elif match := EMOJI_LINE.match(line):
literal = match.group("emoji")
name = match.group("name")
names[name].append(literal)
if len(parts := name.split(":")) > 1:
base, _ = parts
if base in names:
variants[names[base][0]].append(literal)
emoji[literal] = Emoji(
code_points=match.group("code_points").strip().split(),
group=group,
literal=literal,
name=name,
status=match.group("status"),
subgroup=subgroup,
version=match.group("version"),
)
return (emoji, group_counts, status_counts, variants, names)
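# For reference, a data line in the emoji-test.txt format matched by EMOJI_LINE, e.g.
#   1F600 ; fully-qualified # 😀 E1.0 grinning face
# becomes Emoji(literal='😀', name='grinning face', code_points=['1F600'],
# status='fully-qualified', version='1.0'), tagged with the current group/subgroup.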
UNICODE_TEST_FILE_PATH = EMOJI_CONFIG["unicode_test_file"]
with open(UNICODE_TEST_FILE_PATH, "r") as unicode_emoji_test_file:
PARSED_EMOJI_METADATA = parse_unicode_test_file(unicode_emoji_test_file.readlines())
EMOJI_CATALOG, _, _, EMOJI_VARIANTS, EMOJI_BY_NAME = PARSED_EMOJI_METADATA
```
#### File: octopus-holdings/emoji/image_catalog.py
```python
import itertools
import json
import os
import re
from emoji.config import EMOJI_CONFIG
from emoji.emoji_catalog import EMOJI_CATALOG
def load_emoji_set_svgs(set_config):
emoji_to_svgs = dict()
set_emoji = set()
external_prefix = ""
if set_config["external_path"]:
external_prefix = set_config["external_path"] + "/"
svg_folder = set_config["internal_path"]
svg_filename_regex = re.compile(set_config["svg_filename_regex"])
svg_folder_filenames = os.listdir(svg_folder)
for filename in svg_folder_filenames:
if svg_filename_regex.search(filename):
svg_codepoint = svg_filename_regex.search(filename).group()
if "alias_map" in set_config:
emoji = svg_codepoint
else:
emoji = "".join(
[chr(int(char, 16)) for char in re.split(r"[\-_]", svg_codepoint)]
)
emoji_path = external_prefix + filename
emoji_to_svgs[emoji] = emoji_path
set_emoji.add(emoji)
return emoji_to_svgs, set_emoji
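# A set_config entry is expected to provide keys along these lines (values here are
# hypothetical; the real ones live in EMOJI_CONFIG["set_config"]):
#   {"internal_path": "static/emoji/twemoji", "external_path": "/static/emoji/twemoji",
#    "svg_filename_regex": r"[0-9a-fA-F]+(?:[-_][0-9a-fA-F]+)*(?=\.svg$)"}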
def load():
set_config = EMOJI_CONFIG["set_config"]
result: dict[str, dict[str, str]] = {}
for name, config in set_config.items():
if config.get("alias_map") is not None:
continue
emoji_to_svgs, _ = load_emoji_set_svgs(config)
result[name] = emoji_to_svgs
return result
IMAGE_CATALOGS = load()
def load_all_emoji_svgs():
all_resolved_emoji = set()
emoji_to_svgs = dict()
for emoji_set_name in EMOJI_CONFIG["priority"]:
emoji_to_svgs[emoji_set_name], set_emoji = load_emoji_set_svgs(
EMOJI_CONFIG["set_config"][emoji_set_name]
)
all_resolved_emoji |= set_emoji
return all_resolved_emoji, emoji_to_svgs
ALL_RESOLVED_EMOJI, EMOJI_TO_SVGS = load_all_emoji_svgs()
def main():
unicode_emoji: set[str] = set(EMOJI_CATALOG.keys())
svg_emoji: set[str] = set(
itertools.chain(*[x.keys() for x in IMAGE_CATALOGS.values()])
)
svg_files: set[str] = set(
itertools.chain(
*[[(k, v) for k, v in c.items()] for c in IMAGE_CATALOGS.values()]
)
)
# svg_emoji = svg_emoji | {e + "\uFE0F" for e in svg_emoji}
present_emoji = [EMOJI_CATALOG[e] for e in unicode_emoji & svg_emoji]
missing_emoji = [EMOJI_CATALOG[e] for e in unicode_emoji - svg_emoji]
missing_svg = [v for k, v in svg_files if k not in EMOJI_CATALOG]
print(f"present {len(present_emoji)}")
print(f"missing {len(missing_emoji)}")
print(f"missing svg {len(missing_svg)}")
from collections import Counter
c = Counter(map(lambda e: e.version, missing_emoji))
print(sorted(c.items(), key=lambda i: float(i[0])))
c = Counter(map(lambda e: e.status, missing_emoji))
print(sorted(c.items(), key=lambda i: i[0]))
print(missing_svg[:10])
if __name__ == "__main__":
main()
```
#### File: emoji/tests/test_alias_catalog.py
```python
import unittest
from emoji.alias_catalog import (
ALIASES_TO_EMOJI,
derive_codepoint_aliases,
derive_skin_tone_aliases,
)
class TestCodepointDeriver(unittest.TestCase):
def test_single_codepoint(self):
octopus = "🐙"
aliases = derive_codepoint_aliases(octopus)
self.assertIn(octopus, aliases)
self.assertIn("1f419", aliases)
self.assertIn("u1f419", aliases)
class TestSkinToneDeriver(unittest.TestCase):
def test_single(self):
aliases = derive_skin_tone_aliases("👋", "wave")
self.assertIn("wave:skin-tone-medium", aliases)
def test_double(self):
aliases = derive_skin_tone_aliases("🧑🤝🧑", "couple")
self.assertIn("couple:skin-tone-light:skin-tone-light", aliases)
class TestAliasesToEmoji(unittest.TestCase):
def test_octopus(self):
self.assertEqual(ALIASES_TO_EMOJI["octopus"], "🐙")
def test_wave(self):
self.assertEqual(ALIASES_TO_EMOJI.get("wave"), "👋")
self.assertEqual(ALIASES_TO_EMOJI.get("wave:skin-tone-medium"), "👋🏽")
def test_chipmunk(self):
# One with FE0F, one without
self.assertIn(ALIASES_TO_EMOJI.get("chipmunk"), ("🐿️", "🐿"))
```
|
{
"source": "jdherman/eci273",
"score": 2
}
|
#### File: jdherman/eci273/L14-hedging-contourplots.py
```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize, differential_evolution
import seaborn as sns
sns.set_style('whitegrid')
# Set some parameters
K = 975 # capacity, TAF
D = 150 # target yield, TAF
a = 1
b = 2 # cost function parameters
# data setup
Q = np.loadtxt('data/FOL-monthly-inflow-TAF.csv', delimiter=',', skiprows=1, usecols=[1])
T = len(Q)
def simulate(x):
S = np.zeros(T)
R = np.zeros(T)
cost = np.zeros(T)
h0 = x[0]
hf = x[1]
S[0] = K # start simulation full
for t in range(1,T):
# new storage: mass balance, max value is K
S[t] = min(S[t-1] + Q[t-1] - R[t-1], K)
# determine R from hedging policy
W = S[t] + Q[t]
if W > hf:
R[t] = D
elif W < h0:
R[t] = W
else:
R[t] = (D-h0)/(hf-h0)*(W-h0)+h0
shortage = D-R[t]
cost[t] = a*shortage**b
return cost.mean()
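# e.g. simulate([75, 825]) returns the average shortage cost (squared shortage, since
# a=1, b=2) for a hedging policy with h0 = 75 TAF and hf = 825 TAF -- illustrative values
# near the zoomed-in ranges below.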
# to make a contour plot...
h0s = np.arange(0,D,5)
hfs = np.arange(D,K+D,5)
# or, ranges for zoomed in contour plot
# h0s = np.arange(60,90,0.5)
# hfs = np.arange(800,855,0.5)
data = np.zeros((len(h0s),len(hfs)))
i,j = 0,0
for h0 in h0s:
for hf in hfs:
data[i,j] = simulate([h0,hf])
j += 1
j = 0
i += 1
X,Y = np.meshgrid(h0s, hfs)
plt.contour(X,Y,data.T, 50, cmap=plt.cm.cool)
plt.colorbar()
plt.title('Average Shortage Cost ("$")')
plt.xlabel(r'$h_0$')
plt.ylabel(r'$h_f$')
plt.show()
```
#### File: jdherman/eci273/L15-pareto-sort.py
```python
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
# assumes minimization
# a dominates b if it is <= in all objectives and < in at least one
def dominates(a, b):
return (np.all(a <= b) and np.any(a < b))
# accepts a matrix of points, returns a matrix of only the nondominated ones
# not the most efficient way to do this
# 'keep' is an array of booleans used to index the matrix at the end
def pareto_sort(P):
N = len(P)
keep = np.ones(N, dtype=bool) # all True to start
for i in range(N):
for j in range(i+1,N):
if keep[j] and dominates(P[i,:], P[j,:]):
keep[j] = False
elif keep[i] and dominates(P[j,:], P[i,:]):
keep[i] = False
return P[keep,:]
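# quick check of the sort (minimization): in [[1,2],[2,1],[3,3]] the first two points do
# not dominate each other while [3,3] is dominated by both, so
#   pareto_sort(np.array([[1., 2.], [2., 1.], [3., 3.]]))
# returns only the first two rows.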
# a matrix of data points for a hypothetical 2-objective problem
circle_points = np.loadtxt('data/circle-points.csv', delimiter=',')
pareto = pareto_sort(circle_points)
plt.scatter(circle_points[:,0],circle_points[:,1], c='0.7')
plt.scatter(pareto[:,0], pareto[:,1], c='red')
plt.legend(['Dominated Points', 'Non-dominated points'])
plt.show()
```
#### File: jdherman/eci273/L3-sequentpeak.py
```python
import numpy as np
import matplotlib.pyplot as plt
# sequent peak example from LVB Table 11.2
Q = np.tile([1, 3, 3, 5, 8, 6, 7, 2, 1], 2)
T = len(Q)
K = np.zeros(T+1)
R = 3.5*np.ones(T)
# for t in range(T):
# K[t+1] = max(R[t] - Q[t] + K[t], 0)
# print('Reservoir size needed: %f' % np.max(K))
# Or...let's do this as a function instead
# def sequent_peak(R, Q):
# # accepts inflow and outflow arrays
# # returns sequent peak reservoir capacity
# assert len(R) == len(Q), 'R and Q must be the same length'
# T = len(Q)
# K = np.zeros(T+1)
# for t in range(T):
# K[t+1] = max(R[t] - Q[t] + K[t], 0)
# return np.max(K)
# Kmax = sequent_peak(R,Q)
# print(Kmax)
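# Uncommenting either version above and running it on this 18-period record with a
# constant release of 3.5 should report a required capacity of 7.5 (in the units of Q).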
```
#### File: jdherman/eci273/L8-pandas-autocorr.py
```python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# read CSV data into a "dataframe" - pandas can parse dates
# this will be familiar to R users (not so much matlab users)
df = pd.read_csv('data/SHA.csv', index_col=0, parse_dates=True)
Q = df.SHA_INFLOW_CFS # a pandas series (daily)
# Q = Q.resample('AS-OCT').sum() # annual values
print(Q.autocorr(lag=1))
# plot a correlogram with confidence bounds
pd.plotting.autocorrelation_plot(Q)
plt.xlim([0,365])
plt.show()
from statsmodels.tsa import stattools
pacf,ci = stattools.pacf(Q, nlags=7, alpha=0.05)
plt.plot(pacf, linewidth=2)
plt.plot(ci, linestyle='dashed', color='0.5')
plt.show()
# we did this with pandas to simplify the resampling operations
# but we can also do it with numpy
# (using annual flow values)
Q = df.SHA_INFLOW_CFS.resample('AS-OCT').sum().values # now a numpy array
def autocorr(x,k):
return np.corrcoef(x[:len(x)-k], x[k:])[0,1]
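# e.g. autocorr(Q, k=2) gives the lag-2 autocorrelation of the annual series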
print(autocorr(Q,k=1))
```
#### File: jdherman/eci273/L9-thomasfiering-monthly.py
```python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
cfs_to_taf = 2.29568411*10**-5 * 86400 / 1000
def get_monthly_stats(x):
'''calculate monthly
mean, std. dev., and lag-1 autocorrelation
from historical data x. Assumes each month
is lognormally distributed.'''
x = np.log(x)
N = len(x)
mu = np.zeros(12)
sigma = np.zeros(12)
rho = np.zeros(12)
for m in range(12):
mu[m] = x[m::12].mean()
sigma[m] = x[m::12].std()
x1 = x[m:N-1:12]
x2 = x[m+1::12]
rho[m] = np.corrcoef(x1,x2)[0,1] # index the matrix
return mu,sigma,rho # log space
def thomasfiering_monthly(mu, sigma, rho, N_years):
'''Lag-1 model. use historical monthly statistics
to generate a synthetic sequence of N years'''
Q = np.zeros(N_years*12) # initialize
Q[0] = np.random.normal(mu[0],sigma[0],1)
for y in range(N_years):
for m in range(12):
i = 12*y + m # index
if i > 0:
Z = np.random.standard_normal()
Q[i] = mu[m] + rho[m-1]*(Q[i-1] - mu[m-1]) + Z*sigma[m]*np.sqrt(1-rho[m-1]**2)
return np.exp(Q) # real space
# read in data and generate synthetic timeseries
df = pd.read_csv('data/FOL.csv', index_col=0, parse_dates=True)
Q = (cfs_to_taf * df.FOL_INFLOW_CFS).resample('M').sum().values
mu,sigma,rho = get_monthly_stats(Q)
Q_synthetic = thomasfiering_monthly(mu, sigma, rho, N_years=20)
# compare synthetic stats to historical
a,b,c = get_monthly_stats(Q_synthetic)
for m in range(12):
print('Month %d means: %f, %f' % (m,mu[m],a[m]))
for m in range(12):
print('Month %d stdev: %f, %f' % (m,sigma[m],b[m]))
for m in range(12):
print('Month %d rho: %f, %f' % (m,rho[m],c[m]))
# plot timeseries
plt.subplot(2,1,1)
plt.plot(Q)
plt.title('Historical')
plt.subplot(2,1,2)
plt.title('Synthetic')
plt.plot(Q_synthetic)
plt.ylim([0,2000])
plt.show()
# compare ACF/PACF in historical and synthetic
# from statsmodels.tsa import stattools
# plt.subplot(2,2,1)
# acf,ci = stattools.acf(Q, nlags = 12, alpha=0.05)
# plt.plot(acf, linewidth=2)
# plt.plot(ci, linestyle='dashed', color='0.5')
# plt.title('ACF, Historical')
# plt.subplot(2,2,2)
# pacf,ci = stattools.pacf(Q, nlags = 12, alpha=0.05)
# plt.plot(pacf, linewidth=2)
# plt.plot(ci, linestyle='dashed', color='0.5')
# plt.title('PACF, Historical')
# plt.subplot(2,2,3)
# acf,ci = stattools.acf(Q_synthetic, nlags = 12, alpha=0.05)
# plt.plot(acf, linewidth=2)
# plt.plot(ci, linestyle='dashed', color='0.5')
# plt.title('ACF, Synthetic')
# plt.subplot(2,2,4)
# pacf,ci = stattools.pacf(Q_synthetic, nlags = 12, alpha=0.05)
# plt.plot(pacf, linewidth=2)
# plt.plot(ci, linestyle='dashed', color='0.5')
# plt.title('PACF, Synthetic')
# plt.show()
```
|
{
"source": "jdherman/evolutionary-algorithms-course",
"score": 3
}
|
#### File: jdherman/evolutionary-algorithms-course/L7-GA-KP.py
```python
import numpy as np
import matplotlib.pyplot as plt
# 0-1 knapsack
def value(x, v, w, W):
# if weight is exceeded, value=0
if np.sum(x*w) > W:
return 0
else:
return np.sum(v*x)
d = 10 # dimension of decision variable space
num_seeds = 20
popsize = 80
pc = 0.9
pm = 0.1 # recommended 1/D
max_gen = 50
k = 2 # tournament size
ft = np.zeros((num_seeds, max_gen))
# knapsack problem P01
# Optimal solution = [1, 1, 1, 1, 0, 1, 0, 0, 0, 0]
# which has a weight of 165 and a value of 309
W = 165
w = np.array([23, 31, 29, 44, 53, 38, 63, 85, 89, 82])
v = np.array([92, 57, 49, 68, 60, 43, 67, 84, 87, 72])
# select 1 parent from population P
# using tournaments of size k
def tournament_selection(P,f,k):
candidates = np.random.randint(0,popsize,k)
best = f[candidates].argmax()
index = candidates[best]
return P[index,:]
# one-point crossover plus mutation
# input two parents and crossover probabilities
def cx_and_mut(P1,P2,pc,pm):
child1 = np.copy(P1)
child2 = np.copy(P2)
# one-point crossover
if np.random.rand() < pc:
x = np.random.randint(d)
child1[x:] = P2[x:]
child2[x:] = P1[x:]
# bit-flip mutation
for c in child1,child2:
for i in range(d):
if np.random.rand() < pm:
c[i] = 1 - c[i]
return child1,child2
# run the GA
for seed in range(num_seeds):
np.random.seed(seed)
# initialize
P = np.random.randint(0, 2, (popsize,d))
f = np.zeros(popsize) # we'll evaluate them later
gen = 0
f_best, x_best = None, None
while gen < max_gen:
# evaluate
for i,x in enumerate(P):
f[i] = value(x, v, w, W)
# keep track of best
if f_best is None or f.max() > f_best:
f_best = f.max()
x_best = P[f.argmax(),:]
# selection / crossover / mutation (following Luke Algorithm 20)
Q = np.copy(P)
for i in range(0, popsize, 2):
parent1 = tournament_selection(P,f,k)
parent2 = tournament_selection(P,f,k)
child1,child2 = cx_and_mut(parent1,parent2,pc,pm)
Q[i,:] = child1
Q[i+1,:] = child2
# new population of children
P = np.copy(Q)
ft[seed,gen] = f_best
gen += 1
# for each trial print the result (but the traces are saved in ft)
print(x_best)
print(f_best)
plt.plot(ft.T, color='steelblue', linewidth=1)
plt.xlabel('Generations')
plt.ylabel('Objective Value')
plt.show()
```
#### File: jdherman/evolutionary-algorithms-course/L9-hymod.py
```python
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import differential_evolution
# hymod as defined by Gharari et al. HESS 2013
# load daily data for 1 year (P,PET,Q)
ndays = 365
data = np.loadtxt('data/leaf-river-data.txt', skiprows=1)
data_P = data[0:ndays,3]
data_PET = data[0:ndays,4]
data_Q = data[0:ndays,5]
def hymod(x, mode='optimize'):
# assign parameters
Sm_max,B,alpha,Kf,Ks = list(x)
# initialize storage, all empty to start
Sm,Sf1,Sf2,Sf3,Ss1 = [np.zeros(ndays) for _ in range(5)]
Q = np.zeros(ndays)
for t in range(1,ndays):
# calculate all fluxes
P = data_P[t]
Peff = P*(1 - max(1-Sm[t-1]/Sm_max,0)**B) # PDM model Moore 1985
Evap = min(data_PET[t]*(Sm[t-1]/Sm_max), Sm[t-1])
Qf1 = Kf*Sf1[t-1]
Qf2 = Kf*Sf2[t-1]
Qf3 = Kf*Sf3[t-1]
Qs1 = Ks*Ss1[t-1]
# update state variables
Sm[t] = Sm[t-1] + P - Peff - Evap
Sf1[t] = Sf1[t-1] + alpha*Peff - Qf1
Sf2[t] = Sf2[t-1] + Qf1 - Qf2
Sf3[t] = Sf3[t-1] + Qf2 - Qf3
Ss1[t] = Ss1[t-1] + (1-alpha)*Peff - Qs1
Q[t] = Qs1 + Qf3
if mode=='simulate':
return Q
else:
return np.sqrt(np.mean((Q-data_Q)**2))
# parameter bounds
bounds = [(0,500), (0,2), (0,1), (0.1,1), (0,0.1)]
# x =[80, 0.7, 0.9, 0.7, 0.05]
result = differential_evolution(hymod, bounds=bounds, polish=False)
print(result)
# simulate with best to plot
Q = hymod(result.x, mode='simulate')
plt.plot(data_Q, color='k')
plt.plot(Q, color='red')
plt.xlabel('Days')
plt.ylabel('Streamflow (mm)')
plt.legend(['Simulated', 'Observed'])
plt.show()
```
|
{
"source": "jdherman/ssjrb",
"score": 3
}
|
#### File: jdherman/ssjrb/util.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from numba import jit
cfs_to_afd = 2.29568411*10**-5 * 86400
def water_day(d):
return d - 274 if d >= 274 else d + 91
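# e.g. water_day(274) == 0 (Oct 1) and water_day(1) == 92 (Jan 1), i.e. the day of the
# water year for a given (non-leap-year) day of the calendar year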
def cdec_build_url(station=None, sensor=None, duration=None, sd=None, ed=None):
url = 'http://cdec.water.ca.gov/dynamicapp/req/CSVDataServlet?'
url += 'Stations=%s' % station
url += '&SensorNums=%d' % sensor
url += '&dur_code=%s' % duration
url += '&Start=%s' % sd
url += '&End=%s' % ed
return url
# takes df from one (station, sensor) request
# converts to a series indexed by datetime
def cdec_reformat_series(df):
try:
# reindex by datetime
df['DATE TIME'] = pd.to_datetime(df['DATE TIME'])
df.set_index('DATE TIME', inplace=True)
df.index.rename('datetime', inplace=True)
# keep just the "VALUE" column and rename it
name = '%s_%s_%s' % (df['STATION_ID'][0], df['SENSOR_TYPE'][0], df['UNITS'][0])
df = df['VALUE']
df.rename(name, inplace=True)
except IndexError: #empty data frame causes indexerror
raise IndexError('Requested data does not exist')
return df
# gets data from a specific station and sensor type
def cdec_sensor_data(station=None, sensor=None, duration=None, sd=None, ed=None):
url = cdec_build_url(station, sensor, duration, sd, ed)
df = pd.read_csv(url)
series = cdec_reformat_series(df)
return series
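# Example call (commented out to avoid a network request at import time; the
# station/sensor codes and dates below are illustrative, not verified):
# series = cdec_sensor_data(station='SHA', sensor=15, duration='D',
#                           sd='2020-10-01', ed='2021-09-30')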
# pull numpy arrays from dataframes
def get_simulation_data(res_keys, df_hydrology, medians, df_demand=None):
dowy = df_hydrology.dowy.values
Q = df_hydrology[[k+'_inflow_cfs' for k in res_keys]].values * cfs_to_afd
Q_avg = medians[[k+'_inflow_cfs' for k in res_keys]].values * cfs_to_afd
R_avg = medians[[k+'_outflow_cfs' for k in res_keys]].values * cfs_to_afd
S_avg = medians[[k+'_storage_af' for k in res_keys]].values
Gains_avg = medians['delta_gains_cfs'].values
Pump_pct_avg = medians['total_delta_pumping_pct'].values
if df_demand is None:
demand_multiplier = np.ones(dowy.size)
else:
demand_multiplier = df_demand.combined_demand.values
return (dowy, Q, Q_avg, R_avg, S_avg, Gains_avg, Pump_pct_avg, demand_multiplier)
# calculate annual objectives from simulation results
def results_to_annual_objectives(df, medians, nodes, rk, df_demand=None):
# 1. water supply reliability north of delta
NOD_target = pd.Series([np.max((-1*medians.delta_gains_cfs[i], 0.0)) for i in df.dowy], index=df.index)
NOD_delivery = (-1*df.delta_gains_cfs).clip(lower=0, upper=10**10)
if df_demand is not None: # account for demand multipliers in future scenarios
NOD_target *= df_demand.combined_demand
rel_N = NOD_delivery.resample('AS-OCT').sum() / NOD_target.resample('AS-OCT').sum()
# 2. water supply reliability south of delta
SOD_target = pd.Series([medians.total_delta_pumping_cfs[i] for i in df.dowy], index=df.index)
if df_demand is not None:
SOD_target *= df_demand.combined_demand
rel_S = df.total_delta_pumping_cfs.resample('AS-OCT').sum() / SOD_target.resample('AS-OCT').sum()
rel_S[rel_S > 1] = 1
# 3. flood volume exceeding downstream levee capacity, sum over all reservoirs
flood_vol_taf = pd.Series(0, index=df.index)
for k in rk:
flood_vol_taf += ((df[k+'_outflow_cfs'] - nodes[k]['safe_release_cfs'])
.clip(lower=0) * cfs_to_afd / 1000)
flood_vol_taf = flood_vol_taf.resample('AS-OCT').sum()
# 4. peak flow into the delta
delta_peak_cfs = df.delta_inflow_cfs.resample('AS-OCT').max()
objs = pd.concat([rel_N, rel_S, flood_vol_taf, delta_peak_cfs], axis=1)
objs.columns = ['Rel_NOD_%', 'Rel_SOD_%', 'Upstream_Flood_Volume_taf', 'Delta_Peak_Inflow_cfs']
return objs
```
|
{
"source": "jdheywood/sqlalchemy-orm-and-migrations-demo",
"score": 2
}
|
#### File: alembic/versions/06afa7d32b1a_initial_database_create.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '06afa7d32b1a'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('chumble',
sa.Column('id', sa.String(), nullable=False),
sa.Column('length', sa.Integer(), nullable=True),
sa.Column('diameter', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('dinglepop',
sa.Column('id', sa.String(), nullable=False),
sa.Column('origin', sa.String(), nullable=True),
sa.Column('weight', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('fleeb',
sa.Column('id', sa.String(), nullable=False),
sa.Column('organic', sa.Boolean(), nullable=True),
sa.Column('picked_on', sa.TIMESTAMP(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('schleem',
sa.Column('id', sa.String(), nullable=False),
sa.Column('height', sa.Integer(), nullable=True),
sa.Column('width', sa.Integer(), nullable=True),
sa.Column('harvested_at', sa.TIMESTAMP(), nullable=True),
sa.Column('batch', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('schleem')
op.drop_table('fleeb')
op.drop_table('dinglepop')
op.drop_table('chumble')
# ### end Alembic commands ###
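# These revision functions are applied from the command line with Alembic,
# e.g. `alembic upgrade head` to create the tables and `alembic downgrade base`
# to drop them again.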
```
#### File: code/models/materials.py
```python
from sqlalchemy import (
Column,
Boolean,
Integer,
String,
TIMESTAMP,
)
from .base import Base
__all__ = ['Schleem', 'Dinglepop', 'Fleeb', 'Chumble']
class Schleem(Base):
__tablename__ = 'schleem'
id = Column(String, primary_key=True)
height = Column(Integer)
width = Column(Integer)
harvested_at = Column(TIMESTAMP)
batch = Column(String)
def __repr__(self):
return "<Schleem id='{}' harvested_at={}".format(
self.id, self.harvested_at)
class Dinglepop(Base):
__tablename__ = 'dinglepop'
id = Column(String, primary_key=True)
origin = Column(String)
weight = Column(Integer)
def __repr__(self):
return "<Dinglepop id='{}' origin={}".format(
self.id, self.origin)
class Fleeb(Base):
__tablename__ = 'fleeb'
id = Column(String, primary_key=True)
organic = Column(Boolean, default=True)
picked_on = Column(TIMESTAMP)
def __repr__(self):
return "<Fleeb id='{}' picked_on={}".format(
self.id, self.picked_on)
class Chumble(Base):
__tablename__ = 'chumble'
id = Column(String, primary_key=True)
length = Column(Integer)
diameter = Column(Integer)
def __repr__(self):
return "<Chumble id='{}' diameter={}".format(
self.id, self.diameter)
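# Minimal usage sketch (assumes Base is a standard declarative base exposing
# metadata; the engine URL and example values are illustrative only):
if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)  # create the four tables in memory
    session = sessionmaker(bind=engine)()
    session.add(Chumble(id='chumble-1', length=5, diameter=2))
    session.commit()
    print(session.query(Chumble).count())  # -> 1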
```
|
{
"source": "jdhogan/gagrank",
"score": 3
}
|
#### File: gagrank/gagrank/gagrank.py
```python
print "Importing modules...",
import time # for timing functions
start_time = time.time()
import re # for converting chemical formulae to dictionaries and vice versa
import sys # for getting user inputs
import argparse # for getting user inputs
import sqlite3 as sq # for accessing the database
import networkx as nx # for network analysis
import numpy as np # for numeric arrays
import scipy as sp # for handling degree matrices
from scipy.stats import rankdata # for getting actual GAG ranking
print "Done!"
## arrays for info about GAG parts
# initialize dictionaries for weights and formulae
wt = {}
fm = {}
# monoisotopic element weights
wt['monoH'] = 1.00782504
wt['monoC'] = 12.0
wt['monoO'] = 15.99491461956
wt['monoN'] = 14.0030740048
wt['monoS'] = 31.97207100
# monoisotopic compound weights
wt['monoHex'] = 6*wt['monoC'] + 12*wt['monoH'] + 6*wt['monoO']
wt['monoHexA'] = 6*wt['monoC'] + 10*wt['monoH'] + 7*wt['monoO']
wt['monoHexN'] = 6*wt['monoC'] + 13*wt['monoH'] + wt['monoN'] + 5*wt['monoO']
wt['monodHexA'] = 6*wt['monoC'] + 8*wt['monoH'] + 6*wt['monoO']
wt['monoH2O'] = 2*wt['monoH'] + wt['monoO']
wt['monoSO3'] = wt['monoS'] + 3*wt['monoO']
wt['monoAc'] = 2*wt['monoC'] + 2*wt['monoH'] + wt['monoO']
# formulae
fm['Hex'] = {'C':6, 'H':12, 'O':6, 'N':0, 'S':0}
fm['HexA'] = {'C':6, 'H':10, 'O':7, 'N':0, 'S':0}
fm['HexN'] = {'C':6, 'H':13, 'O':5, 'N':1, 'S':0}
fm['dHexA'] = {'C':6, 'H':8, 'O':6, 'N':0, 'S':0}
fm['H2O'] = {'C':0, 'H':2, 'O':1, 'N':0, 'S':0}
fm['SO3'] = {'C':0, 'H':0, 'O':3, 'N':0, 'S':1}
fm['Ac'] = {'C':2, 'H':2, 'O':1, 'N':0, 'S':0}
### modification locations
# initialize dictionary
modlocs = {}
modlocs['HS'] = {'D':{}, 'U':{}, 'N':{}}
modlocs['CS'] = {'D':{}, 'U':{}, 'N':{}}
modlocs['KS'] = {'X':{}, 'N':{}}
# Acetyl
modlocs['HS']['N'] = {'Ac': [2]}
modlocs['CS']['N'] = {'Ac': [2]}
modlocs['KS']['N'] = {'Ac': [2]}
# Sulfate
modlocs['HS']['D']['SO3'] = [2]
modlocs['HS']['U']['SO3'] = [2]
modlocs['HS']['N']['SO3'] = [2,3,6]
modlocs['CS']['D']['SO3'] = [2]
modlocs['CS']['U']['SO3'] = [2]
modlocs['CS']['N']['SO3'] = [4,6]
modlocs['KS']['X']['SO3'] = [6]
modlocs['KS']['N']['SO3'] = [6]
### cross-ring fragments ###
# initialize dictionaries for weights and formulae
xwt = {}
xfm = {}
xmod = {}
### cross-ring formulae ###
## HexA
xfm['HexA'] = {}
# initialize dictionaries for non-reducing end and reducing end
xfm['HexA']['NR'] = {}
xfm['HexA']['RE'] = {}
# add fragments
xfm['HexA']['NR']['0,2'] = {'C':4, 'H':6, 'O':5, 'N':0, 'S':0}
xfm['HexA']['NR']['1,3'] = {'C':2, 'H':4, 'O':2, 'N':0, 'S':0}
xfm['HexA']['NR']['1,5'] = {'C':5, 'H':8, 'O':5, 'N':0, 'S':0}
xfm['HexA']['NR']['2,4'] = {'C':2, 'H':4, 'O':2, 'N':0, 'S':0}
xfm['HexA']['NR']['3,5'] = {'C':3, 'H':4, 'O':3, 'N':0, 'S':0}
xfm['HexA']['NR']['0,3'] = {'C':3, 'H':4, 'O':4, 'N':0, 'S':0}
xfm['HexA']['NR']['1,4'] = {'C':3, 'H':6, 'O':3, 'N':0, 'S':0}
xfm['HexA']['NR']['2,5'] = {'C':4, 'H':6, 'O':4, 'N':0, 'S':0}
xfm['HexA']['RE']['0,2'] = {'C':2, 'H':4, 'O':2, 'N':0, 'S':0}
xfm['HexA']['RE']['1,3'] = {'C':4, 'H':6, 'O':5, 'N':0, 'S':0}
xfm['HexA']['RE']['1,5'] = {'C':1, 'H':2, 'O':2, 'N':0, 'S':0}
xfm['HexA']['RE']['2,4'] = {'C':4, 'H':6, 'O':5, 'N':0, 'S':0}
xfm['HexA']['RE']['3,5'] = {'C':3, 'H':6, 'O':4, 'N':0, 'S':0}
xfm['HexA']['RE']['0,3'] = {'C':3, 'H':6, 'O':3, 'N':0, 'S':0}
xfm['HexA']['RE']['1,4'] = {'C':3, 'H':4, 'O':4, 'N':0, 'S':0}
xfm['HexA']['RE']['2,5'] = {'C':2, 'H':4, 'O':3, 'N':0, 'S':0}
## dHexA
xfm['dHexA'] = {}
# initialize dictionary for reducing-end fragments only (dHexA occurs only at the chain's non-reducing end)
xfm['dHexA']['RE'] = {}
# add fragments
xfm['dHexA']['RE']['0,2'] = {'C':2, 'H':4, 'O':2, 'N':0, 'S':0}
xfm['dHexA']['RE']['1,3'] = {'C':4, 'H':4, 'O':4, 'N':0, 'S':0}
xfm['dHexA']['RE']['1,5'] = {'C':1, 'H':2, 'O':2, 'N':0, 'S':0}
xfm['dHexA']['RE']['2,4'] = {'C':4, 'H':5, 'O':5, 'N':0, 'S':0}
xfm['dHexA']['RE']['3,5'] = {'C':3, 'H':6, 'O':4, 'N':0, 'S':0}
xfm['dHexA']['RE']['0,3'] = {'C':3, 'H':6, 'O':3, 'N':0, 'S':0}
xfm['dHexA']['RE']['1,4'] = {'C':3, 'H':3, 'O':4, 'N':0, 'S':0}
xfm['dHexA']['RE']['2,5'] = {'C':2, 'H':4, 'O':3, 'N':0, 'S':0}
## HexN
xfm['HexN'] = {}
# initialize dictionaries for non-reducing end and reducing end
xfm['HexN']['NR'] = {}
xfm['HexN']['RE'] = {}
# add fragments
xfm['HexN']['NR']['0,2'] = {'C':4, 'H':8, 'O':4, 'N':0, 'S':0}
xfm['HexN']['NR']['1,3'] = {'C':2, 'H':5, 'O':1, 'N':1, 'S':0}
xfm['HexN']['NR']['1,5'] = {'C':5, 'H':11, 'O':3, 'N':1, 'S':0}
xfm['HexN']['NR']['2,4'] = {'C':2, 'H':4, 'O':2, 'N':0, 'S':0}
xfm['HexN']['NR']['3,5'] = {'C':3, 'H':6, 'O':2, 'N':0, 'S':0}
xfm['HexN']['NR']['0,3'] = {'C':3, 'H':7, 'O':2, 'N':1, 'S':0}
xfm['HexN']['NR']['1,4'] = {'C':3, 'H':7, 'O':2, 'N':1, 'S':0}
xfm['HexN']['NR']['2,5'] = {'C':4, 'H':5, 'O':3, 'N':0, 'S':0}
xfm['HexN']['RE']['0,2'] = {'C':2, 'H':5, 'O':1, 'N':1, 'S':0}
xfm['HexN']['RE']['1,3'] = {'C':4, 'H':8, 'O':4, 'N':0, 'S':0}
xfm['HexN']['RE']['1,5'] = {'C':1, 'H':2, 'O':2, 'N':0, 'S':0}
xfm['HexN']['RE']['2,4'] = {'C':4, 'H':9, 'O':3, 'N':1, 'S':0}
xfm['HexN']['RE']['3,5'] = {'C':3, 'H':7, 'O':3, 'N':1, 'S':0}
xfm['HexN']['RE']['0,3'] = {'C':3, 'H':6, 'O':3, 'N':0, 'S':0}
xfm['HexN']['RE']['1,4'] = {'C':3, 'H':6, 'O':3, 'N':0, 'S':0}
xfm['HexN']['RE']['2,5'] = {'C':2, 'H':5, 'O':2, 'N':1, 'S':0}
## Hex
xfm['Hex'] = {}
# initialize dictionaries for non-reducing end and reducing end
xfm['Hex']['NR'] = {}
xfm['Hex']['RE'] = {}
# add fragments
xfm['Hex']['NR']['0,2'] = {'C':4, 'H':8, 'O':4, 'N':0, 'S':0}
xfm['Hex']['NR']['1,3'] = {'C':2, 'H':4, 'O':2, 'N':0, 'S':0}
xfm['Hex']['NR']['1,5'] = {'C':5, 'H':10, 'O':4, 'N':0, 'S':0}
xfm['Hex']['NR']['2,4'] = {'C':2, 'H':4, 'O':2, 'N':0, 'S':0}
xfm['Hex']['NR']['1,4'] = {'C':3, 'H':6, 'O':3, 'N':0, 'S':0}
xfm['Hex']['NR']['2,5'] = {'C':4, 'H':8, 'O':3, 'N':0, 'S':0}
xfm['Hex']['RE']['0,2'] = {'C':2, 'H':4, 'O':2, 'N':0, 'S':0}
xfm['Hex']['RE']['1,3'] = {'C':4, 'H':8, 'O':4, 'N':0, 'S':0}
xfm['Hex']['RE']['1,5'] = {'C':1, 'H':2, 'O':2, 'N':0, 'S':0}
xfm['Hex']['RE']['2,4'] = {'C':4, 'H':8, 'O':4, 'N':0, 'S':0}
xfm['Hex']['RE']['1,4'] = {'C':3, 'H':6, 'O':3, 'N':0, 'S':0}
xfm['Hex']['RE']['2,5'] = {'C':2, 'H':4, 'O':3, 'N':0, 'S':0}
### cross-ring weights ###
## HexA
xwt['HexA'] = {}
# initialize dictionaries for non-reducing end and reducing end
xwt['HexA']['NR'] = {}
xwt['HexA']['RE'] = {}
# add weights
xwt['HexA']['NR']['0,2'] = 4*wt['monoC'] + 6*wt['monoH'] + 5*wt['monoO']
xwt['HexA']['NR']['1,3'] = 2*wt['monoC'] + 4*wt['monoH'] + 2*wt['monoO']
xwt['HexA']['NR']['1,5'] = 5*wt['monoC'] + 8*wt['monoH'] + 5*wt['monoO']
xwt['HexA']['NR']['2,4'] = 2*wt['monoC'] + 4*wt['monoH'] + 2*wt['monoO']
xwt['HexA']['NR']['3,5'] = 3*wt['monoC'] + 4*wt['monoH'] + 3*wt['monoO']
xwt['HexA']['NR']['0,3'] = 3*wt['monoC'] + 4*wt['monoH'] + 4*wt['monoO']
xwt['HexA']['NR']['1,4'] = 3*wt['monoC'] + 6*wt['monoH'] + 3*wt['monoO']
xwt['HexA']['NR']['2,5'] = 4*wt['monoC'] + 6*wt['monoH'] + 4*wt['monoO']
xwt['HexA']['RE']['0,2'] = 2*wt['monoC'] + 4*wt['monoH'] + 2*wt['monoO']
xwt['HexA']['RE']['1,3'] = 4*wt['monoC'] + 6*wt['monoH'] + 5*wt['monoO']
xwt['HexA']['RE']['1,5'] = 1*wt['monoC'] + 2*wt['monoH'] + 2*wt['monoO']
xwt['HexA']['RE']['2,4'] = 4*wt['monoC'] + 6*wt['monoH'] + 5*wt['monoO']
xwt['HexA']['RE']['3,5'] = 3*wt['monoC'] + 6*wt['monoH'] + 4*wt['monoO']
xwt['HexA']['RE']['0,3'] = 3*wt['monoC'] + 6*wt['monoH'] + 3*wt['monoO']
xwt['HexA']['RE']['1,4'] = 3*wt['monoC'] + 4*wt['monoH'] + 4*wt['monoO']
xwt['HexA']['RE']['2,5'] = 2*wt['monoC'] + 4*wt['monoH'] + 3*wt['monoO']
## dHexA
xwt['dHexA'] = {}
# initialize dictionary for reducing-end fragments only (dHexA occurs only at the chain's non-reducing end)
xwt['dHexA']['RE'] = {}
# add weights
xwt['dHexA']['RE']['0,2'] = 2*wt['monoC'] + 4*wt['monoH'] + 2*wt['monoO']
xwt['dHexA']['RE']['1,3'] = 4*wt['monoC'] + 4*wt['monoH'] + 4*wt['monoO']
xwt['dHexA']['RE']['1,5'] = wt['monoC'] + 2*wt['monoH'] + 2*wt['monoO']
xwt['dHexA']['RE']['2,4'] = 4*wt['monoC'] + 5*wt['monoH'] + 5*wt['monoO']
xwt['dHexA']['RE']['3,5'] = 3*wt['monoC'] + 6*wt['monoH'] + 4*wt['monoO']
xwt['dHexA']['RE']['0,3'] = 3*wt['monoC'] + 6*wt['monoH'] + 3*wt['monoO']
xwt['dHexA']['RE']['1,4'] = 3*wt['monoC'] + 3*wt['monoH'] + 4*wt['monoO']
xwt['dHexA']['RE']['2,5'] = 2*wt['monoC'] + 4*wt['monoH'] + 3*wt['monoO']
## HexN
xwt['HexN'] = {}
# initialize dictionaries for non-reducing end and reducing end
xwt['HexN']['NR'] = {}
xwt['HexN']['RE'] = {}
# add weights
xwt['HexN']['NR']['0,2'] = 4*wt['monoC'] + 8*wt['monoH'] + 4*wt['monoO']
xwt['HexN']['NR']['1,3'] = 2*wt['monoC'] + 5*wt['monoH'] + wt['monoO'] + wt['monoN']
xwt['HexN']['NR']['1,5'] = 5*wt['monoC'] + 11*wt['monoH'] + 3*wt['monoO'] + wt['monoN']
xwt['HexN']['NR']['2,4'] = 2*wt['monoC'] + 4*wt['monoH'] + 2*wt['monoO']
xwt['HexN']['NR']['3,5'] = 3*wt['monoC'] + 6*wt['monoH'] + 2*wt['monoO']
xwt['HexN']['NR']['0,3'] = 3*wt['monoC'] + 7*wt['monoH'] + 2*wt['monoO'] + wt['monoN']
xwt['HexN']['NR']['1,4'] = 3*wt['monoC'] + 7*wt['monoH'] + 2*wt['monoO'] + wt['monoN']
xwt['HexN']['NR']['2,5'] = 4*wt['monoC'] + 5*wt['monoH'] + 3*wt['monoO']
xwt['HexN']['RE']['0,2'] = 2*wt['monoC'] + 5*wt['monoH'] + 1*wt['monoO'] + wt['monoN']
xwt['HexN']['RE']['1,3'] = 4*wt['monoC'] + 8*wt['monoH'] + 4*wt['monoO']
xwt['HexN']['RE']['1,5'] = 1*wt['monoC'] + 2*wt['monoH'] + 2*wt['monoO']
xwt['HexN']['RE']['2,4'] = 4*wt['monoC'] + 9*wt['monoH'] + 3*wt['monoO'] + wt['monoN']
xwt['HexN']['RE']['3,5'] = 3*wt['monoC'] + 7*wt['monoH'] + 3*wt['monoO'] + wt['monoN']
xwt['HexN']['RE']['0,3'] = 3*wt['monoC'] + 6*wt['monoH'] + 3*wt['monoO']
xwt['HexN']['RE']['1,4'] = 3*wt['monoC'] + 6*wt['monoH'] + 3*wt['monoO']
xwt['HexN']['RE']['2,5'] = 2*wt['monoC'] + 5*wt['monoH'] + 2*wt['monoO'] + wt['monoN']
## Hex
xwt['Hex'] = {}
# initialize dictionaries for non-reducing end and reducing end
xwt['Hex']['NR'] = {}
xwt['Hex']['RE'] = {}
# add weights
xwt['Hex']['NR']['0,2'] = 4*wt['monoC'] + 8*wt['monoH'] + 4*wt['monoO']
xwt['Hex']['NR']['1,3'] = 2*wt['monoC'] + 4*wt['monoH'] + 2*wt['monoO']
xwt['Hex']['NR']['1,5'] = 5*wt['monoC'] + 10*wt['monoH'] + 4*wt['monoO']
xwt['Hex']['NR']['2,4'] = 2*wt['monoC'] + 4*wt['monoH'] + 2*wt['monoO']
xwt['Hex']['NR']['1,4'] = 3*wt['monoC'] + 6*wt['monoH'] + 3*wt['monoO']
xwt['Hex']['NR']['2,5'] = 4*wt['monoC'] + 8*wt['monoH'] + 3*wt['monoO']
xwt['Hex']['RE']['0,2'] = 2*wt['monoC'] + 4*wt['monoH'] + 2*wt['monoO']
xwt['Hex']['RE']['1,3'] = 4*wt['monoC'] + 8*wt['monoH'] + 4*wt['monoO']
xwt['Hex']['RE']['1,5'] = wt['monoC'] + 2*wt['monoH'] + 2*wt['monoO']
xwt['Hex']['RE']['2,4'] = 4*wt['monoC'] + 8*wt['monoH'] + 4*wt['monoO']
xwt['Hex']['RE']['1,4'] = 3*wt['monoC'] + 6*wt['monoH'] + 3*wt['monoO']
xwt['Hex']['RE']['2,5'] = 2*wt['monoC'] + 4*wt['monoH'] + 3*wt['monoO']
### cross-ring sulfation/acetylation/adduct possibilities, and COOH places
## HS/Heparin
xmod['HS'] = {}
# HexA
xmod['HS']['HexA'] = {}
# initialize dictionaries for non-reducing end and reducing end
xmod['HS']['HexA']['NR'] = {}
xmod['HS']['HexA']['RE'] = {}
# add modification possibilities
xmod['HS']['HexA']['NR']['0,2'] = {'SO3':0, 'Ac':0, 'COOH':1}
xmod['HS']['HexA']['NR']['1,5'] = {'SO3':1, 'Ac':0, 'COOH':1}
xmod['HS']['HexA']['NR']['2,4'] = {'SO3':0, 'Ac':0, 'COOH':0}
xmod['HS']['HexA']['NR']['3,5'] = {'SO3':0, 'Ac':0, 'COOH':1}
xmod['HS']['HexA']['NR']['0,3'] = {'SO3':0, 'Ac':0, 'COOH':1}
xmod['HS']['HexA']['NR']['1,4'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['HS']['HexA']['NR']['2,5'] = {'SO3':0, 'Ac':0, 'COOH':1}
xmod['HS']['HexA']['RE']['0,2'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['HS']['HexA']['RE']['1,5'] = {'SO3':0, 'Ac':0, 'COOH':0}
xmod['HS']['HexA']['RE']['2,4'] = {'SO3':1, 'Ac':0, 'COOH':1}
xmod['HS']['HexA']['RE']['3,5'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['HS']['HexA']['RE']['0,3'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['HS']['HexA']['RE']['1,4'] = {'SO3':0, 'Ac':0, 'COOH':1}
xmod['HS']['HexA']['RE']['2,5'] = {'SO3':1, 'Ac':0, 'COOH':0}
# dHexA
xmod['HS']['dHexA'] = {}
# initialize dictionary for reducing-end fragments only (dHexA occurs only at the chain's non-reducing end)
xmod['HS']['dHexA']['RE'] = {}
# add modification possibilities
xmod['HS']['dHexA']['RE']['0,2'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['HS']['dHexA']['RE']['1,5'] = {'SO3':0, 'Ac':0, 'COOH':0}
xmod['HS']['dHexA']['RE']['2,4'] = {'SO3':1, 'Ac':0, 'COOH':1}
xmod['HS']['dHexA']['RE']['3,5'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['HS']['dHexA']['RE']['0,3'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['HS']['dHexA']['RE']['1,4'] = {'SO3':0, 'Ac':0, 'COOH':1}
xmod['HS']['dHexA']['RE']['2,5'] = {'SO3':1, 'Ac':0, 'COOH':0}
# HexN
xmod['HS']['HexN'] = {}
# initialize dictionaries for non-reducing end and reducing end
xmod['HS']['HexN']['NR'] = {}
xmod['HS']['HexN']['RE'] = {}
# add modification possibilities
xmod['HS']['HexN']['NR']['0,2'] = {'SO3':2, 'Ac':0, 'COOH':0}
xmod['HS']['HexN']['NR']['1,5'] = {'SO3':3, 'Ac':1, 'COOH':0}
xmod['HS']['HexN']['NR']['2,4'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['HS']['HexN']['NR']['3,5'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['HS']['HexN']['NR']['0,3'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['HS']['HexN']['NR']['1,4'] = {'SO3':2, 'Ac':1, 'COOH':0}
xmod['HS']['HexN']['NR']['2,5'] = {'SO3':2, 'Ac':0, 'COOH':0}
xmod['HS']['HexN']['RE']['0,2'] = {'SO3':1, 'Ac':1, 'COOH':0}
xmod['HS']['HexN']['RE']['1,5'] = {'SO3':0, 'Ac':0, 'COOH':0}
xmod['HS']['HexN']['RE']['2,4'] = {'SO3':2, 'Ac':1, 'COOH':0}
xmod['HS']['HexN']['RE']['3,5'] = {'SO3':2, 'Ac':1, 'COOH':0}
xmod['HS']['HexN']['RE']['0,3'] = {'SO3':2, 'Ac':1, 'COOH':0}
xmod['HS']['HexN']['RE']['1,4'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['HS']['HexN']['RE']['2,5'] = {'SO3':1, 'Ac':1, 'COOH':0}
## CS/DS
xmod['CS'] = {}
# HexA
xmod['CS']['HexA'] = {}
# initialize dictionaries for non-reducing end and reducing end
xmod['CS']['HexA']['NR'] = {}
xmod['CS']['HexA']['RE'] = {}
# add modification possibilities
xmod['CS']['HexA']['NR']['0,2'] = {'SO3':0, 'Ac':0, 'COOH':1}
xmod['CS']['HexA']['NR']['1,5'] = {'SO3':1, 'Ac':0, 'COOH':1}
xmod['CS']['HexA']['NR']['2,4'] = {'SO3':0, 'Ac':0, 'COOH':0}
xmod['CS']['HexA']['NR']['3,5'] = {'SO3':0, 'Ac':0, 'COOH':1}
xmod['CS']['HexA']['NR']['0,3'] = {'SO3':0, 'Ac':0, 'COOH':1}
xmod['CS']['HexA']['NR']['1,4'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['CS']['HexA']['NR']['2,5'] = {'SO3':0, 'Ac':0, 'COOH':1}
xmod['CS']['HexA']['RE']['0,2'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['CS']['HexA']['RE']['1,5'] = {'SO3':0, 'Ac':0, 'COOH':0}
xmod['CS']['HexA']['RE']['2,4'] = {'SO3':1, 'Ac':0, 'COOH':1}
xmod['CS']['HexA']['RE']['3,5'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['CS']['HexA']['RE']['0,3'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['CS']['HexA']['RE']['1,4'] = {'SO3':0, 'Ac':0, 'COOH':1}
xmod['CS']['HexA']['RE']['2,5'] = {'SO3':1, 'Ac':0, 'COOH':0}
# dHexA
xmod['CS']['dHexA'] = {}
# initialize dictionary for reducing-end fragments only (dHexA occurs only at the chain's non-reducing end)
xmod['CS']['dHexA']['RE'] = {}
# add modification possibilities
xmod['CS']['dHexA']['RE']['0,2'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['CS']['dHexA']['RE']['1,5'] = {'SO3':0, 'Ac':0, 'COOH':0}
xmod['CS']['dHexA']['RE']['2,4'] = {'SO3':1, 'Ac':0, 'COOH':1}
xmod['CS']['dHexA']['RE']['3,5'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['CS']['dHexA']['RE']['0,3'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['CS']['dHexA']['RE']['1,4'] = {'SO3':0, 'Ac':0, 'COOH':1}
xmod['CS']['dHexA']['RE']['2,5'] = {'SO3':1, 'Ac':0, 'COOH':0}
# HexN
xmod['CS']['HexN'] = {}
# initialize dictionaries for non-reducing end and reducing end
xmod['CS']['HexN']['NR'] = {}
xmod['CS']['HexN']['RE'] = {}
# add modification possibilities
xmod['CS']['HexN']['NR']['0,2'] = {'SO3':2, 'Ac':0, 'COOH':0}
xmod['CS']['HexN']['NR']['1,3'] = {'SO3':2, 'Ac':0, 'COOH':0}
xmod['CS']['HexN']['NR']['1,5'] = {'SO3':2, 'Ac':1, 'COOH':0}
xmod['CS']['HexN']['NR']['2,4'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['CS']['HexN']['NR']['1,4'] = {'SO3':1, 'Ac':1, 'COOH':0}
xmod['CS']['HexN']['NR']['2,5'] = {'SO3':2, 'Ac':0, 'COOH':0}
xmod['CS']['HexN']['RE']['0,2'] = {'SO3':0, 'Ac':1, 'COOH':0}
xmod['CS']['HexN']['RE']['1,3'] = {'SO3':0, 'Ac':1, 'COOH':0}
xmod['CS']['HexN']['RE']['1,5'] = {'SO3':0, 'Ac':0, 'COOH':0}
xmod['CS']['HexN']['RE']['2,4'] = {'SO3':1, 'Ac':1, 'COOH':0}
xmod['CS']['HexN']['RE']['1,4'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['CS']['HexN']['RE']['2,5'] = {'SO3':0, 'Ac':1, 'COOH':0}
## KS
xmod['KS'] = {}
# Hex
xmod['KS']['Hex'] = {}
# initialize dictionaries for non-reducing end and reducing end
xmod['KS']['Hex']['NR'] = {}
xmod['KS']['Hex']['RE'] = {}
# add modification possibilities
xmod['KS']['Hex']['NR']['0,2'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['KS']['Hex']['NR']['1,3'] = {'SO3':0, 'Ac':0, 'COOH':0}
xmod['KS']['Hex']['NR']['1,5'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['KS']['Hex']['NR']['2,4'] = {'SO3':0, 'Ac':0, 'COOH':0}
xmod['KS']['Hex']['NR']['1,4'] = {'SO3':0, 'Ac':0, 'COOH':0}
xmod['KS']['Hex']['NR']['2,5'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['KS']['Hex']['RE']['0,2'] = {'SO3':0, 'Ac':0, 'COOH':0}
xmod['KS']['Hex']['RE']['1,3'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['KS']['Hex']['RE']['1,5'] = {'SO3':0, 'Ac':0, 'COOH':0}
xmod['KS']['Hex']['RE']['2,4'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['KS']['Hex']['RE']['1,4'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['KS']['Hex']['RE']['2,5'] = {'SO3':0, 'Ac':0, 'COOH':0}
# HexN
xmod['KS']['HexN'] = {}
# initialize dictionaries for non-reducing end and reducing end
xmod['KS']['HexN']['NR'] = {}
xmod['KS']['HexN']['RE'] = {}
# add modification possibilities
xmod['KS']['HexN']['NR']['0,2'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['KS']['HexN']['NR']['1,5'] = {'SO3':1, 'Ac':1, 'COOH':0}
xmod['KS']['HexN']['NR']['2,4'] = {'SO3':0, 'Ac':0, 'COOH':0}
xmod['KS']['HexN']['NR']['3,5'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['KS']['HexN']['NR']['0,3'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['KS']['HexN']['NR']['1,4'] = {'SO3':0, 'Ac':1, 'COOH':0}
xmod['KS']['HexN']['NR']['2,5'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['KS']['HexN']['RE']['0,2'] = {'SO3':0, 'Ac':1, 'COOH':0}
xmod['KS']['HexN']['RE']['1,5'] = {'SO3':0, 'Ac':0, 'COOH':0}
xmod['KS']['HexN']['RE']['2,4'] = {'SO3':1, 'Ac':1, 'COOH':0}
xmod['KS']['HexN']['RE']['3,5'] = {'SO3':0, 'Ac':1, 'COOH':0}
xmod['KS']['HexN']['RE']['0,3'] = {'SO3':0, 'Ac':1, 'COOH':0}
xmod['KS']['HexN']['RE']['1,4'] = {'SO3':1, 'Ac':0, 'COOH':0}
xmod['KS']['HexN']['RE']['2,5'] = {'SO3':0, 'Ac':1, 'COOH':0}
### cross-ring modification locations
# initialize dictionaries
xmodlocs = {}
xmodlocs['HS'] = {'D':{'NR':{}, 'RE':{}}, 'U':{'NR':{}, 'RE':{}}, 'N':{'NR':{}, 'RE':{}}}
xmodlocs['CS'] = {'D':{'NR':{}, 'RE':{}}, 'U':{'NR':{}, 'RE':{}}, 'N':{'NR':{}, 'RE':{}}}
xmodlocs['KS'] = {'X':{'NR':{}, 'RE':{}}, 'N':{'NR':{}, 'RE':{}}}
## HS
# dHexA
xmodlocs['HS']['D']['NR']['0,2'] = {'SO3':[], 'Ac':[]}
xmodlocs['HS']['D']['NR']['1,5'] = {'SO3':[2], 'Ac':[]}
xmodlocs['HS']['D']['NR']['2,4'] = {'SO3':[], 'Ac':[]}
xmodlocs['HS']['D']['NR']['3,5'] = {'SO3':[], 'Ac':[]}
xmodlocs['HS']['D']['NR']['0,3'] = {'SO3':[], 'Ac':[]}
xmodlocs['HS']['D']['NR']['1,4'] = {'SO3':[2], 'Ac':[]}
xmodlocs['HS']['D']['NR']['2,5'] = {'SO3':[], 'Ac':[]}
xmodlocs['HS']['D']['RE']['0,2'] = {'SO3':[2], 'Ac':[]}
xmodlocs['HS']['D']['RE']['1,5'] = {'SO3':[], 'Ac':[]}
xmodlocs['HS']['D']['RE']['2,4'] = {'SO3':[2], 'Ac':[]}
xmodlocs['HS']['D']['RE']['3,5'] = {'SO3':[2], 'Ac':[]}
xmodlocs['HS']['D']['RE']['0,3'] = {'SO3':[2], 'Ac':[]}
xmodlocs['HS']['D']['RE']['1,4'] = {'SO3':[], 'Ac':[]}
xmodlocs['HS']['D']['RE']['2,5'] = {'SO3':[2], 'Ac':[]}
# HexA
xmodlocs['HS']['U']['NR']['0,2'] = {'SO3':[], 'Ac':[]}
xmodlocs['HS']['U']['NR']['1,5'] = {'SO3':[2], 'Ac':[]}
xmodlocs['HS']['U']['NR']['2,4'] = {'SO3':[], 'Ac':[]}
xmodlocs['HS']['U']['NR']['3,5'] = {'SO3':[], 'Ac':[]}
xmodlocs['HS']['U']['NR']['0,3'] = {'SO3':[], 'Ac':[]}
xmodlocs['HS']['U']['NR']['1,4'] = {'SO3':[2], 'Ac':[]}
xmodlocs['HS']['U']['NR']['2,5'] = {'SO3':[], 'Ac':[]}
xmodlocs['HS']['U']['RE']['0,2'] = {'SO3':[2], 'Ac':[]}
xmodlocs['HS']['U']['RE']['1,5'] = {'SO3':[], 'Ac':[]}
xmodlocs['HS']['U']['RE']['2,4'] = {'SO3':[2], 'Ac':[]}
xmodlocs['HS']['U']['RE']['3,5'] = {'SO3':[2], 'Ac':[]}
xmodlocs['HS']['U']['RE']['0,3'] = {'SO3':[2], 'Ac':[]}
xmodlocs['HS']['U']['RE']['1,4'] = {'SO3':[], 'Ac':[]}
xmodlocs['HS']['U']['RE']['2,5'] = {'SO3':[2], 'Ac':[]}
# HexN
xmodlocs['HS']['N']['NR']['0,2'] = {'SO3':[3,6], 'Ac':[]}
xmodlocs['HS']['N']['NR']['1,5'] = {'SO3':[2,3,6], 'Ac':[2]}
xmodlocs['HS']['N']['NR']['2,4'] = {'SO3':[3], 'Ac':[]}
xmodlocs['HS']['N']['NR']['3,5'] = {'SO3':[6], 'Ac':[]}
xmodlocs['HS']['N']['NR']['0,3'] = {'SO3':[6], 'Ac':[]}
xmodlocs['HS']['N']['NR']['1,4'] = {'SO3':[2,3], 'Ac':[2]}
xmodlocs['HS']['N']['NR']['2,5'] = {'SO3':[3,6], 'Ac':[]}
xmodlocs['HS']['N']['RE']['0,2'] = {'SO3':[2], 'Ac':[2]}
xmodlocs['HS']['N']['RE']['1,5'] = {'SO3':[], 'Ac':[]}
xmodlocs['HS']['N']['RE']['2,4'] = {'SO3':[2,6], 'Ac':[2]}
xmodlocs['HS']['N']['RE']['3,5'] = {'SO3':[2,3], 'Ac':[2]}
xmodlocs['HS']['N']['RE']['0,3'] = {'SO3':[2,3], 'Ac':[2]}
xmodlocs['HS']['N']['RE']['1,4'] = {'SO3':[6], 'Ac':[]}
xmodlocs['HS']['N']['RE']['2,5'] = {'SO3':[2], 'Ac':[2]}
## CS/DS
# dHexA
xmodlocs['CS']['D']['NR']['0,2'] = {'SO3':[], 'Ac':[]}
xmodlocs['CS']['D']['NR']['1,5'] = {'SO3':[2], 'Ac':[]}
xmodlocs['CS']['D']['NR']['2,4'] = {'SO3':[], 'Ac':[]}
xmodlocs['CS']['D']['NR']['3,5'] = {'SO3':[], 'Ac':[]}
xmodlocs['CS']['D']['NR']['0,3'] = {'SO3':[], 'Ac':[]}
xmodlocs['CS']['D']['NR']['1,4'] = {'SO3':[2], 'Ac':[]}
xmodlocs['CS']['D']['NR']['2,5'] = {'SO3':[], 'Ac':[]}
xmodlocs['CS']['D']['RE']['0,2'] = {'SO3':[2], 'Ac':[]}
xmodlocs['CS']['D']['RE']['1,5'] = {'SO3':[], 'Ac':[]}
xmodlocs['CS']['D']['RE']['2,4'] = {'SO3':[2], 'Ac':[]}
xmodlocs['CS']['D']['RE']['3,5'] = {'SO3':[2], 'Ac':[]}
xmodlocs['CS']['D']['RE']['0,3'] = {'SO3':[2], 'Ac':[]}
xmodlocs['CS']['D']['RE']['1,4'] = {'SO3':[], 'Ac':[]}
xmodlocs['CS']['D']['RE']['2,5'] = {'SO3':[2], 'Ac':[]}
# HexA
xmodlocs['CS']['U']['NR']['0,2'] = {'SO3':[], 'Ac':[]}
xmodlocs['CS']['U']['NR']['1,5'] = {'SO3':[2], 'Ac':[]}
xmodlocs['CS']['U']['NR']['2,4'] = {'SO3':[], 'Ac':[]}
xmodlocs['CS']['U']['NR']['3,5'] = {'SO3':[], 'Ac':[]}
xmodlocs['CS']['U']['NR']['0,3'] = {'SO3':[], 'Ac':[]}
xmodlocs['CS']['U']['NR']['1,4'] = {'SO3':[2], 'Ac':[]}
xmodlocs['CS']['U']['NR']['2,5'] = {'SO3':[], 'Ac':[]}
xmodlocs['CS']['U']['RE']['0,2'] = {'SO3':[2], 'Ac':[]}
xmodlocs['CS']['U']['RE']['1,5'] = {'SO3':[], 'Ac':[]}
xmodlocs['CS']['U']['RE']['2,4'] = {'SO3':[2], 'Ac':[]}
xmodlocs['CS']['U']['RE']['3,5'] = {'SO3':[2], 'Ac':[]}
xmodlocs['CS']['U']['RE']['0,3'] = {'SO3':[2], 'Ac':[]}
xmodlocs['CS']['U']['RE']['1,4'] = {'SO3':[], 'Ac':[]}
xmodlocs['CS']['U']['RE']['2,5'] = {'SO3':[2], 'Ac':[]}
# HexN
xmodlocs['CS']['N']['NR']['0,2'] = {'SO3':[4,6], 'Ac':[]}
xmodlocs['CS']['N']['NR']['1,3'] = {'SO3':[], 'Ac':[2]}
xmodlocs['CS']['N']['NR']['1,5'] = {'SO3':[4,6], 'Ac':[2]}
xmodlocs['CS']['N']['NR']['2,4'] = {'SO3':[4], 'Ac':[]}
xmodlocs['CS']['N']['NR']['1,4'] = {'SO3':[4], 'Ac':[2]}
xmodlocs['CS']['N']['NR']['2,5'] = {'SO3':[4,6], 'Ac':[]}
xmodlocs['CS']['N']['RE']['0,2'] = {'SO3':[], 'Ac':[2]}
xmodlocs['CS']['N']['RE']['1,3'] = {'SO3':[4,6], 'Ac':[]}
xmodlocs['CS']['N']['RE']['1,5'] = {'SO3':[], 'Ac':[]}
xmodlocs['CS']['N']['RE']['2,4'] = {'SO3':[6], 'Ac':[2]}
xmodlocs['CS']['N']['RE']['1,4'] = {'SO3':[6], 'Ac':[]}
xmodlocs['CS']['N']['RE']['2,5'] = {'SO3':[], 'Ac':[2]}
## KS
# Hex
xmodlocs['KS']['X']['NR']['0,2'] = {'SO3':[6], 'Ac':[]}
xmodlocs['KS']['X']['NR']['1,3'] = {'SO3':[], 'Ac':[]}
xmodlocs['KS']['X']['NR']['1,5'] = {'SO3':[6], 'Ac':[]}
xmodlocs['KS']['X']['NR']['2,4'] = {'SO3':[], 'Ac':[]}
xmodlocs['KS']['X']['NR']['1,4'] = {'SO3':[], 'Ac':[]}
xmodlocs['KS']['X']['NR']['2,5'] = {'SO3':[6], 'Ac':[]}
xmodlocs['KS']['X']['RE']['0,2'] = {'SO3':[], 'Ac':[]}
xmodlocs['KS']['X']['RE']['1,3'] = {'SO3':[6], 'Ac':[]}
xmodlocs['KS']['X']['RE']['1,5'] = {'SO3':[], 'Ac':[]}
xmodlocs['KS']['X']['RE']['2,4'] = {'SO3':[6], 'Ac':[]}
xmodlocs['KS']['X']['RE']['1,4'] = {'SO3':[6], 'Ac':[]}
xmodlocs['KS']['X']['RE']['2,5'] = {'SO3':[], 'Ac':[]}
# HexN
xmodlocs['KS']['N']['NR']['0,2'] = {'SO3':[6], 'Ac':[]}
xmodlocs['KS']['N']['NR']['1,5'] = {'SO3':[6], 'Ac':[2]}
xmodlocs['KS']['N']['NR']['2,4'] = {'SO3':[], 'Ac':[]}
xmodlocs['KS']['N']['NR']['3,5'] = {'SO3':[6], 'Ac':[]}
xmodlocs['KS']['N']['NR']['0,3'] = {'SO3':[6], 'Ac':[]}
xmodlocs['KS']['N']['NR']['1,4'] = {'SO3':[], 'Ac':[2]}
xmodlocs['KS']['N']['NR']['2,5'] = {'SO3':[6], 'Ac':[]}
xmodlocs['KS']['N']['RE']['0,2'] = {'SO3':[], 'Ac':[2]}
xmodlocs['KS']['N']['RE']['1,5'] = {'SO3':[], 'Ac':[]}
xmodlocs['KS']['N']['RE']['2,4'] = {'SO3':[6], 'Ac':[2]}
xmodlocs['KS']['N']['RE']['3,5'] = {'SO3':[], 'Ac':[2]}
xmodlocs['KS']['N']['RE']['0,3'] = {'SO3':[], 'Ac':[2]}
xmodlocs['KS']['N']['RE']['1,4'] = {'SO3':[6], 'Ac':[]}
xmodlocs['KS']['N']['RE']['2,5'] = {'SO3':[], 'Ac':[2]}
# scaling factors
p1 = 5.4 # for scaling edge width
p2 = 5.1 # for scaling fragments' prior probabilities
p3 = 0.4 # for scaling sequences' prior probabilities
# variable for debugging
debug = False
### CLASSES AND FUNCTIONS ###
# dict for going between monosaccharide abbreviations
ms_hash = {'D': 'dHexA', 'U': 'HexA', 'X': 'Hex', 'N': 'HexN', 'dHexA': 'D', 'HexA': 'U', 'Hex': 'X', 'HexN': 'N'}
# function for converting dictionary to chemical formula or composition
def dict2fmla (dt, type):
fs = '' # formula string
# check whether chemical formula or composition
if type == 'formula':
symbols = elems
elif type == 'composition':
symbols = ['D', 'U', 'X', 'N', 'A', 'S']
else:
print "Incorrect type entered. Please enter either 'formula' or 'composition'."
sys.exit()
for sym in symbols:
if sym in dt:
if dt[sym] > 0:
if dt[sym] > 1:
fs += sym + str(dt[sym])
else:
fs += sym
# return
return fs
# function for converting chemical formula or composition to dictionary
def fmla2dict (fm, type):
# check whether chemical formula or composition
if type == 'formula':
symbols = elems
dt = {'C':0, 'H':0, 'O':0, 'N':0, 'S':0} #, 'Na':0, 'K':0, 'Li':0, 'Mg':0, 'Ca':0}
elif type == 'composition':
symbols = ['D', 'U', 'X', 'N', 'A', 'S']
dt = {'D':0, 'U':0, 'X':0, 'N':0, 'A':0, 'S':0}
else:
print "Incorrect type entered. Please enter either 'formula' or 'composition'."
sys.exit()
parts = re.findall(r'([A-Z][a-z]*)(\d*)', fm.upper()) # split formula by symbol
for q in parts:
if q[0] not in dt: # invalid symbol entered
if q[0] not in symbols:
print "Invalid chemical formula entered."
sys.exit()
else:
dt[q[0]] = 0
if q[1] == '': # only one of this atom
dt[q[0]] += 1
else:
dt[q[0]] += int(q[1])
# return
return dt
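# Round-trip example: fmla2dict('D1U2N3S4', 'composition') gives
# {'D':1, 'U':2, 'X':0, 'N':3, 'A':0, 'S':4}, and dict2fmla on that dictionary
# gives back 'DU2N3S4' (counts of 1 are written without a number).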
# function for shuffling
def choose_iter(elements, length):
for i in xrange(len(elements)):
if length == 1:
yield (elements[i],)
else:
for next in choose_iter(elements[i+1:len(elements)], length-1):
yield (elements[i],) + next
# function for getting a shuffled list
def choose(l, k):
return list(choose_iter(l, k))
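# choose enumerates unordered combinations, equivalent to list(itertools.combinations(l, k));
# e.g. choose(['a','b','c'], 2) returns [('a','b'), ('a','c'), ('b','c')].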
# function for getting the reducing and non-reducing end info
def get_ends(pd, gag_class):
n = pd['D'] + pd['U'] + pd['X'] + pd['N'] # length of GAG
# check if dHexA exists (has to be NR end)
if pd['D'] > 0:
nonred = 'D'
if pd['U'] == pd['N']: # we know that the reducing end is HexA because (HexA+dHexA > HexN)
redend = 'U'
else: # we know that the reducing end is HexN because (HexA+dHexA == HexN)
redend = 'N'
else:
if gag_class == 4: # KS
if pd['N'] > pd['X']: # we know that both ends are HexN
nonred = 'N'
redend = 'N'
elif pd['N'] < pd['X']: # we know that both ends are Hex
nonred = 'X'
redend = 'X'
else: # we cannot know which end is which just yet
nonred = '?'
redend = '?'
else: # HS or CS
if pd['N'] > pd['U']: # we know that both ends are HexN
nonred = 'N'
redend = 'N'
elif pd['N'] < pd['U']: # we know that both ends are HexA
nonred = 'U'
redend = 'U'
else: # we cannot know which end is which just yet
nonred = '?'
redend = '?'
# return
return [nonred, redend, n]
# function for getting GAG chemical name from backbone, ac positions, and so3 positions
def generateGAGstring(bbone, adict=None, sdict=None):
monos = [] # list to store each mono
# range through length
for i in range(len(bbone)):
j = str(i+1) # dictionary keys are strings
tf = False # boolean for whether to add position on HexN or not
if bbone[j] == 'D': # current mono is dHexA
cur = 'dHexA'
elif bbone[j] == 'U': # current mono is HexA
cur = 'HexA'
elif bbone[j] == 'X': # current mono is Hex
cur = 'Hex'
else: # current mono is HexN
cur = 'HexN'
tf = True
# there is at least one acetyl group in this GAG and it is on this mono
if adict and j in adict:
cur += 'Ac'
# there is at least one sulfate group in this GAG and it is on this mono
if sdict and j in sdict:
for p in sorted(sdict[j]):
#
if p == '2' and tf:
cur += 'S'
else:
cur += p + 'S'
monos.append(cur)
gstring = ''
for m in monos:
gstring += m + '-'
return gstring[0:len(gstring)-1]
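# e.g. generateGAGstring({'1': 'U', '2': 'N'}, adict={'2': ['2']}, sdict={'2': ['6']})
# returns 'HexA-HexNAc6S' (illustrative backbone and modification dictionaries).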
# function for making a GAG string a GAG dict
def gstring2gdict(gcl, gstring):
# split string into monos
monos = gstring.split('-')
# GAG dictionary
gdict = {'backbone': {}, 'Ac':{}, 'SO3':{}}
# go through each mono
for m in range(len(monos)):
j = str(m+1)
# string manipulation
if monos[m][:5] == 'dHexA':
curm = 'dHexA'
mods = monos[m][5:]
elif monos[m][:4] == 'HexA' or monos[m][:4] == 'HexN':
curm = monos[m][:4]
mods = monos[m][4:]
else: # Hexose
curm = 'Hex'
mods = monos[m][3:]
# add to backbone
gdict['backbone'][j] = ms_hash[curm]
# temporary dicts
if curm == 'HexN':
tadict = {'2': 0}
tsdict = {}
# add positions to temporary SO3 dict
for p in modlocs[gcl]['N']['SO3']:
tsdict[str(p)] = 0
# actually add modifications
if 'Ac' in mods: # acetyl present
tadict['2'] = 1
if mods != '':
start = 0 # to determine where to start
if mods[0] == 'S':
tsdict['2'] = 1
start = 1 # skip the first letter
for x in [mods[i:i+2] for i in range(start, len(mods), 2)]:
if x == 'Ac':
tadict['2'] = 1
else:
tsdict[x[0]] = 1
# add dicts to gdict
gdict['Ac'][j] = tadict
gdict['SO3'][j] = tsdict
else:
tsdict = {}
# add positions to temporary SO3 dict
for p in modlocs[gcl][ms_hash[curm]]['SO3']:
tsdict[str(p)] = 0
# actually add modifications
for x in [mods[i:i+2] for i in range(0, len(mods), 2)]:
tsdict[x[0]] = 1
# add dicts to gdict
gdict['SO3'][j] = tsdict
return gdict
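# e.g. gstring2gdict('HS', 'HexA-HexNAc6S') returns a dict with backbone
# {'1': 'U', '2': 'N'}, gdict['Ac']['2']['2'] == 1 and gdict['SO3']['2']['6'] == 1,
# i.e. the inverse mapping of generateGAGstring above.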
# function for encoding BiRank algorithm
def BiRank(G, alpha=0.85, beta=0.85, n_iter=1000, s_0=None, f_0=None):
# check if G is a bipartite graph
if not nx.is_bipartite(G):
#print 'The graph for BiRank must be bipartite'
return 0
# get the two sides of the bipartite graph
grp1, grp2 = nx.bipartite.sets(G)
grp1 = list(grp1)
grp2 = list(grp2)
# get list of sequences and fragments
if 'frag' in grp1[0]:
fragments = grp1
sequences = grp2
else:
sequences = grp1
fragments = grp2
# add initial sequence info
if s_0 is None:
s_0 = []
for idx in sequences:
s_0.append(1./len(sequences))
elif type(s_0) is dict:
tmp = []
for idx in sequences:
tmp.append(s_0[idx])
s_0 = np.array(tmp)
seqs = [s_0]
# add initial fragment info
if f_0 is None:
f_0 = []
for idx in fragments:
f_0.append(1./len(fragments))
elif type(f_0) is dict:
tmp = []
for idx in fragments:
tmp.append(f_0[idx])
f_0 = np.array(tmp)
frgs = [f_0]
# get degree info
a_deg = nx.degree(G, weight='weight')
s_deg = []
f_deg = []
deg = a_deg
# populate sequence degrees and fragment degrees
for s in sequences:
s_deg.append(a_deg[s] ** -0.5)
for f in fragments:
f_deg.append(a_deg[f] ** -0.5)
# make degree matrices
Df = sp.sparse.diags(f_deg)
Ds = sp.sparse.diags(s_deg)
# get weight matrix
W = nx.bipartite.biadjacency_matrix(G, sequences, fragments)
# get symmetrically normalized version of weight matrix
S = Ds * W * Df
# iterate
go = True
k = 1 # for index control
while go:
# update ranks
cur_seq = (alpha * S * frgs[k-1]) + ((1 - alpha) * s_0)
cur_frg = (beta * S.transpose() * seqs[k-1]) + ((1 - beta) * f_0)
if np.array_equal(cur_seq, seqs[len(seqs)-1]) or np.sum(abs(cur_seq - seqs[len(seqs)-1])) < 1.e-10 or k == n_iter:
go = False
else:
# add new rankings to the list of rankings
seqs.append(cur_seq)
frgs.append(cur_frg)
k += 1 # add to iterator
# get back into dictionary format
s_dict, f_dict = ({}, {})
for idx in range(len(sequences)):
s_dict[sequences[idx]] = seqs[len(seqs)-1][idx]
for idx in range(len(fragments)):
f_dict[fragments[idx]] = frgs[len(frgs)-1][idx]
# return
return s_dict, f_dict
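# Hedged usage sketch (node names, weights and priors below are illustrative):
# BiRank expects a weighted bipartite graph whose fragment nodes contain 'frag'
# in their names, plus optional prior dicts, and returns one score dict per side:
# G_toy = nx.Graph()
# G_toy.add_edge('seqA', 'frag0', weight=1.0)
# G_toy.add_edge('seqA', 'frag1', weight=0.5)
# G_toy.add_edge('seqB', 'frag1', weight=1.0)
# seq_rank, frag_rank = BiRank(G_toy, alpha=0.85, beta=0.85,
#                              s_0={'seqA': 0.5, 'seqB': 0.5}, f_0={'frag0': 0.6, 'frag1': 0.4})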
# return a sequence's likelihood score
def scoreSeq(seq, scf):
lik = 1 # likelihood score
monos = seq.split('-') # get each individual monosaccharide
for m in monos: # loop through monosaccharides
val = 1
if 'N3' in m or 'N6' in m or m[len(m)-1] == 'N': # free amine
val -= 0.6
if '3S' in m and '6S' not in m: # 3S is more rare than 6S
val -= 0.3
lik *= val
# return scaled likelihood score
return lik ** scf
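# e.g. an N-acetylated, 6-O-sulfated residue ('HexNAc6S') keeps a per-residue
# score of 1, a free amine ('HexN6S') is reduced to 0.4, and 3-O-sulfation
# without 6-O-sulfation is penalized by a further 0.3 before the ** scf scaling.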
# function for reading gagfinder output
def read_gf_results(res_file):
# load G scores
try:
res = open(res_file, 'r')
except:
print "\nIncorrect or missing file. Please try again."
sys.exit()
res.readline() # skip first line
# dictionaries
gd = {} # store G scores for each nonspecific fragment
fm = {} # map nonspecific fragments to specific fragment/charge state pair
mf = {} # map specific fragment/charge state pair to nonspecific fragments
fc = {} # keys are unique fragments and values are each charge state
af = [] # list of all fragments (similar to GAGs
# go through lines
line_ct = 0
for line in res.readlines():
if line_ct >= 68:
break
cells = line.strip('\n').split('\t') # split string to get values out
if 'M' in cells[3]: # full molecule fragments are not helpful
continue
# get G score, fragments, and charge
G = float(cells[1])/(float(cells[4]) ** p2)
fgs = cells[3].split('; ')
chg = int(cells[2])
# add G score to dictionary
gd['frag'+str(line_ct)] = G
# add fragment to allfrags list
af.append('frag'+str(line_ct))
# go through each fragment
for frg in fgs:
# add to fragment map
fm[(frg, chg)] = 'frag'+str(line_ct)
# add to map fragment
if 'frag'+str(line_ct) not in mf:
mf['frag'+str(line_ct)] = [(frg, chg)]
else:
mf['frag'+str(line_ct)].append((frg, chg))
if frg in fc: # this fragment hasn't been added yet
fc[frg].append(chg)
else: # this fragment has been added
fc[frg] = [chg]
line_ct += 1
# convert to a numpy array
gs = []
for key in gd:
gs.append((key, gd[key]))
dty = np.dtype([('frags', 'object'), ('G', 'float')])
gs = np.array(gs, dtype=dty)
gs = np.sort(gs, order='G')[::-1]
# return dicts etc.
return [gd, fm, mf, fc, af, gs]
# function for getting precursor composition
def get_pre(cl, pm, pz, rw, crsr):
# calculate precursor mass
pre_mass = (pm*abs(pz)) - (pz*wt['monoH'])
test_mass = pre_mass - rw
# get precursor info
crsr.execute('''SELECT p.value
FROM Precursors p, ClassPrecursorMap cpm, Formulae f
WHERE p.id = cpm.pId
AND f.id = p.fmId
AND cpm.cId = ?
ORDER BY ABS(f.monoMass - ?) ASC
LIMIT 1;''', (cl, test_mass))
row = crsr.fetchone()
# return
return row[0]
# function for getting the reducing and non-reducing end info
def get_ends(pd, gag_class):
n = pd['D'] + pd['U'] + pd['X'] + pd['N'] # length of GAG
# check if dHexA exists (has to be NR end)
if pd['D'] > 0:
nonred = 'D'
if pd['U'] == pd['N']: # we know that the reducing end is HexA because (HexA+dHexA > HexN)
redend = 'U'
else: # we know that the reducing end is HexN because (HexA+dHexA == HexN)
redend = 'N'
else:
if gag_class == 4: # KS
if pd['N'] > pd['X']:
nonred = 'N'
redend = 'N'
elif pd['N'] < pd['X']:
nonred = 'X'
redend = 'X'
else:
nonred = '?'
redend = '?'
else: # HS or CS
if pd['N'] > pd['U']: # we know that both ends are HexN
nonred = 'N'
redend = 'N'
elif pd['N'] < pd['U']: # we know that both ends are HexA
nonred = 'U'
redend = 'U'
else: # we cannot know which end is which just yet
nonred = '?'
redend = '?'
# return
return [nonred, redend, n]
# function for getting backbone and modification info
def get_bb(cl, nonred, lng):
# variables to keep possibilities
bb = []
ac = []
so = []
# generate backbones and modification possibile locations
if cl == 'KS': # working with KS
poss = ['X','N'] # possible monosaccharides
if nonred == '?': # unsure about end monosaccharides
for i in poss: # we need two backbones
cur_back = {} # current backbone
cur_Ac = [] # current acetyl modification positions
cur_SO3 = [] # current sulfate modification positions
odd = i # odd-numbered monosaccharide
even = list(set(poss) - set(odd))[0] # even-numbered monosaccharide
# go through each position in backbone
for j in range(lng):
if (j+1) % 2 == 0: # even
cur_back[str(j+1)] = even # add even-numbered monosaccharide to backbone
if even == 'N': # HexN
cur_Ac.append(str(j+1)+'-2')
cur_SO3.append(str(j+1)+'-6')
else: # odd
cur_back[str(j+1)] = odd # add odd-numbered monosaccharide to backbone
if odd == 'N': # HexN
cur_Ac.append(str(j+1)+'-2')
cur_SO3.append(str(j+1)+'-6')
# add current backbone and modifications
bb.append(cur_back)
ac.append(cur_Ac)
so.append(cur_SO3)
else: # we know end monosaccharides
cur_back = {} # current backbone
cur_Ac = [] # current acetyl modification positions
cur_SO3 = [] # current sulfate modification positions
odd = nonred
even = list(set(poss) - set(odd))[0] # even-numbered monosaccharide
# go through each position in backbone
for j in range(lng):
if (j+1) % 2 == 0: # even
cur_back[str(j+1)] = even # add even-numbered monosaccharide to backbone
if even == 'N':
cur_Ac.append(str(j+1)+'-2')
cur_SO3.append(str(j+1)+'-6')
else: # odd
cur_back[str(j+1)] = odd # add odd-numbered monosaccharide to backbone
if odd == 'N':
cur_Ac.append(str(j+1)+'-2')
cur_SO3.append(str(j+1)+'-6')
# add current backbone and modifications
bb.append(cur_back)
ac.append(cur_Ac)
so.append(cur_SO3)
elif cl == 'CS': # working with CS
poss = ['U','N'] # possible monosaccharides
if nonred == '?': # unsure about end monosaccharides
for i in poss: # we need two backbones
cur_back = {} # current backbone
cur_Ac = [] # current acetyl modification positions
cur_SO3 = [] # current sulfate modification positions
odd = i # odd-numbered monosaccharide
even = list(set(poss) - set(odd))[0] # even-numbered monosaccharide
# go through each position in backbone
for j in range(lng):
if (j+1) % 2 == 0: # even
cur_back[str(j+1)] = even # add even-numbered monosaccharide to backbone
if even == 'N': # HexN
cur_Ac.append(str(j+1)+'-2')
cur_SO3.append(str(j+1)+'-4')
cur_SO3.append(str(j+1)+'-6')
else: # HexA
cur_SO3.append(str(j+1)+'-2')
else: # odd
cur_back[str(j+1)] = odd # add odd-numbered monosaccharide to backbone
if odd == 'N': # HexN
cur_Ac.append(str(j+1)+'-2')
cur_SO3.append(str(j+1)+'-4')
cur_SO3.append(str(j+1)+'-6')
else: # HexA
cur_SO3.append(str(j+1)+'-2')
# add current backbone and modifications
bb.append(cur_back)
ac.append(cur_Ac)
so.append(cur_SO3)
else: # we know end monosaccharides
cur_back = {} # current backbone
cur_Ac = [] # current acetyl modification positions
cur_SO3 = [] # current sulfate modification positions
# do position 1 first
cur_back['1'] = nonred
# change odd to U if it's a dHexA
if nonred == 'D':
odd = 'U'
else:
odd = nonred
even = list(set(poss) - set(odd))[0] # even-numbered monosaccharide
if odd == 'N': # HexN
cur_Ac.append('1-2')
cur_SO3.append('1-4')
cur_SO3.append('1-6')
else: # HexA
cur_SO3.append('1-2')
# go through each position in backbone
for j in range(1, lng):
if (j+1) % 2 == 0: # even
cur_back[str(j+1)] = even # add even-numbered monosaccharide to backbone
if even == 'N': # HexN
cur_Ac.append(str(j+1)+'-2')
cur_SO3.append(str(j+1)+'-4')
cur_SO3.append(str(j+1)+'-6')
else: # HexA
cur_SO3.append(str(j+1)+'-2')
else: # odd
cur_back[str(j+1)] = odd # add odd-numbered monosaccharide to backbone
if odd == 'N': # HexN
cur_Ac.append(str(j+1)+'-2')
cur_SO3.append(str(j+1)+'-4')
cur_SO3.append(str(j+1)+'-6')
else: # HexA
cur_SO3.append(str(j+1)+'-2')
# add current backbone and modifications
bb.append(cur_back)
ac.append(cur_Ac)
so.append(cur_SO3)
else: # working with HS
poss = ['U','N'] # possible monosaccharides
if nonred == '?': # unsure about end monosaccharides
for i in poss: # we need two backbones
cur_back = {} # current backbone
cur_Ac = [] # current acetyl modification positions
cur_SO3 = [] # current sulfate modification positions
odd = i # odd-numbered monosaccharide
even = list(set(poss) - set(odd))[0] # even-numbered monosaccharide
# go through each position in backbone
for j in range(lng):
if (j+1) % 2 == 0: # even
cur_back[str(j+1)] = even # add even-numbered monosaccharide to backbone
if even == 'N': # HexN
cur_Ac.append(str(j+1)+'-2')
cur_SO3.append(str(j+1)+'-2')
cur_SO3.append(str(j+1)+'-3')
cur_SO3.append(str(j+1)+'-6')
else: # HexA
cur_SO3.append(str(j+1)+'-2')
else: # odd
cur_back[str(j+1)] = odd # add odd-numbered monosaccharide to backbone
if odd == 'N': # HexN
cur_Ac.append(str(j+1)+'-2')
cur_SO3.append(str(j+1)+'-2')
cur_SO3.append(str(j+1)+'-3')
cur_SO3.append(str(j+1)+'-6')
else: # HexA
cur_SO3.append(str(j+1)+'-2')
# add current backbone and modifications
bb.append(cur_back)
ac.append(cur_Ac)
so.append(cur_SO3)
else: # we know end monosaccharides
cur_back = {} # current backbone
cur_Ac = [] # current acetyl modification positions
cur_SO3 = [] # current sulfate modification positions
# do position 1 first
cur_back['1'] = nonred
# change odd to U if it's a dHexA
if nonred == 'D':
odd = 'U'
else:
odd = nonred
even = list(set(poss) - set(odd))[0] # even-numbered monosaccharide
if odd == 'N': # HexN
cur_Ac.append('1-2')
cur_SO3.append('1-2')
cur_SO3.append('1-3')
cur_SO3.append('1-6')
else: # HexA
cur_SO3.append('1-2')
# go through each position in backbone
for j in range(1, lng):
if (j+1) % 2 == 0: # even
cur_back[str(j+1)] = even # add even-numbered monosaccharide to backbone
if even == 'N': # HexN
cur_Ac.append(str(j+1)+'-2')
cur_SO3.append(str(j+1)+'-2')
cur_SO3.append(str(j+1)+'-3')
cur_SO3.append(str(j+1)+'-6')
else: # HexA
cur_SO3.append(str(j+1)+'-2')
else: # odd
cur_back[str(j+1)] = odd # add odd-numbered monosaccharide to backbone
if odd == 'N': # HexN
cur_Ac.append(str(j+1)+'-2')
cur_SO3.append(str(j+1)+'-2')
cur_SO3.append(str(j+1)+'-3')
cur_SO3.append(str(j+1)+'-6')
else: # HexA
cur_SO3.append(str(j+1)+'-2')
# add current backbone and modifications
bb.append(cur_back)
ac.append(cur_Ac)
so.append(cur_SO3)
# return
return [bb, ac, so]
# function for generating GAG strings
def get_GAGs(bb, ac, so, pd):
# lists for acetyl and sulfate permutations
g = []
# loop through backbones (either one or two)
for idx in range(len(bb)):
these_Ac = choose(ac[idx], pd['A'])
these_SO3 = []
if len(these_Ac) == 0:
qqq = choose(so[idx], pd['S'])
these_SO3.append(qqq)
Ac_dict = None
# check if there is at least one sulfate
if len(qqq) > 0: # there is at least one sulfate
# loop through sulfate mods
for sfp in qqq:
SO3_dict = {}
# loop through sulfate mods
for sspot in sfp:
sunit, sloc = sspot.split('-')
if sunit in SO3_dict:
SO3_dict[sunit].append(sloc)
else:
SO3_dict[sunit] = [sloc]
g.append(generateGAGstring(bb[idx], Ac_dict, SO3_dict))
else: # there are no sulfates
g.append(generateGAGstring(bb[idx], Ac_dict, None))
else:
for a in range(len(these_Ac)):
qqq = choose(list(set(so[idx]) - set(these_Ac[a])), pd['S'])
these_SO3.append(qqq)
Ac_dict = {}
# loop through Ac mods
for aspot in these_Ac[a]:
aunit, aloc = aspot.split('-')
Ac_dict[aunit] = [aloc]
# check if there is at least one sulfate
if len(qqq) > 0: # there is at least one sulfate
# loop through sulfate mods
for sfp in qqq:
SO3_dict = {}
for sspot in sfp:
sunit, sloc = sspot.split('-')
if sunit in SO3_dict:
SO3_dict[sunit].append(sloc)
else:
SO3_dict[sunit] = [sloc]
g.append(generateGAGstring(bb[idx], Ac_dict, SO3_dict))
else: # there are no sulfates
g.append(generateGAGstring(bb[idx], Ac_dict, None))
# return
return g
# function for building bipartite graph
def get_graph(gg, cl, loss, ref, fc, fm):
# initialize graph
gph = nx.Graph()
# edge weights
glyco = 1.
intrl = 0.2
xring = 1.
# loop through all structures
for g in gg:
# set up variables
cur_dict = gstring2gdict(cl, g) # convert GAG string to GAG dictionary
monos = sorted(cur_dict['backbone'].keys(), key=lambda x: float(x)) # list of monosaccharides
backbone = cur_dict['backbone']
actl = cur_dict['Ac']
slft = cur_dict['SO3']
# go through structure
nn = 1 # variable for fragment length
while nn < len(monos):
# loop through fragments of length nn
for i in range(len(monos)-nn+1):
j = monos[i:i+nn] # monosaccharides in this fragment
# get the complete monosaccharides
fml = {'D':0, 'U':0, 'X':0, 'N':0, 'A':0, 'S':0}
for k in j:
fml[backbone[k]] += 1 # add current monosaccharide to formula
# count the sulfates
for p in slft[k]:
fml['S'] += slft[k][p]
# count the acetyls
if k in actl:
for p in actl[k]:
fml['A'] += actl[k][p]
## glycosidic fragments first
# loop through possible sulfate losses
for s in range(loss+1):
fml1 = dict(fml) # copy of original
fml1['S'] = max(0, fml1['S']-s) # remove sulfate (if possible)
# copy dictionary
sf = dict2fmla(fml1, 'composition')
if i == (len(monos)-nn) and ref is not None:
sf += '+RE' # add RE to let user know it's reducing end fragment
if sf in fc.keys(): # make sure this fragment has been found in the spectrum
for cg in fc[sf]:
if i == 0 or i == (len(monos)-nn): # terminal fragment
if gph.has_edge(g, fm[(sf, cg)]): # edge exists
gph[g][fm[(sf, cg)]]['weight'] = max(gph[g][fm[(sf,cg)]]['weight'], glyco ** p1)
else: # edge does not exist
gph.add_edge(g, fm[(sf, cg)], weight=glyco ** p1)
else: # internal fragment
if gph.has_edge(g, fm[(sf, cg)]): # edge exists
gph[g][fm[(sf, cg)]]['weight'] = max(gph[g][fm[(sf,cg)]]['weight'], intrl ** p1)
else: # edge does not exist
gph.add_edge(g, fm[(sf, cg)], weight=intrl ** p1)
# loop through neutral losses
for k in range(2): # water loss
for l in range(3): # H loss
sf1 = sf # avoid repeating
# water loss
if k == 1:
sf1 += '-H2O'
# H loss(es)
if l == 1:
sf1 += '-H'
elif l == 2:
sf1 += '-2H'
if sf1 in fc.keys(): # make sure this fragment has been found in the spectrum
for cg in fc[sf1]:
if i == 0 or i == (len(monos)-nn): # terminal fragment
if gph.has_edge(g, fm[(sf1, cg)]): # edge exists
gph[g][fm[(sf1, cg)]]['weight'] = max(gph[g][fm[(sf1,cg)]]['weight'], glyco ** p1)
else: # edge does not exist
gph.add_edge(g, fm[(sf1, cg)], weight=glyco ** p1)
else: # internal fragment
if gph.has_edge(g, fm[(sf1, cg)]): # edge exists
gph[g][fm[(sf1, cg)]]['weight'] = max(gph[g][fm[(sf1,cg)]]['weight'], intrl ** p1)
else: # edge does not exist
gph.add_edge(g, fm[(sf1, cg)], weight=intrl ** p1)
# check if we're done with sulfate losses
if fml1['S'] == 0:
break
## crossring fragments second
if i == 0: # non-reducing end
addto = monos[nn] # which index to add to
msch = backbone[addto] # which monosaccharide to add to
# loop through cross-ring cleavages
for x in xmodlocs[cl][msch]['NR']:
fml1 = dict(fml) # copy of original
# count the sulfates
for y in xmodlocs[cl][msch]['NR'][x]['SO3']:
fml1['S'] += slft[addto][str(y)]
# count the acetyls
if len(xmodlocs[cl][msch]['NR'][x]['Ac']) > 0: # acetyl possible
fml1['A'] += actl[addto]['2']
# sulfate losses
for s in range(loss+1):
fml2 = dict(fml1) # copy of altered copy
fml2['S'] = max(0, fml2['S'] - s) # remove sulfate (if possible)
# generate string
sf = dict2fmla(fml2, 'composition') + '+' + msch + 'NR' + x
if sf in fc.keys(): # make sure this fragment has been found in the spectrum
for cg in fc[sf]:
if gph.has_edge(g, fm[(sf, cg)]): # edge exists
gph[g][fm[(sf, cg)]]['weight'] = max(gph[g][fm[(sf,cg)]]['weight'], xring ** p1)
else: # edge does not exist
gph.add_edge(g, fm[(sf, cg)], weight=xring ** p1)
# loop through neutral losses
for l in range(1): # H loss
sf1 = sf # avoid repeating
# H loss(es)
if l == 1:
sf1 += '-H'
elif l == 2:
sf1 += '-2H'
if sf1 in fc.keys(): # make sure this fragment has been found in the spectrum
for cg in fc[sf1]:
if gph.has_edge(g, fm[(sf1, cg)]): # edge exists
gph[g][fm[(sf1, cg)]]['weight'] = max(gph[g][fm[(sf1,cg)]]['weight'], xring ** p1)
else: # edge does not exist
gph.add_edge(g, fm[(sf1, cg)], weight=xring ** p1)
# check if we're done with sulfate losses
if fml2['S'] == 0:
break
elif i == (len(monos)-nn): # reducing end
addto = monos[i-1] # which index to add to
msch = backbone[addto] # which monosaccharide to add to
# loop through cross-ring cleavages
for x in xmodlocs[cl][msch]['RE']:
fml1 = dict(fml) # copy of original
# count the sulfates
for y in xmodlocs[cl][msch]['RE'][x]['SO3']:
fml1['S'] += slft[addto][str(y)]
# count the acetyls
if len(xmodlocs[cl][msch]['RE'][x]['Ac']) > 0: # acetyl possible
fml1['A'] += actl[addto]['2']
# sulfate losses
for s in range(loss+1):
fml2 = dict(fml1) # copy of altered copy
fml2['S'] = max(0, fml2['S'] - s) # remove sulfate (if possible)
# generate string
sf = dict2fmla(fml2, 'composition') + '+' + msch + 'RE' + x
if ref is not None:
sf += '+RE'
if sf in fc.keys(): # make sure this fragment has been found in the spectrum
for cg in fc[sf]:
if gph.has_edge(g, fm[(sf, cg)]): # edge exists
gph[g][fm[(sf, cg)]]['weight'] = max(gph[g][fm[(sf,cg)]]['weight'], xring ** p1)
else: # edge does not exist
gph.add_edge(g, fm[(sf, cg)], weight=xring ** p1)
# loop through neutral losses
for l in range(1): # H loss
sf1 = sf # avoid repeating
# H loss(es)
if l == 1:
sf1 += '-H'
elif l == 2:
sf1 += '-2H'
if sf1 in fc.keys(): # make sure this fragment has been found in the spectrum
for cg in fc[sf1]:
if gph.has_edge(g, fm[(sf1, cg)]): # edge exists
gph[g][fm[(sf1, cg)]]['weight'] = max(gph[g][fm[(sf1,cg)]]['weight'], xring ** p1)
else: # edge does not exist
gph.add_edge(g, fm[(sf1, cg)], weight=xring ** p1)
# check if we're done with sulfate losses
if fml2['S'] == 0:
break
# increment fragment length
nn += 1
return gph
# function for ranking the nodes in the graph
def rank_nodes(gg, gph, g_scores, gd):
# sequences that are in the network
poss_seqs = []
for seq in gg:
if seq in gph.nodes():
poss_seqs.append(seq)
### BiRank
# get sum of g-scores
g_sum = np.sum(g_scores['G'])
# get prior for fragments
prior_frg = {}
for frag in g_scores['frags']:
prior_frg[frag] = gd[frag] / g_sum
# get prior for sequences
prior_seq = {}
seq_sum = 0.
for key in poss_seqs:
this = scoreSeq(key, p3)
prior_seq[key] = this
seq_sum += this
for key in prior_seq:
prior_seq[key] /= seq_sum
# run BiRank
br = BiRank(gph, alpha=.98, beta=.94, f_0=prior_frg, s_0=prior_seq)[0]
# convert to a numpy array for downstream analysis
b_score = []
for key in br:
b_score.append((key, br[key]))
dty = np.dtype([('seq', 'object'), ('br', 'float')])
b_score = np.array(b_score, dtype=dty)
b_score = np.sort(b_score, order='br')[::-1]
return b_score
# function for running the guts of GAGrank
def rank_gags(gf, gc, cn, rf, mz, z, sl, a=None, db_path='../lib/GAGfragDB.db'):
# get values ready for reducing end derivatization and reagent
df = {'C':0, 'H':0, 'O':0, 'N':0, 'S':0}
atoms = ['C','H','O','N','S']
# parse the reducing end derivatization formula
if rf:
parts = re.findall(r'([A-Z][a-z]*)(\d*)', rf.upper()) # split formula by symbol
for q in parts:
if q[0] not in atoms: # invalid symbol entered
print "Invalid chemical formula entered. Please enter only CHONS. Try 'python gagfinder.py --help'"
sys.exit()
else:
if q[1] == '': # only one of this atom
df[q[0]] += 1
else:
df[q[0]] += int(q[1])
# get derivatization weight
wt = {'C': 12.0,
'H': 1.0078250322,
'O': 15.994914620,
'N': 14.003074004,
'S': 31.972071174,
'Na': 22.98976928,
'K': 38.96370649,
'Li': 7.01600344,
'Ca': 39.9625909,
'Mg': 23.98504170}
dw = 0
for q in df:
dw += df[q] * wt[q]
# print the reducing end derivatization back out to the user
if debug:
formula = ''
for key in df:
val = df[key]
if val > 0:
formula += key
if val > 1:
formula += str(val)
print "atoms in reducing end derivatization: %s" % (formula)
###########################################################
# Step 2: Load GAGfinder results and connect to GAGfragDB #
###########################################################
print "Loading GAGfinder results file...",
gdict, fragmap, mapfrag, f_chgs, allfrags, gsc = read_gf_results(gf)
# connect to GAGfragDB
conn = sq.connect(db_path)
c = conn.cursor()
print "Done!"
######################################
# Step 3: Find precursor composition #
######################################
print "Determining precursor composition...",
pComp = get_pre(cn, mz, z, dw, c)
print "Done!"
#########################################################
# Step 4: Get reducing end/non-reducing end information #
#########################################################
print "Determining reducing end/non-reducing end information...",
# convert composition into a dictionary
pDict = fmla2dict(pComp, 'composition')
NR, RE, n_pre = get_ends(pDict, gc)
print "Done!"
#########################################
# Step 5: Set up possible GAG backbones #
#########################################
print "Generating possible backbones and modification positions...",
backbones, all_Ac, all_SO3 = get_bb(gc, NR, n_pre)
print "Done!"
################################################
# Step 6: Generate all possible GAG structures #
################################################
print "Generating possible GAG structures...",
GAGs = get_GAGs(backbones, all_Ac, all_SO3, pDict)
print "Done!"
#################################
# Step 7: Build bipartite graph #
#################################
print "Building bipartite graph...",
gr = get_graph(GAGs, gc, sl, rf, f_chgs, fragmap)
print "Done!"
########################################################
# Step 8: Get the different rankings for each sequence #
########################################################
print "Calculating enrichment score for each GAG structure...",
bsc = rank_nodes(GAGs, gr, gsc, gdict)
print "Done!"
# return
return bsc
# function for writing to file
def write_result_to_file(input_path, scores):
print "Printing output to file...",
# write to file
oFile = input_path[:-4] + '_GAGrank_results.tsv'
f = open(oFile, 'w')
f.write("Sequence\tGAGrank score\n")
for q in scores:
out = str(q[0]) + '\t' + str(q[1]) + '\n'
f.write(out)
f.close()
print "Done!"
# main function
def main():
################################
# Step 1: check user arguments #
################################
# initiate parser
parser = argparse.ArgumentParser(description='Rank candidate GAG structures using GAGfinder tandem mass spectrometry results.')
# add arguments
parser.add_argument('-c', required=True, help='GAG class (required)')
parser.add_argument('-i', required=True, help='Input GAGfinder results file (required)')
parser.add_argument('-r', required=False, help='Reducing end derivatization (optional)')
parser.add_argument('-m', type=float, required=True, help='Precursor m/z (required)')
parser.add_argument('-z', type=int, required=True, help='Precursor charge (required)')
parser.add_argument('-s', type=int, required=False, help='Number of sulfate losses to consider (optional, default 0)')
parser.add_argument('-a', required=False, help='Actual sequence, for testing purposes (optional)')
# parse arguments
args = parser.parse_args()
print "Checking user arguments...",
# get arguments into proper variables
gClass = args.c
rFile = args.i
fmla = args.r
pre_mz = args.m
pre_z = args.z
s_loss = args.s
actual = args.a
# check to make sure a proper GAG class was added
if gClass not in ['HS', 'CS', 'KS']:
print "You must denote a GAG class, either HS, CS, or KS. Try 'python gagfinder.py --help'"
sys.exit()
# pick a proper class number
if gClass == 'HS':
cNum = 3
elif gClass == 'CS':
cNum = 1
else:
cNum = 4
# check to see if the user wants to consider sulfate loss
if not s_loss:
s_loss = 0
# print the system arguments back out to the user
if debug:
print "class: %s" % (gClass)
print "GAGfinder results file: %s" % (rFile)
print "Done!"
# run the guts of GAGrank
result = rank_gags(rFile, gClass, cNum, fmla, pre_mz, pre_z, s_loss, actual)
# for debugging
if actual:
actual = actual.split(':')
for sqn in actual:
idx = np.where(result['seq'] == sqn)[0][0]
n_seqs = len(result['seq'])
best_rank = int(n_seqs - rankdata(result['br'], 'max')[idx] + 1)
worst_rank = int(n_seqs - rankdata(result['br'], 'min')[idx] + 1)
print '\tBiRank ranking #' + str(best_rank) + ' to #' + str(worst_rank) + ' out of ' + str(n_seqs)
#########################
# Step 9: write to file #
#########################
# write result to file
write_result_to_file(rFile, result)
print "Finished!"
print time.time() - start_time
# run main
if __name__ == '__main__':
main()
```
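The debug block above packs the tie-aware ranking arithmetic into one long expression. A small standalone sketch (toy scores, not real GAGrank output) shows how `len(...) - rankdata(..., 'max') + 1` gives the best possible rank and the `'min'` variant the worst possible rank when BiRank scores are tied:
```python
# Toy illustration of the descending-rank arithmetic used in the debug output above.
import numpy as np
from scipy.stats import rankdata

scores = np.array([0.30, 0.10, 0.30, 0.05])  # two sequences tied for the top score
n = len(scores)

print(n - rankdata(scores, 'max') + 1)  # [1. 3. 1. 4.] -> best possible rank per sequence
print(n - rankdata(scores, 'min') + 1)  # [2. 3. 2. 4.] -> worst possible rank per sequence
```
So a sequence tied for the highest score would be reported as "ranking #1 to #2" by the debug output.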
|
{
"source": "jdhorne/temperature-converter-indigo-plugin",
"score": 3
}
|
#### File: Server Plugin/pyrescaler/length_scale.py
```python
from pyrescaler import *
SCALE_TYPE = "length"
# Internal canonical representation is meters
#
class LengthScale(PredefinedScaledMeasurement):
def __init__(self, input_scale=None, precision=1):
PredefinedScaledMeasurement.__init__(self, input_scale, precision=precision)
print "%s: %s" % (self.suffix(), self.__class__)
class Inches(LengthScale):
def __init__(self, input_scale=None, precision=1):
LengthScale.__init__(self, input_scale, precision=precision)
# in -> m
def _to_canonical(self, x):
return float(x) * 0.0254
# m -> in
def _from_canonical(self, x):
return float(x) / 0.0254
def suffix(self):
return u"in"
register_scale(SCALE_TYPE, "Inches", "in", Inches)
class Feet(LengthScale):
def __init__(self, input_scale=None, precision=1):
LengthScale.__init__(self, input_scale, precision=precision)
# ft -> m
def _to_canonical(self, x):
return float(x) * 0.3048
# m -> ft
def _from_canonical(self, x):
return float(x) * 3.2808
def suffix(self):
return u"ft"
register_scale(SCALE_TYPE, "Feet", "ft", Feet)
class Yards(LengthScale):
def __init__(self, input_scale=None, precision=1):
LengthScale.__init__(self, input_scale, precision=precision)
# yd -> m
def _to_canonical(self, x):
return float(x) / 1.0936
# m -> yd
def _from_canonical(self, x):
return float(x) * 1.0936
def suffix(self):
return u"yd"
register_scale(SCALE_TYPE, "Yards", "yd", Yards)
# class Furlongs(LengthScale):
# def __init__(self, input_scale=None):
# LengthScale.__init__(self, input_scale)
class Miles(LengthScale):
def __init__(self, input_scale=None, precision=1):
LengthScale.__init__(self, input_scale, precision=precision)
# mi -> m
def _to_canonical(self, x):
return float(x) * 1609.34
# m -> mi
def _from_canonical(self, x):
return float(x) / 1609.34
def suffix(self):
return u"mi"
register_scale(SCALE_TYPE, "Miles", "mi", Miles)
class Centimeters(LengthScale):
def __init__(self, input_scale=None, precision=1):
LengthScale.__init__(self, input_scale, precision=precision)
# cm -> m
def _to_canonical(self, x):
return float(x) / 100
# m -> cm
def _from_canonical(self, x):
return float(x) * 100
def suffix(self):
return u"cm"
register_scale(SCALE_TYPE, "Centimeters", "cm", Centimeters)
class Meters(LengthScale):
def __init__(self, input_scale=None, precision=1):
LengthScale.__init__(self, input_scale, precision=precision)
# no change
def _to_canonical(self, x):
return float(x)
# no change
def _from_canonical(self, x):
return float(x)
def suffix(self):
return u"m"
register_scale(SCALE_TYPE, "Meters", "m", Meters)
class Kilometers(LengthScale):
def __init__(self, input_scale=None, precision=1):
LengthScale.__init__(self, input_scale, precision=precision)
# km -> m
def _to_canonical(self, x):
return float(x) * 1000
# m -> km
def _from_canonical(self, x):
return float(x) / 1000
def suffix(self):
return u"km"
register_scale(SCALE_TYPE, "Kilometers", "km", Kilometers)
class NauticalMiles(LengthScale):
def __init__(self, input_scale=None, precision=1):
LengthScale.__init__(self, input_scale, precision=precision)
# nmi -> m
def _to_canonical(self, x):
return float(x) * 1852
# m -> nmi
def _from_canonical(self, x):
return float(x) / 1852
def suffix(self):
return u"nmi"
register_scale(SCALE_TYPE, "Nautical Miles", "nmi", NauticalMiles)
class Fathoms(LengthScale):
def __init__(self, input_scale=None, precision=1):
LengthScale.__init__(self, input_scale, precision=precision)
# fm -> m
def _to_canonical(self, x):
return float(x) * 1.8288
# m -> fm
def _from_canonical(self, x):
return float(x) / 1.8288
def suffix(self):
return u"fm"
register_scale(SCALE_TYPE, "Fathoms", "fm", Fathoms)
class Cubits(LengthScale):
def __init__(self, input_scale=None, precision=1):
LengthScale.__init__(self, input_scale, precision=precision)
# cbt -> m
def _to_canonical(self, x):
return float(x) / 2.18723
# m -> cbt
def _from_canonical(self, x):
return float(x) * 2.18723
def suffix(self):
return u"cbt"
register_scale(SCALE_TYPE, "Cubits", "cbt", Cubits)
class Hands(LengthScale):
def __init__(self, input_scale=None, precision=1):
LengthScale.__init__(self, input_scale, precision=precision)
# h -> m
def _to_canonical(self, x):
return float(x) / 9.84252
# m -> h
def _from_canonical(self, x):
return float(x) * 9.84252
def suffix(self):
return u"h"
register_scale(SCALE_TYPE, "Hands", "h", Hands)
#
# class Parsecs(LengthScale):
# def __init__(self, input_scale=None):
# LengthScale.__init__(self, input_scale)
#
# class LightYears(LengthScale):
# def __init__(self, input_scale=None):
# LengthScale.__init__(self, input_scale)
#
# class Angstroms(LengthScale):
# def __init__(self, input_scale=None):
# LengthScale.__init__(self, input_scale)
```
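Every class above converts through the canonical unit (meters), so converting between any two length scales is a two-step chain. A minimal standalone sketch of that chain, reusing the factors from the classes above rather than the `pyrescaler` base-class API (which is not shown here):
```python
# Standalone sketch of the canonical-unit conversion chain; factors copied from above.
def feet_to_canonical(x):   # ft -> m
    return float(x) * 0.3048

def canonical_to_miles(x):  # m -> mi
    return float(x) / 1609.34

def feet_to_miles(x):
    # source scale -> canonical meters -> target scale
    return canonical_to_miles(feet_to_canonical(x))

print(feet_to_miles(5280))  # ~1.0
```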
#### File: Server Plugin/pyrescaler/power_scale.py
```python
from pyrescaler import *
SCALE_TYPE = "power"
# Internal canonical representation is watts
#
class PowerScale(PredefinedScaledMeasurement):
def __init__(self, input_scale=None, precision=1):
PredefinedScaledMeasurement.__init__(self, input_scale, precision=precision)
print "%s: %s" % (self.suffix(), self.__class__)
class Watts(PowerScale):
def __init__(self, input_scale=None, precision=1):
PowerScale.__init__(self, input_scale, precision=precision)
# nothing to do; canonical representation
def _to_canonical(self, x):
return float(x)
# nothing to do; canonical representation
def _from_canonical(self, x):
return float(x)
def suffix(self):
return u"W"
register_scale(SCALE_TYPE, "Watts", "W", Watts)
class Kilowatts(PowerScale):
def __init__(self, input_scale=None, precision=1):
PowerScale.__init__(self, input_scale, precision=precision)
# kW -> W
def _to_canonical(self, x):
return float(x) * 1000
# W -> kW
def _from_canonical(self, x):
return float(x) / 1000
def suffix(self):
return u"kW"
register_scale(SCALE_TYPE, "Kilowatts", "kW", Kilowatts)
class Horsepower(PowerScale):
def __init__(self, input_scale=None, precision=1):
PowerScale.__init__(self, input_scale, precision=precision)
# hp -> W
def _to_canonical(self, x):
return float(x) * 745.69987158227
# W -> hp
def _from_canonical(self, x):
return float(x) / 745.69987158227
def suffix(self):
return u"hp"
register_scale(SCALE_TYPE, "Horsepower", "hp", Horsepower)
```
#### File: Server Plugin/pyrescaler/temperature_scale.py
```python
from pyrescaler import *
SCALE_TYPE = "temperature"
# Internal canonical representation is Kelvin
#
class TemperatureScale(PredefinedScaledMeasurement):
def __init__(self, input_scale=None, precision=1):
PredefinedScaledMeasurement.__init__(self, input_scale, precision=precision)
class Fahrenheit(TemperatureScale):
def __init__(self, input_scale=None, precision=1):
TemperatureScale.__init__(self, input_scale, precision=precision)
# F -> K
def _to_canonical(self, f_temp):
return (459.67 + float(f_temp)) * 5 / 9
# K -> F
def _from_canonical(self, k_temp):
return (1.8 * float(k_temp)) - 459.67
def suffix(self):
return u"°F"
register_scale(SCALE_TYPE, "Fahrenheit", "F", Fahrenheit)
class Celsius(TemperatureScale):
def __init__(self, input_scale=None, precision=1):
TemperatureScale.__init__(self, input_scale, precision=precision)
# C -> K
def _to_canonical(self, c_temp):
return float(c_temp) + 273.15
# K -> C
def _from_canonical(self, k_temp):
return float(k_temp) - 273.15
def suffix(self):
return u"°C"
register_scale(SCALE_TYPE, "Celsius", "C", Celsius)
class Kelvin(TemperatureScale):
def __init__(self, input_scale=None, precision=1):
TemperatureScale.__init__(self, input_scale, precision=precision)
def _to_canonical(self, k_temp):
# Kelvin is the canonical representation, so nothing to do
return float(k_temp)
def _from_canonical(self, k_temp):
# Kelvin is the canonical representation, so nothing to do
return float(k_temp)
def suffix(self):
return u"K"
register_scale(SCALE_TYPE, "Kelvin", "K", Kelvin)
class Rankine(TemperatureScale):
def __init__(self, input_scale=None, precision=1):
TemperatureScale.__init__(self, input_scale, precision=precision)
# R -> K
def _to_canonical(self, r_temp):
return float(r_temp) * 5 / 9
# K -> R
def _from_canonical(self, k_temp):
return 1.8 * float(k_temp)
def suffix(self):
return u"°Ra"
register_scale(SCALE_TYPE, "Rankine", "R", Rankine)
```
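Temperature is the one family here whose conversions are affine rather than a plain factor, but the same chain through the canonical unit (Kelvin) applies. A small sketch using the exact formulas from the classes above:
```python
# Fahrenheit -> Kelvin -> Celsius, using the formulas from the classes above.
def fahrenheit_to_kelvin(f_temp):
    return (459.67 + float(f_temp)) * 5 / 9

def kelvin_to_celsius(k_temp):
    return float(k_temp) - 273.15

print(kelvin_to_celsius(fahrenheit_to_kelvin(212.0)))  # ~100.0, the boiling point of water
```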
|
{
"source": "jdhornsby/checkov",
"score": 2
}
|
#### File: checks/utilities/base_check.py
```python
import logging
from abc import ABC, abstractmethod
from checkov.terraform.models.enums import CheckResult
class BaseCheck(ABC):
id = ""
name = ""
categories = []
supported_entities = []
def __init__(self, name, id, categories, supported_entities, block_type):
self.name = name
self.id = id
self.categories = categories
self.block_type = block_type
self.supported_entities = supported_entities
self.logger = logging.getLogger("{}".format(self.__module__))
def run(self, scanned_file, entity_configuration, entity_name, entity_type, skip_info):
check_result = {}
if skip_info:
check_result['result'] = CheckResult.SKIPPED
check_result['suppress_comment'] = skip_info['suppress_comment']
message = "File {}, {} \"{}.{}\" check \"{}\" Result: {}, Suppression comment: {} ".format(
scanned_file, self.block_type, entity_type,
entity_name,
self.name,
check_result, check_result['suppress_comment'])
else:
try:
check_result['result'] = self.scan_entity_conf(entity_configuration)
message = "File {}, {} \"{}.{}\" check \"{}\" Result: {} ".format(scanned_file, self.block_type,
entity_type,
entity_name,
self.name,
check_result)
self.logger.debug(message)
except Exception as e:
self.logger.error(
"Failed to run check {} for configuration {} ".format(self.name, str(entity_configuration)))
raise e
return check_result
@abstractmethod
def scan_entity_conf(self, conf):
raise NotImplementedError()
```
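A concrete check subclasses `BaseCheck` and implements `scan_entity_conf`. The sketch below is hypothetical: the resource type, check id, and import path are illustrative assumptions, and it assumes the `CheckResult` enum defines `PASSED` and `FAILED` alongside the `SKIPPED` member used above.
```python
from checkov.terraform.models.enums import CheckResult
from checkov.terraform.checks.utilities.base_check import BaseCheck  # assumed import path


class S3VersioningEnabled(BaseCheck):
    """Hypothetical example check: passes when an S3 bucket block declares versioning."""

    def __init__(self):
        super().__init__(
            name="Ensure S3 bucket versioning is enabled",
            id="CKV_EXAMPLE_1",  # illustrative id, not a real checkov check id
            categories=[],
            supported_entities=["aws_s3_bucket"],
            block_type="resource",
        )

    def scan_entity_conf(self, conf):
        # `conf` is the parsed configuration dict of a single resource block
        if conf.get("versioning"):
            return CheckResult.PASSED
        return CheckResult.FAILED
```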
#### File: checks/utilities/base_registry.py
```python
import logging
import sys
import os
import importlib
class Registry(object):
checks = {}
def __init__(self):
self.logger = logging.getLogger(__name__)
self.checks = {}
def register(self, check):
for entity in check.supported_entities:
if entity not in self.checks.keys():
self.checks[entity] = []
self.checks[entity].append(check)
def get_checks(self, entity):
if entity in self.checks.keys():
return self.checks[entity]
return []
def scan(self, block, scanned_file, skipped_checks):
entity = list(block.keys())[0]
entity_conf = block[entity]
results = {}
checks = self.get_checks(entity)
for check in checks:
skip_info = {}
if skipped_checks:
if check.id in [x['id'] for x in skipped_checks]:
skip_info = [x for x in skipped_checks if x['id'] == check.id][0]
entity_name = list(entity_conf.keys())[0]
entity_conf_def = entity_conf[entity_name]
self.logger.debug("Running check: {} on file {}".format(check.name, scanned_file))
result = check.run(scanned_file=scanned_file, entity_configuration=entity_conf_def,
entity_name=entity_name, entity_type=entity, skip_info=skip_info)
results[check] = result
return results
def _directory_has_init_py(self, directory):
""" Check if a given directory contains a file named __init__.py.
__init__.py is needed for the directory to be treated as a Python package, so that its contents can be imported.
"""
if os.path.exists("{}/__init__.py".format(directory)):
return True
return False
def _file_can_be_imported(self, entry):
""" Verify if a directory entry is a non-magic Python file."""
if entry.is_file() and not entry.name.startswith('__') and entry.name.endswith('.py'):
return True
return False
def load_external_checks(self, directory):
""" Browse a directory looking for .py files to import.
Log an error when the directory does not contain an __init__.py or when a .py file has a syntax error.
"""
directory = os.path.expanduser(directory)
self.logger.debug("Loading external checks from {}".format(directory))
sys.path.insert(1, directory)
with os.scandir(directory) as directory_content:
if not self._directory_has_init_py(directory):
self.logger.info("No __init__.py found in {}. Cannot load any check here.".format(directory))
else:
for entry in directory_content:
if self._file_can_be_imported(entry):
check_name = entry.name.replace('.py', '')
try:
self.logger.debug("Importing external check '{}'".format(check_name))
importlib.import_module(check_name)
except SyntaxError as e:
self.logger.error(
"Cannot load external check '{check_name}' from {check_full_path} : {error_message} ("
"{error_line}:{error_column}) "
.format(
check_name=check_name,
check_full_path=e.args[1][0],
error_message=e.args[0],
error_line=e.args[1][1],
error_column=e.args[1][2]
)
)
```
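Wiring the pieces together: `scan` expects a block shaped as `{entity_type: {entity_name: configuration}}`, as the key/index handling above implies. A hypothetical usage sketch (the resource name and values are made up; `S3VersioningEnabled` is the example check sketched after `base_check.py`):
```python
registry = Registry()
registry.register(S3VersioningEnabled())  # hypothetical check from the earlier sketch

block = {"aws_s3_bucket": {"my_bucket": {"versioning": [{"enabled": True}]}}}
results = registry.scan(block=block, scanned_file="main.tf", skipped_checks=[])
for check, result in results.items():
    print(check.id, result["result"])  # e.g. CKV_EXAMPLE_1 CheckResult.PASSED
```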
|
{
"source": "jdhp-sap/data-pipeline-standalone-scripts",
"score": 2
}
|
#### File: datapipe/optimization/bruteforce.py
```python
__all__ = []
import json
from scipy import optimize
from datapipe.optimization.objectivefunc.wavelets_mrfilter_delta_psi import ObjectiveFunction as WaveletObjectiveFunction
from datapipe.optimization.objectivefunc.tailcut_delta_psi import ObjectiveFunction as TailcutObjectiveFunction
# For wavelets
import datapipe.denoising.cdf
from datapipe.denoising.inverse_transform_sampling import EmpiricalDistribution
def main():
algo = "wavelet_mrfilter"
#algo = "tailcut"
instrument = "astri"
#instrument = "astri_konrad"
#instrument = "digicam"
#instrument = "flashcam"
#instrument = "nectarcam"
#instrument = "lstcam"
print("algo:", algo)
print("instrument:", instrument)
if instrument == "astri":
input_files = ["/dev/shm/.jd/astri/gamma/"]
noise_distribution = EmpiricalDistribution(datapipe.denoising.cdf.ASTRI_CDF_FILE)
elif instrument == "astri_konrad":
input_files = ["/dev/shm/.jd/astri_konrad/gamma/"]
noise_distribution = EmpiricalDistribution(datapipe.denoising.cdf.ASTRI_CDF_FILE)
elif instrument == "digicam":
input_files = ["/dev/shm/.jd/digicam/gamma/"]
noise_distribution = EmpiricalDistribution(datapipe.denoising.cdf.DIGICAM_CDF_FILE)
elif instrument == "flashcam":
input_files = ["/dev/shm/.jd/flashcam/gamma/"]
noise_distribution = EmpiricalDistribution(datapipe.denoising.cdf.FLASHCAM_CDF_FILE)
elif instrument == "nectarcam":
input_files = ["/dev/shm/.jd/nectarcam/gamma/"]
noise_distribution = EmpiricalDistribution(datapipe.denoising.cdf.NECTARCAM_CDF_FILE)
elif instrument == "lstcam":
input_files = ["/dev/shm/.jd/lstcam/gamma/"]
noise_distribution = EmpiricalDistribution(datapipe.denoising.cdf.LSTCAM_CDF_FILE)
else:
raise Exception("Unknown instrument", instrument)
if algo == "wavelet_mrfilter":
func = WaveletObjectiveFunction(input_files=input_files,
noise_distribution=noise_distribution,
max_num_img=None,
aggregation_method="mean") # "mean" or "median"
s1_slice = slice(1, 5, 1)
s2_slice = slice(1, 5, 1)
s3_slice = slice(1, 5, 1)
s4_slice = slice(1, 5, 1)
search_ranges = (s1_slice,
s2_slice,
s3_slice,
s4_slice)
elif algo == "tailcut":
func = TailcutObjectiveFunction(input_files=input_files,
max_num_img=None,
aggregation_method="mean") # "mean" or "median"
s1_slice = slice(-2., 10., 0.5)
s2_slice = slice(-2., 10., 0.5)
search_ranges = (s1_slice,
s2_slice)
else:
raise ValueError("Unknown algorithm", algo)
res = optimize.brute(func,
search_ranges,
full_output=True,
finish=None) #optimize.fmin)
print("x* =", res[0])
print("f(x*) =", res[1])
# SAVE RESULTS ############################################################
res_dict = {
"best_solution": res[0].tolist(),
"best_score": float(res[1]),
"solutions": res[2].tolist(),
"scores": res[3].tolist()
}
with open("optimize_sigma.json", "w") as fd:
json.dump(res_dict, fd, sort_keys=True, indent=4) # pretty print format
if __name__ == "__main__":
main()
```
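The slice objects above define the brute-force grid: `slice(1, 5, 1)` enumerates 1, 2, 3, 4 along one parameter axis, and `optimize.brute` evaluates the objective at every grid point. A self-contained toy illustration of the same call pattern (the objective here is a stand-in, not the wavelet or tailcut objective):
```python
from scipy import optimize

def toy_objective(params):
    x, y = params
    return (x - 2.0) ** 2 + (y - 3.0) ** 2

search_ranges = (slice(0, 5, 1), slice(0, 5, 1))  # x and y each take values 0..4
best_x, best_f, grid, scores = optimize.brute(toy_objective, search_ranges,
                                              full_output=True, finish=None)
print(best_x, best_f)  # [2. 3.] 0.0
```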
#### File: data-pipeline-standalone-scripts/tests/test_denoising_tailcut.py
```python
from datapipe.denoising.tailcut import Tailcut
import numpy as np
import unittest
class TestTailcut(unittest.TestCase):
"""
Contains unit tests for the "denoising.tailcut" module.
"""
# Test the "tailcut" function #############################################
def test_example1(self):
"""Check the output of the tailcut function."""
# Input image #################
# [[ 0 0 0 0 0 0]
# [128 128 128 128 128 128]
# [128 64 192 192 64 128]
# [128 64 192 192 64 128]
# [128 128 128 128 128 128]
# [ 0 0 0 0 0 0]]
input_img = np.zeros([6, 6], dtype=np.uint8)
input_img[1:5, :] = 128 # 0.5
input_img[2:4, 1:5] = 64 # 0.25
input_img[2:4, 2:4] = 192 # 0.75
# Output image ################
tailcut = Tailcut()
output_img = tailcut.clean_image(input_img,
high_threshold=0.7,
low_threshold=0.4)
# Expected output image #######
# [[ 0 0 0 0 0 0]
# [ 0 128 128 128 128 0]
# [ 0 0 192 192 0 0]
# [ 0 0 192 192 0 0]
# [ 0 128 128 128 128 0]
# [ 0 0 0 0 0 0]]
expected_output_img = np.zeros([6, 6], dtype=np.uint8)
expected_output_img[1:5, 1:5] = 128 # 0.5
expected_output_img[2:4, 1:5] = 0
expected_output_img[2:4, 2:4] = 192 # 0.75
np.testing.assert_array_equal(output_img, expected_output_img)
def test_example2(self):
"""Check the output of the tailcut function."""
# Input image #################
# [[ 0 0 0 0 0 0 0]
# [ 64 64 128 128 128 64 64]
# [ 64 192 128 128 128 192 64]
# [ 64 64 128 128 128 64 64]
# [ 0 0 0 0 0 0 0]]
input_img = np.zeros([5, 7], dtype=np.uint8)
input_img[1:4, :] = 64 # 0.25
input_img[1:4, 2:5] = 128 # 0.5
input_img[2, 1] = 192 # 0.75
input_img[2, 5] = 192 # 0.75
# Output image ################
tailcut = Tailcut()
output_img = tailcut.clean_image(input_img,
high_threshold=0.7,
low_threshold=0.4)
# Expected output image #######
# [[ 0 0 0 0 0 0 0]
# [ 0 0 128 0 128 0 0]
# [ 0 192 128 0 128 192 0]
# [ 0 0 128 0 128 0 0]
# [ 0 0 0 0 0 0 0]]
expected_output_img = np.zeros([5, 7], dtype=np.uint8)
expected_output_img[1:4, 2] = 128 # 0.5
expected_output_img[1:4, 4] = 128 # 0.5
expected_output_img[2, 1] = 192 # 0.75
expected_output_img[2, 5] = 192 # 0.75
np.testing.assert_array_equal(output_img, expected_output_img)
if __name__ == '__main__':
unittest.main()
```
#### File: data-pipeline-standalone-scripts/utils/search_input_by_metadata_range.py
```python
import common_functions as common
import argparse
import json
import sys
import numpy as np
def extract_input_path_and_meta_list(json_dict, key):
io_list = json_dict["io"]
json_data = [(image_dict["input_file_path"], image_dict[key], image_dict["score"]) for image_dict in io_list if "score" in image_dict]
return json_data
if __name__ == '__main__':
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description="Select input files from a score file (JSON) by metadata value range.")
parser.add_argument("--highest", "-H", type=int, default=None, metavar="INT",
help="Select the N images having the highest metadata value")
parser.add_argument("--lowest", "-l", type=int, default=None, metavar="INT",
help="Select the N images having the lowest metadata value")
parser.add_argument("--min", "-m", type=float, default=None, metavar="FLOAT",
help="The lower bound of the selected range")
parser.add_argument("--max", "-M", type=float, default=None, metavar="FLOAT",
help="The upper bound of the selected range")
parser.add_argument("--key", "-k", metavar="KEY",
help="The name of the metadata to considere")
parser.add_argument("fileargs", nargs=1, metavar="FILE",
help="The JSON file to process")
args = parser.parse_args()
num_highest = args.highest
num_lowest = args.lowest
min_value = args.min
max_value = args.max
key = args.key
json_file_path = args.fileargs[0]
if (num_highest is not None) and (num_lowest is not None):
raise Exception("--highest and --lowest options are not compatible")
# FETCH SCORE #############################################################
json_dict = common.parse_json_file(json_file_path)
data_list = extract_input_path_and_meta_list(json_dict, key)
# SETUP RANGE #############################################################
value_list = [item[1] for item in data_list]
if min_value is None:
min_value = min(value_list)
if max_value is None:
max_value = max(value_list)
# SEARCH INPUTS BY SCORE RANGE ############################################
filtered_data_list = [item for item in data_list if ((item[1] >= min_value) and (item[1] <= max_value))]
filtered_data_list = sorted(filtered_data_list, key=lambda item: item[1])
if num_highest is not None:
filtered_data_list = filtered_data_list[-num_highest:]
if num_lowest is not None:
filtered_data_list = filtered_data_list[:num_lowest]
print("Min:", min_value, file=sys.stderr)
print("Max:", max_value, file=sys.stderr)
for file_path, value, score in filtered_data_list:
#print(file_path)
print(file_path, value, score)
print(len(filtered_data_list), "inputs", file=sys.stderr)
```
|